aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-module12
-rw-r--r--Documentation/DocBook/kernel-api.tmpl3
-rw-r--r--Documentation/block/00-INDEX4
-rw-r--r--Documentation/block/barrier.txt261
-rw-r--r--Documentation/block/writeback_cache_control.txt86
-rw-r--r--Documentation/cgroups/blkio-controller.txt106
-rw-r--r--Documentation/devices.txt6
-rw-r--r--Documentation/dynamic-debug-howto.txt22
-rw-r--r--Documentation/filesystems/proc.txt32
-rw-r--r--Documentation/kernel-parameters.txt16
-rw-r--r--Documentation/lguest/lguest.c29
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/usb.txt22
-rw-r--r--Documentation/scsi/st.txt15
-rw-r--r--Documentation/usb/proc_usb_info.txt34
-rw-r--r--Documentation/workqueue.txt29
-rw-r--r--MAINTAINERS22
-rw-r--r--arch/arm/include/asm/ioctls.h83
-rw-r--r--arch/arm/mach-mx3/mach-cpuimx35.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c30
-rw-r--r--arch/arm/mach-omap2/usb-musb.c4
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h21
-rw-r--r--arch/avr32/include/asm/ioctls.h86
-rw-r--r--arch/cris/include/asm/ioctls.h84
-rw-r--r--arch/frv/include/asm/ioctls.h80
-rw-r--r--arch/h8300/include/asm/ioctls.h81
-rw-r--r--arch/ia64/hp/sim/simserial.c12
-rw-r--r--arch/ia64/include/asm/ioctls.h89
-rw-r--r--arch/m32r/include/asm/ioctls.h83
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/include/asm/amigahw.h1
-rw-r--r--arch/m68k/include/asm/atomic.h210
-rw-r--r--arch/m68k/include/asm/atomic_mm.h200
-rw-r--r--arch/m68k/include/asm/atomic_no.h155
-rw-r--r--arch/m68k/include/asm/entry_mm.h3
-rw-r--r--arch/m68k/include/asm/io_mm.h43
-rw-r--r--arch/m68k/include/asm/ioctls.h80
-rw-r--r--arch/m68k/include/asm/machdep.h49
-rw-r--r--arch/m68k/include/asm/machdep_mm.h35
-rw-r--r--arch/m68k/include/asm/machdep_no.h26
-rw-r--r--arch/m68k/include/asm/page.h48
-rw-r--r--arch/m68k/include/asm/page_mm.h57
-rw-r--r--arch/m68k/include/asm/page_no.h51
-rw-r--r--arch/m68k/include/asm/string.h134
-rw-r--r--arch/m68k/include/asm/string_mm.h131
-rw-r--r--arch/m68k/include/asm/string_no.h126
-rw-r--r--arch/m68k/include/asm/system_mm.h2
-rw-r--r--arch/m68k/include/asm/system_no.h9
-rw-r--r--arch/m68k/include/asm/thread_info.h109
-rw-r--r--arch/m68k/include/asm/thread_info_mm.h71
-rw-r--r--arch/m68k/include/asm/thread_info_no.h102
-rw-r--r--arch/m68k/include/asm/traps.h275
-rw-r--r--arch/m68k/include/asm/traps_mm.h272
-rw-r--r--arch/m68k/include/asm/traps_no.h154
-rw-r--r--arch/m68k/kernel/setup.c6
-rw-r--r--arch/m68k/kernel/sys_m68k.c3
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/m68k/sun3/sun3ints.c2
-rw-r--r--arch/m68knommu/kernel/time.c3
-rw-r--r--arch/m68knommu/platform/coldfire/entry.S4
-rw-r--r--arch/mips/alchemy/common/platform.c28
-rw-r--r--arch/mips/alchemy/common/power.c35
-rw-r--r--arch/mn10300/include/asm/ioctls.h84
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c163
-rw-r--r--arch/s390/include/asm/ioctls.h88
-rw-r--r--arch/x86/include/asm/percpu.h14
-rw-r--r--block/Kconfig12
-rw-r--r--block/Makefile3
-rw-r--r--block/blk-barrier.c350
-rw-r--r--block/blk-cgroup.c804
-rw-r--r--block/blk-cgroup.h87
-rw-r--r--block/blk-core.c125
-rw-r--r--block/blk-exec.c9
-rw-r--r--block/blk-flush.c262
-rw-r--r--block/blk-integrity.c94
-rw-r--r--block/blk-lib.c39
-rw-r--r--block/blk-map.c5
-rw-r--r--block/blk-merge.c25
-rw-r--r--block/blk-settings.c32
-rw-r--r--block/blk-sysfs.c11
-rw-r--r--block/blk-throttle.c1123
-rw-r--r--block/blk.h20
-rw-r--r--block/cfq-iosched.c39
-rw-r--r--block/cfq.h2
-rw-r--r--block/elevator.c79
-rw-r--r--block/genhd.c37
-rw-r--r--block/ioctl.c6
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/base/Kconfig1
-rw-r--r--drivers/base/Makefile4
-rw-r--r--drivers/base/bus.c22
-rw-r--r--drivers/base/class.c23
-rw-r--r--drivers/base/core.c208
-rw-r--r--drivers/base/memory.c94
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/platform.c80
-rw-r--r--drivers/base/sys.c8
-rw-r--r--drivers/block/amiflop.c60
-rw-r--r--drivers/block/ataflop.c50
-rw-r--r--drivers/block/brd.c1
-rw-r--r--drivers/block/cciss.c864
-rw-r--r--drivers/block/drbd/drbd_actlog.c41
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h219
-rw-r--r--drivers/block/drbd/drbd_main.c593
-rw-r--r--drivers/block/drbd/drbd_nl.c270
-rw-r--r--drivers/block/drbd/drbd_proc.c34
-rw-r--r--drivers/block/drbd/drbd_receiver.c949
-rw-r--r--drivers/block/drbd/drbd_req.c165
-rw-r--r--drivers/block/drbd/drbd_req.h62
-rw-r--r--drivers/block/drbd/drbd_worker.c292
-rw-r--r--drivers/block/floppy.c66
-rw-r--r--drivers/block/loop.c121
-rw-r--r--drivers/block/osdblk.c5
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/ub.c2
-rw-r--r--drivers/block/virtio_blk.c37
-rw-r--r--drivers/block/xen-blkfront.c54
-rw-r--r--drivers/char/Kconfig15
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/amiserial.c56
-rw-r--r--drivers/char/cyclades.c49
-rw-r--r--drivers/char/ip2/ip2main.c72
-rw-r--r--drivers/char/mxser.c109
-rw-r--r--drivers/char/nozomi.c37
-rw-r--r--drivers/char/pcmcia/synclink_cs.c60
-rw-r--r--drivers/char/pty.c4
-rw-r--r--drivers/char/synclink.c73
-rw-r--r--drivers/char/synclink_gt.c55
-rw-r--r--drivers/char/synclinkmp.c61
-rw-r--r--drivers/char/tty_io.c77
-rw-r--r--drivers/char/ttyprintk.c225
-rw-r--r--drivers/char/vc_screen.c135
-rw-r--r--drivers/char/vt.c5
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/ide/ide-disk.c13
-rw-r--r--drivers/ide/ide-io.c13
-rw-r--r--drivers/input/serio/serport.c1
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-io.c20
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-raid1.c8
-rw-r--r--drivers/md/dm-region-hash.c16
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c8
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm.c398
-rw-r--r--drivers/md/linear.c4
-rw-r--r--drivers/md/md.c117
-rw-r--r--drivers/md/md.h23
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid0.c4
-rw-r--r--drivers/md/raid1.c176
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c43
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/misc/Kconfig22
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/pch_phub.c717
-rw-r--r--drivers/mmc/card/queue.c1
-rw-r--r--drivers/mtd/ubi/Kconfig17
-rw-r--r--drivers/mtd/ubi/Kconfig.debug29
-rw-r--r--drivers/mtd/ubi/build.c6
-rw-r--r--drivers/mtd/ubi/debug.h4
-rw-r--r--drivers/mtd/ubi/eba.c10
-rw-r--r--drivers/mtd/ubi/io.c138
-rw-r--r--drivers/mtd/ubi/misc.c19
-rw-r--r--drivers/mtd/ubi/scan.c387
-rw-r--r--drivers/mtd/ubi/scan.h19
-rw-r--r--drivers/mtd/ubi/ubi.h29
-rw-r--r--drivers/mtd/ubi/vmt.c6
-rw-r--r--drivers/mtd/ubi/vtbl.c10
-rw-r--r--drivers/mtd/ubi/wl.c31
-rw-r--r--drivers/net/usb/hso.c35
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c9
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c20
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchp_core.c20
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c7
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c26
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/scsi/Makefile5
-rw-r--r--drivers/s390/scsi/zfcp_aux.c126
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c17
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c186
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c32
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h14
-rw-r--r--drivers/s390/scsi/zfcp_def.h78
-rw-r--r--drivers/s390/scsi/zfcp_erp.c631
-rw-r--r--drivers/s390/scsi/zfcp_ext.h63
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c609
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c159
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c221
-rw-r--r--drivers/s390/scsi/zfcp_unit.c244
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/aacraid/commctrl.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aic7xxx_old.c22
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c3
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bfa/Makefile17
-rw-r--r--drivers/scsi/bfa/bfa.h438
-rw-r--r--drivers/scsi/bfa/bfa_callback_priv.h57
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim.h (renamed from drivers/scsi/bfa/bfa_cb_ioim_macros.h)30
-rw-r--r--drivers/scsi/bfa/bfa_cee.c492
-rw-r--r--drivers/scsi/bfa/bfa_core.c1131
-rw-r--r--drivers/scsi/bfa/bfa_cs.h364
-rw-r--r--drivers/scsi/bfa/bfa_csdebug.c58
-rw-r--r--drivers/scsi/bfa/bfa_defs.h466
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h457
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h1081
-rw-r--r--drivers/scsi/bfa/bfa_drv.c (renamed from drivers/scsi/bfa/bfa_module.c)41
-rw-r--r--drivers/scsi/bfa/bfa_fc.h (renamed from drivers/scsi/bfa/include/protocol/fc.h)1011
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c (renamed from drivers/scsi/bfa/fcbuild.c)293
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h316
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3460
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h401
-rw-r--r--drivers/scsi/bfa/bfa_fcpim_priv.h192
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c1962
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c1609
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h779
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c (renamed from drivers/scsi/bfa/fcpim.c)237
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c5411
-rw-r--r--drivers/scsi/bfa/bfa_fcs_port.c61
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c (renamed from drivers/scsi/bfa/rport.c)1662
-rw-r--r--drivers/scsi/bfa/bfa_fcs_uf.c99
-rw-r--r--drivers/scsi/bfa/bfa_fcxp.c774
-rw-r--r--drivers/scsi/bfa/bfa_fcxp_priv.h138
-rw-r--r--drivers/scsi/bfa/bfa_fwimg_priv.h44
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c8
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c11
-rw-r--r--drivers/scsi/bfa/bfa_intr.c270
-rw-r--r--drivers/scsi/bfa/bfa_intr_priv.h117
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c1888
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h248
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c124
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c137
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.c927
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.h184
-rw-r--r--drivers/scsi/bfa/bfa_iocfc_q.c44
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c1364
-rw-r--r--drivers/scsi/bfa/bfa_itnim.c1088
-rw-r--r--drivers/scsi/bfa/bfa_log.c346
-rw-r--r--drivers/scsi/bfa/bfa_log_module.c537
-rw-r--r--drivers/scsi/bfa/bfa_lps.c892
-rw-r--r--drivers/scsi/bfa/bfa_lps_priv.h38
-rw-r--r--drivers/scsi/bfa/bfa_modules.h (renamed from drivers/scsi/bfa/bfa_priv.h)64
-rw-r--r--drivers/scsi/bfa/bfa_modules_priv.h43
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h144
-rw-r--r--drivers/scsi/bfa/bfa_plog.h (renamed from drivers/scsi/bfa/include/cs/bfa_plog.h)120
-rw-r--r--drivers/scsi/bfa/bfa_port.c134
-rw-r--r--drivers/scsi/bfa/bfa_port.h66
-rw-r--r--drivers/scsi/bfa/bfa_port_priv.h94
-rw-r--r--drivers/scsi/bfa/bfa_rport.c906
-rw-r--r--drivers/scsi/bfa/bfa_rport_priv.h45
-rw-r--r--drivers/scsi/bfa/bfa_sgpg.c226
-rw-r--r--drivers/scsi/bfa/bfa_sgpg_priv.h79
-rw-r--r--drivers/scsi/bfa/bfa_sm.c38
-rw-r--r--drivers/scsi/bfa/bfa_svc.c5423
-rw-r--r--drivers/scsi/bfa/bfa_svc.h657
-rw-r--r--drivers/scsi/bfa/bfa_timer.c90
-rw-r--r--drivers/scsi/bfa/bfa_trcmod_priv.h64
-rw-r--r--drivers/scsi/bfa/bfa_tskim.c690
-rw-r--r--drivers/scsi/bfa/bfa_uf.c343
-rw-r--r--drivers/scsi/bfa/bfa_uf_priv.h47
-rw-r--r--drivers/scsi/bfa/bfad.c1355
-rw-r--r--drivers/scsi/bfa/bfad_attr.c241
-rw-r--r--drivers/scsi/bfa/bfad_attr.h56
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c10
-rw-r--r--drivers/scsi/bfa/bfad_drv.h254
-rw-r--r--drivers/scsi/bfa/bfad_fwimg.c131
-rw-r--r--drivers/scsi/bfa/bfad_im.c257
-rw-r--r--drivers/scsi/bfa/bfad_im.h56
-rw-r--r--drivers/scsi/bfa/bfad_im_compat.h45
-rw-r--r--drivers/scsi/bfa/bfad_intr.c222
-rw-r--r--drivers/scsi/bfa/bfad_ipfc.h42
-rw-r--r--drivers/scsi/bfa/bfad_os.c50
-rw-r--r--drivers/scsi/bfa/bfad_tm.h59
-rw-r--r--drivers/scsi/bfa/bfad_trcmod.h52
-rw-r--r--drivers/scsi/bfa/bfi.h579
-rw-r--r--drivers/scsi/bfa/bfi_cbreg.h (renamed from drivers/scsi/bfa/include/bfi/bfi_cbreg.h)25
-rw-r--r--drivers/scsi/bfa/bfi_ctreg.h627
-rw-r--r--drivers/scsi/bfa/bfi_ms.h765
-rw-r--r--drivers/scsi/bfa/fab.c62
-rw-r--r--drivers/scsi/bfa/fabric.c1323
-rw-r--r--drivers/scsi/bfa/fcbuild.h279
-rw-r--r--drivers/scsi/bfa/fcptm.c68
-rw-r--r--drivers/scsi/bfa/fcs.h30
-rw-r--r--drivers/scsi/bfa/fcs_auth.h37
-rw-r--r--drivers/scsi/bfa/fcs_fabric.h68
-rw-r--r--drivers/scsi/bfa/fcs_fcpim.h39
-rw-r--r--drivers/scsi/bfa/fcs_fcptm.h45
-rw-r--r--drivers/scsi/bfa/fcs_fcxp.h29
-rw-r--r--drivers/scsi/bfa/fcs_lport.h118
-rw-r--r--drivers/scsi/bfa/fcs_ms.h35
-rw-r--r--drivers/scsi/bfa/fcs_port.h31
-rw-r--r--drivers/scsi/bfa/fcs_rport.h61
-rw-r--r--drivers/scsi/bfa/fcs_trcmod.h56
-rw-r--r--drivers/scsi/bfa/fcs_uf.h31
-rw-r--r--drivers/scsi/bfa/fcs_vport.h32
-rw-r--r--drivers/scsi/bfa/fdmi.c1230
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen.h96
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_adapter.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_audit.h31
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ethport.h35
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_ioc.h45
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_itnim.h33
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_lport.h51
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_port.h57
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen_rport.h37
-rw-r--r--drivers/scsi/bfa/include/bfa.h203
-rw-r--r--drivers/scsi/bfa/include/bfa_fcpim.h177
-rw-r--r--drivers/scsi/bfa/include/bfa_fcptm.h47
-rw-r--r--drivers/scsi/bfa/include/bfa_svc.h338
-rw-r--r--drivers/scsi/bfa/include/bfa_timer.h53
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi.h174
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_boot.h34
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_cee.h119
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ctreg.h640
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fabric.h92
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcpim.h301
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_fcxp.h71
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ioc.h208
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_iocfc.h179
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lport.h89
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lps.h104
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pbc.h62
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_port.h115
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pport.h118
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_rport.h104
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_uf.h52
-rw-r--r--drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h40
-rw-r--r--drivers/scsi/bfa/include/cna/cee/bfa_cee.h77
-rw-r--r--drivers/scsi/bfa/include/cna/port/bfa_port.h70
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/ethport_defs.h36
-rw-r--r--drivers/scsi/bfa/include/cna/pstats/phyport_defs.h218
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_checksum.h60
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_debug.h45
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_log.h184
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_perf.h34
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_q.h81
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_sm.h77
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_trc.h176
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_wc.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_adapter.h83
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_aen.h83
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_audit.h38
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_auth.h134
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_boot.h81
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h157
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_driver.h41
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ethport.h99
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h45
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcport.h88
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ioc.h158
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h322
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h70
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_itnim.h136
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_led.h35
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_lport.h68
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_mfg.h144
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pci.h48
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pm.h33
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pom.h56
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_port.h248
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pport.h393
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_qos.h99
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_rport.h199
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h282
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tin.h118
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h43
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_types.h30
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_version.h22
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vf.h74
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_vport.h91
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb.h33
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h75
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_port.h113
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h80
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h47
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h48
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs.h76
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h82
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h112
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h132
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h63
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h219
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h105
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h67
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_fcs.h28
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_hal.h36
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_linux.h62
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_wdrv.h36
-rw-r--r--drivers/scsi/bfa/include/protocol/ct.h492
-rw-r--r--drivers/scsi/bfa/include/protocol/fc_sp.h224
-rw-r--r--drivers/scsi/bfa/include/protocol/fcp.h184
-rw-r--r--drivers/scsi/bfa/include/protocol/fdmi.h163
-rw-r--r--drivers/scsi/bfa/include/protocol/scsi.h1648
-rw-r--r--drivers/scsi/bfa/include/protocol/types.h42
-rw-r--r--drivers/scsi/bfa/loop.c213
-rw-r--r--drivers/scsi/bfa/lport_api.c303
-rw-r--r--drivers/scsi/bfa/lport_priv.h82
-rw-r--r--drivers/scsi/bfa/ms.c759
-rw-r--r--drivers/scsi/bfa/n2n.c105
-rw-r--r--drivers/scsi/bfa/ns.c1242
-rw-r--r--drivers/scsi/bfa/plog.c184
-rw-r--r--drivers/scsi/bfa/rport_api.c185
-rw-r--r--drivers/scsi/bfa/rport_ftrs.c379
-rw-r--r--drivers/scsi/bfa/scn.c482
-rw-r--r--drivers/scsi/bfa/vfapi.c292
-rw-r--r--drivers/scsi/bfa/vport.c903
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c63
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c62
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c15
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h161
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c773
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h312
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c132
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c1018
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c1944
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h243
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c495
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.h59
-rw-r--r--drivers/scsi/cxgbi/Kconfig2
-rw-r--r--drivers/scsi/cxgbi/Makefile2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kbuild (renamed from drivers/scsi/cxgb3i/Kbuild)1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig (renamed from drivers/scsi/cxgb3i/Kconfig)4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1465
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h51
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kbuild3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig7
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1604
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.h43
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2612
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h745
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c65
-rw-r--r--drivers/scsi/fnic/fnic_main.c15
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c91
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h6
-rw-r--r--drivers/scsi/ipr.c116
-rw-r--r--drivers/scsi/ipr.h17
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c147
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c237
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c756
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h88
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c54
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c431
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c156
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c1
-rw-r--r--drivers/scsi/scsi_debug.c125
-rw-r--r--drivers/scsi/scsi_lib.c26
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c163
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c108
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/sd_dif.c11
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr.c8
-rw-r--r--drivers/scsi/st.c15
-rw-r--r--drivers/serial/68360serial.c51
-rw-r--r--drivers/serial/8250.c69
-rw-r--r--drivers/serial/Kconfig7
-rw-r--r--drivers/serial/altera_uart.c156
-rw-r--r--drivers/serial/bfin_sport_uart.c7
-rw-r--r--drivers/serial/imx.c5
-rw-r--r--drivers/serial/jsm/jsm_driver.c4
-rw-r--r--drivers/serial/max3107.c34
-rw-r--r--drivers/serial/mfd.c47
-rw-r--r--drivers/serial/mrst_max3110.c358
-rw-r--r--drivers/serial/mrst_max3110.h1
-rw-r--r--drivers/serial/serial_core.c49
-rw-r--r--drivers/serial/uartlite.c26
-rw-r--r--drivers/uio/uio.c163
-rw-r--r--drivers/uio/uio_pci_generic.c13
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/atm/Makefile6
-rw-r--r--drivers/usb/c67x00/Makefile6
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/Makefile21
-rw-r--r--drivers/usb/core/devices.c11
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hcd.c19
-rw-r--r--drivers/usb/core/hub.c41
-rw-r--r--drivers/usb/core/message.c14
-rw-r--r--drivers/usb/core/urb.c5
-rw-r--r--drivers/usb/early/Makefile2
-rw-r--r--drivers/usb/gadget/Kconfig22
-rw-r--r--drivers/usb/gadget/Makefile41
-rw-r--r--drivers/usb/gadget/amd5536udc.c15
-rw-r--r--drivers/usb/gadget/at91_udc.c11
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c10
-rw-r--r--drivers/usb/gadget/audio.c10
-rw-r--r--drivers/usb/gadget/cdc2.c10
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c18
-rw-r--r--drivers/usb/gadget/composite.c139
-rw-r--r--drivers/usb/gadget/dbgp.c19
-rw-r--r--drivers/usb/gadget/dummy_hcd.c18
-rw-r--r--drivers/usb/gadget/ether.c16
-rw-r--r--drivers/usb/gadget/f_acm.c2
-rw-r--r--drivers/usb/gadget/f_loopback.c7
-rw-r--r--drivers/usb/gadget/f_mass_storage.c88
-rw-r--r--drivers/usb/gadget/f_sourcesink.c5
-rw-r--r--drivers/usb/gadget/file_storage.c128
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c15
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c12
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c10
-rw-r--r--drivers/usb/gadget/g_ffs.c91
-rw-r--r--drivers/usb/gadget/gmidi.c5
-rw-r--r--drivers/usb/gadget/goku_udc.c35
-rw-r--r--drivers/usb/gadget/hid.c10
-rw-r--r--drivers/usb/gadget/imx_udc.c9
-rw-r--r--drivers/usb/gadget/inode.c6
-rw-r--r--drivers/usb/gadget/langwell_udc.c1058
-rw-r--r--drivers/usb/gadget/langwell_udc.h15
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.c10
-rw-r--r--drivers/usb/gadget/m66592-udc.c9
-rw-r--r--drivers/usb/gadget/mass_storage.c82
-rw-r--r--drivers/usb/gadget/multi.c36
-rw-r--r--drivers/usb/gadget/net2280.c10
-rw-r--r--drivers/usb/gadget/nokia.c11
-rw-r--r--drivers/usb/gadget/omap_udc.c10
-rw-r--r--drivers/usb/gadget/printer.c7
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c12
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c14
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h2
-rw-r--r--drivers/usb/gadget/rndis.c492
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c9
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c17
-rw-r--r--drivers/usb/gadget/serial.c11
-rw-r--r--drivers/usb/gadget/storage_common.c49
-rw-r--r--drivers/usb/gadget/webcam.c11
-rw-r--r--drivers/usb/gadget/zero.c5
-rw-r--r--drivers/usb/host/Kconfig10
-rw-r--r--drivers/usb/host/Makefile24
-rw-r--r--drivers/usb/host/ehci-fsl.c105
-rw-r--r--drivers/usb/host/ehci-fsl.h14
-rw-r--r--drivers/usb/host/ehci-hcd.c15
-rw-r--r--drivers/usb/host/ehci-mem.c2
-rw-r--r--drivers/usb/host/ehci-mxc.c13
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c308
-rw-r--r--drivers/usb/host/imx21-hcd.c284
-rw-r--r--drivers/usb/host/imx21-hcd.h8
-rw-r--r--drivers/usb/host/isp116x-hcd.c6
-rw-r--r--drivers/usb/host/isp1362-hcd.c25
-rw-r--r--drivers/usb/host/ohci-hcd.c12
-rw-r--r--drivers/usb/host/ohci-pci.c18
-rw-r--r--drivers/usb/host/ohci-sh.c3
-rw-r--r--drivers/usb/host/ohci-sm501.c4
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/pci-quirks.c18
-rw-r--r--drivers/usb/host/r8a66597.h2
-rw-r--r--drivers/usb/host/uhci-q.c33
-rw-r--r--drivers/usb/host/whci/Kbuild2
-rw-r--r--drivers/usb/host/xhci-hub.c419
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c38
-rw-r--r--drivers/usb/host/xhci-ring.c101
-rw-r--r--drivers/usb/host/xhci.c343
-rw-r--r--drivers/usb/host/xhci.h65
-rw-r--r--drivers/usb/misc/Kconfig13
-rw-r--r--drivers/usb/misc/Makefile45
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/sisusbvga/Makefile3
-rw-r--r--drivers/usb/misc/usbtest.c667
-rw-r--r--drivers/usb/misc/yurex.c563
-rw-r--r--drivers/usb/mon/Makefile2
-rw-r--r--drivers/usb/musb/Kconfig16
-rw-r--r--drivers/usb/musb/Makefile71
-rw-r--r--drivers/usb/musb/am35x.c524
-rw-r--r--drivers/usb/musb/blackfin.c7
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/musb/da8xx.c469
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c57
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_debug.h11
-rw-r--r--drivers/usb/musb/musb_gadget.c132
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_host.c11
-rw-r--r--drivers/usb/musb/musbhsdma.c2
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/musb/tusb6010.c4
-rw-r--r--drivers/usb/otg/Kconfig14
-rw-r--r--drivers/usb/otg/Makefile8
-rw-r--r--drivers/usb/otg/langwell_otg.c2408
-rw-r--r--drivers/usb/otg/ulpi.c60
-rw-r--r--drivers/usb/serial/Kconfig9
-rw-r--r--drivers/usb/serial/Makefile10
-rw-r--r--drivers/usb/serial/ark3116.c40
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c43
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h39
-rw-r--r--drivers/usb/serial/io_edgeport.c49
-rw-r--r--drivers/usb/serial/io_tables.h4
-rw-r--r--drivers/usb/serial/io_ti.c29
-rw-r--r--drivers/usb/serial/mct_u232.c7
-rw-r--r--drivers/usb/serial/mos7720.c54
-rw-r--r--drivers/usb/serial/mos7840.c53
-rw-r--r--drivers/usb/serial/opticon.c44
-rw-r--r--drivers/usb/serial/option.c30
-rw-r--r--drivers/usb/serial/qcserial.c33
-rw-r--r--drivers/usb/serial/sam-ba.c206
-rw-r--r--drivers/usb/serial/ssu100.c48
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c37
-rw-r--r--drivers/usb/serial/usb-serial.c13
-rw-r--r--drivers/usb/serial/visor.c11
-rw-r--r--drivers/usb/storage/Kconfig13
-rw-r--r--drivers/usb/storage/Makefile35
-rw-r--r--drivers/usb/storage/scsiglue.c16
-rw-r--r--drivers/usb/storage/sddr09.c2
-rw-r--r--drivers/usb/storage/transport.c10
-rw-r--r--drivers/usb/storage/uas.c751
-rw-r--r--drivers/usb/storage/unusual_alauda.h4
-rw-r--r--drivers/usb/storage/unusual_cypress.h4
-rw-r--r--drivers/usb/storage/unusual_datafab.h20
-rw-r--r--drivers/usb/storage/unusual_devs.h580
-rw-r--r--drivers/usb/storage/unusual_freecom.h2
-rw-r--r--drivers/usb/storage/unusual_isd200.h12
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h2
-rw-r--r--drivers/usb/storage/unusual_karma.h2
-rw-r--r--drivers/usb/storage/unusual_onetouch.h4
-rw-r--r--drivers/usb/storage/unusual_sddr09.h12
-rw-r--r--drivers/usb/storage/unusual_sddr55.h8
-rw-r--r--drivers/usb/storage/unusual_usbat.h8
-rw-r--r--drivers/usb/storage/usb.c30
-rw-r--r--drivers/usb/wusbcore/Makefile19
-rw-r--r--drivers/uwb/address.c5
-rw-r--r--drivers/uwb/wlp/wss-lc.c7
-rw-r--r--drivers/video/atafb.c2
-rw-r--r--drivers/video/q40fb.c4
-rw-r--r--drivers/zorro/zorro.c2
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/disk-io.c19
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/btrfs/volumes.h1
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/cifs/README5
-rw-r--r--fs/cifs/cifs_debug.c12
-rw-r--r--fs/cifs/cifs_debug.h2
-rw-r--r--fs/cifs/cifs_dfs_ref.c24
-rw-r--r--fs/cifs/cifs_fs_sb.h13
-rw-r--r--fs/cifs/cifsacl.c46
-rw-r--r--fs/cifs/cifsencrypt.c214
-rw-r--r--fs/cifs/cifsfs.c92
-rw-r--r--fs/cifs/cifsfs.h10
-rw-r--r--fs/cifs/cifsglob.h91
-rw-r--r--fs/cifs/cifspdu.h1
-rw-r--r--fs/cifs/cifsproto.h22
-rw-r--r--fs/cifs/cifssmb.c30
-rw-r--r--fs/cifs/cn_cifs.h37
-rw-r--r--fs/cifs/connect.c534
-rw-r--r--fs/cifs/dir.c212
-rw-r--r--fs/cifs/file.c791
-rw-r--r--fs/cifs/fscache.c13
-rw-r--r--fs/cifs/inode.c237
-rw-r--r--fs/cifs/ioctl.c17
-rw-r--r--fs/cifs/link.c372
-rw-r--r--fs/cifs/misc.c32
-rw-r--r--fs/cifs/ntlmssp.h15
-rw-r--r--fs/cifs/readdir.c79
-rw-r--r--fs/cifs/sess.c167
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/cifs/xattr.c60
-rw-r--r--fs/dlm/lock.c3
-rw-r--r--fs/ext3/fsync.c3
-rw-r--r--fs/ext4/fsync.c5
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/fat/fatent.c3
-rw-r--r--fs/fat/misc.c5
-rw-r--r--fs/gfs2/log.c19
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/gfs2/rgrp.c6
-rw-r--r--fs/jbd/commit.c32
-rw-r--r--fs/jbd2/checkpoint.c3
-rw-r--r--fs/jbd2/commit.c76
-rw-r--r--fs/nilfs2/super.c10
-rw-r--r--fs/nilfs2/the_nilfs.c7
-rw-r--r--fs/ocfs2/file.c3
-rw-r--r--fs/partitions/check.c54
-rw-r--r--fs/partitions/check.h3
-rw-r--r--fs/partitions/efi.c25
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/proc_tty.c158
-rw-r--r--fs/reiserfs/file.c3
-rw-r--r--fs/reiserfs/journal.c106
-rw-r--r--fs/sysfs/bin.c68
-rw-r--r--fs/ubifs/commit.c4
-rw-r--r--fs/ubifs/debug.c156
-rw-r--r--fs/ubifs/debug.h4
-rw-r--r--fs/ubifs/file.c7
-rw-r--r--fs/ubifs/gc.c82
-rw-r--r--fs/ubifs/io.c20
-rw-r--r--fs/ubifs/journal.c3
-rw-r--r--fs/ubifs/key.h14
-rw-r--r--fs/ubifs/log.c6
-rw-r--r--fs/ubifs/lpt.c7
-rw-r--r--fs/ubifs/lpt_commit.c3
-rw-r--r--fs/ubifs/master.c3
-rw-r--r--fs/ubifs/misc.h9
-rw-r--r--fs/ubifs/recovery.c11
-rw-r--r--fs/ubifs/replay.c20
-rw-r--r--fs/ubifs/sb.c9
-rw-r--r--fs/ubifs/scan.c6
-rw-r--r--fs/ubifs/shrinker.c2
-rw-r--r--fs/ubifs/super.c80
-rw-r--r--fs/ubifs/tnc.c5
-rw-r--r--fs/ubifs/ubifs.h23
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c237
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h81
-rw-r--r--fs/xfs/linux-2.6/xfs_cred.h28
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c31
-rw-r--r--fs/xfs/linux-2.6/xfs_globals.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_globals.h23
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c19
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c5
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.h6
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c39
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h5
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c27
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c413
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h4
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h5
-rw-r--r--fs/xfs/linux-2.6/xfs_version.h29
-rw-r--r--fs/xfs/quota/xfs_dquot.c164
-rw-r--r--fs/xfs/quota/xfs_qm.c221
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c2
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c16
-rw-r--r--fs/xfs/xfs_ag.h9
-rw-r--r--fs/xfs/xfs_alloc.c4
-rw-r--r--fs/xfs/xfs_alloc_btree.c33
-rw-r--r--fs/xfs/xfs_attr.c37
-rw-r--r--fs/xfs/xfs_bmap.c44
-rw-r--r--fs/xfs/xfs_bmap.h9
-rw-r--r--fs/xfs/xfs_btree.c56
-rw-r--r--fs/xfs/xfs_btree.h14
-rw-r--r--fs/xfs/xfs_buf_item.c7
-rw-r--r--fs/xfs/xfs_da_btree.c2
-rw-r--r--fs/xfs/xfs_dinode.h5
-rw-r--r--fs/xfs/xfs_dir2_leaf.c2
-rw-r--r--fs/xfs/xfs_fs.h7
-rw-r--r--fs/xfs/xfs_fsops.c14
-rw-r--r--fs/xfs/xfs_ialloc.c2
-rw-r--r--fs/xfs/xfs_ialloc_btree.c33
-rw-r--r--fs/xfs/xfs_iget.c4
-rw-r--r--fs/xfs/xfs_inode.c17
-rw-r--r--fs/xfs/xfs_inode.h30
-rw-r--r--fs/xfs/xfs_inode_item.c9
-rw-r--r--fs/xfs/xfs_itable.c3
-rw-r--r--fs/xfs/xfs_log.c18
-rw-r--r--fs/xfs/xfs_log_cil.c232
-rw-r--r--fs/xfs/xfs_log_recover.c25
-rw-r--r--fs/xfs/xfs_mount.c308
-rw-r--r--fs/xfs/xfs_mount.h9
-rw-r--r--fs/xfs/xfs_refcache.h52
-rw-r--r--fs/xfs/xfs_rename.c14
-rw-r--r--fs/xfs/xfs_rtalloc.c29
-rw-r--r--fs/xfs/xfs_sb.h10
-rw-r--r--fs/xfs/xfs_trans.c91
-rw-r--r--fs/xfs/xfs_trans.h3
-rw-r--r--fs/xfs/xfs_trans_buf.c2
-rw-r--r--fs/xfs/xfs_trans_inode.c30
-rw-r--r--fs/xfs/xfs_types.h2
-rw-r--r--fs/xfs/xfs_utils.c9
-rw-r--r--fs/xfs/xfs_utils.h3
-rw-r--r--fs/xfs/xfs_vnodeops.c65
-rw-r--r--fs/xfs/xfs_vnodeops.h6
-rw-r--r--include/asm-generic/ioctls.h2
-rw-r--r--include/asm-generic/percpu.h14
-rw-r--r--include/linux/altera_uart.h5
-rw-r--r--include/linux/bio.h15
-rw-r--r--include/linux/blk_types.h11
-rw-r--r--include/linux/blkdev.h165
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/dlm.h4
-rw-r--r--include/linux/drbd.h22
-rw-r--r--include/linux/drbd_limits.h29
-rw-r--r--include/linux/drbd_nl.h6
-rw-r--r--include/linux/dynamic_debug.h2
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/fs.h28
-rw-r--r--include/linux/fsl_devices.h18
-rw-r--r--include/linux/genhd.h54
-rw-r--r--include/linux/init.h13
-rw-r--r--include/linux/kernel.h10
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/memory.h4
-rw-r--r--include/linux/mtio.h1
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu.h31
-rw-r--r--include/linux/platform_device.h3
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/selection.h1
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h10
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/tty_driver.h9
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/usb/cdc.h79
-rw-r--r--include/linux/usb/ch9.h68
-rw-r--r--include/linux/usb/composite.h40
-rw-r--r--include/linux/usb/gadget.h20
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/intel_mid_otg.h180
-rw-r--r--include/linux/usb/langwell_otg.h139
-rw-r--r--include/linux/usb/ncm.h114
-rw-r--r--include/linux/usb/otg.h11
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/storage.h48
-rw-r--r--include/linux/usb_usual.h43
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/linux/workqueue.h51
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi.h8
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_host.h7
-rw-r--r--include/scsi/scsi_tcq.h6
-rw-r--r--include/scsi/scsi_transport_fc.h3
-rw-r--r--include/sound/core.h6
-rw-r--r--include/trace/events/workqueue.h77
-rw-r--r--init/Kconfig73
-rw-r--r--init/do_mounts.c70
-rw-r--r--kernel/workqueue.c310
-rw-r--r--lib/dynamic_debug.c98
-rw-r--r--lib/kobject.c39
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/Makefile7
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/percpu-km.c8
-rw-r--r--mm/percpu.c401
-rw-r--r--mm/percpu_up.c30
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/vmalloc.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c4
-rw-r--r--sound/core/init.c11
881 files changed, 63347 insertions, 58239 deletions
diff --git a/Documentation/ABI/testing/sysfs-module b/Documentation/ABI/testing/sysfs-module
new file mode 100644
index 000000000000..cfcec3bffc0a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-module
@@ -0,0 +1,12 @@
1What: /sys/module/pch_phub/drivers/.../pch_mac
2Date: August 2010
3KernelVersion: 2.6.35
4Contact: masa-korg@dsn.okisemi.com
5Description: Write/read GbE MAC address.
6
7What: /sys/module/pch_phub/drivers/.../pch_firmware
8Date: August 2010
9KernelVersion: 2.6.35
10Contact: masa-korg@dsn.okisemi.com
11Description: Write/read Option ROM data.
12
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 6899f471fb15..6b4e07f28b69 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -257,7 +257,8 @@ X!Earch/x86/kernel/mca_32.c
257!Iblock/blk-sysfs.c 257!Iblock/blk-sysfs.c
258!Eblock/blk-settings.c 258!Eblock/blk-settings.c
259!Eblock/blk-exec.c 259!Eblock/blk-exec.c
260!Eblock/blk-barrier.c 260!Eblock/blk-flush.c
261!Eblock/blk-lib.c
261!Eblock/blk-tag.c 262!Eblock/blk-tag.c
262!Iblock/blk-tag.c 263!Iblock/blk-tag.c
263!Eblock/blk-integrity.c 264!Eblock/blk-integrity.c
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index a406286f6f3e..d111e3b23db0 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -1,7 +1,5 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3barrier.txt
4 - I/O Barriers
5biodoc.txt 3biodoc.txt
6 - Notes on the Generic Block Layer Rewrite in Linux 2.5 4 - Notes on the Generic Block Layer Rewrite in Linux 2.5
7capability.txt 5capability.txt
@@ -16,3 +14,5 @@ stat.txt
16 - Block layer statistics in /sys/block/<dev>/stat 14 - Block layer statistics in /sys/block/<dev>/stat
17switching-sched.txt 15switching-sched.txt
18 - Switching I/O schedulers at runtime 16 - Switching I/O schedulers at runtime
17writeback_cache_control.txt
18 - Control of volatile write back caches
diff --git a/Documentation/block/barrier.txt b/Documentation/block/barrier.txt
deleted file mode 100644
index 2c2f24f634e4..000000000000
--- a/Documentation/block/barrier.txt
+++ /dev/null
@@ -1,261 +0,0 @@
1I/O Barriers
2============
3Tejun Heo <htejun@gmail.com>, July 22 2005
4
5I/O barrier requests are used to guarantee ordering around the barrier
6requests. Unless you're crazy enough to use disk drives for
7implementing synchronization constructs (wow, sounds interesting...),
8the ordering is meaningful only for write requests for things like
9journal checkpoints. All requests queued before a barrier request
10must be finished (made it to the physical medium) before the barrier
11request is started, and all requests queued after the barrier request
12must be started only after the barrier request is finished (again,
13made it to the physical medium).
14
15In other words, I/O barrier requests have the following two properties.
16
171. Request ordering
18
19Requests cannot pass the barrier request. Preceding requests are
20processed before the barrier and following requests after.
21
22Depending on what features a drive supports, this can be done in one
23of the following three ways.
24
25i. For devices which have queue depth greater than 1 (TCQ devices) and
26support ordered tags, block layer can just issue the barrier as an
27ordered request and the lower level driver, controller and drive
28itself are responsible for making sure that the ordering constraint is
29met. Most modern SCSI controllers/drives should support this.
30
31NOTE: SCSI ordered tag isn't currently used due to limitation in the
32 SCSI midlayer, see the following random notes section.
33
34ii. For devices which have queue depth greater than 1 but don't
35support ordered tags, block layer ensures that the requests preceding
36a barrier request finishes before issuing the barrier request. Also,
37it defers requests following the barrier until the barrier request is
38finished. Older SCSI controllers/drives and SATA drives fall in this
39category.
40
41iii. Devices which have queue depth of 1. This is a degenerate case
42of ii. Just keeping issue order suffices. Ancient SCSI
43controllers/drives and IDE drives are in this category.
44
452. Forced flushing to physical medium
46
47Again, if you're not gonna do synchronization with disk drives (dang,
48it sounds even more appealing now!), the reason you use I/O barriers
49is mainly to protect filesystem integrity when power failure or some
50other events abruptly stop the drive from operating and possibly make
51the drive lose data in its cache. So, I/O barriers need to guarantee
52that requests actually get written to non-volatile medium in order.
53
54There are four cases,
55
56i. No write-back cache. Keeping requests ordered is enough.
57
58ii. Write-back cache but no flush operation. There's no way to
59guarantee physical-medium commit order. This kind of devices can't to
60I/O barriers.
61
62iii. Write-back cache and flush operation but no FUA (forced unit
63access). We need two cache flushes - before and after the barrier
64request.
65
66iv. Write-back cache, flush operation and FUA. We still need one
67flush to make sure requests preceding a barrier are written to medium,
68but post-barrier flush can be avoided by using FUA write on the
69barrier itself.
70
71
72How to support barrier requests in drivers
73------------------------------------------
74
75All barrier handling is done inside block layer proper. All low level
76drivers have to are implementing its prepare_flush_fn and using one
77the following two functions to indicate what barrier type it supports
78and how to prepare flush requests. Note that the term 'ordered' is
79used to indicate the whole sequence of performing barrier requests
80including draining and flushing.
81
82typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
83
84int blk_queue_ordered(struct request_queue *q, unsigned ordered,
85 prepare_flush_fn *prepare_flush_fn);
86
87@q : the queue in question
88@ordered : the ordered mode the driver/device supports
89@prepare_flush_fn : this function should prepare @rq such that it
90 flushes cache to physical medium when executed
91
92For example, SCSI disk driver's prepare_flush_fn looks like the
93following.
94
95static void sd_prepare_flush(struct request_queue *q, struct request *rq)
96{
97 memset(rq->cmd, 0, sizeof(rq->cmd));
98 rq->cmd_type = REQ_TYPE_BLOCK_PC;
99 rq->timeout = SD_TIMEOUT;
100 rq->cmd[0] = SYNCHRONIZE_CACHE;
101 rq->cmd_len = 10;
102}
103
104The following seven ordered modes are supported. The following table
105shows which mode should be used depending on what features a
106device/driver supports. In the leftmost column of table,
107QUEUE_ORDERED_ prefix is omitted from the mode names to save space.
108
109The table is followed by description of each mode. Note that in the
110descriptions of QUEUE_ORDERED_DRAIN*, '=>' is used whereas '->' is
111used for QUEUE_ORDERED_TAG* descriptions. '=>' indicates that the
112preceding step must be complete before proceeding to the next step.
113'->' indicates that the next step can start as soon as the previous
114step is issued.
115
116 write-back cache ordered tag flush FUA
117-----------------------------------------------------------------------
118NONE yes/no N/A no N/A
119DRAIN no no N/A N/A
120DRAIN_FLUSH yes no yes no
121DRAIN_FUA yes no yes yes
122TAG no yes N/A N/A
123TAG_FLUSH yes yes yes no
124TAG_FUA yes yes yes yes
125
126
127QUEUE_ORDERED_NONE
128 I/O barriers are not needed and/or supported.
129
130 Sequence: N/A
131
132QUEUE_ORDERED_DRAIN
133 Requests are ordered by draining the request queue and cache
134 flushing isn't needed.
135
136 Sequence: drain => barrier
137
138QUEUE_ORDERED_DRAIN_FLUSH
139 Requests are ordered by draining the request queue and both
140 pre-barrier and post-barrier cache flushings are needed.
141
142 Sequence: drain => preflush => barrier => postflush
143
144QUEUE_ORDERED_DRAIN_FUA
145 Requests are ordered by draining the request queue and
146 pre-barrier cache flushing is needed. By using FUA on barrier
147 request, post-barrier flushing can be skipped.
148
149 Sequence: drain => preflush => barrier
150
151QUEUE_ORDERED_TAG
152 Requests are ordered by ordered tag and cache flushing isn't
153 needed.
154
155 Sequence: barrier
156
157QUEUE_ORDERED_TAG_FLUSH
158 Requests are ordered by ordered tag and both pre-barrier and
159 post-barrier cache flushings are needed.
160
161 Sequence: preflush -> barrier -> postflush
162
163QUEUE_ORDERED_TAG_FUA
164 Requests are ordered by ordered tag and pre-barrier cache
165 flushing is needed. By using FUA on barrier request,
166 post-barrier flushing can be skipped.
167
168 Sequence: preflush -> barrier
169
170
171Random notes/caveats
172--------------------
173
174* SCSI layer currently can't use TAG ordering even if the drive,
175controller and driver support it. The problem is that SCSI midlayer
176request dispatch function is not atomic. It releases queue lock and
177switch to SCSI host lock during issue and it's possible and likely to
178happen in time that requests change their relative positions. Once
179this problem is solved, TAG ordering can be enabled.
180
181* Currently, no matter which ordered mode is used, there can be only
182one barrier request in progress. All I/O barriers are held off by
183block layer until the previous I/O barrier is complete. This doesn't
184make any difference for DRAIN ordered devices, but, for TAG ordered
185devices with very high command latency, passing multiple I/O barriers
186to low level *might* be helpful if they are very frequent. Well, this
187certainly is a non-issue. I'm writing this just to make clear that no
188two I/O barrier is ever passed to low-level driver.
189
190* Completion order. Requests in ordered sequence are issued in order
191but not required to finish in order. Barrier implementation can
192handle out-of-order completion of ordered sequence. IOW, the requests
193MUST be processed in order but the hardware/software completion paths
194are allowed to reorder completion notifications - eg. current SCSI
195midlayer doesn't preserve completion order during error handling.
196
197* Requeueing order. Low-level drivers are free to requeue any request
198after they removed it from the request queue with
199blkdev_dequeue_request(). As barrier sequence should be kept in order
200when requeued, generic elevator code takes care of putting requests in
201order around barrier. See blk_ordered_req_seq() and
202ELEVATOR_INSERT_REQUEUE handling in __elv_add_request() for details.
203
204Note that block drivers must not requeue preceding requests while
205completing latter requests in an ordered sequence. Currently, no
206error checking is done against this.
207
208* Error handling. Currently, block layer will report error to upper
209layer if any of requests in an ordered sequence fails. Unfortunately,
210this doesn't seem to be enough. Look at the following request flow.
211QUEUE_ORDERED_TAG_FLUSH is in use.
212
213 [0] [1] [2] [3] [pre] [barrier] [post] < [4] [5] [6] ... >
214 still in elevator
215
216Let's say request [2], [3] are write requests to update file system
217metadata (journal or whatever) and [barrier] is used to mark that
218those updates are valid. Consider the following sequence.
219
220 i. Requests [0] ~ [post] leaves the request queue and enters
221 low-level driver.
222 ii. After a while, unfortunately, something goes wrong and the
223 drive fails [2]. Note that any of [0], [1] and [3] could have
224 completed by this time, but [pre] couldn't have been finished
225 as the drive must process it in order and it failed before
226 processing that command.
227 iii. Error handling kicks in and determines that the error is
228 unrecoverable and fails [2], and resumes operation.
229 iv. [pre] [barrier] [post] gets processed.
230 v. *BOOM* power fails
231
232The problem here is that the barrier request is *supposed* to indicate
233that filesystem update requests [2] and [3] made it safely to the
234physical medium and, if the machine crashes after the barrier is
235written, filesystem recovery code can depend on that. Sadly, that
236isn't true in this case anymore. IOW, the success of a I/O barrier
237should also be dependent on success of some of the preceding requests,
238where only upper layer (filesystem) knows what 'some' is.
239
240This can be solved by implementing a way to tell the block layer which
241requests affect the success of the following barrier request and
242making lower lever drivers to resume operation on error only after
243block layer tells it to do so.
244
245As the probability of this happening is very low and the drive should
246be faulty, implementing the fix is probably an overkill. But, still,
247it's there.
248
249* In previous drafts of barrier implementation, there was fallback
250mechanism such that, if FUA or ordered TAG fails, less fancy ordered
251mode can be selected and the failed barrier request is retried
252automatically. The rationale for this feature was that as FUA is
253pretty new in ATA world and ordered tag was never used widely, there
254could be devices which report to support those features but choke when
255actually given such requests.
256
257 This was removed for two reasons 1. it's an overkill 2. it's
258impossible to implement properly when TAG ordering is used as low
259level drivers resume after an error automatically. If it's ever
260needed adding it back and modifying low level drivers accordingly
261shouldn't be difficult.
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
new file mode 100644
index 000000000000..83407d36630a
--- /dev/null
+++ b/Documentation/block/writeback_cache_control.txt
@@ -0,0 +1,86 @@
1
2Explicit volatile write back cache control
3=====================================
4
5Introduction
6------------
7
8Many storage devices, especially in the consumer market, come with volatile
9write back caches. That means the devices signal I/O completion to the
10operating system before data actually has hit the non-volatile storage. This
11behavior obviously speeds up various workloads, but it means the operating
12system needs to force data out to the non-volatile storage when it performs
13a data integrity operation like fsync, sync or an unmount.
14
15The Linux block layer provides two simple mechanisms that let filesystems
16control the caching behavior of the storage device. These mechanisms are
17a forced cache flush, and the Force Unit Access (FUA) flag for requests.
18
19
20Explicit cache flushes
21----------------------
22
23The REQ_FLUSH flag can be OR ed into the r/w flags of a bio submitted from
24the filesystem and will make sure the volatile cache of the storage device
25has been flushed before the actual I/O operation is started. This explicitly
26guarantees that previously completed write requests are on non-volatile
27storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
28set on an otherwise empty bio structure, which causes only an explicit cache
29flush without any dependent I/O. It is recommended to use
30the blkdev_issue_flush() helper for a pure cache flush.
31
32
33Forced Unit Access
34-----------------
35
36The REQ_FUA flag can be OR ed into the r/w flags of a bio submitted from the
37filesystem and will make sure that I/O completion for this request is only
38signaled after the data has been committed to non-volatile storage.
39
40
41Implementation details for filesystems
42--------------------------------------
43
44Filesystems can simply set the REQ_FLUSH and REQ_FUA bits and do not have to
45worry if the underlying devices need any explicit cache flushing and how
46the Forced Unit Access is implemented. The REQ_FLUSH and REQ_FUA flags
47may both be set on a single bio.
48
49
50Implementation details for make_request_fn based block drivers
51--------------------------------------------------------------
52
53These drivers will always see the REQ_FLUSH and REQ_FUA bits as they sit
54directly below the submit_bio interface. For remapping drivers the REQ_FUA
55bits need to be propagated to underlying devices, and a global flush needs
56to be implemented for bios with the REQ_FLUSH bit set. For real device
57drivers that do not have a volatile cache the REQ_FLUSH and REQ_FUA bits
58on non-empty bios can simply be ignored, and REQ_FLUSH requests without
59data can be completed successfully without doing any work. Drivers for
60devices with volatile caches need to implement the support for these
61flags themselves without any help from the block layer.
62
63
64Implementation details for request_fn based block drivers
65--------------------------------------------------------------
66
67For devices that do not support volatile write caches there is no driver
68support required, the block layer completes empty REQ_FLUSH requests before
69entering the driver and strips off the REQ_FLUSH and REQ_FUA bits from
70requests that have a payload. For devices with volatile write caches the
71driver needs to tell the block layer that it supports flushing caches by
72doing:
73
74 blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
75
76and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
77REQ_FLUSH requests with a payload are automatically turned into a sequence
78of an empty REQ_FLUSH request followed by the actual write by the block
79layer. For devices that also support the FUA bit the block layer needs
80to be told to pass through the REQ_FUA bit using:
81
82 blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
83
84and the driver must handle write requests that have the REQ_FUA bit set
85in prep_fn/request_fn. If the FUA bit is not natively supported the block
86layer turns it into an empty REQ_FLUSH request after the actual write.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 6919d62591d9..d6da611f8f63 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -8,12 +8,17 @@ both at leaf nodes as well as at intermediate nodes in a storage hierarchy.
8Plan is to use the same cgroup based management interface for blkio controller 8Plan is to use the same cgroup based management interface for blkio controller
9and based on user options switch IO policies in the background. 9and based on user options switch IO policies in the background.
10 10
11In the first phase, this patchset implements proportional weight time based 11Currently two IO control policies are implemented. First one is proportional
12division of disk policy. It is implemented in CFQ. Hence this policy takes 12weight time based division of disk policy. It is implemented in CFQ. Hence
13effect only on leaf nodes when CFQ is being used. 13this policy takes effect only on leaf nodes when CFQ is being used. The second
14one is throttling policy which can be used to specify upper IO rate limits
15on devices. This policy is implemented in generic block layer and can be
16used on leaf nodes as well as higher level logical devices like device mapper.
14 17
15HOWTO 18HOWTO
16===== 19=====
20Proportional Weight division of bandwidth
21-----------------------------------------
17You can do a very simple testing of running two dd threads in two different 22You can do a very simple testing of running two dd threads in two different
18cgroups. Here is what you can do. 23cgroups. Here is what you can do.
19 24
@@ -55,6 +60,35 @@ cgroups. Here is what you can do.
55 group dispatched to the disk. We provide fairness in terms of disk time, so 60 group dispatched to the disk. We provide fairness in terms of disk time, so
56 ideally io.disk_time of cgroups should be in proportion to the weight. 61 ideally io.disk_time of cgroups should be in proportion to the weight.
57 62
63Throttling/Upper Limit policy
64-----------------------------
65- Enable Block IO controller
66 CONFIG_BLK_CGROUP=y
67
68- Enable throttling in block layer
69 CONFIG_BLK_DEV_THROTTLING=y
70
71- Mount blkio controller
72 mount -t cgroup -o blkio none /cgroup/blkio
73
74- Specify a bandwidth rate on particular device for root group. The format
75 for policy is "<major>:<minor> <bytes_per_second>".
76
77 echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device
78
79 Above will put a limit of 1MB/second on reads happening for root group
80 on device having major/minor number 8:16.
81
82- Run dd to read a file and see if rate is throttled to 1MB/s or not.
83
84 # dd if=/mnt/common/zerofile of=/dev/null bs=4K count=1024
85 # iflag=direct
86 1024+0 records in
87 1024+0 records out
88 4194304 bytes (4.2 MB) copied, 4.0001 s, 1.0 MB/s
89
90 Limits for writes can be put using blkio.write_bps_device file.
91
58Various user visible config options 92Various user visible config options
59=================================== 93===================================
60CONFIG_BLK_CGROUP 94CONFIG_BLK_CGROUP
@@ -68,8 +102,13 @@ CONFIG_CFQ_GROUP_IOSCHED
68 - Enables group scheduling in CFQ. Currently only 1 level of group 102 - Enables group scheduling in CFQ. Currently only 1 level of group
69 creation is allowed. 103 creation is allowed.
70 104
105CONFIG_BLK_DEV_THROTTLING
106 - Enable block device throttling support in block layer.
107
71Details of cgroup files 108Details of cgroup files
72======================= 109=======================
110Proportional weight policy files
111--------------------------------
73- blkio.weight 112- blkio.weight
74 - Specifies per cgroup weight. This is default weight of the group 113 - Specifies per cgroup weight. This is default weight of the group
75 on all the devices until and unless overridden by per device rule. 114 on all the devices until and unless overridden by per device rule.
@@ -210,6 +249,67 @@ Details of cgroup files
210 and minor number of the device and third field specifies the number 249 and minor number of the device and third field specifies the number
211 of times a group was dequeued from a particular device. 250 of times a group was dequeued from a particular device.
212 251
252Throttling/Upper limit policy files
253-----------------------------------
254- blkio.throttle.read_bps_device
255 - Specifies upper limit on READ rate from the device. IO rate is
256 specified in bytes per second. Rules are per device. Following is
257 the format.
258
259 echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.read_bps_device
260
261- blkio.throttle.write_bps_device
262 - Specifies upper limit on WRITE rate to the device. IO rate is
263 specified in bytes per second. Rules are per device. Following is
264 the format.
265
266 echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.write_bps_device
267
268- blkio.throttle.read_iops_device
269 - Specifies upper limit on READ rate from the device. IO rate is
270 specified in IO per second. Rules are per device. Following is
271 the format.
272
273 echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.read_iops_device
274
275- blkio.throttle.write_iops_device
276 - Specifies upper limit on WRITE rate to the device. IO rate is
277 specified in io per second. Rules are per device. Following is
278 the format.
279
280 echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.write_iops_device
281
282Note: If both BW and IOPS rules are specified for a device, then IO is
283 subjected to both the constraints.
284
285- blkio.throttle.io_serviced
286 - Number of IOs (bio) completed to/from the disk by the group (as
287 seen by throttling policy). These are further divided by the type
288 of operation - read or write, sync or async. First two fields specify
289 the major and minor number of the device, third field specifies the
290 operation type and the fourth field specifies the number of IOs.
291
292 blkio.io_serviced does accounting as seen by CFQ and counts are in
293 number of requests (struct request). On the other hand,
294 blkio.throttle.io_serviced counts number of IO in terms of number
295 of bios as seen by throttling policy. These bios can later be
296 merged by elevator and total number of requests completed can be
297 lesser.
298
299- blkio.throttle.io_service_bytes
300 - Number of bytes transferred to/from the disk by the group. These
301 are further divided by the type of operation - read or write, sync
302 or async. First two fields specify the major and minor number of the
303 device, third field specifies the operation type and the fourth field
304 specifies the number of bytes.
305
306 These numbers should roughly be same as blkio.io_service_bytes as
307 updated by CFQ. The difference between two is that
308 blkio.io_service_bytes will not be updated if CFQ is not operating
309 on request queue.
310
311Common files among various policies
312-----------------------------------
213- blkio.reset_stats 313- blkio.reset_stats
214 - Writing an int to this file will result in resetting all the stats 314 - Writing an int to this file will result in resetting all the stats
215 for that cgroup. 315 for that cgroup.
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index d0d1df6cb5de..c58abf1ccc71 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -239,6 +239,7 @@ Your cooperation is appreciated.
239 0 = /dev/tty Current TTY device 239 0 = /dev/tty Current TTY device
240 1 = /dev/console System console 240 1 = /dev/console System console
241 2 = /dev/ptmx PTY master multiplex 241 2 = /dev/ptmx PTY master multiplex
242 3 = /dev/ttyprintk User messages via printk TTY device
242 64 = /dev/cua0 Callout device for ttyS0 243 64 = /dev/cua0 Callout device for ttyS0
243 ... 244 ...
244 255 = /dev/cua191 Callout device for ttyS191 245 255 = /dev/cua191 Callout device for ttyS191
@@ -2553,7 +2554,10 @@ Your cooperation is appreciated.
2553 175 = /dev/usb/legousbtower15 16th USB Legotower device 2554 175 = /dev/usb/legousbtower15 16th USB Legotower device
2554 176 = /dev/usb/usbtmc1 First USB TMC device 2555 176 = /dev/usb/usbtmc1 First USB TMC device
2555 ... 2556 ...
2556 192 = /dev/usb/usbtmc16 16th USB TMC device 2557 191 = /dev/usb/usbtmc16 16th USB TMC device
2558 192 = /dev/usb/yurex1 First USB Yurex device
2559 ...
2560 209 = /dev/usb/yurex16 16th USB Yurex device
2557 240 = /dev/usb/dabusb0 First daubusb device 2561 240 = /dev/usb/dabusb0 First daubusb device
2558 ... 2562 ...
2559 243 = /dev/usb/dabusb3 Fourth dabusb device 2563 243 = /dev/usb/dabusb3 Fourth dabusb device
diff --git a/Documentation/dynamic-debug-howto.txt b/Documentation/dynamic-debug-howto.txt
index 674c5663d346..58ea64a96165 100644
--- a/Documentation/dynamic-debug-howto.txt
+++ b/Documentation/dynamic-debug-howto.txt
@@ -24,7 +24,7 @@ Dynamic debug has even more useful features:
24 read to display the complete list of known debug statements, to help guide you 24 read to display the complete list of known debug statements, to help guide you
25 25
26Controlling dynamic debug Behaviour 26Controlling dynamic debug Behaviour
27=============================== 27===================================
28 28
29The behaviour of pr_debug()/dev_debug()s are controlled via writing to a 29The behaviour of pr_debug()/dev_debug()s are controlled via writing to a
30control file in the 'debugfs' filesystem. Thus, you must first mount the debugfs 30control file in the 'debugfs' filesystem. Thus, you must first mount the debugfs
@@ -212,6 +212,26 @@ Note the regexp ^[-+=][scp]+$ matches a flags specification.
212Note also that there is no convenient syntax to remove all 212Note also that there is no convenient syntax to remove all
213the flags at once, you need to use "-psc". 213the flags at once, you need to use "-psc".
214 214
215
216Debug messages during boot process
217==================================
218
219To be able to activate debug messages during the boot process,
220even before userspace and debugfs exists, use the boot parameter:
221ddebug_query="QUERY"
222
223QUERY follows the syntax described above, but must not exceed 1023
224characters. The enablement of debug messages is done as an arch_initcall.
225Thus you can enable debug messages in all code processed after this
226arch_initcall via this boot parameter.
227On an x86 system for example ACPI enablement is a subsys_initcall and
228ddebug_query="file ec.c +p"
229will show early Embedded Controller transactions during ACPI setup if
230your machine (typically a laptop) has an Embedded Controller.
231PCI (or other devices) initialization also is a hot candidate for using
232this boot parameter for debugging purposes.
233
234
215Examples 235Examples
216======== 236========
217 237
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a6aca8740883..98223a676940 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1075,6 +1075,7 @@ Table 1-11: Files in /proc/tty
1075 drivers list of drivers and their usage 1075 drivers list of drivers and their usage
1076 ldiscs registered line disciplines 1076 ldiscs registered line disciplines
1077 driver/serial usage statistic and status of single tty lines 1077 driver/serial usage statistic and status of single tty lines
1078 consoles registered system console lines
1078.............................................................................. 1079..............................................................................
1079 1080
1080To see which tty's are currently in use, you can simply look into the file 1081To see which tty's are currently in use, you can simply look into the file
@@ -1093,6 +1094,37 @@ To see which tty's are currently in use, you can simply look into the file
1093 /dev/tty /dev/tty 5 0 system:/dev/tty 1094 /dev/tty /dev/tty 5 0 system:/dev/tty
1094 unknown /dev/tty 4 1-63 console 1095 unknown /dev/tty 4 1-63 console
1095 1096
1097To see which character device lines are currently used for the system console
1098/dev/console, you may simply look into the file /proc/tty/consoles:
1099
1100 > cat /proc/tty/consoles
1101 tty0 -WU (ECp) 4:7
1102 ttyS0 -W- (Ep) 4:64
1103
1104The columns are:
1105
1106 device name of the device
1107 operations R = can do read operations
1108 W = can do write operations
1109 U = can do unblank
1110 flags E = it is enabled
1111 C = it is prefered console
1112 B = it is primary boot console
1113 p = it is used for printk buffer
1114 b = it is not a TTY but a Braille device
1115 a = it is safe to use when cpu is offline
1116 * = it is standard input of the reading process
1117 major:minor major and minor number of the device separated by a colon
1118
1119If the reading process holds /dev/console open at the regular standard input
1120stream the active device will be marked by an asterisk:
1121
1122 > cat /proc/tty/consoles < /dev/console
1123 tty0 -WU (ECp*) 4:7
1124 ttyS0 -W- (Ep) 4:64
1125 > tty
1126 /dev/pts/3
1127
1096 1128
10971.8 Miscellaneous kernel statistics in /proc/stat 11291.8 Miscellaneous kernel statistics in /proc/stat
1098------------------------------------------------- 1130-------------------------------------------------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 02f21d9220ce..4cd8b86e00ea 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -43,10 +43,11 @@ parameter is applicable:
43 AVR32 AVR32 architecture is enabled. 43 AVR32 AVR32 architecture is enabled.
44 AX25 Appropriate AX.25 support is enabled. 44 AX25 Appropriate AX.25 support is enabled.
45 BLACKFIN Blackfin architecture is enabled. 45 BLACKFIN Blackfin architecture is enabled.
46 DRM Direct Rendering Management support is enabled.
47 EDD BIOS Enhanced Disk Drive Services (EDD) is enabled 46 EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
48 EFI EFI Partitioning (GPT) is enabled 47 EFI EFI Partitioning (GPT) is enabled
49 EIDE EIDE/ATAPI support is enabled. 48 EIDE EIDE/ATAPI support is enabled.
49 DRM Direct Rendering Management support is enabled.
50 DYNAMIC_DEBUG Build in debug messages and enable them at runtime
50 FB The frame buffer device is enabled. 51 FB The frame buffer device is enabled.
51 GCOV GCOV profiling is enabled. 52 GCOV GCOV profiling is enabled.
52 HW Appropriate hardware is enabled. 53 HW Appropriate hardware is enabled.
@@ -570,6 +571,10 @@ and is between 256 and 4096 characters. It is defined in the file
570 Format: <port#>,<type> 571 Format: <port#>,<type>
571 See also Documentation/input/joystick-parport.txt 572 See also Documentation/input/joystick-parport.txt
572 573
574 ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
575 time. See Documentation/dynamic-debug-howto.txt for
576 details.
577
573 debug [KNL] Enable kernel debugging (events log level). 578 debug [KNL] Enable kernel debugging (events log level).
574 579
575 debug_locks_verbose= 580 debug_locks_verbose=
@@ -2370,6 +2375,15 @@ and is between 256 and 4096 characters. It is defined in the file
2370 2375
2371 switches= [HW,M68k] 2376 switches= [HW,M68k]
2372 2377
2378 sysfs.deprecated=0|1 [KNL]
2379 Enable/disable old style sysfs layout for old udev
2380 on older distributions. When this option is enabled
2381 very new udev will not work anymore. When this option
2382 is disabled (or CONFIG_SYSFS_DEPRECATED not compiled)
2383 in older udev will not work anymore.
2384 Default depends on CONFIG_SYSFS_DEPRECATED_V2 set in
2385 the kernel configuration.
2386
2373 sysrq_always_enabled 2387 sysrq_always_enabled
2374 [KNL] 2388 [KNL]
2375 Ignore sysrq setting - this boot parameter will 2389 Ignore sysrq setting - this boot parameter will
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 8a6a8c6d4980..dc73bc54cc4e 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -1640,15 +1640,6 @@ static void blk_request(struct virtqueue *vq)
1640 off = out->sector * 512; 1640 off = out->sector * 512;
1641 1641
1642 /* 1642 /*
1643 * The block device implements "barriers", where the Guest indicates
1644 * that it wants all previous writes to occur before this write. We
1645 * don't have a way of asking our kernel to do a barrier, so we just
1646 * synchronize all the data in the file. Pretty poor, no?
1647 */
1648 if (out->type & VIRTIO_BLK_T_BARRIER)
1649 fdatasync(vblk->fd);
1650
1651 /*
1652 * In general the virtio block driver is allowed to try SCSI commands. 1643 * In general the virtio block driver is allowed to try SCSI commands.
1653 * It'd be nice if we supported eject, for example, but we don't. 1644 * It'd be nice if we supported eject, for example, but we don't.
1654 */ 1645 */
@@ -1680,6 +1671,13 @@ static void blk_request(struct virtqueue *vq)
1680 /* Die, bad Guest, die. */ 1671 /* Die, bad Guest, die. */
1681 errx(1, "Write past end %llu+%u", off, ret); 1672 errx(1, "Write past end %llu+%u", off, ret);
1682 } 1673 }
1674
1675 wlen = sizeof(*in);
1676 *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
1677 } else if (out->type & VIRTIO_BLK_T_FLUSH) {
1678 /* Flush */
1679 ret = fdatasync(vblk->fd);
1680 verbose("FLUSH fdatasync: %i\n", ret);
1683 wlen = sizeof(*in); 1681 wlen = sizeof(*in);
1684 *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); 1682 *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
1685 } else { 1683 } else {
@@ -1703,15 +1701,6 @@ static void blk_request(struct virtqueue *vq)
1703 } 1701 }
1704 } 1702 }
1705 1703
1706 /*
1707 * OK, so we noted that it was pretty poor to use an fdatasync as a
1708 * barrier. But Christoph Hellwig points out that we need a sync
1709 * *afterwards* as well: "Barriers specify no reordering to the front
1710 * or the back." And Jens Axboe confirmed it, so here we are:
1711 */
1712 if (out->type & VIRTIO_BLK_T_BARRIER)
1713 fdatasync(vblk->fd);
1714
1715 /* Finished that request. */ 1704 /* Finished that request. */
1716 add_used(vq, head, wlen); 1705 add_used(vq, head, wlen);
1717} 1706}
@@ -1736,8 +1725,8 @@ static void setup_block_file(const char *filename)
1736 vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); 1725 vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
1737 vblk->len = lseek64(vblk->fd, 0, SEEK_END); 1726 vblk->len = lseek64(vblk->fd, 0, SEEK_END);
1738 1727
1739 /* We support barriers. */ 1728 /* We support FLUSH. */
1740 add_feature(dev, VIRTIO_BLK_F_BARRIER); 1729 add_feature(dev, VIRTIO_BLK_F_FLUSH);
1741 1730
1742 /* Tell Guest how many sectors this device has. */ 1731 /* Tell Guest how many sectors this device has. */
1743 conf.capacity = cpu_to_le64(vblk->len / 512); 1732 conf.capacity = cpu_to_le64(vblk->len / 512);
diff --git a/Documentation/powerpc/dts-bindings/fsl/usb.txt b/Documentation/powerpc/dts-bindings/fsl/usb.txt
index b00152402694..bd5723f0b67e 100644
--- a/Documentation/powerpc/dts-bindings/fsl/usb.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/usb.txt
@@ -8,6 +8,7 @@ and additions :
8Required properties : 8Required properties :
9 - compatible : Should be "fsl-usb2-mph" for multi port host USB 9 - compatible : Should be "fsl-usb2-mph" for multi port host USB
10 controllers, or "fsl-usb2-dr" for dual role USB controllers 10 controllers, or "fsl-usb2-dr" for dual role USB controllers
11 or "fsl,mpc5121-usb2-dr" for dual role USB controllers of MPC5121
11 - phy_type : For multi port host USB controllers, should be one of 12 - phy_type : For multi port host USB controllers, should be one of
12 "ulpi", or "serial". For dual role USB controllers, should be 13 "ulpi", or "serial". For dual role USB controllers, should be
13 one of "ulpi", "utmi", "utmi_wide", or "serial". 14 one of "ulpi", "utmi", "utmi_wide", or "serial".
@@ -33,6 +34,12 @@ Recommended properties :
33 - interrupt-parent : the phandle for the interrupt controller that 34 - interrupt-parent : the phandle for the interrupt controller that
34 services interrupts for this device. 35 services interrupts for this device.
35 36
37Optional properties :
38 - fsl,invert-drvvbus : boolean; for MPC5121 USB0 only. Indicates the
39 port power polarity of internal PHY signal DRVVBUS is inverted.
40 - fsl,invert-pwr-fault : boolean; for MPC5121 USB0 only. Indicates
41 the PWR_FAULT signal polarity is inverted.
42
36Example multi port host USB controller device node : 43Example multi port host USB controller device node :
37 usb@22000 { 44 usb@22000 {
38 compatible = "fsl-usb2-mph"; 45 compatible = "fsl-usb2-mph";
@@ -57,3 +64,18 @@ Example dual role USB controller device node :
57 dr_mode = "otg"; 64 dr_mode = "otg";
58 phy = "ulpi"; 65 phy = "ulpi";
59 }; 66 };
67
68Example dual role USB controller device node for MPC5121ADS:
69
70 usb@4000 {
71 compatible = "fsl,mpc5121-usb2-dr";
72 reg = <0x4000 0x1000>;
73 #address-cells = <1>;
74 #size-cells = <0>;
75 interrupt-parent = < &ipic >;
76 interrupts = <44 0x8>;
77 dr_mode = "otg";
78 phy_type = "utmi_wide";
79 fsl,invert-drvvbus;
80 fsl,invert-pwr-fault;
81 };
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 40752602c050..691ca292c24d 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -2,7 +2,7 @@ This file contains brief information about the SCSI tape driver.
2The driver is currently maintained by Kai Mäkisara (email 2The driver is currently maintained by Kai Mäkisara (email
3Kai.Makisara@kolumbus.fi) 3Kai.Makisara@kolumbus.fi)
4 4
5Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara 5Last modified: Sun Aug 29 18:25:47 2010 by kai.makisara
6 6
7 7
8BASICS 8BASICS
@@ -85,6 +85,17 @@ writing and the last operation has been a write. Two filemarks can be
85optionally written. In both cases end of data is signified by 85optionally written. In both cases end of data is signified by
86returning zero bytes for two consecutive reads. 86returning zero bytes for two consecutive reads.
87 87
88Writing filemarks without the immediate bit set in the SCSI command block acts
89as a synchronization point, i.e., all remaining data form the drive buffers is
90written to tape before the command returns. This makes sure that write errors
91are caught at that point, but this takes time. In some applications, several
92consecutive files must be written fast. The MTWEOFI operation can be used to
93write the filemarks without flushing the drive buffer. Writing filemark at
94close() is always flushing the drive buffers. However, if the previous
95operation is MTWEOFI, close() does not write a filemark. This can be used if
96the program wants to close/open the tape device between files and wants to
97skip waiting.
98
88If rewind, offline, bsf, or seek is done and previous tape operation was 99If rewind, offline, bsf, or seek is done and previous tape operation was
89write, a filemark is written before moving tape. 100write, a filemark is written before moving tape.
90 101
@@ -301,6 +312,8 @@ MTBSR Space backward over count records.
301MTFSS Space forward over count setmarks. 312MTFSS Space forward over count setmarks.
302MTBSS Space backward over count setmarks. 313MTBSS Space backward over count setmarks.
303MTWEOF Write count filemarks. 314MTWEOF Write count filemarks.
315MTWEOFI Write count filemarks with immediate bit set (i.e., does not
316 wait until data is on tape)
304MTWSM Write count setmarks. 317MTWSM Write count setmarks.
305MTREW Rewind tape. 318MTREW Rewind tape.
306MTOFFL Set device off line (often rewind plus eject). 319MTOFFL Set device off line (often rewind plus eject).
diff --git a/Documentation/usb/proc_usb_info.txt b/Documentation/usb/proc_usb_info.txt
index fafcd4723260..afe596d5f201 100644
--- a/Documentation/usb/proc_usb_info.txt
+++ b/Documentation/usb/proc_usb_info.txt
@@ -1,12 +1,17 @@
1/proc/bus/usb filesystem output 1/proc/bus/usb filesystem output
2=============================== 2===============================
3(version 2003.05.30) 3(version 2010.09.13)
4 4
5 5
6The usbfs filesystem for USB devices is traditionally mounted at 6The usbfs filesystem for USB devices is traditionally mounted at
7/proc/bus/usb. It provides the /proc/bus/usb/devices file, as well as 7/proc/bus/usb. It provides the /proc/bus/usb/devices file, as well as
8the /proc/bus/usb/BBB/DDD files. 8the /proc/bus/usb/BBB/DDD files.
9 9
10In many modern systems the usbfs filsystem isn't used at all. Instead
11USB device nodes are created under /dev/usb/ or someplace similar. The
12"devices" file is available in debugfs, typically as
13/sys/kernel/debug/usb/devices.
14
10 15
11**NOTE**: If /proc/bus/usb appears empty, and a host controller 16**NOTE**: If /proc/bus/usb appears empty, and a host controller
12 driver has been linked, then you need to mount the 17 driver has been linked, then you need to mount the
@@ -106,8 +111,8 @@ Legend:
106 111
107Topology info: 112Topology info:
108 113
109T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd 114T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=dddd MxCh=dd
110| | | | | | | | |__MaxChildren 115| | | | | | | | |__MaxChildren
111| | | | | | | |__Device Speed in Mbps 116| | | | | | | |__Device Speed in Mbps
112| | | | | | |__DeviceNumber 117| | | | | | |__DeviceNumber
113| | | | | |__Count of devices at this level 118| | | | | |__Count of devices at this level
@@ -120,8 +125,13 @@ T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd
120 Speed may be: 125 Speed may be:
121 1.5 Mbit/s for low speed USB 126 1.5 Mbit/s for low speed USB
122 12 Mbit/s for full speed USB 127 12 Mbit/s for full speed USB
123 480 Mbit/s for high speed USB (added for USB 2.0) 128 480 Mbit/s for high speed USB (added for USB 2.0);
129 also used for Wireless USB, which has no fixed speed
130 5000 Mbit/s for SuperSpeed USB (added for USB 3.0)
124 131
132 For reasons lost in the mists of time, the Port number is always
133 too low by 1. For example, a device plugged into port 4 will
134 show up with "Port=03".
125 135
126Bandwidth info: 136Bandwidth info:
127B: Alloc=ddd/ddd us (xx%), #Int=ddd, #Iso=ddd 137B: Alloc=ddd/ddd us (xx%), #Int=ddd, #Iso=ddd
@@ -291,7 +301,7 @@ Here's an example, from a system which has a UHCI root hub,
291an external hub connected to the root hub, and a mouse and 301an external hub connected to the root hub, and a mouse and
292a serial converter connected to the external hub. 302a serial converter connected to the external hub.
293 303
294T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2 304T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
295B: Alloc= 28/900 us ( 3%), #Int= 2, #Iso= 0 305B: Alloc= 28/900 us ( 3%), #Int= 2, #Iso= 0
296D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 306D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
297P: Vendor=0000 ProdID=0000 Rev= 0.00 307P: Vendor=0000 ProdID=0000 Rev= 0.00
@@ -301,21 +311,21 @@ C:* #Ifs= 1 Cfg#= 1 Atr=40 MxPwr= 0mA
301I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub 311I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
302E: Ad=81(I) Atr=03(Int.) MxPS= 8 Ivl=255ms 312E: Ad=81(I) Atr=03(Int.) MxPS= 8 Ivl=255ms
303 313
304T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4 314T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
305D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 315D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
306P: Vendor=0451 ProdID=1446 Rev= 1.00 316P: Vendor=0451 ProdID=1446 Rev= 1.00
307C:* #Ifs= 1 Cfg#= 1 Atr=e0 MxPwr=100mA 317C:* #Ifs= 1 Cfg#= 1 Atr=e0 MxPwr=100mA
308I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub 318I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
309E: Ad=81(I) Atr=03(Int.) MxPS= 1 Ivl=255ms 319E: Ad=81(I) Atr=03(Int.) MxPS= 1 Ivl=255ms
310 320
311T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0 321T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
312D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 322D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
313P: Vendor=04b4 ProdID=0001 Rev= 0.00 323P: Vendor=04b4 ProdID=0001 Rev= 0.00
314C:* #Ifs= 1 Cfg#= 1 Atr=80 MxPwr=100mA 324C:* #Ifs= 1 Cfg#= 1 Atr=80 MxPwr=100mA
315I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse 325I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse
316E: Ad=81(I) Atr=03(Int.) MxPS= 3 Ivl= 10ms 326E: Ad=81(I) Atr=03(Int.) MxPS= 3 Ivl= 10ms
317 327
318T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0 328T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
319D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 329D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
320P: Vendor=0565 ProdID=0001 Rev= 1.08 330P: Vendor=0565 ProdID=0001 Rev= 1.08
321S: Manufacturer=Peracom Networks, Inc. 331S: Manufacturer=Peracom Networks, Inc.
@@ -330,12 +340,12 @@ E: Ad=82(I) Atr=03(Int.) MxPS= 8 Ivl= 8ms
330Selecting only the "T:" and "I:" lines from this (for example, by using 340Selecting only the "T:" and "I:" lines from this (for example, by using
331"procusb ti"), we have: 341"procusb ti"), we have:
332 342
333T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2 343T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
334T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4 344T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
335I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub 345I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
336T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0 346T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
337I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse 347I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse
338T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0 348T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
339I: If#= 0 Alt= 0 #EPs= 3 Cls=00(>ifc ) Sub=00 Prot=00 Driver=serial 349I: If#= 0 Alt= 0 #EPs= 3 Cls=00(>ifc ) Sub=00 Prot=00 Driver=serial
340 350
341 351
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index e4498a2872c3..996a27d9b8db 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -196,11 +196,11 @@ resources, scheduled and executed.
196 suspend operations. Work items on the wq are drained and no 196 suspend operations. Work items on the wq are drained and no
197 new work item starts execution until thawed. 197 new work item starts execution until thawed.
198 198
199 WQ_RESCUER 199 WQ_MEM_RECLAIM
200 200
201 All wq which might be used in the memory reclaim paths _MUST_ 201 All wq which might be used in the memory reclaim paths _MUST_
202 have this flag set. This reserves one worker exclusively for 202 have this flag set. The wq is guaranteed to have at least one
203 the execution of this wq under memory pressure. 203 execution context regardless of memory pressure.
204 204
205 WQ_HIGHPRI 205 WQ_HIGHPRI
206 206
@@ -356,11 +356,11 @@ If q1 has WQ_CPU_INTENSIVE set,
356 356
3576. Guidelines 3576. Guidelines
358 358
359* Do not forget to use WQ_RESCUER if a wq may process work items which 359* Do not forget to use WQ_MEM_RECLAIM if a wq may process work items
360 are used during memory reclaim. Each wq with WQ_RESCUER set has one 360 which are used during memory reclaim. Each wq with WQ_MEM_RECLAIM
361 rescuer thread reserved for it. If there is dependency among 361 set has an execution context reserved for it. If there is
362 multiple work items used during memory reclaim, they should be 362 dependency among multiple work items used during memory reclaim,
363 queued to separate wq each with WQ_RESCUER. 363 they should be queued to separate wq each with WQ_MEM_RECLAIM.
364 364
365* Unless strict ordering is required, there is no need to use ST wq. 365* Unless strict ordering is required, there is no need to use ST wq.
366 366
@@ -368,12 +368,13 @@ If q1 has WQ_CPU_INTENSIVE set,
368 recommended. In most use cases, concurrency level usually stays 368 recommended. In most use cases, concurrency level usually stays
369 well under the default limit. 369 well under the default limit.
370 370
371* A wq serves as a domain for forward progress guarantee (WQ_RESCUER), 371* A wq serves as a domain for forward progress guarantee
372 flush and work item attributes. Work items which are not involved 372 (WQ_MEM_RECLAIM, flush and work item attributes. Work items which
373 in memory reclaim and don't need to be flushed as a part of a group 373 are not involved in memory reclaim and don't need to be flushed as a
374 of work items, and don't require any special attribute, can use one 374 part of a group of work items, and don't require any special
375 of the system wq. There is no difference in execution 375 attribute, can use one of the system wq. There is no difference in
376 characteristics between using a dedicated wq and a system wq. 376 execution characteristics between using a dedicated wq and a system
377 wq.
377 378
378* Unless work items are expected to consume a huge amount of CPU 379* Unless work items are expected to consume a huge amount of CPU
379 cycles, using a bound wq is usually beneficial due to the increased 380 cycles, using a bound wq is usually beneficial due to the increased
diff --git a/MAINTAINERS b/MAINTAINERS
index b618b1e86c46..9a0432de9141 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -157,9 +157,11 @@ S: Maintained
157F: drivers/net/r8169.c 157F: drivers/net/r8169.c
158 158
1598250/16?50 (AND CLONE UARTS) SERIAL DRIVER 1598250/16?50 (AND CLONE UARTS) SERIAL DRIVER
160M: Greg Kroah-Hartman <gregkh@suse.de>
160L: linux-serial@vger.kernel.org 161L: linux-serial@vger.kernel.org
161W: http://serial.sourceforge.net 162W: http://serial.sourceforge.net
162S: Orphan 163S: Maintained
164T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
163F: drivers/serial/8250* 165F: drivers/serial/8250*
164F: include/linux/serial_8250.h 166F: include/linux/serial_8250.h
165 167
@@ -2064,14 +2066,16 @@ F: drivers/block/drbd/
2064F: lib/lru_cache.c 2066F: lib/lru_cache.c
2065F: Documentation/blockdev/drbd/ 2067F: Documentation/blockdev/drbd/
2066 2068
2067DRIVER CORE, KOBJECTS, AND SYSFS 2069DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
2068M: Greg Kroah-Hartman <gregkh@suse.de> 2070M: Greg Kroah-Hartman <gregkh@suse.de>
2069T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 2071T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
2070S: Supported 2072S: Supported
2071F: Documentation/kobject.txt 2073F: Documentation/kobject.txt
2072F: drivers/base/ 2074F: drivers/base/
2073F: fs/sysfs/ 2075F: fs/sysfs/
2076F: fs/debugfs/
2074F: include/linux/kobj* 2077F: include/linux/kobj*
2078F: include/linux/debugfs.h
2075F: lib/kobj* 2079F: lib/kobj*
2076 2080
2077DRM DRIVERS 2081DRM DRIVERS
@@ -3351,6 +3355,12 @@ F: fs/jbd*/
3351F: include/linux/ext*jbd*.h 3355F: include/linux/ext*jbd*.h
3352F: include/linux/jbd*.h 3356F: include/linux/jbd*.h
3353 3357
3358JSM Neo PCI based serial card
3359M: Breno Leitao <leitao@linux.vnet.ibm.com>
3360L: linux-serial@vger.kernel.org
3361S: Maintained
3362F: drivers/serial/jsm/
3363
3354K8TEMP HARDWARE MONITORING DRIVER 3364K8TEMP HARDWARE MONITORING DRIVER
3355M: Rudolf Marek <r.marek@assembler.cz> 3365M: Rudolf Marek <r.marek@assembler.cz>
3356L: lm-sensors@lm-sensors.org 3366L: lm-sensors@lm-sensors.org
@@ -5963,6 +5973,14 @@ S: Maintained
5963F: Documentation/usb/acm.txt 5973F: Documentation/usb/acm.txt
5964F: drivers/usb/class/cdc-acm.* 5974F: drivers/usb/class/cdc-acm.*
5965 5975
5976USB ATTACHED SCSI
5977M: Matthew Wilcox <willy@linux.intel.com>
5978M: Sarah Sharp <sarah.a.sharp@linux.intel.com>
5979L: linux-usb@vger.kernel.org
5980L: linux-scsi@vger.kernel.org
5981S: Supported
5982F: drivers/usb/storage/uas.c
5983
5966USB BLOCK DRIVER (UB ub) 5984USB BLOCK DRIVER (UB ub)
5967M: Pete Zaitcev <zaitcev@redhat.com> 5985M: Pete Zaitcev <zaitcev@redhat.com>
5968L: linux-usb@vger.kernel.org 5986L: linux-usb@vger.kernel.org
diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h
index 0b30894b5482..9c9629816128 100644
--- a/arch/arm/include/asm/ioctls.h
+++ b/arch/arm/include/asm/ioctls.h
@@ -1,89 +1,8 @@
1#ifndef __ASM_ARM_IOCTLS_H 1#ifndef __ASM_ARM_IOCTLS_H
2#define __ASM_ARM_IOCTLS_H 2#define __ASM_ARM_IOCTLS_H
3 3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
56
57#define TIOCGRS485 0x542E
58#define TIOCSRS485 0x542F
59
60#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
61#define FIOCLEX 0x5451
62#define FIOASYNC 0x5452
63#define TIOCSERCONFIG 0x5453
64#define TIOCSERGWILD 0x5454
65#define TIOCSERSWILD 0x5455
66#define TIOCGLCKTRMIOS 0x5456
67#define TIOCSLCKTRMIOS 0x5457
68#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
69#define TIOCSERGETLSR 0x5459 /* Get line status register */
70#define TIOCSERGETMULTI 0x545A /* Get multiport config */
71#define TIOCSERSETMULTI 0x545B /* Set multiport config */
72
73#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
74#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
75#define FIOQSIZE 0x545E 4#define FIOQSIZE 0x545E
76 5
77/* Used for packet mode */ 6#include <asm-generic/ioctls.h>
78#define TIOCPKT_DATA 0
79#define TIOCPKT_FLUSHREAD 1
80#define TIOCPKT_FLUSHWRITE 2
81#define TIOCPKT_STOP 4
82#define TIOCPKT_START 8
83#define TIOCPKT_NOSTOP 16
84#define TIOCPKT_DOSTOP 32
85#define TIOCPKT_IOCTL 64
86
87#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
88 7
89#endif 8#endif
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 8533bf04284a..9fde873f5889 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -131,6 +131,7 @@ static struct mxc_usbh_platform_data __maybe_unused usbh1_pdata = {
131static struct fsl_usb2_platform_data otg_device_pdata = { 131static struct fsl_usb2_platform_data otg_device_pdata = {
132 .operating_mode = FSL_USB2_DR_DEVICE, 132 .operating_mode = FSL_USB2_DR_DEVICE,
133 .phy_mode = FSL_USB2_PHY_UTMI, 133 .phy_mode = FSL_USB2_PHY_UTMI,
134 .workaround = FLS_USB2_WORKAROUND_ENGCM09152,
134}; 135};
135 136
136static int otg_mode_host; 137static int otg_mode_host;
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index f85c8da17e8b..d547036aff3f 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -375,6 +375,31 @@ static void __init am3517_evm_init_irq(void)
375 omap_gpio_init(); 375 omap_gpio_init();
376} 376}
377 377
378static struct omap_musb_board_data musb_board_data = {
379 .interface_type = MUSB_INTERFACE_ULPI,
380 .mode = MUSB_OTG,
381 .power = 500,
382};
383
384static __init void am3517_evm_musb_init(void)
385{
386 u32 devconf2;
387
388 /*
389 * Set up USB clock/mode in the DEVCONF2 register.
390 */
391 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
392
393 /* USB2.0 PHY reference clock is 13 MHz */
394 devconf2 &= ~(CONF2_REFFREQ | CONF2_OTGMODE | CONF2_PHY_GPIOMODE);
395 devconf2 |= CONF2_REFFREQ_13MHZ | CONF2_SESENDEN | CONF2_VBDTCTEN
396 | CONF2_DATPOL;
397
398 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
399
400 usb_musb_init(&musb_board_data);
401}
402
378static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 403static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
379 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 404 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
380#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ 405#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
@@ -393,6 +418,8 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
393 418
394#ifdef CONFIG_OMAP_MUX 419#ifdef CONFIG_OMAP_MUX
395static struct omap_board_mux board_mux[] __initdata = { 420static struct omap_board_mux board_mux[] __initdata = {
421 /* USB OTG DRVVBUS offset = 0x212 */
422 OMAP3_MUX(SAD2D_MCAD23, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
396 { .reg_offset = OMAP_MUX_TERMINATOR }, 423 { .reg_offset = OMAP_MUX_TERMINATOR },
397}; 424};
398#else 425#else
@@ -459,6 +486,9 @@ static void __init am3517_evm_init(void)
459 ARRAY_SIZE(am3517evm_i2c1_boardinfo)); 486 ARRAY_SIZE(am3517evm_i2c1_boardinfo));
460 /*Ethernet*/ 487 /*Ethernet*/
461 am3517_evm_ethernet_init(&am3517_evm_emac_pdata); 488 am3517_evm_ethernet_init(&am3517_evm_emac_pdata);
489
490 /* MUSB */
491 am3517_evm_musb_init();
462} 492}
463 493
464MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM") 494MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 33a5cde1c227..72605584bfff 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -28,6 +28,7 @@
28 28
29#include <mach/hardware.h> 29#include <mach/hardware.h>
30#include <mach/irqs.h> 30#include <mach/irqs.h>
31#include <mach/am35xx.h>
31#include <plat/usb.h> 32#include <plat/usb.h>
32 33
33#ifdef CONFIG_USB_MUSB_SOC 34#ifdef CONFIG_USB_MUSB_SOC
@@ -89,6 +90,9 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
89{ 90{
90 if (cpu_is_omap243x()) { 91 if (cpu_is_omap243x()) {
91 musb_resources[0].start = OMAP243X_HS_BASE; 92 musb_resources[0].start = OMAP243X_HS_BASE;
93 } else if (cpu_is_omap3517() || cpu_is_omap3505()) {
94 musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE;
95 musb_resources[1].start = INT_35XX_USBOTG_IRQ;
92 } else if (cpu_is_omap34xx()) { 96 } else if (cpu_is_omap34xx()) {
93 musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE; 97 musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
94 } else if (cpu_is_omap44xx()) { 98 } else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 2a9427c8cc48..9feddacfe850 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -218,6 +218,27 @@ static inline omap2_usbfs_init(struct omap_usb_config *pdata)
218# define USBT2TLL5PI (1 << 17) 218# define USBT2TLL5PI (1 << 17)
219# define USB0PUENACTLOI (1 << 16) 219# define USB0PUENACTLOI (1 << 16)
220# define USBSTANDBYCTRL (1 << 15) 220# define USBSTANDBYCTRL (1 << 15)
221/* AM35x */
222/* USB 2.0 PHY Control */
223#define CONF2_PHY_GPIOMODE (1 << 23)
224#define CONF2_OTGMODE (3 << 14)
225#define CONF2_NO_OVERRIDE (0 << 14)
226#define CONF2_FORCE_HOST (1 << 14)
227#define CONF2_FORCE_DEVICE (2 << 14)
228#define CONF2_FORCE_HOST_VBUS_LOW (3 << 14)
229#define CONF2_SESENDEN (1 << 13)
230#define CONF2_VBDTCTEN (1 << 12)
231#define CONF2_REFFREQ_24MHZ (2 << 8)
232#define CONF2_REFFREQ_26MHZ (7 << 8)
233#define CONF2_REFFREQ_13MHZ (6 << 8)
234#define CONF2_REFFREQ (0xf << 8)
235#define CONF2_PHYCLKGD (1 << 7)
236#define CONF2_VBUSSENSE (1 << 6)
237#define CONF2_PHY_PLLON (1 << 5)
238#define CONF2_RESET (1 << 4)
239#define CONF2_PHYPWRDN (1 << 3)
240#define CONF2_OTGPWRDN (1 << 2)
241#define CONF2_DATPOL (1 << 1)
221 242
222#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_USB) 243#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_USB)
223u32 omap1_usb0_init(unsigned nwires, unsigned is_device); 244u32 omap1_usb0_init(unsigned nwires, unsigned is_device);
diff --git a/arch/avr32/include/asm/ioctls.h b/arch/avr32/include/asm/ioctls.h
index b7dd324b46a9..909cf66feaf5 100644
--- a/arch/avr32/include/asm/ioctls.h
+++ b/arch/avr32/include/asm/ioctls.h
@@ -1,90 +1,6 @@
1#ifndef __ASM_AVR32_IOCTLS_H 1#ifndef __ASM_AVR32_IOCTLS_H
2#define __ASM_AVR32_IOCTLS_H 2#define __ASM_AVR32_IOCTLS_H
3 3
4#include <asm/ioctl.h> 4#include <asm-generic/ioctls.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
57
58#define TIOCGRS485 0x542E
59#define TIOCSRS485 0x542F
60
61#define FIONCLEX 0x5450
62#define FIOCLEX 0x5451
63#define FIOASYNC 0x5452
64#define TIOCSERCONFIG 0x5453
65#define TIOCSERGWILD 0x5454
66#define TIOCSERSWILD 0x5455
67#define TIOCGLCKTRMIOS 0x5456
68#define TIOCSLCKTRMIOS 0x5457
69#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
70#define TIOCSERGETLSR 0x5459 /* Get line status register */
71#define TIOCSERGETMULTI 0x545A /* Get multiport config */
72#define TIOCSERSETMULTI 0x545B /* Set multiport config */
73
74#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
75#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
76#define FIOQSIZE 0x5460
77
78/* Used for packet mode */
79#define TIOCPKT_DATA 0
80#define TIOCPKT_FLUSHREAD 1
81#define TIOCPKT_FLUSHWRITE 2
82#define TIOCPKT_STOP 4
83#define TIOCPKT_START 8
84#define TIOCPKT_NOSTOP 16
85#define TIOCPKT_DOSTOP 32
86#define TIOCPKT_IOCTL 64
87
88#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
89 5
90#endif /* __ASM_AVR32_IOCTLS_H */ 6#endif /* __ASM_AVR32_IOCTLS_H */
diff --git a/arch/cris/include/asm/ioctls.h b/arch/cris/include/asm/ioctls.h
index c9129ed37443..488fbb3f5e84 100644
--- a/arch/cris/include/asm/ioctls.h
+++ b/arch/cris/include/asm/ioctls.h
@@ -1,93 +1,11 @@
1#ifndef __ARCH_CRIS_IOCTLS_H__ 1#ifndef __ARCH_CRIS_IOCTLS_H__
2#define __ARCH_CRIS_IOCTLS_H__ 2#define __ARCH_CRIS_IOCTLS_H__
3 3
4/* verbatim copy of asm-i386/ioctls.h */
5
6#include <asm/ioctl.h>
7
8/* 0x54 is just a magic number to make these relatively unique ('T') */
9
10#define TCGETS 0x5401
11#define TCSETS 0x5402
12#define TCSETSW 0x5403
13#define TCSETSF 0x5404
14#define TCGETA 0x5405
15#define TCSETA 0x5406
16#define TCSETAW 0x5407
17#define TCSETAF 0x5408
18#define TCSBRK 0x5409
19#define TCXONC 0x540A
20#define TCFLSH 0x540B
21#define TIOCEXCL 0x540C
22#define TIOCNXCL 0x540D
23#define TIOCSCTTY 0x540E
24#define TIOCGPGRP 0x540F
25#define TIOCSPGRP 0x5410
26#define TIOCOUTQ 0x5411
27#define TIOCSTI 0x5412
28#define TIOCGWINSZ 0x5413
29#define TIOCSWINSZ 0x5414
30#define TIOCMGET 0x5415
31#define TIOCMBIS 0x5416
32#define TIOCMBIC 0x5417
33#define TIOCMSET 0x5418
34#define TIOCGSOFTCAR 0x5419
35#define TIOCSSOFTCAR 0x541A
36#define FIONREAD 0x541B
37#define TIOCINQ FIONREAD
38#define TIOCLINUX 0x541C
39#define TIOCCONS 0x541D
40#define TIOCGSERIAL 0x541E
41#define TIOCSSERIAL 0x541F
42#define TIOCPKT 0x5420
43#define FIONBIO 0x5421
44#define TIOCNOTTY 0x5422
45#define TIOCSETD 0x5423
46#define TIOCGETD 0x5424
47#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
48#define TIOCSBRK 0x5427 /* BSD compatibility */
49#define TIOCCBRK 0x5428 /* BSD compatibility */
50#define TIOCGSID 0x5429 /* Return the session ID of FD */
51#define TCGETS2 _IOR('T',0x2A, struct termios2)
52#define TCSETS2 _IOW('T',0x2B, struct termios2)
53#define TCSETSW2 _IOW('T',0x2C, struct termios2)
54#define TCSETSF2 _IOW('T',0x2D, struct termios2)
55#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
56#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
57#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
58
59#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
60#define FIOCLEX 0x5451
61#define FIOASYNC 0x5452
62#define TIOCSERCONFIG 0x5453
63#define TIOCSERGWILD 0x5454
64#define TIOCSERSWILD 0x5455
65#define TIOCGLCKTRMIOS 0x5456
66#define TIOCSLCKTRMIOS 0x5457
67#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ 4#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
68#define TIOCSERGETLSR 0x5459 /* Get line status register */
69#define TIOCSERGETMULTI 0x545A /* Get multiport config */
70#define TIOCSERSETMULTI 0x545B /* Set multiport config */
71
72#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
73#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
74#define FIOQSIZE 0x5460
75
76#define TIOCSERSETRS485 0x5461 /* enable rs-485 (deprecated) */ 5#define TIOCSERSETRS485 0x5461 /* enable rs-485 (deprecated) */
77#define TIOCSERWRRS485 0x5462 /* write rs-485 */ 6#define TIOCSERWRRS485 0x5462 /* write rs-485 */
78#define TIOCSRS485 0x5463 /* enable rs-485 */ 7#define TIOCSRS485 0x5463 /* enable rs-485 */
79#define TIOCGRS485 0x542E /* get rs-485 */
80
81/* Used for packet mode */
82#define TIOCPKT_DATA 0
83#define TIOCPKT_FLUSHREAD 1
84#define TIOCPKT_FLUSHWRITE 2
85#define TIOCPKT_STOP 4
86#define TIOCPKT_START 8
87#define TIOCPKT_NOSTOP 16
88#define TIOCPKT_DOSTOP 32
89#define TIOCPKT_IOCTL 64
90 8
91#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 9#include <asm-generic/ioctls.h>
92 10
93#endif 11#endif
diff --git a/arch/frv/include/asm/ioctls.h b/arch/frv/include/asm/ioctls.h
index a993e3759ccf..2f9fb436ec3c 100644
--- a/arch/frv/include/asm/ioctls.h
+++ b/arch/frv/include/asm/ioctls.h
@@ -1,88 +1,10 @@
1#ifndef __ASM_IOCTLS_H__ 1#ifndef __ASM_IOCTLS_H__
2#define __ASM_IOCTLS_H__ 2#define __ASM_IOCTLS_H__
3 3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ 4#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
57
58#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
59#define FIOCLEX 0x5451
60#define FIOASYNC 0x5452
61#define TIOCSERCONFIG 0x5453
62#define TIOCSERGWILD 0x5454
63#define TIOCSERSWILD 0x5455
64#define TIOCGLCKTRMIOS 0x5456
65#define TIOCSLCKTRMIOS 0x5457
66#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
67#define TIOCSERGETLSR 0x5459 /* Get line status register */
68#define TIOCSERGETMULTI 0x545A /* Get multiport config */
69#define TIOCSERSETMULTI 0x545B /* Set multiport config */
70
71#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
72#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
73#define FIOQSIZE 0x545E 5#define FIOQSIZE 0x545E
74 6
75/* Used for packet mode */ 7#include <asm-generic/ioctls.h>
76#define TIOCPKT_DATA 0
77#define TIOCPKT_FLUSHREAD 1
78#define TIOCPKT_FLUSHWRITE 2
79#define TIOCPKT_STOP 4
80#define TIOCPKT_START 8
81#define TIOCPKT_NOSTOP 16
82#define TIOCPKT_DOSTOP 32
83#define TIOCPKT_IOCTL 64
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86 8
87#endif /* __ASM_IOCTLS_H__ */ 9#endif /* __ASM_IOCTLS_H__ */
88 10
diff --git a/arch/h8300/include/asm/ioctls.h b/arch/h8300/include/asm/ioctls.h
index b6b249f9f308..30eaed2facdb 100644
--- a/arch/h8300/include/asm/ioctls.h
+++ b/arch/h8300/include/asm/ioctls.h
@@ -1,87 +1,8 @@
1#ifndef __ARCH_H8300_IOCTLS_H__ 1#ifndef __ARCH_H8300_IOCTLS_H__
2#define __ARCH_H8300_IOCTLS_H__ 2#define __ARCH_H8300_IOCTLS_H__
3 3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
57
58#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
59#define FIOCLEX 0x5451
60#define FIOASYNC 0x5452
61#define TIOCSERCONFIG 0x5453
62#define TIOCSERGWILD 0x5454
63#define TIOCSERSWILD 0x5455
64#define TIOCGLCKTRMIOS 0x5456
65#define TIOCSLCKTRMIOS 0x5457
66#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
67#define TIOCSERGETLSR 0x5459 /* Get line status register */
68#define TIOCSERGETMULTI 0x545A /* Get multiport config */
69#define TIOCSERSETMULTI 0x545B /* Set multiport config */
70
71#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
72#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
73#define FIOQSIZE 0x545E 4#define FIOQSIZE 0x545E
74 5
75/* Used for packet mode */ 6#include <asm-generic/ioctls.h>
76#define TIOCPKT_DATA 0
77#define TIOCPKT_FLUSHREAD 1
78#define TIOCPKT_FLUSHWRITE 2
79#define TIOCPKT_STOP 4
80#define TIOCPKT_START 8
81#define TIOCPKT_NOSTOP 16
82#define TIOCPKT_DOSTOP 32
83#define TIOCPKT_IOCTL 64
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86 7
87#endif /* __ARCH_H8300_IOCTLS_H__ */ 8#endif /* __ARCH_H8300_IOCTLS_H__ */
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 1e8d71ad93ef..13633da0d3de 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -395,7 +395,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
395{ 395{
396 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 396 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
397 (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && 397 (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
398 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 398 (cmd != TIOCMIWAIT)) {
399 if (tty->flags & (1 << TTY_IO_ERROR)) 399 if (tty->flags & (1 << TTY_IO_ERROR))
400 return -EIO; 400 return -EIO;
401 } 401 }
@@ -433,16 +433,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
433 case TIOCMIWAIT: 433 case TIOCMIWAIT:
434 printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n"); 434 printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
435 return 0; 435 return 0;
436 /*
437 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
438 * Return: write counters to the user passed counter struct
439 * NB: both 1->0 and 0->1 transitions are counted except for
440 * RI where only 0->1 is counted.
441 */
442 case TIOCGICOUNT:
443 printk(KERN_INFO "rs_ioctl: TIOCGICOUNT called\n");
444 return 0;
445
446 case TIOCSERGWILD: 436 case TIOCSERGWILD:
447 case TIOCSERSWILD: 437 case TIOCSERSWILD:
448 /* "setserial -W" is called in Debian boot */ 438 /* "setserial -W" is called in Debian boot */
diff --git a/arch/ia64/include/asm/ioctls.h b/arch/ia64/include/asm/ioctls.h
index b79c385114ef..f3aab5512e98 100644
--- a/arch/ia64/include/asm/ioctls.h
+++ b/arch/ia64/include/asm/ioctls.h
@@ -1,93 +1,6 @@
1#ifndef _ASM_IA64_IOCTLS_H 1#ifndef _ASM_IA64_IOCTLS_H
2#define _ASM_IA64_IOCTLS_H 2#define _ASM_IA64_IOCTLS_H
3 3
4/* 4#include <asm-generic/ioctls.h>
5 * Based on <asm-i386/ioctls.h>
6 *
7 * Modified 1998, 1999, 2002
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11#include <asm/ioctl.h>
12
13/* 0x54 is just a magic number to make these relatively unique ('T') */
14
15#define TCGETS 0x5401
16#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
17#define TCSETSW 0x5403
18#define TCSETSF 0x5404
19#define TCGETA 0x5405
20#define TCSETA 0x5406
21#define TCSETAW 0x5407
22#define TCSETAF 0x5408
23#define TCSBRK 0x5409
24#define TCXONC 0x540A
25#define TCFLSH 0x540B
26#define TIOCEXCL 0x540C
27#define TIOCNXCL 0x540D
28#define TIOCSCTTY 0x540E
29#define TIOCGPGRP 0x540F
30#define TIOCSPGRP 0x5410
31#define TIOCOUTQ 0x5411
32#define TIOCSTI 0x5412
33#define TIOCGWINSZ 0x5413
34#define TIOCSWINSZ 0x5414
35#define TIOCMGET 0x5415
36#define TIOCMBIS 0x5416
37#define TIOCMBIC 0x5417
38#define TIOCMSET 0x5418
39#define TIOCGSOFTCAR 0x5419
40#define TIOCSSOFTCAR 0x541A
41#define FIONREAD 0x541B
42#define TIOCINQ FIONREAD
43#define TIOCLINUX 0x541C
44#define TIOCCONS 0x541D
45#define TIOCGSERIAL 0x541E
46#define TIOCSSERIAL 0x541F
47#define TIOCPKT 0x5420
48#define FIONBIO 0x5421
49#define TIOCNOTTY 0x5422
50#define TIOCSETD 0x5423
51#define TIOCGETD 0x5424
52#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
53#define TIOCSBRK 0x5427 /* BSD compatibility */
54#define TIOCCBRK 0x5428 /* BSD compatibility */
55#define TIOCGSID 0x5429 /* Return the session ID of FD */
56#define TCGETS2 _IOR('T',0x2A, struct termios2)
57#define TCSETS2 _IOW('T',0x2B, struct termios2)
58#define TCSETSW2 _IOW('T',0x2C, struct termios2)
59#define TCSETSF2 _IOW('T',0x2D, struct termios2)
60#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
61#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
62#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
63
64#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
65#define FIOCLEX 0x5451
66#define FIOASYNC 0x5452
67#define TIOCSERCONFIG 0x5453
68#define TIOCSERGWILD 0x5454
69#define TIOCSERSWILD 0x5455
70#define TIOCGLCKTRMIOS 0x5456
71#define TIOCSLCKTRMIOS 0x5457
72#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
73#define TIOCSERGETLSR 0x5459 /* Get line status register */
74#define TIOCSERGETMULTI 0x545A /* Get multiport config */
75#define TIOCSERSETMULTI 0x545B /* Set multiport config */
76
77#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
78#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
79#define FIOQSIZE 0x5460
80
81/* Used for packet mode */
82#define TIOCPKT_DATA 0
83#define TIOCPKT_FLUSHREAD 1
84#define TIOCPKT_FLUSHWRITE 2
85#define TIOCPKT_STOP 4
86#define TIOCPKT_START 8
87#define TIOCPKT_NOSTOP 16
88#define TIOCPKT_DOSTOP 32
89#define TIOCPKT_IOCTL 64
90
91#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
92 5
93#endif /* _ASM_IA64_IOCTLS_H */ 6#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/arch/m32r/include/asm/ioctls.h b/arch/m32r/include/asm/ioctls.h
index 66288063a4c0..349bf87bfbd0 100644
--- a/arch/m32r/include/asm/ioctls.h
+++ b/arch/m32r/include/asm/ioctls.h
@@ -1,87 +1,6 @@
1#ifndef __ARCH_M32R_IOCTLS_H__ 1#ifndef __ARCH_M32R_IOCTLS_H__
2#define __ARCH_M32R_IOCTLS_H__ 2#define __ARCH_M32R_IOCTLS_H__
3 3
4#include <asm/ioctl.h> 4#include <asm-generic/ioctls.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
57
58#define FIONCLEX 0x5450
59#define FIOCLEX 0x5451
60#define FIOASYNC 0x5452
61#define TIOCSERCONFIG 0x5453
62#define TIOCSERGWILD 0x5454
63#define TIOCSERSWILD 0x5455
64#define TIOCGLCKTRMIOS 0x5456
65#define TIOCSLCKTRMIOS 0x5457
66#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
67#define TIOCSERGETLSR 0x5459 /* Get line status register */
68#define TIOCSERGETMULTI 0x545A /* Get multiport config */
69#define TIOCSERSETMULTI 0x545B /* Set multiport config */
70
71#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
72#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
73#define FIOQSIZE 0x5460
74
75/* Used for packet mode */
76#define TIOCPKT_DATA 0
77#define TIOCPKT_FLUSHREAD 1
78#define TIOCPKT_FLUSHWRITE 2
79#define TIOCPKT_STOP 4
80#define TIOCPKT_START 8
81#define TIOCPKT_NOSTOP 16
82#define TIOCPKT_DOSTOP 32
83#define TIOCPKT_IOCTL 64
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86 5
87#endif /* __ARCH_M32R_IOCTLS_H__ */ 6#endif /* __ARCH_M32R_IOCTLS_H__ */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 8030e2481d97..77bb0d6baa62 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -434,7 +434,7 @@ config PROC_HARDWARE
434 434
435config ISA 435config ISA
436 bool 436 bool
437 depends on Q40 || AMIGA_PCMCIA || GG2 437 depends on Q40 || AMIGA_PCMCIA
438 default y 438 default y
439 help 439 help
440 Find out whether you have ISA slots on your motherboard. ISA is the 440 Find out whether you have ISA slots on your motherboard. ISA is the
@@ -445,7 +445,7 @@ config ISA
445 445
446config GENERIC_ISA_DMA 446config GENERIC_ISA_DMA
447 bool 447 bool
448 depends on Q40 || AMIGA_PCMCIA || GG2 448 depends on Q40 || AMIGA_PCMCIA
449 default y 449 default y
450 450
451config ZONE_DMA 451config ZONE_DMA
diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h
index 5ca5dd951a4a..7a19b5686a4a 100644
--- a/arch/m68k/include/asm/amigahw.h
+++ b/arch/m68k/include/asm/amigahw.h
@@ -102,7 +102,6 @@ struct amiga_hw_present {
102 AMIGAHW_DECLARE(ALICE_NTSC); /* NTSC Alice (8374) */ 102 AMIGAHW_DECLARE(ALICE_NTSC); /* NTSC Alice (8374) */
103 AMIGAHW_DECLARE(MAGIC_REKICK); /* A3000 Magic Hard Rekick */ 103 AMIGAHW_DECLARE(MAGIC_REKICK); /* A3000 Magic Hard Rekick */
104 AMIGAHW_DECLARE(PCMCIA); /* PCMCIA Slot */ 104 AMIGAHW_DECLARE(PCMCIA); /* PCMCIA Slot */
105 AMIGAHW_DECLARE(GG2_ISA); /* GG2 Zorro2ISA Bridge */
106 AMIGAHW_DECLARE(ZORRO); /* Zorro AutoConfig */ 105 AMIGAHW_DECLARE(ZORRO); /* Zorro AutoConfig */
107 AMIGAHW_DECLARE(ZORRO3); /* Zorro III */ 106 AMIGAHW_DECLARE(ZORRO3); /* Zorro III */
108}; 107};
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index eab36dcacf6c..03ae3d14cd4a 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -1,7 +1,211 @@
1#ifdef __uClinux__ 1#ifndef __ARCH_M68K_ATOMIC__
2#include "atomic_no.h" 2#define __ARCH_M68K_ATOMIC__
3
4#include <linux/types.h>
5#include <asm/system.h>
6
7/*
8 * Atomic operations that C can't guarantee us. Useful for
9 * resource counting etc..
10 */
11
12/*
13 * We do not have SMP m68k systems, so we don't have to deal with that.
14 */
15
16#define ATOMIC_INIT(i) { (i) }
17
18#define atomic_read(v) (*(volatile int *)&(v)->counter)
19#define atomic_set(v, i) (((v)->counter) = i)
20
21/*
22 * The ColdFire parts cannot do some immediate to memory operations,
23 * so for them we do not specify the "i" asm constraint.
24 */
25#ifdef CONFIG_COLDFIRE
26#define ASM_DI "d"
3#else 27#else
4#include "atomic_mm.h" 28#define ASM_DI "di"
5#endif 29#endif
6 30
31static inline void atomic_add(int i, atomic_t *v)
32{
33 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
34}
35
36static inline void atomic_sub(int i, atomic_t *v)
37{
38 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
39}
40
41static inline void atomic_inc(atomic_t *v)
42{
43 __asm__ __volatile__("addql #1,%0" : "+m" (*v));
44}
45
46static inline void atomic_dec(atomic_t *v)
47{
48 __asm__ __volatile__("subql #1,%0" : "+m" (*v));
49}
50
51static inline int atomic_dec_and_test(atomic_t *v)
52{
53 char c;
54 __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
55 return c != 0;
56}
57
58static inline int atomic_inc_and_test(atomic_t *v)
59{
60 char c;
61 __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
62 return c != 0;
63}
64
65#ifdef CONFIG_RMW_INSNS
66
67static inline int atomic_add_return(int i, atomic_t *v)
68{
69 int t, tmp;
70
71 __asm__ __volatile__(
72 "1: movel %2,%1\n"
73 " addl %3,%1\n"
74 " casl %2,%1,%0\n"
75 " jne 1b"
76 : "+m" (*v), "=&d" (t), "=&d" (tmp)
77 : "g" (i), "2" (atomic_read(v)));
78 return t;
79}
80
81static inline int atomic_sub_return(int i, atomic_t *v)
82{
83 int t, tmp;
84
85 __asm__ __volatile__(
86 "1: movel %2,%1\n"
87 " subl %3,%1\n"
88 " casl %2,%1,%0\n"
89 " jne 1b"
90 : "+m" (*v), "=&d" (t), "=&d" (tmp)
91 : "g" (i), "2" (atomic_read(v)));
92 return t;
93}
94
95#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
96#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
97
98#else /* !CONFIG_RMW_INSNS */
99
100static inline int atomic_add_return(int i, atomic_t * v)
101{
102 unsigned long flags;
103 int t;
104
105 local_irq_save(flags);
106 t = atomic_read(v);
107 t += i;
108 atomic_set(v, t);
109 local_irq_restore(flags);
110
111 return t;
112}
113
114static inline int atomic_sub_return(int i, atomic_t * v)
115{
116 unsigned long flags;
117 int t;
118
119 local_irq_save(flags);
120 t = atomic_read(v);
121 t -= i;
122 atomic_set(v, t);
123 local_irq_restore(flags);
124
125 return t;
126}
127
128static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
129{
130 unsigned long flags;
131 int prev;
132
133 local_irq_save(flags);
134 prev = atomic_read(v);
135 if (prev == old)
136 atomic_set(v, new);
137 local_irq_restore(flags);
138 return prev;
139}
140
141static inline int atomic_xchg(atomic_t *v, int new)
142{
143 unsigned long flags;
144 int prev;
145
146 local_irq_save(flags);
147 prev = atomic_read(v);
148 atomic_set(v, new);
149 local_irq_restore(flags);
150 return prev;
151}
152
153#endif /* !CONFIG_RMW_INSNS */
154
155#define atomic_dec_return(v) atomic_sub_return(1, (v))
156#define atomic_inc_return(v) atomic_add_return(1, (v))
157
158static inline int atomic_sub_and_test(int i, atomic_t *v)
159{
160 char c;
161 __asm__ __volatile__("subl %2,%1; seq %0"
162 : "=d" (c), "+m" (*v)
163 : ASM_DI (i));
164 return c != 0;
165}
166
167static inline int atomic_add_negative(int i, atomic_t *v)
168{
169 char c;
170 __asm__ __volatile__("addl %2,%1; smi %0"
171 : "=d" (c), "+m" (*v)
172 : "id" (i));
173 return c != 0;
174}
175
176static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
177{
178 __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
179}
180
181static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
182{
183 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
184}
185
186static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
187{
188 int c, old;
189 c = atomic_read(v);
190 for (;;) {
191 if (unlikely(c == (u)))
192 break;
193 old = atomic_cmpxchg((v), c, c + (a));
194 if (likely(old == c))
195 break;
196 c = old;
197 }
198 return c != (u);
199}
200
201#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
202
203/* Atomic operations are already serializing */
204#define smp_mb__before_atomic_dec() barrier()
205#define smp_mb__after_atomic_dec() barrier()
206#define smp_mb__before_atomic_inc() barrier()
207#define smp_mb__after_atomic_inc() barrier()
208
209#include <asm-generic/atomic-long.h>
7#include <asm-generic/atomic64.h> 210#include <asm-generic/atomic64.h>
211#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
deleted file mode 100644
index 6a223b3f7e74..000000000000
--- a/arch/m68k/include/asm/atomic_mm.h
+++ /dev/null
@@ -1,200 +0,0 @@
1#ifndef __ARCH_M68K_ATOMIC__
2#define __ARCH_M68K_ATOMIC__
3
4#include <linux/types.h>
5#include <asm/system.h>
6
7/*
8 * Atomic operations that C can't guarantee us. Useful for
9 * resource counting etc..
10 */
11
12/*
13 * We do not have SMP m68k systems, so we don't have to deal with that.
14 */
15
16#define ATOMIC_INIT(i) { (i) }
17
18#define atomic_read(v) (*(volatile int *)&(v)->counter)
19#define atomic_set(v, i) (((v)->counter) = i)
20
21static inline void atomic_add(int i, atomic_t *v)
22{
23 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
24}
25
26static inline void atomic_sub(int i, atomic_t *v)
27{
28 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
29}
30
31static inline void atomic_inc(atomic_t *v)
32{
33 __asm__ __volatile__("addql #1,%0" : "+m" (*v));
34}
35
36static inline void atomic_dec(atomic_t *v)
37{
38 __asm__ __volatile__("subql #1,%0" : "+m" (*v));
39}
40
41static inline int atomic_dec_and_test(atomic_t *v)
42{
43 char c;
44 __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
45 return c != 0;
46}
47
48static inline int atomic_inc_and_test(atomic_t *v)
49{
50 char c;
51 __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
52 return c != 0;
53}
54
55#ifdef CONFIG_RMW_INSNS
56
57static inline int atomic_add_return(int i, atomic_t *v)
58{
59 int t, tmp;
60
61 __asm__ __volatile__(
62 "1: movel %2,%1\n"
63 " addl %3,%1\n"
64 " casl %2,%1,%0\n"
65 " jne 1b"
66 : "+m" (*v), "=&d" (t), "=&d" (tmp)
67 : "g" (i), "2" (atomic_read(v)));
68 return t;
69}
70
71static inline int atomic_sub_return(int i, atomic_t *v)
72{
73 int t, tmp;
74
75 __asm__ __volatile__(
76 "1: movel %2,%1\n"
77 " subl %3,%1\n"
78 " casl %2,%1,%0\n"
79 " jne 1b"
80 : "+m" (*v), "=&d" (t), "=&d" (tmp)
81 : "g" (i), "2" (atomic_read(v)));
82 return t;
83}
84
85#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
86#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
87
88#else /* !CONFIG_RMW_INSNS */
89
90static inline int atomic_add_return(int i, atomic_t * v)
91{
92 unsigned long flags;
93 int t;
94
95 local_irq_save(flags);
96 t = atomic_read(v);
97 t += i;
98 atomic_set(v, t);
99 local_irq_restore(flags);
100
101 return t;
102}
103
104static inline int atomic_sub_return(int i, atomic_t * v)
105{
106 unsigned long flags;
107 int t;
108
109 local_irq_save(flags);
110 t = atomic_read(v);
111 t -= i;
112 atomic_set(v, t);
113 local_irq_restore(flags);
114
115 return t;
116}
117
118static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
119{
120 unsigned long flags;
121 int prev;
122
123 local_irq_save(flags);
124 prev = atomic_read(v);
125 if (prev == old)
126 atomic_set(v, new);
127 local_irq_restore(flags);
128 return prev;
129}
130
131static inline int atomic_xchg(atomic_t *v, int new)
132{
133 unsigned long flags;
134 int prev;
135
136 local_irq_save(flags);
137 prev = atomic_read(v);
138 atomic_set(v, new);
139 local_irq_restore(flags);
140 return prev;
141}
142
143#endif /* !CONFIG_RMW_INSNS */
144
145#define atomic_dec_return(v) atomic_sub_return(1, (v))
146#define atomic_inc_return(v) atomic_add_return(1, (v))
147
148static inline int atomic_sub_and_test(int i, atomic_t *v)
149{
150 char c;
151 __asm__ __volatile__("subl %2,%1; seq %0"
152 : "=d" (c), "+m" (*v)
153 : "id" (i));
154 return c != 0;
155}
156
157static inline int atomic_add_negative(int i, atomic_t *v)
158{
159 char c;
160 __asm__ __volatile__("addl %2,%1; smi %0"
161 : "=d" (c), "+m" (*v)
162 : "id" (i));
163 return c != 0;
164}
165
166static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
167{
168 __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
169}
170
171static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
172{
173 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
174}
175
176static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
177{
178 int c, old;
179 c = atomic_read(v);
180 for (;;) {
181 if (unlikely(c == (u)))
182 break;
183 old = atomic_cmpxchg((v), c, c + (a));
184 if (likely(old == c))
185 break;
186 c = old;
187 }
188 return c != (u);
189}
190
191#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
192
193/* Atomic operations are already serializing */
194#define smp_mb__before_atomic_dec() barrier()
195#define smp_mb__after_atomic_dec() barrier()
196#define smp_mb__before_atomic_inc() barrier()
197#define smp_mb__after_atomic_inc() barrier()
198
199#include <asm-generic/atomic-long.h>
200#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h
deleted file mode 100644
index 289310c63a8a..000000000000
--- a/arch/m68k/include/asm/atomic_no.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#ifndef __ARCH_M68KNOMMU_ATOMIC__
2#define __ARCH_M68KNOMMU_ATOMIC__
3
4#include <linux/types.h>
5#include <asm/system.h>
6
7/*
8 * Atomic operations that C can't guarantee us. Useful for
9 * resource counting etc..
10 */
11
12/*
13 * We do not have SMP m68k systems, so we don't have to deal with that.
14 */
15
16#define ATOMIC_INIT(i) { (i) }
17
18#define atomic_read(v) (*(volatile int *)&(v)->counter)
19#define atomic_set(v, i) (((v)->counter) = i)
20
21static __inline__ void atomic_add(int i, atomic_t *v)
22{
23#ifdef CONFIG_COLDFIRE
24 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
25#else
26 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
27#endif
28}
29
30static __inline__ void atomic_sub(int i, atomic_t *v)
31{
32#ifdef CONFIG_COLDFIRE
33 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
34#else
35 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
36#endif
37}
38
39static __inline__ int atomic_sub_and_test(int i, atomic_t * v)
40{
41 char c;
42#ifdef CONFIG_COLDFIRE
43 __asm__ __volatile__("subl %2,%1; seq %0"
44 : "=d" (c), "+m" (*v)
45 : "d" (i));
46#else
47 __asm__ __volatile__("subl %2,%1; seq %0"
48 : "=d" (c), "+m" (*v)
49 : "di" (i));
50#endif
51 return c != 0;
52}
53
54static __inline__ void atomic_inc(volatile atomic_t *v)
55{
56 __asm__ __volatile__("addql #1,%0" : "+m" (*v));
57}
58
59/*
60 * atomic_inc_and_test - increment and test
61 * @v: pointer of type atomic_t
62 *
63 * Atomically increments @v by 1
64 * and returns true if the result is zero, or false for all
65 * other cases.
66 */
67
68static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
69{
70 char c;
71 __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
72 return c != 0;
73}
74
75static __inline__ void atomic_dec(volatile atomic_t *v)
76{
77 __asm__ __volatile__("subql #1,%0" : "+m" (*v));
78}
79
80static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
81{
82 char c;
83 __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
84 return c != 0;
85}
86
87static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
88{
89 __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
90}
91
92static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
93{
94 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
95}
96
97/* Atomic operations are already serializing */
98#define smp_mb__before_atomic_dec() barrier()
99#define smp_mb__after_atomic_dec() barrier()
100#define smp_mb__before_atomic_inc() barrier()
101#define smp_mb__after_atomic_inc() barrier()
102
103static inline int atomic_add_return(int i, atomic_t * v)
104{
105 unsigned long temp, flags;
106
107 local_irq_save(flags);
108 temp = *(long *)v;
109 temp += i;
110 *(long *)v = temp;
111 local_irq_restore(flags);
112
113 return temp;
114}
115
116#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
117
118static inline int atomic_sub_return(int i, atomic_t * v)
119{
120 unsigned long temp, flags;
121
122 local_irq_save(flags);
123 temp = *(long *)v;
124 temp -= i;
125 *(long *)v = temp;
126 local_irq_restore(flags);
127
128 return temp;
129}
130
131#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
132#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
133
134static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
135{
136 int c, old;
137 c = atomic_read(v);
138 for (;;) {
139 if (unlikely(c == (u)))
140 break;
141 old = atomic_cmpxchg((v), c, c + (a));
142 if (likely(old == c))
143 break;
144 c = old;
145 }
146 return c != (u);
147}
148
149#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
150
151#define atomic_dec_return(v) atomic_sub_return(1,(v))
152#define atomic_inc_return(v) atomic_add_return(1,(v))
153
154#include <asm-generic/atomic-long.h>
155#endif /* __ARCH_M68KNOMMU_ATOMIC __ */
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index 474125886218..e41fea399bfe 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -3,6 +3,9 @@
3 3
4#include <asm/setup.h> 4#include <asm/setup.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#ifdef __ASSEMBLY__
7#include <asm/thread_info.h>
8#endif
6 9
7/* 10/*
8 * Stack layout in 'ret_from_exception': 11 * Stack layout in 'ret_from_exception':
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 9e673e3bd434..0fb3468000e7 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -49,23 +49,6 @@
49#define MULTI_ISA 0 49#define MULTI_ISA 0
50#endif /* Q40 */ 50#endif /* Q40 */
51 51
52/* GG-II Zorro to ISA bridge */
53#ifdef CONFIG_GG2
54
55extern unsigned long gg2_isa_base;
56#define GG2_ISA_IO_B(ioaddr) (gg2_isa_base+1+((unsigned long)(ioaddr)*4))
57#define GG2_ISA_IO_W(ioaddr) (gg2_isa_base+ ((unsigned long)(ioaddr)*4))
58#define GG2_ISA_MEM_B(madr) (gg2_isa_base+1+(((unsigned long)(madr)*4) & 0xfffff))
59#define GG2_ISA_MEM_W(madr) (gg2_isa_base+ (((unsigned long)(madr)*4) & 0xfffff))
60
61#ifndef MULTI_ISA
62#define MULTI_ISA 0
63#else
64#undef MULTI_ISA
65#define MULTI_ISA 1
66#endif
67#endif /* GG2 */
68
69#ifdef CONFIG_AMIGA_PCMCIA 52#ifdef CONFIG_AMIGA_PCMCIA
70#include <asm/amigayle.h> 53#include <asm/amigayle.h>
71 54
@@ -89,8 +72,7 @@ extern unsigned long gg2_isa_base;
89#endif 72#endif
90 73
91#define ISA_TYPE_Q40 (1) 74#define ISA_TYPE_Q40 (1)
92#define ISA_TYPE_GG2 (2) 75#define ISA_TYPE_AG (2)
93#define ISA_TYPE_AG (3)
94 76
95#if defined(CONFIG_Q40) && !defined(MULTI_ISA) 77#if defined(CONFIG_Q40) && !defined(MULTI_ISA)
96#define ISA_TYPE ISA_TYPE_Q40 78#define ISA_TYPE ISA_TYPE_Q40
@@ -100,10 +82,6 @@ extern unsigned long gg2_isa_base;
100#define ISA_TYPE ISA_TYPE_AG 82#define ISA_TYPE ISA_TYPE_AG
101#define ISA_SEX 1 83#define ISA_SEX 1
102#endif 84#endif
103#if defined(CONFIG_GG2) && !defined(MULTI_ISA)
104#define ISA_TYPE ISA_TYPE_GG2
105#define ISA_SEX 0
106#endif
107 85
108#ifdef MULTI_ISA 86#ifdef MULTI_ISA
109extern int isa_type; 87extern int isa_type;
@@ -125,9 +103,6 @@ static inline u8 __iomem *isa_itb(unsigned long addr)
125#ifdef CONFIG_Q40 103#ifdef CONFIG_Q40
126 case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_IO_B(addr); 104 case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_IO_B(addr);
127#endif 105#endif
128#ifdef CONFIG_GG2
129 case ISA_TYPE_GG2: return (u8 __iomem *)GG2_ISA_IO_B(addr);
130#endif
131#ifdef CONFIG_AMIGA_PCMCIA 106#ifdef CONFIG_AMIGA_PCMCIA
132 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr); 107 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr);
133#endif 108#endif
@@ -141,9 +116,6 @@ static inline u16 __iomem *isa_itw(unsigned long addr)
141#ifdef CONFIG_Q40 116#ifdef CONFIG_Q40
142 case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_IO_W(addr); 117 case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_IO_W(addr);
143#endif 118#endif
144#ifdef CONFIG_GG2
145 case ISA_TYPE_GG2: return (u16 __iomem *)GG2_ISA_IO_W(addr);
146#endif
147#ifdef CONFIG_AMIGA_PCMCIA 119#ifdef CONFIG_AMIGA_PCMCIA
148 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr); 120 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr);
149#endif 121#endif
@@ -167,9 +139,6 @@ static inline u8 __iomem *isa_mtb(unsigned long addr)
167#ifdef CONFIG_Q40 139#ifdef CONFIG_Q40
168 case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_MEM_B(addr); 140 case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
169#endif 141#endif
170#ifdef CONFIG_GG2
171 case ISA_TYPE_GG2: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
172#endif
173#ifdef CONFIG_AMIGA_PCMCIA 142#ifdef CONFIG_AMIGA_PCMCIA
174 case ISA_TYPE_AG: return (u8 __iomem *)addr; 143 case ISA_TYPE_AG: return (u8 __iomem *)addr;
175#endif 144#endif
@@ -183,9 +152,6 @@ static inline u16 __iomem *isa_mtw(unsigned long addr)
183#ifdef CONFIG_Q40 152#ifdef CONFIG_Q40
184 case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_MEM_W(addr); 153 case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
185#endif 154#endif
186#ifdef CONFIG_GG2
187 case ISA_TYPE_GG2: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
188#endif
189#ifdef CONFIG_AMIGA_PCMCIA 155#ifdef CONFIG_AMIGA_PCMCIA
190 case ISA_TYPE_AG: return (u16 __iomem *)addr; 156 case ISA_TYPE_AG: return (u16 __iomem *)addr;
191#endif 157#endif
@@ -217,9 +183,6 @@ static inline void isa_delay(void)
217#ifdef CONFIG_Q40 183#ifdef CONFIG_Q40
218 case ISA_TYPE_Q40: isa_outb(0,0x80); break; 184 case ISA_TYPE_Q40: isa_outb(0,0x80); break;
219#endif 185#endif
220#ifdef CONFIG_GG2
221 case ISA_TYPE_GG2: break;
222#endif
223#ifdef CONFIG_AMIGA_PCMCIA 186#ifdef CONFIG_AMIGA_PCMCIA
224 case ISA_TYPE_AG: break; 187 case ISA_TYPE_AG: break;
225#endif 188#endif
@@ -287,9 +250,13 @@ static inline void isa_delay(void)
287#define outb(val,port) ((void)0) 250#define outb(val,port) ((void)0)
288#define outb_p(val,port) ((void)0) 251#define outb_p(val,port) ((void)0)
289#define inw(port) 0xffff 252#define inw(port) 0xffff
253#define inw_p(port) 0xffff
290#define outw(val,port) ((void)0) 254#define outw(val,port) ((void)0)
255#define outw_p(val,port) ((void)0)
291#define inl(port) 0xffffffffUL 256#define inl(port) 0xffffffffUL
257#define inl_p(port) 0xffffffffUL
292#define outl(val,port) ((void)0) 258#define outl(val,port) ((void)0)
259#define outl_p(val,port) ((void)0)
293 260
294#define insb(port,buf,nr) ((void)0) 261#define insb(port,buf,nr) ((void)0)
295#define outsb(port,buf,nr) ((void)0) 262#define outsb(port,buf,nr) ((void)0)
diff --git a/arch/m68k/include/asm/ioctls.h b/arch/m68k/include/asm/ioctls.h
index 91a57d665460..1332bb4ca5b0 100644
--- a/arch/m68k/include/asm/ioctls.h
+++ b/arch/m68k/include/asm/ioctls.h
@@ -1,86 +1,8 @@
1#ifndef __ARCH_M68K_IOCTLS_H__ 1#ifndef __ARCH_M68K_IOCTLS_H__
2#define __ARCH_M68K_IOCTLS_H__ 2#define __ARCH_M68K_IOCTLS_H__
3 3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
56
57#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
58#define FIOCLEX 0x5451
59#define FIOASYNC 0x5452
60#define TIOCSERCONFIG 0x5453
61#define TIOCSERGWILD 0x5454
62#define TIOCSERSWILD 0x5455
63#define TIOCGLCKTRMIOS 0x5456
64#define TIOCSLCKTRMIOS 0x5457
65#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
66#define TIOCSERGETLSR 0x5459 /* Get line status register */
67#define TIOCSERGETMULTI 0x545A /* Get multiport config */
68#define TIOCSERSETMULTI 0x545B /* Set multiport config */
69
70#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
71#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
72#define FIOQSIZE 0x545E 4#define FIOQSIZE 0x545E
73 5
74/* Used for packet mode */ 6#include <asm-generic/ioctls.h>
75#define TIOCPKT_DATA 0
76#define TIOCPKT_FLUSHREAD 1
77#define TIOCPKT_FLUSHWRITE 2
78#define TIOCPKT_STOP 4
79#define TIOCPKT_START 8
80#define TIOCPKT_NOSTOP 16
81#define TIOCPKT_DOSTOP 32
82#define TIOCPKT_IOCTL 64
83
84#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
85 7
86#endif /* __ARCH_M68K_IOCTLS_H__ */ 8#endif /* __ARCH_M68K_IOCTLS_H__ */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index fc24b6fc5508..789f3b2de0e9 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -1,5 +1,44 @@
1#ifdef __uClinux__ 1#ifndef _M68K_MACHDEP_H
2#include "machdep_no.h" 2#define _M68K_MACHDEP_H
3#else 3
4#include "machdep_mm.h" 4#include <linux/seq_file.h>
5#endif 5#include <linux/interrupt.h>
6
7struct pt_regs;
8struct mktime;
9struct rtc_time;
10struct rtc_pll_info;
11struct buffer_head;
12
13extern void (*mach_sched_init) (irq_handler_t handler);
14/* machine dependent irq functions */
15extern void (*mach_init_IRQ) (void);
16extern void (*mach_get_model) (char *model);
17extern void (*mach_get_hardware_list) (struct seq_file *m);
18/* machine dependent timer functions */
19extern unsigned long (*mach_gettimeoffset)(void);
20extern int (*mach_hwclk)(int, struct rtc_time*);
21extern unsigned int (*mach_get_ss)(void);
22extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
23extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
24extern int (*mach_set_clock_mmss)(unsigned long);
25extern void (*mach_gettod)(int *year, int *mon, int *day, int *hour,
26 int *min, int *sec);
27extern void (*mach_reset)( void );
28extern void (*mach_halt)( void );
29extern void (*mach_power_off)( void );
30extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
31extern void (*mach_hd_setup)(char *, int *);
32extern long mach_max_dma_address;
33extern void (*mach_heartbeat) (int);
34extern void (*mach_l2_flush) (int);
35extern void (*mach_beep) (unsigned int, unsigned int);
36
37/* Hardware clock functions */
38extern void hw_timer_init(void);
39extern unsigned long hw_timer_offset(void);
40extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
41
42extern void config_BSP(char *command, int len);
43
44#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/machdep_mm.h b/arch/m68k/include/asm/machdep_mm.h
deleted file mode 100644
index 5637dcef314e..000000000000
--- a/arch/m68k/include/asm/machdep_mm.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _M68K_MACHDEP_H
2#define _M68K_MACHDEP_H
3
4#include <linux/seq_file.h>
5#include <linux/interrupt.h>
6
7struct pt_regs;
8struct mktime;
9struct rtc_time;
10struct rtc_pll_info;
11struct buffer_head;
12
13extern void (*mach_sched_init) (irq_handler_t handler);
14/* machine dependent irq functions */
15extern void (*mach_init_IRQ) (void);
16extern void (*mach_get_model) (char *model);
17extern void (*mach_get_hardware_list) (struct seq_file *m);
18/* machine dependent timer functions */
19extern unsigned long (*mach_gettimeoffset)(void);
20extern int (*mach_hwclk)(int, struct rtc_time*);
21extern unsigned int (*mach_get_ss)(void);
22extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
23extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
24extern int (*mach_set_clock_mmss)(unsigned long);
25extern void (*mach_reset)( void );
26extern void (*mach_halt)( void );
27extern void (*mach_power_off)( void );
28extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
29extern void (*mach_hd_setup)(char *, int *);
30extern long mach_max_dma_address;
31extern void (*mach_heartbeat) (int);
32extern void (*mach_l2_flush) (int);
33extern void (*mach_beep) (unsigned int, unsigned int);
34
35#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/machdep_no.h b/arch/m68k/include/asm/machdep_no.h
deleted file mode 100644
index de9f47a51cc2..000000000000
--- a/arch/m68k/include/asm/machdep_no.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef _M68KNOMMU_MACHDEP_H
2#define _M68KNOMMU_MACHDEP_H
3
4#include <linux/interrupt.h>
5
6/* Hardware clock functions */
7extern void hw_timer_init(void);
8extern unsigned long hw_timer_offset(void);
9
10extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
11
12/* Machine dependent time handling */
13extern void (*mach_gettod)(int *year, int *mon, int *day, int *hour,
14 int *min, int *sec);
15extern int (*mach_set_clock_mmss)(unsigned long);
16
17/* machine dependent power off functions */
18extern void (*mach_reset)( void );
19extern void (*mach_halt)( void );
20extern void (*mach_power_off)( void );
21
22extern void config_BSP(char *command, int len);
23
24extern void do_IRQ(int irq, struct pt_regs *fp);
25
26#endif /* _M68KNOMMU_MACHDEP_H */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index f2b4480cc98a..dfebb7c1e379 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -1,5 +1,49 @@
1#ifdef __uClinux__ 1#ifndef _M68K_PAGE_H
2#include "page_no.h" 2#define _M68K_PAGE_H
3
4#include <linux/const.h>
5#include <asm/setup.h>
6#include <asm/page_offset.h>
7
8/* PAGE_SHIFT determines the page size */
9#ifndef CONFIG_SUN3
10#define PAGE_SHIFT (12)
3#else 11#else
12#define PAGE_SHIFT (13)
13#endif
14#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
15#define PAGE_MASK (~(PAGE_SIZE-1))
16#define PAGE_OFFSET (PAGE_OFFSET_RAW)
17
18#ifndef __ASSEMBLY__
19
20/*
21 * These are used to make use of C type-checking..
22 */
23typedef struct { unsigned long pte; } pte_t;
24typedef struct { unsigned long pmd[16]; } pmd_t;
25typedef struct { unsigned long pgd; } pgd_t;
26typedef struct { unsigned long pgprot; } pgprot_t;
27typedef struct page *pgtable_t;
28
29#define pte_val(x) ((x).pte)
30#define pmd_val(x) ((&x)->pmd[0])
31#define pgd_val(x) ((x).pgd)
32#define pgprot_val(x) ((x).pgprot)
33
34#define __pte(x) ((pte_t) { (x) } )
35#define __pmd(x) ((pmd_t) { (x) } )
36#define __pgd(x) ((pgd_t) { (x) } )
37#define __pgprot(x) ((pgprot_t) { (x) } )
38
39#endif /* !__ASSEMBLY__ */
40
41#ifdef CONFIG_MMU
4#include "page_mm.h" 42#include "page_mm.h"
43#else
44#include "page_no.h"
5#endif 45#endif
46
47#include <asm-generic/getorder.h>
48
49#endif /* _M68K_PAGE_H */
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index d009f3ea39ab..31d5570d6567 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -1,29 +1,9 @@
1#ifndef _M68K_PAGE_H 1#ifndef _M68K_PAGE_MM_H
2#define _M68K_PAGE_H 2#define _M68K_PAGE_MM_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7#ifndef CONFIG_SUN3
8#define PAGE_SHIFT (12)
9#else
10#define PAGE_SHIFT (13)
11#endif
12#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
13#define PAGE_MASK (~(PAGE_SIZE-1))
14
15#include <asm/setup.h>
16
17#if PAGE_SHIFT < 13
18#define THREAD_SIZE (8192)
19#else
20#define THREAD_SIZE PAGE_SIZE
21#endif
22 3
23#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
24 5
25#include <linux/compiler.h> 6#include <linux/compiler.h>
26
27#include <asm/module.h> 7#include <asm/module.h>
28 8
29#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 9#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
@@ -84,33 +64,6 @@ static inline void clear_page(void *page)
84 flush_dcache_page(page); \ 64 flush_dcache_page(page); \
85 } while (0) 65 } while (0)
86 66
87/*
88 * These are used to make use of C type-checking..
89 */
90typedef struct { unsigned long pte; } pte_t;
91typedef struct { unsigned long pmd[16]; } pmd_t;
92typedef struct { unsigned long pgd; } pgd_t;
93typedef struct { unsigned long pgprot; } pgprot_t;
94typedef struct page *pgtable_t;
95
96#define pte_val(x) ((x).pte)
97#define pmd_val(x) ((&x)->pmd[0])
98#define pgd_val(x) ((x).pgd)
99#define pgprot_val(x) ((x).pgprot)
100
101#define __pte(x) ((pte_t) { (x) } )
102#define __pmd(x) ((pmd_t) { (x) } )
103#define __pgd(x) ((pgd_t) { (x) } )
104#define __pgprot(x) ((pgprot_t) { (x) } )
105
106#endif /* !__ASSEMBLY__ */
107
108#include <asm/page_offset.h>
109
110#define PAGE_OFFSET (PAGE_OFFSET_RAW)
111
112#ifndef __ASSEMBLY__
113
114extern unsigned long m68k_memoffset; 67extern unsigned long m68k_memoffset;
115 68
116#ifndef CONFIG_SUN3 69#ifndef CONFIG_SUN3
@@ -127,7 +80,7 @@ static inline unsigned long ___pa(void *vaddr)
127 : "0" (vaddr), "i" (m68k_fixup_memoffset)); 80 : "0" (vaddr), "i" (m68k_fixup_memoffset));
128 return paddr; 81 return paddr;
129} 82}
130#define __pa(vaddr) ___pa((void *)(vaddr)) 83#define __pa(vaddr) ___pa((void *)(long)(vaddr))
131static inline void *__va(unsigned long paddr) 84static inline void *__va(unsigned long paddr)
132{ 85{
133 void *vaddr; 86 void *vaddr;
@@ -223,6 +176,4 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
223#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 176#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
224 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 177 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
225 178
226#include <asm-generic/getorder.h> 179#endif /* _M68K_PAGE_MM_H */
227
228#endif /* _M68K_PAGE_H */
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 8029a33e03c3..90595721185f 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -1,18 +1,11 @@
1#ifndef _M68KNOMMU_PAGE_H 1#ifndef _M68K_PAGE_NO_H
2#define _M68KNOMMU_PAGE_H 2#define _M68K_PAGE_NO_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7
8#define PAGE_SHIFT (12)
9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1))
11
12#include <asm/setup.h>
13 3
14#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
15 5
6extern unsigned long memory_start;
7extern unsigned long memory_end;
8
16#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 9#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
17#define free_user_page(page, addr) free_page(addr) 10#define free_user_page(page, addr) free_page(addr)
18 11
@@ -26,36 +19,6 @@
26 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) 19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
27#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 20#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
28 21
29/*
30 * These are used to make use of C type-checking..
31 */
32typedef struct { unsigned long pte; } pte_t;
33typedef struct { unsigned long pmd[16]; } pmd_t;
34typedef struct { unsigned long pgd; } pgd_t;
35typedef struct { unsigned long pgprot; } pgprot_t;
36typedef struct page *pgtable_t;
37
38#define pte_val(x) ((x).pte)
39#define pmd_val(x) ((&x)->pmd[0])
40#define pgd_val(x) ((x).pgd)
41#define pgprot_val(x) ((x).pgprot)
42
43#define __pte(x) ((pte_t) { (x) } )
44#define __pmd(x) ((pmd_t) { (x) } )
45#define __pgd(x) ((pgd_t) { (x) } )
46#define __pgprot(x) ((pgprot_t) { (x) } )
47
48extern unsigned long memory_start;
49extern unsigned long memory_end;
50
51#endif /* !__ASSEMBLY__ */
52
53#include <asm/page_offset.h>
54
55#define PAGE_OFFSET (PAGE_OFFSET_RAW)
56
57#ifndef __ASSEMBLY__
58
59#define __pa(vaddr) ((unsigned long)(vaddr)) 22#define __pa(vaddr) ((unsigned long)(vaddr))
60#define __va(paddr) ((void *)(paddr)) 23#define __va(paddr) ((void *)(paddr))
61 24
@@ -74,6 +37,4 @@ extern unsigned long memory_end;
74 37
75#endif /* __ASSEMBLY__ */ 38#endif /* __ASSEMBLY__ */
76 39
77#include <asm-generic/getorder.h> 40#endif /* _M68K_PAGE_NO_H */
78
79#endif /* _M68KNOMMU_PAGE_H */
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 2c356f90f171..2936dda938d7 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -1,5 +1,133 @@
1#ifdef __uClinux__ 1#ifndef _M68K_STRING_H_
2#include "string_no.h" 2#define _M68K_STRING_H_
3
4#include <linux/types.h>
5#include <linux/compiler.h>
6
7static inline size_t __kernel_strlen(const char *s)
8{
9 const char *sc;
10
11 for (sc = s; *sc++; )
12 ;
13 return sc - s - 1;
14}
15
16static inline char *__kernel_strcpy(char *dest, const char *src)
17{
18 char *xdest = dest;
19
20 asm volatile ("\n"
21 "1: move.b (%1)+,(%0)+\n"
22 " jne 1b"
23 : "+a" (dest), "+a" (src)
24 : : "memory");
25 return xdest;
26}
27
28#ifndef __IN_STRING_C
29
30#define __HAVE_ARCH_STRLEN
31#define strlen(s) (__builtin_constant_p(s) ? \
32 __builtin_strlen(s) : \
33 __kernel_strlen(s))
34
35#define __HAVE_ARCH_STRNLEN
36static inline size_t strnlen(const char *s, size_t count)
37{
38 const char *sc = s;
39
40 asm volatile ("\n"
41 "1: subq.l #1,%1\n"
42 " jcs 2f\n"
43 " tst.b (%0)+\n"
44 " jne 1b\n"
45 " subq.l #1,%0\n"
46 "2:"
47 : "+a" (sc), "+d" (count));
48 return sc - s;
49}
50
51#define __HAVE_ARCH_STRCPY
52#if __GNUC__ >= 4
53#define strcpy(d, s) (__builtin_constant_p(s) && \
54 __builtin_strlen(s) <= 32 ? \
55 __builtin_strcpy(d, s) : \
56 __kernel_strcpy(d, s))
3#else 57#else
4#include "string_mm.h" 58#define strcpy(d, s) __kernel_strcpy(d, s)
5#endif 59#endif
60
61#define __HAVE_ARCH_STRNCPY
62static inline char *strncpy(char *dest, const char *src, size_t n)
63{
64 char *xdest = dest;
65
66 asm volatile ("\n"
67 " jra 2f\n"
68 "1: move.b (%1),(%0)+\n"
69 " jeq 2f\n"
70 " addq.l #1,%1\n"
71 "2: subq.l #1,%2\n"
72 " jcc 1b\n"
73 : "+a" (dest), "+a" (src), "+d" (n)
74 : : "memory");
75 return xdest;
76}
77
78#define __HAVE_ARCH_STRCAT
79#define strcat(d, s) ({ \
80 char *__d = (d); \
81 strcpy(__d + strlen(__d), (s)); \
82})
83
84#define __HAVE_ARCH_STRCHR
85static inline char *strchr(const char *s, int c)
86{
87 char sc, ch = c;
88
89 for (; (sc = *s++) != ch; ) {
90 if (!sc)
91 return NULL;
92 }
93 return (char *)s - 1;
94}
95
96#ifndef CONFIG_COLDFIRE
97#define __HAVE_ARCH_STRCMP
98static inline int strcmp(const char *cs, const char *ct)
99{
100 char res;
101
102 asm ("\n"
103 "1: move.b (%0)+,%2\n" /* get *cs */
104 " cmp.b (%1)+,%2\n" /* compare a byte */
105 " jne 2f\n" /* not equal, break out */
106 " tst.b %2\n" /* at end of cs? */
107 " jne 1b\n" /* no, keep going */
108 " jra 3f\n" /* strings are equal */
109 "2: sub.b -(%1),%2\n" /* *cs - *ct */
110 "3:"
111 : "+a" (cs), "+a" (ct), "=d" (res));
112 return res;
113}
114
115#define __HAVE_ARCH_MEMMOVE
116extern void *memmove(void *, const void *, __kernel_size_t);
117
118#define __HAVE_ARCH_MEMCMP
119extern int memcmp(const void *, const void *, __kernel_size_t);
120#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
121#endif /* CONFIG_COLDFIRE */
122
123#define __HAVE_ARCH_MEMSET
124extern void *memset(void *, int, __kernel_size_t);
125#define memset(d, c, n) __builtin_memset(d, c, n)
126
127#define __HAVE_ARCH_MEMCPY
128extern void *memcpy(void *, const void *, __kernel_size_t);
129#define memcpy(d, s, n) __builtin_memcpy(d, s, n)
130
131#endif
132
133#endif /* _M68K_STRING_H_ */
diff --git a/arch/m68k/include/asm/string_mm.h b/arch/m68k/include/asm/string_mm.h
deleted file mode 100644
index 2eb7df1e0f5d..000000000000
--- a/arch/m68k/include/asm/string_mm.h
+++ /dev/null
@@ -1,131 +0,0 @@
1#ifndef _M68K_STRING_H_
2#define _M68K_STRING_H_
3
4#include <linux/types.h>
5#include <linux/compiler.h>
6
7static inline size_t __kernel_strlen(const char *s)
8{
9 const char *sc;
10
11 for (sc = s; *sc++; )
12 ;
13 return sc - s - 1;
14}
15
16static inline char *__kernel_strcpy(char *dest, const char *src)
17{
18 char *xdest = dest;
19
20 asm volatile ("\n"
21 "1: move.b (%1)+,(%0)+\n"
22 " jne 1b"
23 : "+a" (dest), "+a" (src)
24 : : "memory");
25 return xdest;
26}
27
28#ifndef __IN_STRING_C
29
30#define __HAVE_ARCH_STRLEN
31#define strlen(s) (__builtin_constant_p(s) ? \
32 __builtin_strlen(s) : \
33 __kernel_strlen(s))
34
35#define __HAVE_ARCH_STRNLEN
36static inline size_t strnlen(const char *s, size_t count)
37{
38 const char *sc = s;
39
40 asm volatile ("\n"
41 "1: subq.l #1,%1\n"
42 " jcs 2f\n"
43 " tst.b (%0)+\n"
44 " jne 1b\n"
45 " subq.l #1,%0\n"
46 "2:"
47 : "+a" (sc), "+d" (count));
48 return sc - s;
49}
50
51#define __HAVE_ARCH_STRCPY
52#if __GNUC__ >= 4
53#define strcpy(d, s) (__builtin_constant_p(s) && \
54 __builtin_strlen(s) <= 32 ? \
55 __builtin_strcpy(d, s) : \
56 __kernel_strcpy(d, s))
57#else
58#define strcpy(d, s) __kernel_strcpy(d, s)
59#endif
60
61#define __HAVE_ARCH_STRNCPY
62static inline char *strncpy(char *dest, const char *src, size_t n)
63{
64 char *xdest = dest;
65
66 asm volatile ("\n"
67 " jra 2f\n"
68 "1: move.b (%1),(%0)+\n"
69 " jeq 2f\n"
70 " addq.l #1,%1\n"
71 "2: subq.l #1,%2\n"
72 " jcc 1b\n"
73 : "+a" (dest), "+a" (src), "+d" (n)
74 : : "memory");
75 return xdest;
76}
77
78#define __HAVE_ARCH_STRCAT
79#define strcat(d, s) ({ \
80 char *__d = (d); \
81 strcpy(__d + strlen(__d), (s)); \
82})
83
84#define __HAVE_ARCH_STRCHR
85static inline char *strchr(const char *s, int c)
86{
87 char sc, ch = c;
88
89 for (; (sc = *s++) != ch; ) {
90 if (!sc)
91 return NULL;
92 }
93 return (char *)s - 1;
94}
95
96#define __HAVE_ARCH_STRCMP
97static inline int strcmp(const char *cs, const char *ct)
98{
99 char res;
100
101 asm ("\n"
102 "1: move.b (%0)+,%2\n" /* get *cs */
103 " cmp.b (%1)+,%2\n" /* compare a byte */
104 " jne 2f\n" /* not equal, break out */
105 " tst.b %2\n" /* at end of cs? */
106 " jne 1b\n" /* no, keep going */
107 " jra 3f\n" /* strings are equal */
108 "2: sub.b -(%1),%2\n" /* *cs - *ct */
109 "3:"
110 : "+a" (cs), "+a" (ct), "=d" (res));
111 return res;
112}
113
114#define __HAVE_ARCH_MEMSET
115extern void *memset(void *, int, __kernel_size_t);
116#define memset(d, c, n) __builtin_memset(d, c, n)
117
118#define __HAVE_ARCH_MEMCPY
119extern void *memcpy(void *, const void *, __kernel_size_t);
120#define memcpy(d, s, n) __builtin_memcpy(d, s, n)
121
122#define __HAVE_ARCH_MEMMOVE
123extern void *memmove(void *, const void *, __kernel_size_t);
124
125#define __HAVE_ARCH_MEMCMP
126extern int memcmp(const void *, const void *, __kernel_size_t);
127#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
128
129#endif
130
131#endif /* _M68K_STRING_H_ */
diff --git a/arch/m68k/include/asm/string_no.h b/arch/m68k/include/asm/string_no.h
deleted file mode 100644
index af09e17000fc..000000000000
--- a/arch/m68k/include/asm/string_no.h
+++ /dev/null
@@ -1,126 +0,0 @@
1#ifndef _M68KNOMMU_STRING_H_
2#define _M68KNOMMU_STRING_H_
3
4#ifdef __KERNEL__ /* only set these up for kernel code */
5
6#include <asm/setup.h>
7#include <asm/page.h>
8
9#define __HAVE_ARCH_STRCPY
10static inline char * strcpy(char * dest,const char *src)
11{
12 char *xdest = dest;
13
14 __asm__ __volatile__
15 ("1:\tmoveb %1@+,%0@+\n\t"
16 "jne 1b"
17 : "=a" (dest), "=a" (src)
18 : "0" (dest), "1" (src) : "memory");
19 return xdest;
20}
21
22#define __HAVE_ARCH_STRNCPY
23static inline char * strncpy(char *dest, const char *src, size_t n)
24{
25 char *xdest = dest;
26
27 if (n == 0)
28 return xdest;
29
30 __asm__ __volatile__
31 ("1:\tmoveb %1@+,%0@+\n\t"
32 "jeq 2f\n\t"
33 "subql #1,%2\n\t"
34 "jne 1b\n\t"
35 "2:"
36 : "=a" (dest), "=a" (src), "=d" (n)
37 : "0" (dest), "1" (src), "2" (n)
38 : "memory");
39 return xdest;
40}
41
42
43#ifndef CONFIG_COLDFIRE
44
45#define __HAVE_ARCH_STRCMP
46static inline int strcmp(const char * cs,const char * ct)
47{
48 char __res;
49
50 __asm__
51 ("1:\tmoveb %0@+,%2\n\t" /* get *cs */
52 "cmpb %1@+,%2\n\t" /* compare a byte */
53 "jne 2f\n\t" /* not equal, break out */
54 "tstb %2\n\t" /* at end of cs? */
55 "jne 1b\n\t" /* no, keep going */
56 "jra 3f\n\t" /* strings are equal */
57 "2:\tsubb %1@-,%2\n\t" /* *cs - *ct */
58 "3:"
59 : "=a" (cs), "=a" (ct), "=d" (__res)
60 : "0" (cs), "1" (ct));
61
62 return __res;
63}
64
65#define __HAVE_ARCH_STRNCMP
66static inline int strncmp(const char * cs,const char * ct,size_t count)
67{
68 char __res;
69
70 if (!count)
71 return 0;
72 __asm__
73 ("1:\tmovb %0@+,%3\n\t" /* get *cs */
74 "cmpb %1@+,%3\n\t" /* compare a byte */
75 "jne 3f\n\t" /* not equal, break out */
76 "tstb %3\n\t" /* at end of cs? */
77 "jeq 4f\n\t" /* yes, all done */
78 "subql #1,%2\n\t" /* no, adjust count */
79 "jne 1b\n\t" /* more to do, keep going */
80 "2:\tmoveq #0,%3\n\t" /* strings are equal */
81 "jra 4f\n\t"
82 "3:\tsubb %1@-,%3\n\t" /* *cs - *ct */
83 "4:"
84 : "=a" (cs), "=a" (ct), "=d" (count), "=d" (__res)
85 : "0" (cs), "1" (ct), "2" (count));
86 return __res;
87}
88
89#endif /* CONFIG_COLDFIRE */
90
91#define __HAVE_ARCH_MEMSET
92extern void * memset(void * s, int c, size_t count);
93
94#define __HAVE_ARCH_MEMCPY
95extern void * memcpy(void *d, const void *s, size_t count);
96
97#else /* KERNEL */
98
99/*
100 * let user libraries deal with these,
101 * IMHO the kernel has no place defining these functions for user apps
102 */
103
104#define __HAVE_ARCH_STRCPY 1
105#define __HAVE_ARCH_STRNCPY 1
106#define __HAVE_ARCH_STRCAT 1
107#define __HAVE_ARCH_STRNCAT 1
108#define __HAVE_ARCH_STRCMP 1
109#define __HAVE_ARCH_STRNCMP 1
110#define __HAVE_ARCH_STRNICMP 1
111#define __HAVE_ARCH_STRCHR 1
112#define __HAVE_ARCH_STRRCHR 1
113#define __HAVE_ARCH_STRSTR 1
114#define __HAVE_ARCH_STRLEN 1
115#define __HAVE_ARCH_STRNLEN 1
116#define __HAVE_ARCH_MEMSET 1
117#define __HAVE_ARCH_MEMCPY 1
118#define __HAVE_ARCH_MEMMOVE 1
119#define __HAVE_ARCH_MEMSCAN 1
120#define __HAVE_ARCH_MEMCMP 1
121#define __HAVE_ARCH_MEMCHR 1
122#define __HAVE_ARCH_STRTOK 1
123
124#endif /* KERNEL */
125
126#endif /* _M68K_STRING_H_ */
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
index 12053c44cccf..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system_mm.h
+++ b/arch/m68k/include/asm/system_mm.h
@@ -182,9 +182,7 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
183 (unsigned long)(n), sizeof(*(ptr)))) 183 (unsigned long)(n), sizeof(*(ptr))))
184 184
185#ifndef CONFIG_SMP
186#include <asm-generic/cmpxchg.h> 185#include <asm-generic/cmpxchg.h>
187#endif
188 186
189#endif 187#endif
190 188
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
index 20126c09794e..6fe9f93bc3ff 100644
--- a/arch/m68k/include/asm/system_no.h
+++ b/arch/m68k/include/asm/system_no.h
@@ -59,17 +59,10 @@ asmlinkage void resume(void);
59#define wmb() asm volatile ("" : : :"memory") 59#define wmb() asm volatile ("" : : :"memory")
60#define set_mb(var, value) ({ (var) = (value); wmb(); }) 60#define set_mb(var, value) ({ (var) = (value); wmb(); })
61 61
62#ifdef CONFIG_SMP
63#define smp_mb() mb()
64#define smp_rmb() rmb()
65#define smp_wmb() wmb()
66#define smp_read_barrier_depends() read_barrier_depends()
67#else
68#define smp_mb() barrier() 62#define smp_mb() barrier()
69#define smp_rmb() barrier() 63#define smp_rmb() barrier()
70#define smp_wmb() barrier() 64#define smp_wmb() barrier()
71#define smp_read_barrier_depends() do { } while(0) 65#define smp_read_barrier_depends() do { } while(0)
72#endif
73 66
74#define read_barrier_depends() ((void)0) 67#define read_barrier_depends() ((void)0)
75 68
@@ -152,9 +145,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
152 (unsigned long)(n), sizeof(*(ptr)))) 145 (unsigned long)(n), sizeof(*(ptr))))
153#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 146#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
154 147
155#ifndef CONFIG_SMP
156#include <asm-generic/cmpxchg.h> 148#include <asm-generic/cmpxchg.h>
157#endif
158 149
159#define arch_align_stack(x) (x) 150#define arch_align_stack(x) (x)
160 151
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index f31a3f42b7b3..1da5d53a00eb 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -1,5 +1,108 @@
1#ifdef __uClinux__ 1#ifndef _ASM_M68K_THREAD_INFO_H
2#include "thread_info_no.h" 2#define _ASM_M68K_THREAD_INFO_H
3
4#include <asm/types.h>
5#include <asm/page.h>
6
7/*
8 * On machines with 4k pages we default to an 8k thread size, though we
9 * allow a 4k with config option. Any other machine page size then
10 * the thread size must match the page size (which is 8k and larger here).
11 */
12#if PAGE_SHIFT < 13
13#ifdef CONFIG_4KSTACKS
14#define THREAD_SIZE 4096
3#else 15#else
4#include "thread_info_mm.h" 16#define THREAD_SIZE 8192
5#endif 17#endif
18#else
19#define THREAD_SIZE PAGE_SIZE
20#endif
21#define THREAD_SIZE_ORDER ((THREAD_SIZE / PAGE_SIZE) - 1)
22
23#ifndef __ASSEMBLY__
24
25struct thread_info {
26 struct task_struct *task; /* main task structure */
27 unsigned long flags;
28 struct exec_domain *exec_domain; /* execution domain */
29 int preempt_count; /* 0 => preemptable, <0 => BUG */
30 __u32 cpu; /* should always be 0 on m68k */
31 unsigned long tp_value; /* thread pointer */
32 struct restart_block restart_block;
33};
34#endif /* __ASSEMBLY__ */
35
36#define PREEMPT_ACTIVE 0x4000000
37
38#define INIT_THREAD_INFO(tsk) \
39{ \
40 .task = &tsk, \
41 .exec_domain = &default_exec_domain, \
42 .preempt_count = INIT_PREEMPT_COUNT, \
43 .restart_block = { \
44 .fn = do_no_restart_syscall, \
45 }, \
46}
47
48#define init_stack (init_thread_union.stack)
49
50#ifdef CONFIG_MMU
51
52#ifndef __ASSEMBLY__
53#include <asm/current.h>
54#endif
55
56#ifdef ASM_OFFSETS_C
57#define task_thread_info(tsk) ((struct thread_info *) NULL)
58#else
59#include <asm/asm-offsets.h>
60#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
61#endif
62
63#define init_thread_info (init_task.thread.info)
64#define task_stack_page(tsk) ((tsk)->stack)
65#define current_thread_info() task_thread_info(current)
66
67#define __HAVE_THREAD_FUNCTIONS
68
69#define setup_thread_stack(p, org) ({ \
70 *(struct task_struct **)(p)->stack = (p); \
71 task_thread_info(p)->task = (p); \
72})
73
74#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
75
76#else /* !CONFIG_MMU */
77
78#ifndef __ASSEMBLY__
79/* how to get the thread information struct from C */
80static inline struct thread_info *current_thread_info(void)
81{
82 struct thread_info *ti;
83 __asm__(
84 "move.l %%sp, %0 \n\t"
85 "and.l %1, %0"
86 : "=&d"(ti)
87 : "di" (~(THREAD_SIZE-1))
88 );
89 return ti;
90}
91#endif
92
93#define init_thread_info (init_thread_union.thread_info)
94
95#endif /* CONFIG_MMU */
96
97/* entry.S relies on these definitions!
98 * bits 0-7 are tested at every exception exit
99 * bits 8-15 are also tested at syscall exit
100 */
101#define TIF_SIGPENDING 6 /* signal pending */
102#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
103#define TIF_DELAYED_TRACE 14 /* single step a syscall */
104#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
105#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
106#define TIF_FREEZE 17 /* thread is freezing for suspend */
107
108#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h
deleted file mode 100644
index 3bf31dc51b12..000000000000
--- a/arch/m68k/include/asm/thread_info_mm.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef _ASM_M68K_THREAD_INFO_H
2#define _ASM_M68K_THREAD_INFO_H
3
4#ifndef ASM_OFFSETS_C
5#include <asm/asm-offsets.h>
6#endif
7#include <asm/types.h>
8#include <asm/page.h>
9
10#ifndef __ASSEMBLY__
11#include <asm/current.h>
12
13struct thread_info {
14 struct task_struct *task; /* main task structure */
15 unsigned long flags;
16 struct exec_domain *exec_domain; /* execution domain */
17 int preempt_count; /* 0 => preemptable, <0 => BUG */
18 __u32 cpu; /* should always be 0 on m68k */
19 unsigned long tp_value; /* thread pointer */
20 struct restart_block restart_block;
21};
22#endif /* __ASSEMBLY__ */
23
24#define PREEMPT_ACTIVE 0x4000000
25
26#define INIT_THREAD_INFO(tsk) \
27{ \
28 .task = &tsk, \
29 .exec_domain = &default_exec_domain, \
30 .preempt_count = INIT_PREEMPT_COUNT, \
31 .restart_block = { \
32 .fn = do_no_restart_syscall, \
33 }, \
34}
35
36/* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */
37#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
38
39#define init_thread_info (init_task.thread.info)
40#define init_stack (init_thread_union.stack)
41
42#ifdef ASM_OFFSETS_C
43#define task_thread_info(tsk) ((struct thread_info *) NULL)
44#else
45#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
46#endif
47
48#define task_stack_page(tsk) ((tsk)->stack)
49#define current_thread_info() task_thread_info(current)
50
51#define __HAVE_THREAD_FUNCTIONS
52
53#define setup_thread_stack(p, org) ({ \
54 *(struct task_struct **)(p)->stack = (p); \
55 task_thread_info(p)->task = (p); \
56})
57
58#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
59
60/* entry.S relies on these definitions!
61 * bits 0-7 are tested at every exception exit
62 * bits 8-15 are also tested at syscall exit
63 */
64#define TIF_SIGPENDING 6 /* signal pending */
65#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
66#define TIF_DELAYED_TRACE 14 /* single step a syscall */
67#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
68#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
69#define TIF_FREEZE 17 /* thread is freezing for suspend */
70
71#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/thread_info_no.h b/arch/m68k/include/asm/thread_info_no.h
deleted file mode 100644
index 51f354b672e6..000000000000
--- a/arch/m68k/include/asm/thread_info_no.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/* thread_info.h: m68knommu low-level thread information
2 * adapted from the i386 and PPC versions by Greg Ungerer (gerg@snapgear.com)
3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 */
7
8#ifndef _ASM_THREAD_INFO_H
9#define _ASM_THREAD_INFO_H
10
11#include <asm/page.h>
12
13#ifdef __KERNEL__
14
15/*
16 * Size of kernel stack for each process. This must be a power of 2...
17 */
18#ifdef CONFIG_4KSTACKS
19#define THREAD_SIZE_ORDER (0)
20#else
21#define THREAD_SIZE_ORDER (1)
22#endif
23
24/*
25 * for asm files, THREAD_SIZE is now generated by asm-offsets.c
26 */
27#define THREAD_SIZE (PAGE_SIZE<<THREAD_SIZE_ORDER)
28
29#ifndef __ASSEMBLY__
30
31/*
32 * low level task data.
33 */
34struct thread_info {
35 struct task_struct *task; /* main task structure */
36 struct exec_domain *exec_domain; /* execution domain */
37 unsigned long flags; /* low level flags */
38 int cpu; /* cpu we're on */
39 int preempt_count; /* 0 => preemptable, <0 => BUG */
40 unsigned long tp_value; /* thread pointer */
41 struct restart_block restart_block;
42};
43
44/*
45 * macros/functions for gaining access to the thread information structure
46 */
47#define INIT_THREAD_INFO(tsk) \
48{ \
49 .task = &tsk, \
50 .exec_domain = &default_exec_domain, \
51 .flags = 0, \
52 .cpu = 0, \
53 .preempt_count = INIT_PREEMPT_COUNT, \
54 .restart_block = { \
55 .fn = do_no_restart_syscall, \
56 }, \
57}
58
59#define init_thread_info (init_thread_union.thread_info)
60#define init_stack (init_thread_union.stack)
61
62
63/* how to get the thread information struct from C */
64static inline struct thread_info *current_thread_info(void)
65{
66 struct thread_info *ti;
67 __asm__(
68 "move.l %%sp, %0 \n\t"
69 "and.l %1, %0"
70 : "=&d"(ti)
71 : "di" (~(THREAD_SIZE-1))
72 );
73 return ti;
74}
75
76#endif /* __ASSEMBLY__ */
77
78#define PREEMPT_ACTIVE 0x4000000
79
80/*
81 * thread information flag bit numbers
82 */
83#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
84#define TIF_SIGPENDING 1 /* signal pending */
85#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
86#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
87 TIF_NEED_RESCHED */
88#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
89#define TIF_FREEZE 16 /* is freezing for suspend */
90
91/* as above, but as bit values */
92#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
93#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
94#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
95#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
96#define _TIF_FREEZE (1<<TIF_FREEZE)
97
98#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
99
100#endif /* __KERNEL__ */
101
102#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 3011ec0f5365..0bffb17d5db7 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -1,5 +1,272 @@
1#ifdef __uClinux__ 1/*
2#include "traps_no.h" 2 * linux/include/asm/traps.h
3#else 3 *
4#include "traps_mm.h" 4 * Copyright (C) 1993 Hamish Macdonald
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _M68K_TRAPS_H
12#define _M68K_TRAPS_H
13
14#ifndef __ASSEMBLY__
15
16#include <linux/linkage.h>
17#include <asm/ptrace.h>
18
19typedef void (*e_vector)(void);
20extern e_vector vectors[];
21
22asmlinkage void auto_inthandler(void);
23asmlinkage void user_inthandler(void);
24asmlinkage void bad_inthandler(void);
25extern void init_vectors(void);
26
5#endif 27#endif
28
29#define VEC_RESETSP (0)
30#define VEC_RESETPC (1)
31#define VEC_BUSERR (2)
32#define VEC_ADDRERR (3)
33#define VEC_ILLEGAL (4)
34#define VEC_ZERODIV (5)
35#define VEC_CHK (6)
36#define VEC_TRAP (7)
37#define VEC_PRIV (8)
38#define VEC_TRACE (9)
39#define VEC_LINE10 (10)
40#define VEC_LINE11 (11)
41#define VEC_RESV12 (12)
42#define VEC_COPROC (13)
43#define VEC_FORMAT (14)
44#define VEC_UNINT (15)
45#define VEC_RESV16 (16)
46#define VEC_RESV17 (17)
47#define VEC_RESV18 (18)
48#define VEC_RESV19 (19)
49#define VEC_RESV20 (20)
50#define VEC_RESV21 (21)
51#define VEC_RESV22 (22)
52#define VEC_RESV23 (23)
53#define VEC_SPUR (24)
54#define VEC_INT1 (25)
55#define VEC_INT2 (26)
56#define VEC_INT3 (27)
57#define VEC_INT4 (28)
58#define VEC_INT5 (29)
59#define VEC_INT6 (30)
60#define VEC_INT7 (31)
61#define VEC_SYS (32)
62#define VEC_TRAP1 (33)
63#define VEC_TRAP2 (34)
64#define VEC_TRAP3 (35)
65#define VEC_TRAP4 (36)
66#define VEC_TRAP5 (37)
67#define VEC_TRAP6 (38)
68#define VEC_TRAP7 (39)
69#define VEC_TRAP8 (40)
70#define VEC_TRAP9 (41)
71#define VEC_TRAP10 (42)
72#define VEC_TRAP11 (43)
73#define VEC_TRAP12 (44)
74#define VEC_TRAP13 (45)
75#define VEC_TRAP14 (46)
76#define VEC_TRAP15 (47)
77#define VEC_FPBRUC (48)
78#define VEC_FPIR (49)
79#define VEC_FPDIVZ (50)
80#define VEC_FPUNDER (51)
81#define VEC_FPOE (52)
82#define VEC_FPOVER (53)
83#define VEC_FPNAN (54)
84#define VEC_FPUNSUP (55)
85#define VEC_MMUCFG (56)
86#define VEC_MMUILL (57)
87#define VEC_MMUACC (58)
88#define VEC_RESV59 (59)
89#define VEC_UNIMPEA (60)
90#define VEC_UNIMPII (61)
91#define VEC_RESV62 (62)
92#define VEC_RESV63 (63)
93#define VEC_USER (64)
94
95#define VECOFF(vec) ((vec)<<2)
96
97#ifndef __ASSEMBLY__
98
99/* Status register bits */
100#define PS_T (0x8000)
101#define PS_S (0x2000)
102#define PS_M (0x1000)
103#define PS_C (0x0001)
104
105/* bits for 68020/68030 special status word */
106
107#define FC (0x8000)
108#define FB (0x4000)
109#define RC (0x2000)
110#define RB (0x1000)
111#define DF (0x0100)
112#define RM (0x0080)
113#define RW (0x0040)
114#define SZ (0x0030)
115#define DFC (0x0007)
116
117/* bits for 68030 MMU status register (mmusr,psr) */
118
119#define MMU_B (0x8000) /* bus error */
120#define MMU_L (0x4000) /* limit violation */
121#define MMU_S (0x2000) /* supervisor violation */
122#define MMU_WP (0x0800) /* write-protected */
123#define MMU_I (0x0400) /* invalid descriptor */
124#define MMU_M (0x0200) /* ATC entry modified */
125#define MMU_T (0x0040) /* transparent translation */
126#define MMU_NUM (0x0007) /* number of levels traversed */
127
128
129/* bits for 68040 special status word */
130#define CP_040 (0x8000)
131#define CU_040 (0x4000)
132#define CT_040 (0x2000)
133#define CM_040 (0x1000)
134#define MA_040 (0x0800)
135#define ATC_040 (0x0400)
136#define LK_040 (0x0200)
137#define RW_040 (0x0100)
138#define SIZ_040 (0x0060)
139#define TT_040 (0x0018)
140#define TM_040 (0x0007)
141
142/* bits for 68040 write back status word */
143#define WBV_040 (0x80)
144#define WBSIZ_040 (0x60)
145#define WBBYT_040 (0x20)
146#define WBWRD_040 (0x40)
147#define WBLNG_040 (0x00)
148#define WBTT_040 (0x18)
149#define WBTM_040 (0x07)
150
151/* bus access size codes */
152#define BA_SIZE_BYTE (0x20)
153#define BA_SIZE_WORD (0x40)
154#define BA_SIZE_LONG (0x00)
155#define BA_SIZE_LINE (0x60)
156
157/* bus access transfer type codes */
158#define BA_TT_MOVE16 (0x08)
159
160/* bits for 68040 MMU status register (mmusr) */
161#define MMU_B_040 (0x0800)
162#define MMU_G_040 (0x0400)
163#define MMU_S_040 (0x0080)
164#define MMU_CM_040 (0x0060)
165#define MMU_M_040 (0x0010)
166#define MMU_WP_040 (0x0004)
167#define MMU_T_040 (0x0002)
168#define MMU_R_040 (0x0001)
169
170/* bits in the 68060 fault status long word (FSLW) */
171#define MMU060_MA (0x08000000) /* misaligned */
172#define MMU060_LK (0x02000000) /* locked transfer */
173#define MMU060_RW (0x01800000) /* read/write */
174# define MMU060_RW_W (0x00800000) /* write */
175# define MMU060_RW_R (0x01000000) /* read */
176# define MMU060_RW_RMW (0x01800000) /* read/modify/write */
177# define MMU060_W (0x00800000) /* general write, includes rmw */
178#define MMU060_SIZ (0x00600000) /* transfer size */
179#define MMU060_TT (0x00180000) /* transfer type (TT) bits */
180#define MMU060_TM (0x00070000) /* transfer modifier (TM) bits */
181#define MMU060_IO (0x00008000) /* instruction or operand */
182#define MMU060_PBE (0x00004000) /* push buffer bus error */
183#define MMU060_SBE (0x00002000) /* store buffer bus error */
184#define MMU060_PTA (0x00001000) /* pointer A fault */
185#define MMU060_PTB (0x00000800) /* pointer B fault */
186#define MMU060_IL (0x00000400) /* double indirect descr fault */
187#define MMU060_PF (0x00000200) /* page fault (invalid descr) */
188#define MMU060_SP (0x00000100) /* supervisor protection */
189#define MMU060_WP (0x00000080) /* write protection */
190#define MMU060_TWE (0x00000040) /* bus error on table search */
191#define MMU060_RE (0x00000020) /* bus error on read */
192#define MMU060_WE (0x00000010) /* bus error on write */
193#define MMU060_TTR (0x00000008) /* error caused by TTR translation */
194#define MMU060_BPE (0x00000004) /* branch prediction error */
195#define MMU060_SEE (0x00000001) /* software emulated error */
196
197/* cases of missing or invalid descriptors */
198#define MMU060_DESC_ERR (MMU060_PTA | MMU060_PTB | \
199 MMU060_IL | MMU060_PF)
200/* bits that indicate real errors */
201#define MMU060_ERR_BITS (MMU060_PBE | MMU060_SBE | MMU060_DESC_ERR | MMU060_SP | \
202 MMU060_WP | MMU060_TWE | MMU060_RE | MMU060_WE)
203
204/* structure for stack frames */
205
206struct frame {
207 struct pt_regs ptregs;
208 union {
209 struct {
210 unsigned long iaddr; /* instruction address */
211 } fmt2;
212 struct {
213 unsigned long effaddr; /* effective address */
214 } fmt3;
215 struct {
216 unsigned long effaddr; /* effective address */
217 unsigned long pc; /* pc of faulted instr */
218 } fmt4;
219 struct {
220 unsigned long effaddr; /* effective address */
221 unsigned short ssw; /* special status word */
222 unsigned short wb3s; /* write back 3 status */
223 unsigned short wb2s; /* write back 2 status */
224 unsigned short wb1s; /* write back 1 status */
225 unsigned long faddr; /* fault address */
226 unsigned long wb3a; /* write back 3 address */
227 unsigned long wb3d; /* write back 3 data */
228 unsigned long wb2a; /* write back 2 address */
229 unsigned long wb2d; /* write back 2 data */
230 unsigned long wb1a; /* write back 1 address */
231 unsigned long wb1dpd0; /* write back 1 data/push data 0*/
232 unsigned long pd1; /* push data 1*/
233 unsigned long pd2; /* push data 2*/
234 unsigned long pd3; /* push data 3*/
235 } fmt7;
236 struct {
237 unsigned long iaddr; /* instruction address */
238 unsigned short int1[4]; /* internal registers */
239 } fmt9;
240 struct {
241 unsigned short int1;
242 unsigned short ssw; /* special status word */
243 unsigned short isc; /* instruction stage c */
244 unsigned short isb; /* instruction stage b */
245 unsigned long daddr; /* data cycle fault address */
246 unsigned short int2[2];
247 unsigned long dobuf; /* data cycle output buffer */
248 unsigned short int3[2];
249 } fmta;
250 struct {
251 unsigned short int1;
252 unsigned short ssw; /* special status word */
253 unsigned short isc; /* instruction stage c */
254 unsigned short isb; /* instruction stage b */
255 unsigned long daddr; /* data cycle fault address */
256 unsigned short int2[2];
257 unsigned long dobuf; /* data cycle output buffer */
258 unsigned short int3[4];
259 unsigned long baddr; /* stage B address */
260 unsigned short int4[2];
261 unsigned long dibuf; /* data cycle input buffer */
262 unsigned short int5[3];
263 unsigned ver : 4; /* stack frame version # */
264 unsigned int6:12;
265 unsigned short int7[18];
266 } fmtb;
267 } un;
268};
269
270#endif /* __ASSEMBLY__ */
271
272#endif /* _M68K_TRAPS_H */
diff --git a/arch/m68k/include/asm/traps_mm.h b/arch/m68k/include/asm/traps_mm.h
deleted file mode 100644
index 8caef25624c7..000000000000
--- a/arch/m68k/include/asm/traps_mm.h
+++ /dev/null
@@ -1,272 +0,0 @@
1/*
2 * linux/include/asm/traps.h
3 *
4 * Copyright (C) 1993 Hamish Macdonald
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _M68K_TRAPS_H
12#define _M68K_TRAPS_H
13
14#ifndef __ASSEMBLY__
15
16#include <linux/linkage.h>
17#include <asm/ptrace.h>
18
19typedef void (*e_vector)(void);
20
21asmlinkage void auto_inthandler(void);
22asmlinkage void user_inthandler(void);
23asmlinkage void bad_inthandler(void);
24
25extern e_vector vectors[];
26
27#endif
28
29#define VEC_RESETSP (0)
30#define VEC_RESETPC (1)
31#define VEC_BUSERR (2)
32#define VEC_ADDRERR (3)
33#define VEC_ILLEGAL (4)
34#define VEC_ZERODIV (5)
35#define VEC_CHK (6)
36#define VEC_TRAP (7)
37#define VEC_PRIV (8)
38#define VEC_TRACE (9)
39#define VEC_LINE10 (10)
40#define VEC_LINE11 (11)
41#define VEC_RESV12 (12)
42#define VEC_COPROC (13)
43#define VEC_FORMAT (14)
44#define VEC_UNINT (15)
45#define VEC_RESV16 (16)
46#define VEC_RESV17 (17)
47#define VEC_RESV18 (18)
48#define VEC_RESV19 (19)
49#define VEC_RESV20 (20)
50#define VEC_RESV21 (21)
51#define VEC_RESV22 (22)
52#define VEC_RESV23 (23)
53#define VEC_SPUR (24)
54#define VEC_INT1 (25)
55#define VEC_INT2 (26)
56#define VEC_INT3 (27)
57#define VEC_INT4 (28)
58#define VEC_INT5 (29)
59#define VEC_INT6 (30)
60#define VEC_INT7 (31)
61#define VEC_SYS (32)
62#define VEC_TRAP1 (33)
63#define VEC_TRAP2 (34)
64#define VEC_TRAP3 (35)
65#define VEC_TRAP4 (36)
66#define VEC_TRAP5 (37)
67#define VEC_TRAP6 (38)
68#define VEC_TRAP7 (39)
69#define VEC_TRAP8 (40)
70#define VEC_TRAP9 (41)
71#define VEC_TRAP10 (42)
72#define VEC_TRAP11 (43)
73#define VEC_TRAP12 (44)
74#define VEC_TRAP13 (45)
75#define VEC_TRAP14 (46)
76#define VEC_TRAP15 (47)
77#define VEC_FPBRUC (48)
78#define VEC_FPIR (49)
79#define VEC_FPDIVZ (50)
80#define VEC_FPUNDER (51)
81#define VEC_FPOE (52)
82#define VEC_FPOVER (53)
83#define VEC_FPNAN (54)
84#define VEC_FPUNSUP (55)
85#define VEC_MMUCFG (56)
86#define VEC_MMUILL (57)
87#define VEC_MMUACC (58)
88#define VEC_RESV59 (59)
89#define VEC_UNIMPEA (60)
90#define VEC_UNIMPII (61)
91#define VEC_RESV62 (62)
92#define VEC_RESV63 (63)
93#define VEC_USER (64)
94
95#define VECOFF(vec) ((vec)<<2)
96
97#ifndef __ASSEMBLY__
98
99/* Status register bits */
100#define PS_T (0x8000)
101#define PS_S (0x2000)
102#define PS_M (0x1000)
103#define PS_C (0x0001)
104
105/* bits for 68020/68030 special status word */
106
107#define FC (0x8000)
108#define FB (0x4000)
109#define RC (0x2000)
110#define RB (0x1000)
111#define DF (0x0100)
112#define RM (0x0080)
113#define RW (0x0040)
114#define SZ (0x0030)
115#define DFC (0x0007)
116
117/* bits for 68030 MMU status register (mmusr,psr) */
118
119#define MMU_B (0x8000) /* bus error */
120#define MMU_L (0x4000) /* limit violation */
121#define MMU_S (0x2000) /* supervisor violation */
122#define MMU_WP (0x0800) /* write-protected */
123#define MMU_I (0x0400) /* invalid descriptor */
124#define MMU_M (0x0200) /* ATC entry modified */
125#define MMU_T (0x0040) /* transparent translation */
126#define MMU_NUM (0x0007) /* number of levels traversed */
127
128
129/* bits for 68040 special status word */
130#define CP_040 (0x8000)
131#define CU_040 (0x4000)
132#define CT_040 (0x2000)
133#define CM_040 (0x1000)
134#define MA_040 (0x0800)
135#define ATC_040 (0x0400)
136#define LK_040 (0x0200)
137#define RW_040 (0x0100)
138#define SIZ_040 (0x0060)
139#define TT_040 (0x0018)
140#define TM_040 (0x0007)
141
142/* bits for 68040 write back status word */
143#define WBV_040 (0x80)
144#define WBSIZ_040 (0x60)
145#define WBBYT_040 (0x20)
146#define WBWRD_040 (0x40)
147#define WBLNG_040 (0x00)
148#define WBTT_040 (0x18)
149#define WBTM_040 (0x07)
150
151/* bus access size codes */
152#define BA_SIZE_BYTE (0x20)
153#define BA_SIZE_WORD (0x40)
154#define BA_SIZE_LONG (0x00)
155#define BA_SIZE_LINE (0x60)
156
157/* bus access transfer type codes */
158#define BA_TT_MOVE16 (0x08)
159
160/* bits for 68040 MMU status register (mmusr) */
161#define MMU_B_040 (0x0800)
162#define MMU_G_040 (0x0400)
163#define MMU_S_040 (0x0080)
164#define MMU_CM_040 (0x0060)
165#define MMU_M_040 (0x0010)
166#define MMU_WP_040 (0x0004)
167#define MMU_T_040 (0x0002)
168#define MMU_R_040 (0x0001)
169
170/* bits in the 68060 fault status long word (FSLW) */
171#define MMU060_MA (0x08000000) /* misaligned */
172#define MMU060_LK (0x02000000) /* locked transfer */
173#define MMU060_RW (0x01800000) /* read/write */
174# define MMU060_RW_W (0x00800000) /* write */
175# define MMU060_RW_R (0x01000000) /* read */
176# define MMU060_RW_RMW (0x01800000) /* read/modify/write */
177# define MMU060_W (0x00800000) /* general write, includes rmw */
178#define MMU060_SIZ (0x00600000) /* transfer size */
179#define MMU060_TT (0x00180000) /* transfer type (TT) bits */
180#define MMU060_TM (0x00070000) /* transfer modifier (TM) bits */
181#define MMU060_IO (0x00008000) /* instruction or operand */
182#define MMU060_PBE (0x00004000) /* push buffer bus error */
183#define MMU060_SBE (0x00002000) /* store buffer bus error */
184#define MMU060_PTA (0x00001000) /* pointer A fault */
185#define MMU060_PTB (0x00000800) /* pointer B fault */
186#define MMU060_IL (0x00000400) /* double indirect descr fault */
187#define MMU060_PF (0x00000200) /* page fault (invalid descr) */
188#define MMU060_SP (0x00000100) /* supervisor protection */
189#define MMU060_WP (0x00000080) /* write protection */
190#define MMU060_TWE (0x00000040) /* bus error on table search */
191#define MMU060_RE (0x00000020) /* bus error on read */
192#define MMU060_WE (0x00000010) /* bus error on write */
193#define MMU060_TTR (0x00000008) /* error caused by TTR translation */
194#define MMU060_BPE (0x00000004) /* branch prediction error */
195#define MMU060_SEE (0x00000001) /* software emulated error */
196
197/* cases of missing or invalid descriptors */
198#define MMU060_DESC_ERR (MMU060_PTA | MMU060_PTB | \
199 MMU060_IL | MMU060_PF)
200/* bits that indicate real errors */
201#define MMU060_ERR_BITS (MMU060_PBE | MMU060_SBE | MMU060_DESC_ERR | MMU060_SP | \
202 MMU060_WP | MMU060_TWE | MMU060_RE | MMU060_WE)
203
204/* structure for stack frames */
205
206struct frame {
207 struct pt_regs ptregs;
208 union {
209 struct {
210 unsigned long iaddr; /* instruction address */
211 } fmt2;
212 struct {
213 unsigned long effaddr; /* effective address */
214 } fmt3;
215 struct {
216 unsigned long effaddr; /* effective address */
217 unsigned long pc; /* pc of faulted instr */
218 } fmt4;
219 struct {
220 unsigned long effaddr; /* effective address */
221 unsigned short ssw; /* special status word */
222 unsigned short wb3s; /* write back 3 status */
223 unsigned short wb2s; /* write back 2 status */
224 unsigned short wb1s; /* write back 1 status */
225 unsigned long faddr; /* fault address */
226 unsigned long wb3a; /* write back 3 address */
227 unsigned long wb3d; /* write back 3 data */
228 unsigned long wb2a; /* write back 2 address */
229 unsigned long wb2d; /* write back 2 data */
230 unsigned long wb1a; /* write back 1 address */
231 unsigned long wb1dpd0; /* write back 1 data/push data 0*/
232 unsigned long pd1; /* push data 1*/
233 unsigned long pd2; /* push data 2*/
234 unsigned long pd3; /* push data 3*/
235 } fmt7;
236 struct {
237 unsigned long iaddr; /* instruction address */
238 unsigned short int1[4]; /* internal registers */
239 } fmt9;
240 struct {
241 unsigned short int1;
242 unsigned short ssw; /* special status word */
243 unsigned short isc; /* instruction stage c */
244 unsigned short isb; /* instruction stage b */
245 unsigned long daddr; /* data cycle fault address */
246 unsigned short int2[2];
247 unsigned long dobuf; /* data cycle output buffer */
248 unsigned short int3[2];
249 } fmta;
250 struct {
251 unsigned short int1;
252 unsigned short ssw; /* special status word */
253 unsigned short isc; /* instruction stage c */
254 unsigned short isb; /* instruction stage b */
255 unsigned long daddr; /* data cycle fault address */
256 unsigned short int2[2];
257 unsigned long dobuf; /* data cycle output buffer */
258 unsigned short int3[4];
259 unsigned long baddr; /* stage B address */
260 unsigned short int4[2];
261 unsigned long dibuf; /* data cycle input buffer */
262 unsigned short int5[3];
263 unsigned ver : 4; /* stack frame version # */
264 unsigned int6:12;
265 unsigned short int7[18];
266 } fmtb;
267 } un;
268};
269
270#endif /* __ASSEMBLY__ */
271
272#endif /* _M68K_TRAPS_H */
diff --git a/arch/m68k/include/asm/traps_no.h b/arch/m68k/include/asm/traps_no.h
deleted file mode 100644
index d0671e5f8e29..000000000000
--- a/arch/m68k/include/asm/traps_no.h
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * linux/include/asm/traps.h
3 *
4 * Copyright (C) 1993 Hamish Macdonald
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _M68KNOMMU_TRAPS_H
12#define _M68KNOMMU_TRAPS_H
13
14#ifndef __ASSEMBLY__
15
16typedef void (*e_vector)(void);
17
18extern e_vector vectors[];
19extern void init_vectors(void);
20extern void enable_vector(unsigned int irq);
21extern void disable_vector(unsigned int irq);
22extern void ack_vector(unsigned int irq);
23
24#endif
25
26#define VEC_BUSERR (2)
27#define VEC_ADDRERR (3)
28#define VEC_ILLEGAL (4)
29#define VEC_ZERODIV (5)
30#define VEC_CHK (6)
31#define VEC_TRAP (7)
32#define VEC_PRIV (8)
33#define VEC_TRACE (9)
34#define VEC_LINE10 (10)
35#define VEC_LINE11 (11)
36#define VEC_RESV1 (12)
37#define VEC_COPROC (13)
38#define VEC_FORMAT (14)
39#define VEC_UNINT (15)
40#define VEC_SPUR (24)
41#define VEC_INT1 (25)
42#define VEC_INT2 (26)
43#define VEC_INT3 (27)
44#define VEC_INT4 (28)
45#define VEC_INT5 (29)
46#define VEC_INT6 (30)
47#define VEC_INT7 (31)
48#define VEC_SYS (32)
49#define VEC_TRAP1 (33)
50#define VEC_TRAP2 (34)
51#define VEC_TRAP3 (35)
52#define VEC_TRAP4 (36)
53#define VEC_TRAP5 (37)
54#define VEC_TRAP6 (38)
55#define VEC_TRAP7 (39)
56#define VEC_TRAP8 (40)
57#define VEC_TRAP9 (41)
58#define VEC_TRAP10 (42)
59#define VEC_TRAP11 (43)
60#define VEC_TRAP12 (44)
61#define VEC_TRAP13 (45)
62#define VEC_TRAP14 (46)
63#define VEC_TRAP15 (47)
64#define VEC_FPBRUC (48)
65#define VEC_FPIR (49)
66#define VEC_FPDIVZ (50)
67#define VEC_FPUNDER (51)
68#define VEC_FPOE (52)
69#define VEC_FPOVER (53)
70#define VEC_FPNAN (54)
71#define VEC_FPUNSUP (55)
72#define VEC_UNIMPEA (60)
73#define VEC_UNIMPII (61)
74#define VEC_USER (64)
75
76#define VECOFF(vec) ((vec)<<2)
77
78#ifndef __ASSEMBLY__
79
80/* Status register bits */
81#define PS_T (0x8000)
82#define PS_S (0x2000)
83#define PS_M (0x1000)
84#define PS_C (0x0001)
85
86/* structure for stack frames */
87
88struct frame {
89 struct pt_regs ptregs;
90 union {
91 struct {
92 unsigned long iaddr; /* instruction address */
93 } fmt2;
94 struct {
95 unsigned long effaddr; /* effective address */
96 } fmt3;
97 struct {
98 unsigned long effaddr; /* effective address */
99 unsigned long pc; /* pc of faulted instr */
100 } fmt4;
101 struct {
102 unsigned long effaddr; /* effective address */
103 unsigned short ssw; /* special status word */
104 unsigned short wb3s; /* write back 3 status */
105 unsigned short wb2s; /* write back 2 status */
106 unsigned short wb1s; /* write back 1 status */
107 unsigned long faddr; /* fault address */
108 unsigned long wb3a; /* write back 3 address */
109 unsigned long wb3d; /* write back 3 data */
110 unsigned long wb2a; /* write back 2 address */
111 unsigned long wb2d; /* write back 2 data */
112 unsigned long wb1a; /* write back 1 address */
113 unsigned long wb1dpd0; /* write back 1 data/push data 0*/
114 unsigned long pd1; /* push data 1*/
115 unsigned long pd2; /* push data 2*/
116 unsigned long pd3; /* push data 3*/
117 } fmt7;
118 struct {
119 unsigned long iaddr; /* instruction address */
120 unsigned short int1[4]; /* internal registers */
121 } fmt9;
122 struct {
123 unsigned short int1;
124 unsigned short ssw; /* special status word */
125 unsigned short isc; /* instruction stage c */
126 unsigned short isb; /* instruction stage b */
127 unsigned long daddr; /* data cycle fault address */
128 unsigned short int2[2];
129 unsigned long dobuf; /* data cycle output buffer */
130 unsigned short int3[2];
131 } fmta;
132 struct {
133 unsigned short int1;
134 unsigned short ssw; /* special status word */
135 unsigned short isc; /* instruction stage c */
136 unsigned short isb; /* instruction stage b */
137 unsigned long daddr; /* data cycle fault address */
138 unsigned short int2[2];
139 unsigned long dobuf; /* data cycle output buffer */
140 unsigned short int3[4];
141 unsigned long baddr; /* stage B address */
142 unsigned short int4[2];
143 unsigned long dibuf; /* data cycle input buffer */
144 unsigned short int5[3];
145 unsigned ver : 4; /* stack frame version # */
146 unsigned int6:12;
147 unsigned short int7[18];
148 } fmtb;
149 } un;
150};
151
152#endif /* __ASSEMBLY__ */
153
154#endif /* _M68KNOMMU_TRAPS_H */
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 303730afb1c9..b3963ab3d149 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -359,12 +359,6 @@ void __init setup_arch(char **cmdline_p)
359 isa_type = ISA_TYPE_Q40; 359 isa_type = ISA_TYPE_Q40;
360 isa_sex = 0; 360 isa_sex = 0;
361 } 361 }
362#ifdef CONFIG_GG2
363 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(GG2_ISA)) {
364 isa_type = ISA_TYPE_GG2;
365 isa_sex = 0;
366 }
367#endif
368#ifdef CONFIG_AMIGA_PCMCIA 362#ifdef CONFIG_AMIGA_PCMCIA
369 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) { 363 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
370 isa_type = ISA_TYPE_AG; 364 isa_type = ISA_TYPE_AG;
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 2f431ece7b5f..3db2e7f902aa 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -12,7 +12,6 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/sem.h> 15#include <linux/sem.h>
17#include <linux/msg.h> 16#include <linux/msg.h>
18#include <linux/shm.h> 17#include <linux/shm.h>
@@ -377,7 +376,6 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
377 struct vm_area_struct *vma; 376 struct vm_area_struct *vma;
378 int ret = -EINVAL; 377 int ret = -EINVAL;
379 378
380 lock_kernel();
381 if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL || 379 if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
382 cache & ~FLUSH_CACHE_BOTH) 380 cache & ~FLUSH_CACHE_BOTH)
383 goto out; 381 goto out;
@@ -446,7 +444,6 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
446 } 444 }
447 } 445 }
448out: 446out:
449 unlock_kernel();
450 return ret; 447 return ret;
451} 448}
452 449
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 4926b3856c15..06438dac08ff 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -42,9 +42,7 @@ static inline int set_rtc_mmss(unsigned long nowtime)
42static irqreturn_t timer_interrupt(int irq, void *dummy) 42static irqreturn_t timer_interrupt(int irq, void *dummy)
43{ 43{
44 do_timer(1); 44 do_timer(1);
45#ifndef CONFIG_SMP
46 update_process_times(user_mode(get_irq_regs())); 45 update_process_times(user_mode(get_irq_regs()));
47#endif
48 profile_tick(CPU_PROFILING); 46 profile_tick(CPU_PROFILING);
49 47
50#ifdef CONFIG_HEARTBEAT 48#ifdef CONFIG_HEARTBEAT
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index ad90393a3361..2d9e21bd313a 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -67,9 +67,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
67 intersil_clear(); 67 intersil_clear();
68#endif 68#endif
69 do_timer(1); 69 do_timer(1);
70#ifndef CONFIG_SMP
71 update_process_times(user_mode(get_irq_regs())); 70 update_process_times(user_mode(get_irq_regs()));
72#endif
73 if (!(kstat_cpu(0).irqs[irq] % 20)) 71 if (!(kstat_cpu(0).irqs[irq] % 20))
74 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); 72 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
75 return IRQ_HANDLED; 73 return IRQ_HANDLED;
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index a90acf5b0cde..7089dd9d843b 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -50,9 +50,8 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
50 50
51 write_sequnlock(&xtime_lock); 51 write_sequnlock(&xtime_lock);
52 52
53#ifndef CONFIG_SMP
54 update_process_times(user_mode(get_irq_regs())); 53 update_process_times(user_mode(get_irq_regs()));
55#endif 54
56 return(IRQ_HANDLED); 55 return(IRQ_HANDLED);
57} 56}
58#endif 57#endif
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index dd7d591f70ea..cd79d7e92ce6 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -112,7 +112,7 @@ ret_from_exception:
112 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 112 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
113 movel %d1,%a0 113 movel %d1,%a0
114 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 114 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
115 andl #_TIF_NEED_RESCHED,%d1 115 andl #(1<<TIF_NEED_RESCHED),%d1
116 jeq Lkernel_return 116 jeq Lkernel_return
117 117
118 movel %a0@(TI_PREEMPTCOUNT),%d1 118 movel %a0@(TI_PREEMPTCOUNT),%d1
@@ -136,7 +136,7 @@ Luser_return:
136 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 136 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
137 movel %d1,%a0 137 movel %d1,%a0
138 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 138 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
139 andl #_TIF_WORK_MASK,%d1 139 andl #0xefff,%d1
140 jne Lwork_to_do /* still work to do */ 140 jne Lwork_to_do /* still work to do */
141 141
142Lreturn: 142Lreturn:
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 1dc55ee2681b..3691630931d6 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -24,6 +24,33 @@
24 24
25#include <prom.h> 25#include <prom.h>
26 26
27static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
28 unsigned int old_state)
29{
30 switch (state) {
31 case 0:
32 if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) {
33 /* power-on sequence as suggested in the databooks */
34 __raw_writel(0, port->membase + UART_MOD_CNTRL);
35 wmb();
36 __raw_writel(1, port->membase + UART_MOD_CNTRL);
37 wmb();
38 }
39 __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */
40 wmb();
41 serial8250_do_pm(port, state, old_state);
42 break;
43 case 3: /* power off */
44 serial8250_do_pm(port, state, old_state);
45 __raw_writel(0, port->membase + UART_MOD_CNTRL);
46 wmb();
47 break;
48 default:
49 serial8250_do_pm(port, state, old_state);
50 break;
51 }
52}
53
27#define PORT(_base, _irq) \ 54#define PORT(_base, _irq) \
28 { \ 55 { \
29 .mapbase = _base, \ 56 .mapbase = _base, \
@@ -33,6 +60,7 @@
33 .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ 60 .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
34 UPF_FIXED_TYPE, \ 61 UPF_FIXED_TYPE, \
35 .type = PORT_16550A, \ 62 .type = PORT_16550A, \
63 .pm = alchemy_8250_pm, \
36 } 64 }
37 65
38static struct plat_serial8250_port au1x00_uart_data[] = { 66static struct plat_serial8250_port au1x00_uart_data[] = {
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 5ef06a164a82..e5916a516e58 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -49,11 +49,6 @@
49 * We only have to save/restore registers that aren't otherwise 49 * We only have to save/restore registers that aren't otherwise
50 * done as part of a driver pm_* function. 50 * done as part of a driver pm_* function.
51 */ 51 */
52static unsigned int sleep_uart0_inten;
53static unsigned int sleep_uart0_fifoctl;
54static unsigned int sleep_uart0_linectl;
55static unsigned int sleep_uart0_clkdiv;
56static unsigned int sleep_uart0_enable;
57static unsigned int sleep_usb[2]; 52static unsigned int sleep_usb[2];
58static unsigned int sleep_sys_clocks[5]; 53static unsigned int sleep_sys_clocks[5];
59static unsigned int sleep_sys_pinfunc; 54static unsigned int sleep_sys_pinfunc;
@@ -62,22 +57,6 @@ static unsigned int sleep_static_memctlr[4][3];
62 57
63static void save_core_regs(void) 58static void save_core_regs(void)
64{ 59{
65 extern void save_au1xxx_intctl(void);
66 extern void pm_eth0_shutdown(void);
67
68 /*
69 * Do the serial ports.....these really should be a pm_*
70 * registered function by the driver......but of course the
71 * standard serial driver doesn't understand our Au1xxx
72 * unique registers.
73 */
74 sleep_uart0_inten = au_readl(UART0_ADDR + UART_IER);
75 sleep_uart0_fifoctl = au_readl(UART0_ADDR + UART_FCR);
76 sleep_uart0_linectl = au_readl(UART0_ADDR + UART_LCR);
77 sleep_uart0_clkdiv = au_readl(UART0_ADDR + UART_CLK);
78 sleep_uart0_enable = au_readl(UART0_ADDR + UART_MOD_CNTRL);
79 au_sync();
80
81#ifndef CONFIG_SOC_AU1200 60#ifndef CONFIG_SOC_AU1200
82 /* Shutdown USB host/device. */ 61 /* Shutdown USB host/device. */
83 sleep_usb[0] = au_readl(USB_HOST_CONFIG); 62 sleep_usb[0] = au_readl(USB_HOST_CONFIG);
@@ -175,20 +154,6 @@ static void restore_core_regs(void)
175 au_writel(sleep_static_memctlr[3][0], MEM_STCFG3); 154 au_writel(sleep_static_memctlr[3][0], MEM_STCFG3);
176 au_writel(sleep_static_memctlr[3][1], MEM_STTIME3); 155 au_writel(sleep_static_memctlr[3][1], MEM_STTIME3);
177 au_writel(sleep_static_memctlr[3][2], MEM_STADDR3); 156 au_writel(sleep_static_memctlr[3][2], MEM_STADDR3);
178
179 /*
180 * Enable the UART if it was enabled before sleep.
181 * I guess I should define module control bits........
182 */
183 if (sleep_uart0_enable & 0x02) {
184 au_writel(0, UART0_ADDR + UART_MOD_CNTRL); au_sync();
185 au_writel(1, UART0_ADDR + UART_MOD_CNTRL); au_sync();
186 au_writel(3, UART0_ADDR + UART_MOD_CNTRL); au_sync();
187 au_writel(sleep_uart0_inten, UART0_ADDR + UART_IER); au_sync();
188 au_writel(sleep_uart0_fifoctl, UART0_ADDR + UART_FCR); au_sync();
189 au_writel(sleep_uart0_linectl, UART0_ADDR + UART_LCR); au_sync();
190 au_writel(sleep_uart0_clkdiv, UART0_ADDR + UART_CLK); au_sync();
191 }
192} 157}
193 158
194void au_sleep(void) 159void au_sleep(void)
diff --git a/arch/mn10300/include/asm/ioctls.h b/arch/mn10300/include/asm/ioctls.h
index cb8cf1902234..0212f4b22557 100644
--- a/arch/mn10300/include/asm/ioctls.h
+++ b/arch/mn10300/include/asm/ioctls.h
@@ -1,88 +1,6 @@
1#ifndef _ASM_IOCTLS_H 1#ifndef _ASM_IOCTLS_H
2#define _ASM_IOCTLS_H 2#define _ASM_IOCTLS_H
3 3
4#include <asm/ioctl.h> 4#include <asm-generic/ioctls.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T', 0x2A, struct termios2)
51#define TCSETS2 _IOW('T', 0x2B, struct termios2)
52#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
53#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
54#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number
55 * (of pty-mux device) */
56#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
57#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */
58
59#define FIONCLEX 0x5450
60#define FIOCLEX 0x5451
61#define FIOASYNC 0x5452
62#define TIOCSERCONFIG 0x5453
63#define TIOCSERGWILD 0x5454
64#define TIOCSERSWILD 0x5455
65#define TIOCGLCKTRMIOS 0x5456
66#define TIOCSLCKTRMIOS 0x5457
67#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
68#define TIOCSERGETLSR 0x5459 /* Get line status register */
69#define TIOCSERGETMULTI 0x545A /* Get multiport config */
70#define TIOCSERSETMULTI 0x545B /* Set multiport config */
71
72#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
73#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
74#define FIOQSIZE 0x5460
75
76/* Used for packet mode */
77#define TIOCPKT_DATA 0
78#define TIOCPKT_FLUSHREAD 1
79#define TIOCPKT_FLUSHWRITE 2
80#define TIOCPKT_STOP 4
81#define TIOCPKT_START 8
82#define TIOCPKT_NOSTOP 16
83#define TIOCPKT_DOSTOP 32
84#define TIOCPKT_IOCTL 64
85
86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
87 5
88#endif /* _ASM_IOCTLS_H */ 6#endif /* _ASM_IOCTLS_H */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 6c67d9ebf166..19e5015e039b 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -209,169 +209,6 @@ static int __init of_add_fixed_phys(void)
209arch_initcall(of_add_fixed_phys); 209arch_initcall(of_add_fixed_phys);
210#endif /* CONFIG_FIXED_PHY */ 210#endif /* CONFIG_FIXED_PHY */
211 211
212static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
213{
214 if (!phy_type)
215 return FSL_USB2_PHY_NONE;
216 if (!strcasecmp(phy_type, "ulpi"))
217 return FSL_USB2_PHY_ULPI;
218 if (!strcasecmp(phy_type, "utmi"))
219 return FSL_USB2_PHY_UTMI;
220 if (!strcasecmp(phy_type, "utmi_wide"))
221 return FSL_USB2_PHY_UTMI_WIDE;
222 if (!strcasecmp(phy_type, "serial"))
223 return FSL_USB2_PHY_SERIAL;
224
225 return FSL_USB2_PHY_NONE;
226}
227
228static int __init fsl_usb_of_init(void)
229{
230 struct device_node *np;
231 unsigned int i = 0;
232 struct platform_device *usb_dev_mph = NULL, *usb_dev_dr_host = NULL,
233 *usb_dev_dr_client = NULL;
234 int ret;
235
236 for_each_compatible_node(np, NULL, "fsl-usb2-mph") {
237 struct resource r[2];
238 struct fsl_usb2_platform_data usb_data;
239 const unsigned char *prop = NULL;
240
241 memset(&r, 0, sizeof(r));
242 memset(&usb_data, 0, sizeof(usb_data));
243
244 ret = of_address_to_resource(np, 0, &r[0]);
245 if (ret)
246 goto err;
247
248 of_irq_to_resource(np, 0, &r[1]);
249
250 usb_dev_mph =
251 platform_device_register_simple("fsl-ehci", i, r, 2);
252 if (IS_ERR(usb_dev_mph)) {
253 ret = PTR_ERR(usb_dev_mph);
254 goto err;
255 }
256
257 usb_dev_mph->dev.coherent_dma_mask = 0xffffffffUL;
258 usb_dev_mph->dev.dma_mask = &usb_dev_mph->dev.coherent_dma_mask;
259
260 usb_data.operating_mode = FSL_USB2_MPH_HOST;
261
262 prop = of_get_property(np, "port0", NULL);
263 if (prop)
264 usb_data.port_enables |= FSL_USB2_PORT0_ENABLED;
265
266 prop = of_get_property(np, "port1", NULL);
267 if (prop)
268 usb_data.port_enables |= FSL_USB2_PORT1_ENABLED;
269
270 prop = of_get_property(np, "phy_type", NULL);
271 usb_data.phy_mode = determine_usb_phy(prop);
272
273 ret =
274 platform_device_add_data(usb_dev_mph, &usb_data,
275 sizeof(struct
276 fsl_usb2_platform_data));
277 if (ret)
278 goto unreg_mph;
279 i++;
280 }
281
282 for_each_compatible_node(np, NULL, "fsl-usb2-dr") {
283 struct resource r[2];
284 struct fsl_usb2_platform_data usb_data;
285 const unsigned char *prop = NULL;
286
287 if (!of_device_is_available(np))
288 continue;
289
290 memset(&r, 0, sizeof(r));
291 memset(&usb_data, 0, sizeof(usb_data));
292
293 ret = of_address_to_resource(np, 0, &r[0]);
294 if (ret)
295 goto unreg_mph;
296
297 of_irq_to_resource(np, 0, &r[1]);
298
299 prop = of_get_property(np, "dr_mode", NULL);
300
301 if (!prop || !strcmp(prop, "host")) {
302 usb_data.operating_mode = FSL_USB2_DR_HOST;
303 usb_dev_dr_host = platform_device_register_simple(
304 "fsl-ehci", i, r, 2);
305 if (IS_ERR(usb_dev_dr_host)) {
306 ret = PTR_ERR(usb_dev_dr_host);
307 goto err;
308 }
309 } else if (prop && !strcmp(prop, "peripheral")) {
310 usb_data.operating_mode = FSL_USB2_DR_DEVICE;
311 usb_dev_dr_client = platform_device_register_simple(
312 "fsl-usb2-udc", i, r, 2);
313 if (IS_ERR(usb_dev_dr_client)) {
314 ret = PTR_ERR(usb_dev_dr_client);
315 goto err;
316 }
317 } else if (prop && !strcmp(prop, "otg")) {
318 usb_data.operating_mode = FSL_USB2_DR_OTG;
319 usb_dev_dr_host = platform_device_register_simple(
320 "fsl-ehci", i, r, 2);
321 if (IS_ERR(usb_dev_dr_host)) {
322 ret = PTR_ERR(usb_dev_dr_host);
323 goto err;
324 }
325 usb_dev_dr_client = platform_device_register_simple(
326 "fsl-usb2-udc", i, r, 2);
327 if (IS_ERR(usb_dev_dr_client)) {
328 ret = PTR_ERR(usb_dev_dr_client);
329 goto err;
330 }
331 } else {
332 ret = -EINVAL;
333 goto err;
334 }
335
336 prop = of_get_property(np, "phy_type", NULL);
337 usb_data.phy_mode = determine_usb_phy(prop);
338
339 if (usb_dev_dr_host) {
340 usb_dev_dr_host->dev.coherent_dma_mask = 0xffffffffUL;
341 usb_dev_dr_host->dev.dma_mask = &usb_dev_dr_host->
342 dev.coherent_dma_mask;
343 if ((ret = platform_device_add_data(usb_dev_dr_host,
344 &usb_data, sizeof(struct
345 fsl_usb2_platform_data))))
346 goto unreg_dr;
347 }
348 if (usb_dev_dr_client) {
349 usb_dev_dr_client->dev.coherent_dma_mask = 0xffffffffUL;
350 usb_dev_dr_client->dev.dma_mask = &usb_dev_dr_client->
351 dev.coherent_dma_mask;
352 if ((ret = platform_device_add_data(usb_dev_dr_client,
353 &usb_data, sizeof(struct
354 fsl_usb2_platform_data))))
355 goto unreg_dr;
356 }
357 i++;
358 }
359 return 0;
360
361unreg_dr:
362 if (usb_dev_dr_host)
363 platform_device_unregister(usb_dev_dr_host);
364 if (usb_dev_dr_client)
365 platform_device_unregister(usb_dev_dr_client);
366unreg_mph:
367 if (usb_dev_mph)
368 platform_device_unregister(usb_dev_mph);
369err:
370 return ret;
371}
372
373arch_initcall(fsl_usb_of_init);
374
375#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) 212#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
376static __be32 __iomem *rstcr; 213static __be32 __iomem *rstcr;
377 214
diff --git a/arch/s390/include/asm/ioctls.h b/arch/s390/include/asm/ioctls.h
index 2f3d8736361f..960a4c1ebdf1 100644
--- a/arch/s390/include/asm/ioctls.h
+++ b/arch/s390/include/asm/ioctls.h
@@ -1,94 +1,8 @@
1/*
2 * include/asm-s390/ioctls.h
3 *
4 * S390 version
5 *
6 * Derived from "include/asm-i386/ioctls.h"
7 */
8
9#ifndef __ARCH_S390_IOCTLS_H__ 1#ifndef __ARCH_S390_IOCTLS_H__
10#define __ARCH_S390_IOCTLS_H__ 2#define __ARCH_S390_IOCTLS_H__
11 3
12#include <asm/ioctl.h>
13
14/* 0x54 is just a magic number to make these relatively unique ('T') */
15
16#define TCGETS 0x5401
17#define TCSETS 0x5402
18#define TCSETSW 0x5403
19#define TCSETSF 0x5404
20#define TCGETA 0x5405
21#define TCSETA 0x5406
22#define TCSETAW 0x5407
23#define TCSETAF 0x5408
24#define TCSBRK 0x5409
25#define TCXONC 0x540A
26#define TCFLSH 0x540B
27#define TIOCEXCL 0x540C
28#define TIOCNXCL 0x540D
29#define TIOCSCTTY 0x540E
30#define TIOCGPGRP 0x540F
31#define TIOCSPGRP 0x5410
32#define TIOCOUTQ 0x5411
33#define TIOCSTI 0x5412
34#define TIOCGWINSZ 0x5413
35#define TIOCSWINSZ 0x5414
36#define TIOCMGET 0x5415
37#define TIOCMBIS 0x5416
38#define TIOCMBIC 0x5417
39#define TIOCMSET 0x5418
40#define TIOCGSOFTCAR 0x5419
41#define TIOCSSOFTCAR 0x541A
42#define FIONREAD 0x541B
43#define TIOCINQ FIONREAD
44#define TIOCLINUX 0x541C
45#define TIOCCONS 0x541D
46#define TIOCGSERIAL 0x541E
47#define TIOCSSERIAL 0x541F
48#define TIOCPKT 0x5420
49#define FIONBIO 0x5421
50#define TIOCNOTTY 0x5422
51#define TIOCSETD 0x5423
52#define TIOCGETD 0x5424
53#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
54#define TIOCSBRK 0x5427 /* BSD compatibility */
55#define TIOCCBRK 0x5428 /* BSD compatibility */
56#define TIOCGSID 0x5429 /* Return the session ID of FD */
57#define TCGETS2 _IOR('T',0x2A, struct termios2)
58#define TCSETS2 _IOW('T',0x2B, struct termios2)
59#define TCSETSW2 _IOW('T',0x2C, struct termios2)
60#define TCSETSF2 _IOW('T',0x2D, struct termios2)
61#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
62#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
63#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
64
65#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
66#define FIOCLEX 0x5451
67#define FIOASYNC 0x5452
68#define TIOCSERCONFIG 0x5453
69#define TIOCSERGWILD 0x5454
70#define TIOCSERSWILD 0x5455
71#define TIOCGLCKTRMIOS 0x5456
72#define TIOCSLCKTRMIOS 0x5457
73#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
74#define TIOCSERGETLSR 0x5459 /* Get line status register */
75#define TIOCSERGETMULTI 0x545A /* Get multiport config */
76#define TIOCSERSETMULTI 0x545B /* Set multiport config */
77
78#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
79#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
80#define FIOQSIZE 0x545E 4#define FIOQSIZE 0x545E
81 5
82/* Used for packet mode */ 6#include <asm-generic/ioctls.h>
83#define TIOCPKT_DATA 0
84#define TIOCPKT_FLUSHREAD 1
85#define TIOCPKT_FLUSHWRITE 2
86#define TIOCPKT_STOP 4
87#define TIOCPKT_START 8
88#define TIOCPKT_NOSTOP 16
89#define TIOCPKT_DOSTOP 32
90#define TIOCPKT_IOCTL 64
91
92#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
93 7
94#endif 8#endif
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index cd28f9ad910d..f899e01a8ac9 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -47,6 +47,20 @@
47#ifdef CONFIG_SMP 47#ifdef CONFIG_SMP
48#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x 48#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
49#define __my_cpu_offset percpu_read(this_cpu_off) 49#define __my_cpu_offset percpu_read(this_cpu_off)
50
51/*
52 * Compared to the generic __my_cpu_offset version, the following
53 * saves one instruction and avoids clobbering a temp register.
54 */
55#define __this_cpu_ptr(ptr) \
56({ \
57 unsigned long tcp_ptr__; \
58 __verify_pcpu_ptr(ptr); \
59 asm volatile("add " __percpu_arg(1) ", %0" \
60 : "=r" (tcp_ptr__) \
61 : "m" (this_cpu_off), "0" (ptr)); \
62 (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
63})
50#else 64#else
51#define __percpu_arg(x) "%P" #x 65#define __percpu_arg(x) "%P" #x
52#endif 66#endif
diff --git a/block/Kconfig b/block/Kconfig
index 9be0b56eaee1..6c9213ef15a1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -77,6 +77,18 @@ config BLK_DEV_INTEGRITY
77 T10/SCSI Data Integrity Field or the T13/ATA External Path 77 T10/SCSI Data Integrity Field or the T13/ATA External Path
78 Protection. If in doubt, say N. 78 Protection. If in doubt, say N.
79 79
80config BLK_DEV_THROTTLING
81 bool "Block layer bio throttling support"
82 depends on BLK_CGROUP=y && EXPERIMENTAL
83 default n
84 ---help---
85 Block layer bio throttling support. It can be used to limit
86 the IO rate to a device. IO rate policies are per cgroup and
87 one needs to mount and use blkio cgroup controller for creating
88 cgroups and specifying per device IO rate policies.
89
90 See Documentation/cgroups/blkio-controller.txt for more information.
91
80endif # BLOCK 92endif # BLOCK
81 93
82config BLOCK_COMPAT 94config BLOCK_COMPAT
diff --git a/block/Makefile b/block/Makefile
index 0bb499a739cd..0fec4b3fab51 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,12 +3,13 @@
3# 3#
4 4
5obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ 5obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
6 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ 6 blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
7 blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ 7 blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
8 blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o 8 blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
9 9
10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o 10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
11obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o 11obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
12obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
12obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o 13obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
13obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o 14obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
14obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o 15obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
deleted file mode 100644
index f0faefca032f..000000000000
--- a/block/blk-barrier.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Functions related to barrier IO handling
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/gfp.h>
9
10#include "blk.h"
11
12/**
13 * blk_queue_ordered - does this queue support ordered writes
14 * @q: the request queue
15 * @ordered: one of QUEUE_ORDERED_*
16 *
17 * Description:
18 * For journalled file systems, doing ordered writes on a commit
19 * block instead of explicitly doing wait_on_buffer (which is bad
20 * for performance) can be a big win. Block drivers supporting this
21 * feature should call this function and indicate so.
22 *
23 **/
24int blk_queue_ordered(struct request_queue *q, unsigned ordered)
25{
26 if (ordered != QUEUE_ORDERED_NONE &&
27 ordered != QUEUE_ORDERED_DRAIN &&
28 ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
29 ordered != QUEUE_ORDERED_DRAIN_FUA &&
30 ordered != QUEUE_ORDERED_TAG &&
31 ordered != QUEUE_ORDERED_TAG_FLUSH &&
32 ordered != QUEUE_ORDERED_TAG_FUA) {
33 printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
34 return -EINVAL;
35 }
36
37 q->ordered = ordered;
38 q->next_ordered = ordered;
39
40 return 0;
41}
42EXPORT_SYMBOL(blk_queue_ordered);
43
44/*
45 * Cache flushing for ordered writes handling
46 */
47unsigned blk_ordered_cur_seq(struct request_queue *q)
48{
49 if (!q->ordseq)
50 return 0;
51 return 1 << ffz(q->ordseq);
52}
53
54unsigned blk_ordered_req_seq(struct request *rq)
55{
56 struct request_queue *q = rq->q;
57
58 BUG_ON(q->ordseq == 0);
59
60 if (rq == &q->pre_flush_rq)
61 return QUEUE_ORDSEQ_PREFLUSH;
62 if (rq == &q->bar_rq)
63 return QUEUE_ORDSEQ_BAR;
64 if (rq == &q->post_flush_rq)
65 return QUEUE_ORDSEQ_POSTFLUSH;
66
67 /*
68 * !fs requests don't need to follow barrier ordering. Always
69 * put them at the front. This fixes the following deadlock.
70 *
71 * http://thread.gmane.org/gmane.linux.kernel/537473
72 */
73 if (rq->cmd_type != REQ_TYPE_FS)
74 return QUEUE_ORDSEQ_DRAIN;
75
76 if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
77 (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
78 return QUEUE_ORDSEQ_DRAIN;
79 else
80 return QUEUE_ORDSEQ_DONE;
81}
82
83bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
84{
85 struct request *rq;
86
87 if (error && !q->orderr)
88 q->orderr = error;
89
90 BUG_ON(q->ordseq & seq);
91 q->ordseq |= seq;
92
93 if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
94 return false;
95
96 /*
97 * Okay, sequence complete.
98 */
99 q->ordseq = 0;
100 rq = q->orig_bar_rq;
101 __blk_end_request_all(rq, q->orderr);
102 return true;
103}
104
105static void pre_flush_end_io(struct request *rq, int error)
106{
107 elv_completed_request(rq->q, rq);
108 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
109}
110
111static void bar_end_io(struct request *rq, int error)
112{
113 elv_completed_request(rq->q, rq);
114 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
115}
116
117static void post_flush_end_io(struct request *rq, int error)
118{
119 elv_completed_request(rq->q, rq);
120 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
121}
122
123static void queue_flush(struct request_queue *q, unsigned which)
124{
125 struct request *rq;
126 rq_end_io_fn *end_io;
127
128 if (which == QUEUE_ORDERED_DO_PREFLUSH) {
129 rq = &q->pre_flush_rq;
130 end_io = pre_flush_end_io;
131 } else {
132 rq = &q->post_flush_rq;
133 end_io = post_flush_end_io;
134 }
135
136 blk_rq_init(q, rq);
137 rq->cmd_type = REQ_TYPE_FS;
138 rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
139 rq->rq_disk = q->orig_bar_rq->rq_disk;
140 rq->end_io = end_io;
141
142 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
143}
144
145static inline bool start_ordered(struct request_queue *q, struct request **rqp)
146{
147 struct request *rq = *rqp;
148 unsigned skip = 0;
149
150 q->orderr = 0;
151 q->ordered = q->next_ordered;
152 q->ordseq |= QUEUE_ORDSEQ_STARTED;
153
154 /*
155 * For an empty barrier, there's no actual BAR request, which
156 * in turn makes POSTFLUSH unnecessary. Mask them off.
157 */
158 if (!blk_rq_sectors(rq)) {
159 q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
160 QUEUE_ORDERED_DO_POSTFLUSH);
161 /*
162 * Empty barrier on a write-through device w/ ordered
163 * tag has no command to issue and without any command
164 * to issue, ordering by tag can't be used. Drain
165 * instead.
166 */
167 if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
168 !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
169 q->ordered &= ~QUEUE_ORDERED_BY_TAG;
170 q->ordered |= QUEUE_ORDERED_BY_DRAIN;
171 }
172 }
173
174 /* stash away the original request */
175 blk_dequeue_request(rq);
176 q->orig_bar_rq = rq;
177 rq = NULL;
178
179 /*
180 * Queue ordered sequence. As we stack them at the head, we
181 * need to queue in reverse order. Note that we rely on that
182 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
183 * request gets inbetween ordered sequence.
184 */
185 if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
186 queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
187 rq = &q->post_flush_rq;
188 } else
189 skip |= QUEUE_ORDSEQ_POSTFLUSH;
190
191 if (q->ordered & QUEUE_ORDERED_DO_BAR) {
192 rq = &q->bar_rq;
193
194 /* initialize proxy request and queue it */
195 blk_rq_init(q, rq);
196 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
197 rq->cmd_flags |= REQ_WRITE;
198 if (q->ordered & QUEUE_ORDERED_DO_FUA)
199 rq->cmd_flags |= REQ_FUA;
200 init_request_from_bio(rq, q->orig_bar_rq->bio);
201 rq->end_io = bar_end_io;
202
203 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
204 } else
205 skip |= QUEUE_ORDSEQ_BAR;
206
207 if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
208 queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
209 rq = &q->pre_flush_rq;
210 } else
211 skip |= QUEUE_ORDSEQ_PREFLUSH;
212
213 if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
214 rq = NULL;
215 else
216 skip |= QUEUE_ORDSEQ_DRAIN;
217
218 *rqp = rq;
219
220 /*
221 * Complete skipped sequences. If whole sequence is complete,
222 * return false to tell elevator that this request is gone.
223 */
224 return !blk_ordered_complete_seq(q, skip, 0);
225}
226
227bool blk_do_ordered(struct request_queue *q, struct request **rqp)
228{
229 struct request *rq = *rqp;
230 const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
231 (rq->cmd_flags & REQ_HARDBARRIER);
232
233 if (!q->ordseq) {
234 if (!is_barrier)
235 return true;
236
237 if (q->next_ordered != QUEUE_ORDERED_NONE)
238 return start_ordered(q, rqp);
239 else {
240 /*
241 * Queue ordering not supported. Terminate
242 * with prejudice.
243 */
244 blk_dequeue_request(rq);
245 __blk_end_request_all(rq, -EOPNOTSUPP);
246 *rqp = NULL;
247 return false;
248 }
249 }
250
251 /*
252 * Ordered sequence in progress
253 */
254
255 /* Special requests are not subject to ordering rules. */
256 if (rq->cmd_type != REQ_TYPE_FS &&
257 rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
258 return true;
259
260 if (q->ordered & QUEUE_ORDERED_BY_TAG) {
261 /* Ordered by tag. Blocking the next barrier is enough. */
262 if (is_barrier && rq != &q->bar_rq)
263 *rqp = NULL;
264 } else {
265 /* Ordered by draining. Wait for turn. */
266 WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
267 if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
268 *rqp = NULL;
269 }
270
271 return true;
272}
273
274static void bio_end_empty_barrier(struct bio *bio, int err)
275{
276 if (err) {
277 if (err == -EOPNOTSUPP)
278 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
279 clear_bit(BIO_UPTODATE, &bio->bi_flags);
280 }
281 if (bio->bi_private)
282 complete(bio->bi_private);
283 bio_put(bio);
284}
285
286/**
287 * blkdev_issue_flush - queue a flush
288 * @bdev: blockdev to issue flush for
289 * @gfp_mask: memory allocation flags (for bio_alloc)
290 * @error_sector: error sector
291 * @flags: BLKDEV_IFL_* flags to control behaviour
292 *
293 * Description:
294 * Issue a flush for the block device in question. Caller can supply
295 * room for storing the error offset in case of a flush error, if they
296 * wish to. If WAIT flag is not passed then caller may check only what
297 * request was pushed in some internal queue for later handling.
298 */
299int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
300 sector_t *error_sector, unsigned long flags)
301{
302 DECLARE_COMPLETION_ONSTACK(wait);
303 struct request_queue *q;
304 struct bio *bio;
305 int ret = 0;
306
307 if (bdev->bd_disk == NULL)
308 return -ENXIO;
309
310 q = bdev_get_queue(bdev);
311 if (!q)
312 return -ENXIO;
313
314 /*
315 * some block devices may not have their queue correctly set up here
316 * (e.g. loop device without a backing file) and so issuing a flush
317 * here will panic. Ensure there is a request function before issuing
318 * the barrier.
319 */
320 if (!q->make_request_fn)
321 return -ENXIO;
322
323 bio = bio_alloc(gfp_mask, 0);
324 bio->bi_end_io = bio_end_empty_barrier;
325 bio->bi_bdev = bdev;
326 if (test_bit(BLKDEV_WAIT, &flags))
327 bio->bi_private = &wait;
328
329 bio_get(bio);
330 submit_bio(WRITE_BARRIER, bio);
331 if (test_bit(BLKDEV_WAIT, &flags)) {
332 wait_for_completion(&wait);
333 /*
334 * The driver must store the error location in ->bi_sector, if
335 * it supports it. For non-stacked drivers, this should be
336 * copied from blk_rq_pos(rq).
337 */
338 if (error_sector)
339 *error_sector = bio->bi_sector;
340 }
341
342 if (bio_flagged(bio, BIO_EOPNOTSUPP))
343 ret = -EOPNOTSUPP;
344 else if (!bio_flagged(bio, BIO_UPTODATE))
345 ret = -EIO;
346
347 bio_put(bio);
348 return ret;
349}
350EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2fef1ef931a0..b1febd0f6d2a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -37,6 +37,12 @@ static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
37static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); 37static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
38static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); 38static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
39 39
40/* for encoding cft->private value on file */
41#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
42/* What policy owns the file, proportional or throttle */
43#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
44#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
45
40struct cgroup_subsys blkio_subsys = { 46struct cgroup_subsys blkio_subsys = {
41 .name = "blkio", 47 .name = "blkio",
42 .create = blkiocg_create, 48 .create = blkiocg_create,
@@ -59,6 +65,27 @@ static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
59 list_add(&pn->node, &blkcg->policy_list); 65 list_add(&pn->node, &blkcg->policy_list);
60} 66}
61 67
68static inline bool cftype_blkg_same_policy(struct cftype *cft,
69 struct blkio_group *blkg)
70{
71 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
72
73 if (blkg->plid == plid)
74 return 1;
75
76 return 0;
77}
78
79/* Determines if policy node matches cgroup file being accessed */
80static inline bool pn_matches_cftype(struct cftype *cft,
81 struct blkio_policy_node *pn)
82{
83 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
84 int fileid = BLKIOFILE_ATTR(cft->private);
85
86 return (plid == pn->plid && fileid == pn->fileid);
87}
88
62/* Must be called with blkcg->lock held */ 89/* Must be called with blkcg->lock held */
63static inline void blkio_policy_delete_node(struct blkio_policy_node *pn) 90static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
64{ 91{
@@ -67,12 +94,13 @@ static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
67 94
68/* Must be called with blkcg->lock held */ 95/* Must be called with blkcg->lock held */
69static struct blkio_policy_node * 96static struct blkio_policy_node *
70blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev) 97blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
98 enum blkio_policy_id plid, int fileid)
71{ 99{
72 struct blkio_policy_node *pn; 100 struct blkio_policy_node *pn;
73 101
74 list_for_each_entry(pn, &blkcg->policy_list, node) { 102 list_for_each_entry(pn, &blkcg->policy_list, node) {
75 if (pn->dev == dev) 103 if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
76 return pn; 104 return pn;
77 } 105 }
78 106
@@ -86,6 +114,67 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
86} 114}
87EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); 115EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
88 116
117static inline void
118blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
119{
120 struct blkio_policy_type *blkiop;
121
122 list_for_each_entry(blkiop, &blkio_list, list) {
123 /* If this policy does not own the blkg, do not send updates */
124 if (blkiop->plid != blkg->plid)
125 continue;
126 if (blkiop->ops.blkio_update_group_weight_fn)
127 blkiop->ops.blkio_update_group_weight_fn(blkg->key,
128 blkg, weight);
129 }
130}
131
132static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
133 int fileid)
134{
135 struct blkio_policy_type *blkiop;
136
137 list_for_each_entry(blkiop, &blkio_list, list) {
138
139 /* If this policy does not own the blkg, do not send updates */
140 if (blkiop->plid != blkg->plid)
141 continue;
142
143 if (fileid == BLKIO_THROTL_read_bps_device
144 && blkiop->ops.blkio_update_group_read_bps_fn)
145 blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
146 blkg, bps);
147
148 if (fileid == BLKIO_THROTL_write_bps_device
149 && blkiop->ops.blkio_update_group_write_bps_fn)
150 blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
151 blkg, bps);
152 }
153}
154
155static inline void blkio_update_group_iops(struct blkio_group *blkg,
156 unsigned int iops, int fileid)
157{
158 struct blkio_policy_type *blkiop;
159
160 list_for_each_entry(blkiop, &blkio_list, list) {
161
162 /* If this policy does not own the blkg, do not send updates */
163 if (blkiop->plid != blkg->plid)
164 continue;
165
166 if (fileid == BLKIO_THROTL_read_iops_device
167 && blkiop->ops.blkio_update_group_read_iops_fn)
168 blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
169 blkg, iops);
170
171 if (fileid == BLKIO_THROTL_write_iops_device
172 && blkiop->ops.blkio_update_group_write_iops_fn)
173 blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
174 blkg,iops);
175 }
176}
177
89/* 178/*
90 * Add to the appropriate stat variable depending on the request type. 179 * Add to the appropriate stat variable depending on the request type.
91 * This should be called with the blkg->stats_lock held. 180 * This should be called with the blkg->stats_lock held.
@@ -341,7 +430,8 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
341EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats); 430EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
342 431
343void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 432void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
344 struct blkio_group *blkg, void *key, dev_t dev) 433 struct blkio_group *blkg, void *key, dev_t dev,
434 enum blkio_policy_id plid)
345{ 435{
346 unsigned long flags; 436 unsigned long flags;
347 437
@@ -350,6 +440,7 @@ void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
350 rcu_assign_pointer(blkg->key, key); 440 rcu_assign_pointer(blkg->key, key);
351 blkg->blkcg_id = css_id(&blkcg->css); 441 blkg->blkcg_id = css_id(&blkcg->css);
352 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); 442 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
443 blkg->plid = plid;
353 spin_unlock_irqrestore(&blkcg->lock, flags); 444 spin_unlock_irqrestore(&blkcg->lock, flags);
354 /* Need to take css reference ? */ 445 /* Need to take css reference ? */
355 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path)); 446 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
@@ -408,51 +499,6 @@ struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
408} 499}
409EXPORT_SYMBOL_GPL(blkiocg_lookup_group); 500EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
410 501
411#define SHOW_FUNCTION(__VAR) \
412static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup, \
413 struct cftype *cftype) \
414{ \
415 struct blkio_cgroup *blkcg; \
416 \
417 blkcg = cgroup_to_blkio_cgroup(cgroup); \
418 return (u64)blkcg->__VAR; \
419}
420
421SHOW_FUNCTION(weight);
422#undef SHOW_FUNCTION
423
424static int
425blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
426{
427 struct blkio_cgroup *blkcg;
428 struct blkio_group *blkg;
429 struct hlist_node *n;
430 struct blkio_policy_type *blkiop;
431 struct blkio_policy_node *pn;
432
433 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
434 return -EINVAL;
435
436 blkcg = cgroup_to_blkio_cgroup(cgroup);
437 spin_lock(&blkio_list_lock);
438 spin_lock_irq(&blkcg->lock);
439 blkcg->weight = (unsigned int)val;
440
441 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
442 pn = blkio_policy_search_node(blkcg, blkg->dev);
443
444 if (pn)
445 continue;
446
447 list_for_each_entry(blkiop, &blkio_list, list)
448 blkiop->ops.blkio_update_group_weight_fn(blkg,
449 blkcg->weight);
450 }
451 spin_unlock_irq(&blkcg->lock);
452 spin_unlock(&blkio_list_lock);
453 return 0;
454}
455
456static int 502static int
457blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) 503blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
458{ 504{
@@ -593,52 +639,6 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
593 return disk_total; 639 return disk_total;
594} 640}
595 641
596#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total) \
597static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
598 struct cftype *cftype, struct cgroup_map_cb *cb) \
599{ \
600 struct blkio_cgroup *blkcg; \
601 struct blkio_group *blkg; \
602 struct hlist_node *n; \
603 uint64_t cgroup_total = 0; \
604 \
605 if (!cgroup_lock_live_group(cgroup)) \
606 return -ENODEV; \
607 \
608 blkcg = cgroup_to_blkio_cgroup(cgroup); \
609 rcu_read_lock(); \
610 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
611 if (blkg->dev) { \
612 spin_lock_irq(&blkg->stats_lock); \
613 cgroup_total += blkio_get_stat(blkg, cb, \
614 blkg->dev, type); \
615 spin_unlock_irq(&blkg->stats_lock); \
616 } \
617 } \
618 if (show_total) \
619 cb->fill(cb, "Total", cgroup_total); \
620 rcu_read_unlock(); \
621 cgroup_unlock(); \
622 return 0; \
623}
624
625SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
626SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
627SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
628SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
629SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
630SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
631SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
632SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
633#ifdef CONFIG_DEBUG_BLK_CGROUP
634SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
635SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
636SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
637SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
638SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
639#endif
640#undef SHOW_FUNCTION_PER_GROUP
641
642static int blkio_check_dev_num(dev_t dev) 642static int blkio_check_dev_num(dev_t dev)
643{ 643{
644 int part = 0; 644 int part = 0;
@@ -652,13 +652,14 @@ static int blkio_check_dev_num(dev_t dev)
652} 652}
653 653
654static int blkio_policy_parse_and_set(char *buf, 654static int blkio_policy_parse_and_set(char *buf,
655 struct blkio_policy_node *newpn) 655 struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
656{ 656{
657 char *s[4], *p, *major_s = NULL, *minor_s = NULL; 657 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
658 int ret; 658 int ret;
659 unsigned long major, minor, temp; 659 unsigned long major, minor, temp;
660 int i = 0; 660 int i = 0;
661 dev_t dev; 661 dev_t dev;
662 u64 bps, iops;
662 663
663 memset(s, 0, sizeof(s)); 664 memset(s, 0, sizeof(s));
664 665
@@ -705,12 +706,47 @@ static int blkio_policy_parse_and_set(char *buf,
705 if (s[1] == NULL) 706 if (s[1] == NULL)
706 return -EINVAL; 707 return -EINVAL;
707 708
708 ret = strict_strtoul(s[1], 10, &temp); 709 switch (plid) {
709 if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) || 710 case BLKIO_POLICY_PROP:
710 temp > BLKIO_WEIGHT_MAX) 711 ret = strict_strtoul(s[1], 10, &temp);
711 return -EINVAL; 712 if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
713 temp > BLKIO_WEIGHT_MAX)
714 return -EINVAL;
712 715
713 newpn->weight = temp; 716 newpn->plid = plid;
717 newpn->fileid = fileid;
718 newpn->val.weight = temp;
719 break;
720 case BLKIO_POLICY_THROTL:
721 switch(fileid) {
722 case BLKIO_THROTL_read_bps_device:
723 case BLKIO_THROTL_write_bps_device:
724 ret = strict_strtoull(s[1], 10, &bps);
725 if (ret)
726 return -EINVAL;
727
728 newpn->plid = plid;
729 newpn->fileid = fileid;
730 newpn->val.bps = bps;
731 break;
732 case BLKIO_THROTL_read_iops_device:
733 case BLKIO_THROTL_write_iops_device:
734 ret = strict_strtoull(s[1], 10, &iops);
735 if (ret)
736 return -EINVAL;
737
738 if (iops > THROTL_IOPS_MAX)
739 return -EINVAL;
740
741 newpn->plid = plid;
742 newpn->fileid = fileid;
743 newpn->val.iops = (unsigned int)iops;
744 break;
745 }
746 break;
747 default:
748 BUG();
749 }
714 750
715 return 0; 751 return 0;
716} 752}
@@ -720,26 +756,180 @@ unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
720{ 756{
721 struct blkio_policy_node *pn; 757 struct blkio_policy_node *pn;
722 758
723 pn = blkio_policy_search_node(blkcg, dev); 759 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
760 BLKIO_PROP_weight_device);
724 if (pn) 761 if (pn)
725 return pn->weight; 762 return pn->val.weight;
726 else 763 else
727 return blkcg->weight; 764 return blkcg->weight;
728} 765}
729EXPORT_SYMBOL_GPL(blkcg_get_weight); 766EXPORT_SYMBOL_GPL(blkcg_get_weight);
730 767
768uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
769{
770 struct blkio_policy_node *pn;
771
772 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
773 BLKIO_THROTL_read_bps_device);
774 if (pn)
775 return pn->val.bps;
776 else
777 return -1;
778}
779
780uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
781{
782 struct blkio_policy_node *pn;
783 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
784 BLKIO_THROTL_write_bps_device);
785 if (pn)
786 return pn->val.bps;
787 else
788 return -1;
789}
790
791unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
792{
793 struct blkio_policy_node *pn;
794
795 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
796 BLKIO_THROTL_read_iops_device);
797 if (pn)
798 return pn->val.iops;
799 else
800 return -1;
801}
802
803unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
804{
805 struct blkio_policy_node *pn;
806 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
807 BLKIO_THROTL_write_iops_device);
808 if (pn)
809 return pn->val.iops;
810 else
811 return -1;
812}
813
814/* Checks whether user asked for deleting a policy rule */
815static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
816{
817 switch(pn->plid) {
818 case BLKIO_POLICY_PROP:
819 if (pn->val.weight == 0)
820 return 1;
821 break;
822 case BLKIO_POLICY_THROTL:
823 switch(pn->fileid) {
824 case BLKIO_THROTL_read_bps_device:
825 case BLKIO_THROTL_write_bps_device:
826 if (pn->val.bps == 0)
827 return 1;
828 break;
829 case BLKIO_THROTL_read_iops_device:
830 case BLKIO_THROTL_write_iops_device:
831 if (pn->val.iops == 0)
832 return 1;
833 }
834 break;
835 default:
836 BUG();
837 }
838
839 return 0;
840}
841
842static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
843 struct blkio_policy_node *newpn)
844{
845 switch(oldpn->plid) {
846 case BLKIO_POLICY_PROP:
847 oldpn->val.weight = newpn->val.weight;
848 break;
849 case BLKIO_POLICY_THROTL:
850 switch(newpn->fileid) {
851 case BLKIO_THROTL_read_bps_device:
852 case BLKIO_THROTL_write_bps_device:
853 oldpn->val.bps = newpn->val.bps;
854 break;
855 case BLKIO_THROTL_read_iops_device:
856 case BLKIO_THROTL_write_iops_device:
857 oldpn->val.iops = newpn->val.iops;
858 }
859 break;
860 default:
861 BUG();
862 }
863}
864
865/*
866 * Some rules/values in blkg have changed. Propogate those to respective
867 * policies.
868 */
869static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
870 struct blkio_group *blkg, struct blkio_policy_node *pn)
871{
872 unsigned int weight, iops;
873 u64 bps;
874
875 switch(pn->plid) {
876 case BLKIO_POLICY_PROP:
877 weight = pn->val.weight ? pn->val.weight :
878 blkcg->weight;
879 blkio_update_group_weight(blkg, weight);
880 break;
881 case BLKIO_POLICY_THROTL:
882 switch(pn->fileid) {
883 case BLKIO_THROTL_read_bps_device:
884 case BLKIO_THROTL_write_bps_device:
885 bps = pn->val.bps ? pn->val.bps : (-1);
886 blkio_update_group_bps(blkg, bps, pn->fileid);
887 break;
888 case BLKIO_THROTL_read_iops_device:
889 case BLKIO_THROTL_write_iops_device:
890 iops = pn->val.iops ? pn->val.iops : (-1);
891 blkio_update_group_iops(blkg, iops, pn->fileid);
892 break;
893 }
894 break;
895 default:
896 BUG();
897 }
898}
899
900/*
901 * A policy node rule has been updated. Propogate this update to all the
902 * block groups which might be affected by this update.
903 */
904static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
905 struct blkio_policy_node *pn)
906{
907 struct blkio_group *blkg;
908 struct hlist_node *n;
909
910 spin_lock(&blkio_list_lock);
911 spin_lock_irq(&blkcg->lock);
912
913 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
914 if (pn->dev != blkg->dev || pn->plid != blkg->plid)
915 continue;
916 blkio_update_blkg_policy(blkcg, blkg, pn);
917 }
918
919 spin_unlock_irq(&blkcg->lock);
920 spin_unlock(&blkio_list_lock);
921}
731 922
732static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft, 923static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
733 const char *buffer) 924 const char *buffer)
734{ 925{
735 int ret = 0; 926 int ret = 0;
736 char *buf; 927 char *buf;
737 struct blkio_policy_node *newpn, *pn; 928 struct blkio_policy_node *newpn, *pn;
738 struct blkio_cgroup *blkcg; 929 struct blkio_cgroup *blkcg;
739 struct blkio_group *blkg;
740 int keep_newpn = 0; 930 int keep_newpn = 0;
741 struct hlist_node *n; 931 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
742 struct blkio_policy_type *blkiop; 932 int fileid = BLKIOFILE_ATTR(cft->private);
743 933
744 buf = kstrdup(buffer, GFP_KERNEL); 934 buf = kstrdup(buffer, GFP_KERNEL);
745 if (!buf) 935 if (!buf)
@@ -751,7 +941,7 @@ static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
751 goto free_buf; 941 goto free_buf;
752 } 942 }
753 943
754 ret = blkio_policy_parse_and_set(buf, newpn); 944 ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
755 if (ret) 945 if (ret)
756 goto free_newpn; 946 goto free_newpn;
757 947
@@ -759,9 +949,9 @@ static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
759 949
760 spin_lock_irq(&blkcg->lock); 950 spin_lock_irq(&blkcg->lock);
761 951
762 pn = blkio_policy_search_node(blkcg, newpn->dev); 952 pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
763 if (!pn) { 953 if (!pn) {
764 if (newpn->weight != 0) { 954 if (!blkio_delete_rule_command(newpn)) {
765 blkio_policy_insert_node(blkcg, newpn); 955 blkio_policy_insert_node(blkcg, newpn);
766 keep_newpn = 1; 956 keep_newpn = 1;
767 } 957 }
@@ -769,33 +959,17 @@ static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
769 goto update_io_group; 959 goto update_io_group;
770 } 960 }
771 961
772 if (newpn->weight == 0) { 962 if (blkio_delete_rule_command(newpn)) {
773 /* weight == 0 means deleteing a specific weight */
774 blkio_policy_delete_node(pn); 963 blkio_policy_delete_node(pn);
775 spin_unlock_irq(&blkcg->lock); 964 spin_unlock_irq(&blkcg->lock);
776 goto update_io_group; 965 goto update_io_group;
777 } 966 }
778 spin_unlock_irq(&blkcg->lock); 967 spin_unlock_irq(&blkcg->lock);
779 968
780 pn->weight = newpn->weight; 969 blkio_update_policy_rule(pn, newpn);
781 970
782update_io_group: 971update_io_group:
783 /* update weight for each cfqg */ 972 blkio_update_policy_node_blkg(blkcg, newpn);
784 spin_lock(&blkio_list_lock);
785 spin_lock_irq(&blkcg->lock);
786
787 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
788 if (newpn->dev == blkg->dev) {
789 list_for_each_entry(blkiop, &blkio_list, list)
790 blkiop->ops.blkio_update_group_weight_fn(blkg,
791 newpn->weight ?
792 newpn->weight :
793 blkcg->weight);
794 }
795 }
796
797 spin_unlock_irq(&blkcg->lock);
798 spin_unlock(&blkio_list_lock);
799 973
800free_newpn: 974free_newpn:
801 if (!keep_newpn) 975 if (!keep_newpn)
@@ -805,23 +979,256 @@ free_buf:
805 return ret; 979 return ret;
806} 980}
807 981
808static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft, 982static void
809 struct seq_file *m) 983blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
810{ 984{
811 struct blkio_cgroup *blkcg; 985 switch(pn->plid) {
812 struct blkio_policy_node *pn; 986 case BLKIO_POLICY_PROP:
987 if (pn->fileid == BLKIO_PROP_weight_device)
988 seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
989 MINOR(pn->dev), pn->val.weight);
990 break;
991 case BLKIO_POLICY_THROTL:
992 switch(pn->fileid) {
993 case BLKIO_THROTL_read_bps_device:
994 case BLKIO_THROTL_write_bps_device:
995 seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
996 MINOR(pn->dev), pn->val.bps);
997 break;
998 case BLKIO_THROTL_read_iops_device:
999 case BLKIO_THROTL_write_iops_device:
1000 seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1001 MINOR(pn->dev), pn->val.iops);
1002 break;
1003 }
1004 break;
1005 default:
1006 BUG();
1007 }
1008}
813 1009
814 seq_printf(m, "dev\tweight\n"); 1010/* cgroup files which read their data from policy nodes end up here */
1011static void blkio_read_policy_node_files(struct cftype *cft,
1012 struct blkio_cgroup *blkcg, struct seq_file *m)
1013{
1014 struct blkio_policy_node *pn;
815 1015
816 blkcg = cgroup_to_blkio_cgroup(cgrp);
817 if (!list_empty(&blkcg->policy_list)) { 1016 if (!list_empty(&blkcg->policy_list)) {
818 spin_lock_irq(&blkcg->lock); 1017 spin_lock_irq(&blkcg->lock);
819 list_for_each_entry(pn, &blkcg->policy_list, node) { 1018 list_for_each_entry(pn, &blkcg->policy_list, node) {
820 seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev), 1019 if (!pn_matches_cftype(cft, pn))
821 MINOR(pn->dev), pn->weight); 1020 continue;
1021 blkio_print_policy_node(m, pn);
822 } 1022 }
823 spin_unlock_irq(&blkcg->lock); 1023 spin_unlock_irq(&blkcg->lock);
824 } 1024 }
1025}
1026
1027static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1028 struct seq_file *m)
1029{
1030 struct blkio_cgroup *blkcg;
1031 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1032 int name = BLKIOFILE_ATTR(cft->private);
1033
1034 blkcg = cgroup_to_blkio_cgroup(cgrp);
1035
1036 switch(plid) {
1037 case BLKIO_POLICY_PROP:
1038 switch(name) {
1039 case BLKIO_PROP_weight_device:
1040 blkio_read_policy_node_files(cft, blkcg, m);
1041 return 0;
1042 default:
1043 BUG();
1044 }
1045 break;
1046 case BLKIO_POLICY_THROTL:
1047 switch(name){
1048 case BLKIO_THROTL_read_bps_device:
1049 case BLKIO_THROTL_write_bps_device:
1050 case BLKIO_THROTL_read_iops_device:
1051 case BLKIO_THROTL_write_iops_device:
1052 blkio_read_policy_node_files(cft, blkcg, m);
1053 return 0;
1054 default:
1055 BUG();
1056 }
1057 break;
1058 default:
1059 BUG();
1060 }
1061
1062 return 0;
1063}
1064
1065static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1066 struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
1067 bool show_total)
1068{
1069 struct blkio_group *blkg;
1070 struct hlist_node *n;
1071 uint64_t cgroup_total = 0;
1072
1073 rcu_read_lock();
1074 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1075 if (blkg->dev) {
1076 if (!cftype_blkg_same_policy(cft, blkg))
1077 continue;
1078 spin_lock_irq(&blkg->stats_lock);
1079 cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
1080 type);
1081 spin_unlock_irq(&blkg->stats_lock);
1082 }
1083 }
1084 if (show_total)
1085 cb->fill(cb, "Total", cgroup_total);
1086 rcu_read_unlock();
1087 return 0;
1088}
1089
1090/* All map kind of cgroup file get serviced by this function */
1091static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1092 struct cgroup_map_cb *cb)
1093{
1094 struct blkio_cgroup *blkcg;
1095 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1096 int name = BLKIOFILE_ATTR(cft->private);
1097
1098 blkcg = cgroup_to_blkio_cgroup(cgrp);
1099
1100 switch(plid) {
1101 case BLKIO_POLICY_PROP:
1102 switch(name) {
1103 case BLKIO_PROP_time:
1104 return blkio_read_blkg_stats(blkcg, cft, cb,
1105 BLKIO_STAT_TIME, 0);
1106 case BLKIO_PROP_sectors:
1107 return blkio_read_blkg_stats(blkcg, cft, cb,
1108 BLKIO_STAT_SECTORS, 0);
1109 case BLKIO_PROP_io_service_bytes:
1110 return blkio_read_blkg_stats(blkcg, cft, cb,
1111 BLKIO_STAT_SERVICE_BYTES, 1);
1112 case BLKIO_PROP_io_serviced:
1113 return blkio_read_blkg_stats(blkcg, cft, cb,
1114 BLKIO_STAT_SERVICED, 1);
1115 case BLKIO_PROP_io_service_time:
1116 return blkio_read_blkg_stats(blkcg, cft, cb,
1117 BLKIO_STAT_SERVICE_TIME, 1);
1118 case BLKIO_PROP_io_wait_time:
1119 return blkio_read_blkg_stats(blkcg, cft, cb,
1120 BLKIO_STAT_WAIT_TIME, 1);
1121 case BLKIO_PROP_io_merged:
1122 return blkio_read_blkg_stats(blkcg, cft, cb,
1123 BLKIO_STAT_MERGED, 1);
1124 case BLKIO_PROP_io_queued:
1125 return blkio_read_blkg_stats(blkcg, cft, cb,
1126 BLKIO_STAT_QUEUED, 1);
1127#ifdef CONFIG_DEBUG_BLK_CGROUP
1128 case BLKIO_PROP_dequeue:
1129 return blkio_read_blkg_stats(blkcg, cft, cb,
1130 BLKIO_STAT_DEQUEUE, 0);
1131 case BLKIO_PROP_avg_queue_size:
1132 return blkio_read_blkg_stats(blkcg, cft, cb,
1133 BLKIO_STAT_AVG_QUEUE_SIZE, 0);
1134 case BLKIO_PROP_group_wait_time:
1135 return blkio_read_blkg_stats(blkcg, cft, cb,
1136 BLKIO_STAT_GROUP_WAIT_TIME, 0);
1137 case BLKIO_PROP_idle_time:
1138 return blkio_read_blkg_stats(blkcg, cft, cb,
1139 BLKIO_STAT_IDLE_TIME, 0);
1140 case BLKIO_PROP_empty_time:
1141 return blkio_read_blkg_stats(blkcg, cft, cb,
1142 BLKIO_STAT_EMPTY_TIME, 0);
1143#endif
1144 default:
1145 BUG();
1146 }
1147 break;
1148 case BLKIO_POLICY_THROTL:
1149 switch(name){
1150 case BLKIO_THROTL_io_service_bytes:
1151 return blkio_read_blkg_stats(blkcg, cft, cb,
1152 BLKIO_STAT_SERVICE_BYTES, 1);
1153 case BLKIO_THROTL_io_serviced:
1154 return blkio_read_blkg_stats(blkcg, cft, cb,
1155 BLKIO_STAT_SERVICED, 1);
1156 default:
1157 BUG();
1158 }
1159 break;
1160 default:
1161 BUG();
1162 }
1163
1164 return 0;
1165}
1166
1167static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
1168{
1169 struct blkio_group *blkg;
1170 struct hlist_node *n;
1171 struct blkio_policy_node *pn;
1172
1173 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1174 return -EINVAL;
1175
1176 spin_lock(&blkio_list_lock);
1177 spin_lock_irq(&blkcg->lock);
1178 blkcg->weight = (unsigned int)val;
1179
1180 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1181 pn = blkio_policy_search_node(blkcg, blkg->dev,
1182 BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
1183 if (pn)
1184 continue;
1185
1186 blkio_update_group_weight(blkg, blkcg->weight);
1187 }
1188 spin_unlock_irq(&blkcg->lock);
1189 spin_unlock(&blkio_list_lock);
1190 return 0;
1191}
1192
1193static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
1194 struct blkio_cgroup *blkcg;
1195 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1196 int name = BLKIOFILE_ATTR(cft->private);
1197
1198 blkcg = cgroup_to_blkio_cgroup(cgrp);
1199
1200 switch(plid) {
1201 case BLKIO_POLICY_PROP:
1202 switch(name) {
1203 case BLKIO_PROP_weight:
1204 return (u64)blkcg->weight;
1205 }
1206 break;
1207 default:
1208 BUG();
1209 }
1210 return 0;
1211}
1212
1213static int
1214blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1215{
1216 struct blkio_cgroup *blkcg;
1217 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1218 int name = BLKIOFILE_ATTR(cft->private);
1219
1220 blkcg = cgroup_to_blkio_cgroup(cgrp);
1221
1222 switch(plid) {
1223 case BLKIO_POLICY_PROP:
1224 switch(name) {
1225 case BLKIO_PROP_weight:
1226 return blkio_weight_write(blkcg, val);
1227 }
1228 break;
1229 default:
1230 BUG();
1231 }
825 1232
826 return 0; 1233 return 0;
827} 1234}
@@ -829,71 +1236,151 @@ static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
829struct cftype blkio_files[] = { 1236struct cftype blkio_files[] = {
830 { 1237 {
831 .name = "weight_device", 1238 .name = "weight_device",
832 .read_seq_string = blkiocg_weight_device_read, 1239 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
833 .write_string = blkiocg_weight_device_write, 1240 BLKIO_PROP_weight_device),
1241 .read_seq_string = blkiocg_file_read,
1242 .write_string = blkiocg_file_write,
834 .max_write_len = 256, 1243 .max_write_len = 256,
835 }, 1244 },
836 { 1245 {
837 .name = "weight", 1246 .name = "weight",
838 .read_u64 = blkiocg_weight_read, 1247 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
839 .write_u64 = blkiocg_weight_write, 1248 BLKIO_PROP_weight),
1249 .read_u64 = blkiocg_file_read_u64,
1250 .write_u64 = blkiocg_file_write_u64,
840 }, 1251 },
841 { 1252 {
842 .name = "time", 1253 .name = "time",
843 .read_map = blkiocg_time_read, 1254 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1255 BLKIO_PROP_time),
1256 .read_map = blkiocg_file_read_map,
844 }, 1257 },
845 { 1258 {
846 .name = "sectors", 1259 .name = "sectors",
847 .read_map = blkiocg_sectors_read, 1260 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1261 BLKIO_PROP_sectors),
1262 .read_map = blkiocg_file_read_map,
848 }, 1263 },
849 { 1264 {
850 .name = "io_service_bytes", 1265 .name = "io_service_bytes",
851 .read_map = blkiocg_io_service_bytes_read, 1266 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1267 BLKIO_PROP_io_service_bytes),
1268 .read_map = blkiocg_file_read_map,
852 }, 1269 },
853 { 1270 {
854 .name = "io_serviced", 1271 .name = "io_serviced",
855 .read_map = blkiocg_io_serviced_read, 1272 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1273 BLKIO_PROP_io_serviced),
1274 .read_map = blkiocg_file_read_map,
856 }, 1275 },
857 { 1276 {
858 .name = "io_service_time", 1277 .name = "io_service_time",
859 .read_map = blkiocg_io_service_time_read, 1278 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1279 BLKIO_PROP_io_service_time),
1280 .read_map = blkiocg_file_read_map,
860 }, 1281 },
861 { 1282 {
862 .name = "io_wait_time", 1283 .name = "io_wait_time",
863 .read_map = blkiocg_io_wait_time_read, 1284 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1285 BLKIO_PROP_io_wait_time),
1286 .read_map = blkiocg_file_read_map,
864 }, 1287 },
865 { 1288 {
866 .name = "io_merged", 1289 .name = "io_merged",
867 .read_map = blkiocg_io_merged_read, 1290 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1291 BLKIO_PROP_io_merged),
1292 .read_map = blkiocg_file_read_map,
868 }, 1293 },
869 { 1294 {
870 .name = "io_queued", 1295 .name = "io_queued",
871 .read_map = blkiocg_io_queued_read, 1296 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1297 BLKIO_PROP_io_queued),
1298 .read_map = blkiocg_file_read_map,
872 }, 1299 },
873 { 1300 {
874 .name = "reset_stats", 1301 .name = "reset_stats",
875 .write_u64 = blkiocg_reset_stats, 1302 .write_u64 = blkiocg_reset_stats,
876 }, 1303 },
1304#ifdef CONFIG_BLK_DEV_THROTTLING
1305 {
1306 .name = "throttle.read_bps_device",
1307 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1308 BLKIO_THROTL_read_bps_device),
1309 .read_seq_string = blkiocg_file_read,
1310 .write_string = blkiocg_file_write,
1311 .max_write_len = 256,
1312 },
1313
1314 {
1315 .name = "throttle.write_bps_device",
1316 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1317 BLKIO_THROTL_write_bps_device),
1318 .read_seq_string = blkiocg_file_read,
1319 .write_string = blkiocg_file_write,
1320 .max_write_len = 256,
1321 },
1322
1323 {
1324 .name = "throttle.read_iops_device",
1325 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1326 BLKIO_THROTL_read_iops_device),
1327 .read_seq_string = blkiocg_file_read,
1328 .write_string = blkiocg_file_write,
1329 .max_write_len = 256,
1330 },
1331
1332 {
1333 .name = "throttle.write_iops_device",
1334 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1335 BLKIO_THROTL_write_iops_device),
1336 .read_seq_string = blkiocg_file_read,
1337 .write_string = blkiocg_file_write,
1338 .max_write_len = 256,
1339 },
1340 {
1341 .name = "throttle.io_service_bytes",
1342 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1343 BLKIO_THROTL_io_service_bytes),
1344 .read_map = blkiocg_file_read_map,
1345 },
1346 {
1347 .name = "throttle.io_serviced",
1348 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1349 BLKIO_THROTL_io_serviced),
1350 .read_map = blkiocg_file_read_map,
1351 },
1352#endif /* CONFIG_BLK_DEV_THROTTLING */
1353
877#ifdef CONFIG_DEBUG_BLK_CGROUP 1354#ifdef CONFIG_DEBUG_BLK_CGROUP
878 { 1355 {
879 .name = "avg_queue_size", 1356 .name = "avg_queue_size",
880 .read_map = blkiocg_avg_queue_size_read, 1357 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1358 BLKIO_PROP_avg_queue_size),
1359 .read_map = blkiocg_file_read_map,
881 }, 1360 },
882 { 1361 {
883 .name = "group_wait_time", 1362 .name = "group_wait_time",
884 .read_map = blkiocg_group_wait_time_read, 1363 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1364 BLKIO_PROP_group_wait_time),
1365 .read_map = blkiocg_file_read_map,
885 }, 1366 },
886 { 1367 {
887 .name = "idle_time", 1368 .name = "idle_time",
888 .read_map = blkiocg_idle_time_read, 1369 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1370 BLKIO_PROP_idle_time),
1371 .read_map = blkiocg_file_read_map,
889 }, 1372 },
890 { 1373 {
891 .name = "empty_time", 1374 .name = "empty_time",
892 .read_map = blkiocg_empty_time_read, 1375 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1376 BLKIO_PROP_empty_time),
1377 .read_map = blkiocg_file_read_map,
893 }, 1378 },
894 { 1379 {
895 .name = "dequeue", 1380 .name = "dequeue",
896 .read_map = blkiocg_dequeue_read, 1381 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1382 BLKIO_PROP_dequeue),
1383 .read_map = blkiocg_file_read_map,
897 }, 1384 },
898#endif 1385#endif
899}; 1386};
@@ -932,13 +1419,14 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
932 /* 1419 /*
933 * This blkio_group is being unlinked as associated cgroup is 1420 * This blkio_group is being unlinked as associated cgroup is
934 * going away. Let all the IO controlling policies know about 1421 * going away. Let all the IO controlling policies know about
935 * this event. Currently this is static call to one io 1422 * this event.
936 * controlling policy. Once we have more policies in place, we
937 * need some dynamic registration of callback function.
938 */ 1423 */
939 spin_lock(&blkio_list_lock); 1424 spin_lock(&blkio_list_lock);
940 list_for_each_entry(blkiop, &blkio_list, list) 1425 list_for_each_entry(blkiop, &blkio_list, list) {
1426 if (blkiop->plid != blkg->plid)
1427 continue;
941 blkiop->ops.blkio_unlink_group_fn(key, blkg); 1428 blkiop->ops.blkio_unlink_group_fn(key, blkg);
1429 }
942 spin_unlock(&blkio_list_lock); 1430 spin_unlock(&blkio_list_lock);
943 } while (1); 1431 } while (1);
944 1432
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 2b866ec1dcea..ea4861bdd549 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,6 +15,14 @@
15 15
16#include <linux/cgroup.h> 16#include <linux/cgroup.h>
17 17
18enum blkio_policy_id {
19 BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
20 BLKIO_POLICY_THROTL, /* Throttling */
21};
22
23/* Max limits for throttle policy */
24#define THROTL_IOPS_MAX UINT_MAX
25
18#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) 26#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
19 27
20#ifndef CONFIG_BLK_CGROUP 28#ifndef CONFIG_BLK_CGROUP
@@ -65,6 +73,35 @@ enum blkg_state_flags {
65 BLKG_empty, 73 BLKG_empty,
66}; 74};
67 75
76/* cgroup files owned by proportional weight policy */
77enum blkcg_file_name_prop {
78 BLKIO_PROP_weight = 1,
79 BLKIO_PROP_weight_device,
80 BLKIO_PROP_io_service_bytes,
81 BLKIO_PROP_io_serviced,
82 BLKIO_PROP_time,
83 BLKIO_PROP_sectors,
84 BLKIO_PROP_io_service_time,
85 BLKIO_PROP_io_wait_time,
86 BLKIO_PROP_io_merged,
87 BLKIO_PROP_io_queued,
88 BLKIO_PROP_avg_queue_size,
89 BLKIO_PROP_group_wait_time,
90 BLKIO_PROP_idle_time,
91 BLKIO_PROP_empty_time,
92 BLKIO_PROP_dequeue,
93};
94
95/* cgroup files owned by throttle policy */
96enum blkcg_file_name_throtl {
97 BLKIO_THROTL_read_bps_device,
98 BLKIO_THROTL_write_bps_device,
99 BLKIO_THROTL_read_iops_device,
100 BLKIO_THROTL_write_iops_device,
101 BLKIO_THROTL_io_service_bytes,
102 BLKIO_THROTL_io_serviced,
103};
104
68struct blkio_cgroup { 105struct blkio_cgroup {
69 struct cgroup_subsys_state css; 106 struct cgroup_subsys_state css;
70 unsigned int weight; 107 unsigned int weight;
@@ -112,6 +149,8 @@ struct blkio_group {
112 char path[128]; 149 char path[128];
113 /* The device MKDEV(major, minor), this group has been created for */ 150 /* The device MKDEV(major, minor), this group has been created for */
114 dev_t dev; 151 dev_t dev;
152 /* policy which owns this blk group */
153 enum blkio_policy_id plid;
115 154
116 /* Need to serialize the stats in the case of reset/update */ 155 /* Need to serialize the stats in the case of reset/update */
117 spinlock_t stats_lock; 156 spinlock_t stats_lock;
@@ -121,24 +160,60 @@ struct blkio_group {
121struct blkio_policy_node { 160struct blkio_policy_node {
122 struct list_head node; 161 struct list_head node;
123 dev_t dev; 162 dev_t dev;
124 unsigned int weight; 163 /* This node belongs to max bw policy or porportional weight policy */
164 enum blkio_policy_id plid;
165 /* cgroup file to which this rule belongs to */
166 int fileid;
167
168 union {
169 unsigned int weight;
170 /*
171 * Rate read/write in terms of byptes per second
172 * Whether this rate represents read or write is determined
173 * by file type "fileid".
174 */
175 u64 bps;
176 unsigned int iops;
177 } val;
125}; 178};
126 179
127extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, 180extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
128 dev_t dev); 181 dev_t dev);
182extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
183 dev_t dev);
184extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
185 dev_t dev);
186extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
187 dev_t dev);
188extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
189 dev_t dev);
129 190
130typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg); 191typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
131typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg, 192
132 unsigned int weight); 193typedef void (blkio_update_group_weight_fn) (void *key,
194 struct blkio_group *blkg, unsigned int weight);
195typedef void (blkio_update_group_read_bps_fn) (void * key,
196 struct blkio_group *blkg, u64 read_bps);
197typedef void (blkio_update_group_write_bps_fn) (void *key,
198 struct blkio_group *blkg, u64 write_bps);
199typedef void (blkio_update_group_read_iops_fn) (void *key,
200 struct blkio_group *blkg, unsigned int read_iops);
201typedef void (blkio_update_group_write_iops_fn) (void *key,
202 struct blkio_group *blkg, unsigned int write_iops);
133 203
134struct blkio_policy_ops { 204struct blkio_policy_ops {
135 blkio_unlink_group_fn *blkio_unlink_group_fn; 205 blkio_unlink_group_fn *blkio_unlink_group_fn;
136 blkio_update_group_weight_fn *blkio_update_group_weight_fn; 206 blkio_update_group_weight_fn *blkio_update_group_weight_fn;
207 blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
208 blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
209 blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
210 blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
137}; 211};
138 212
139struct blkio_policy_type { 213struct blkio_policy_type {
140 struct list_head list; 214 struct list_head list;
141 struct blkio_policy_ops ops; 215 struct blkio_policy_ops ops;
216 enum blkio_policy_id plid;
142}; 217};
143 218
144/* Blkio controller policy registration */ 219/* Blkio controller policy registration */
@@ -212,7 +287,8 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
212extern struct blkio_cgroup blkio_root_cgroup; 287extern struct blkio_cgroup blkio_root_cgroup;
213extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); 288extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
214extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 289extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
215 struct blkio_group *blkg, void *key, dev_t dev); 290 struct blkio_group *blkg, void *key, dev_t dev,
291 enum blkio_policy_id plid);
216extern int blkiocg_del_blkio_group(struct blkio_group *blkg); 292extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
217extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 293extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
218 void *key); 294 void *key);
@@ -234,7 +310,8 @@ static inline struct blkio_cgroup *
234cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } 310cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
235 311
236static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 312static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
237 struct blkio_group *blkg, void *key, dev_t dev) {} 313 struct blkio_group *blkg, void *key, dev_t dev,
314 enum blkio_policy_id plid) {}
238 315
239static inline int 316static inline int
240blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } 317blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
diff --git a/block/blk-core.c b/block/blk-core.c
index 32a1c123dfb3..45141469e89e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,13 +64,15 @@ static void drive_stat_acct(struct request *rq, int new_io)
64 return; 64 return;
65 65
66 cpu = part_stat_lock(); 66 cpu = part_stat_lock();
67 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
68 67
69 if (!new_io) 68 if (!new_io) {
69 part = rq->part;
70 part_stat_inc(cpu, part, merges[rw]); 70 part_stat_inc(cpu, part, merges[rw]);
71 else { 71 } else {
72 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
72 part_round_stats(cpu, part); 73 part_round_stats(cpu, part);
73 part_inc_in_flight(part, rw); 74 part_inc_in_flight(part, rw);
75 rq->part = part;
74 } 76 }
75 77
76 part_stat_unlock(); 78 part_stat_unlock();
@@ -128,6 +130,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
128 rq->ref_count = 1; 130 rq->ref_count = 1;
129 rq->start_time = jiffies; 131 rq->start_time = jiffies;
130 set_start_time_ns(rq); 132 set_start_time_ns(rq);
133 rq->part = NULL;
131} 134}
132EXPORT_SYMBOL(blk_rq_init); 135EXPORT_SYMBOL(blk_rq_init);
133 136
@@ -136,7 +139,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
136{ 139{
137 struct request_queue *q = rq->q; 140 struct request_queue *q = rq->q;
138 141
139 if (&q->bar_rq != rq) { 142 if (&q->flush_rq != rq) {
140 if (error) 143 if (error)
141 clear_bit(BIO_UPTODATE, &bio->bi_flags); 144 clear_bit(BIO_UPTODATE, &bio->bi_flags);
142 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 145 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
@@ -160,13 +163,12 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
160 if (bio->bi_size == 0) 163 if (bio->bi_size == 0)
161 bio_endio(bio, error); 164 bio_endio(bio, error);
162 } else { 165 } else {
163
164 /* 166 /*
165 * Okay, this is the barrier request in progress, just 167 * Okay, this is the sequenced flush request in
166 * record the error; 168 * progress, just record the error;
167 */ 169 */
168 if (error && !q->orderr) 170 if (error && !q->flush_err)
169 q->orderr = error; 171 q->flush_err = error;
170 } 172 }
171} 173}
172 174
@@ -382,6 +384,7 @@ void blk_sync_queue(struct request_queue *q)
382 del_timer_sync(&q->unplug_timer); 384 del_timer_sync(&q->unplug_timer);
383 del_timer_sync(&q->timeout); 385 del_timer_sync(&q->timeout);
384 cancel_work_sync(&q->unplug_work); 386 cancel_work_sync(&q->unplug_work);
387 throtl_shutdown_timer_wq(q);
385} 388}
386EXPORT_SYMBOL(blk_sync_queue); 389EXPORT_SYMBOL(blk_sync_queue);
387 390
@@ -459,6 +462,8 @@ void blk_cleanup_queue(struct request_queue *q)
459 if (q->elevator) 462 if (q->elevator)
460 elevator_exit(q->elevator); 463 elevator_exit(q->elevator);
461 464
465 blk_throtl_exit(q);
466
462 blk_put_queue(q); 467 blk_put_queue(q);
463} 468}
464EXPORT_SYMBOL(blk_cleanup_queue); 469EXPORT_SYMBOL(blk_cleanup_queue);
@@ -515,11 +520,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
515 return NULL; 520 return NULL;
516 } 521 }
517 522
523 if (blk_throtl_init(q)) {
524 kmem_cache_free(blk_requestq_cachep, q);
525 return NULL;
526 }
527
518 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 528 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
519 laptop_mode_timer_fn, (unsigned long) q); 529 laptop_mode_timer_fn, (unsigned long) q);
520 init_timer(&q->unplug_timer); 530 init_timer(&q->unplug_timer);
521 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 531 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
522 INIT_LIST_HEAD(&q->timeout_list); 532 INIT_LIST_HEAD(&q->timeout_list);
533 INIT_LIST_HEAD(&q->pending_flushes);
523 INIT_WORK(&q->unplug_work, blk_unplug_work); 534 INIT_WORK(&q->unplug_work, blk_unplug_work);
524 535
525 kobject_init(&q->kobj, &blk_queue_ktype); 536 kobject_init(&q->kobj, &blk_queue_ktype);
@@ -796,11 +807,16 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
796 rl->starved[is_sync] = 0; 807 rl->starved[is_sync] = 0;
797 808
798 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 809 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
799 if (priv) 810 if (priv) {
800 rl->elvpriv++; 811 rl->elvpriv++;
801 812
802 if (blk_queue_io_stat(q)) 813 /*
803 rw_flags |= REQ_IO_STAT; 814 * Don't do stats for non-priv requests
815 */
816 if (blk_queue_io_stat(q))
817 rw_flags |= REQ_IO_STAT;
818 }
819
804 spin_unlock_irq(q->queue_lock); 820 spin_unlock_irq(q->queue_lock);
805 821
806 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 822 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1037,22 +1053,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
1037} 1053}
1038EXPORT_SYMBOL(blk_insert_request); 1054EXPORT_SYMBOL(blk_insert_request);
1039 1055
1040/*
1041 * add-request adds a request to the linked list.
1042 * queue lock is held and interrupts disabled, as we muck with the
1043 * request queue list.
1044 */
1045static inline void add_request(struct request_queue *q, struct request *req)
1046{
1047 drive_stat_acct(req, 1);
1048
1049 /*
1050 * elevator indicated where it wants this request to be
1051 * inserted at elevator_merge time
1052 */
1053 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
1054}
1055
1056static void part_round_stats_single(int cpu, struct hd_struct *part, 1056static void part_round_stats_single(int cpu, struct hd_struct *part,
1057 unsigned long now) 1057 unsigned long now)
1058{ 1058{
@@ -1201,13 +1201,16 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1201 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1201 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1202 const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); 1202 const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
1203 const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; 1203 const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
1204 int where = ELEVATOR_INSERT_SORT;
1204 int rw_flags; 1205 int rw_flags;
1205 1206
1206 if ((bio->bi_rw & REQ_HARDBARRIER) && 1207 /* REQ_HARDBARRIER is no more */
1207 (q->next_ordered == QUEUE_ORDERED_NONE)) { 1208 if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
1209 "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
1208 bio_endio(bio, -EOPNOTSUPP); 1210 bio_endio(bio, -EOPNOTSUPP);
1209 return 0; 1211 return 0;
1210 } 1212 }
1213
1211 /* 1214 /*
1212 * low level driver can indicate that it wants pages above a 1215 * low level driver can indicate that it wants pages above a
1213 * certain limit bounced to low memory (ie for highmem, or even 1216 * certain limit bounced to low memory (ie for highmem, or even
@@ -1217,7 +1220,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1217 1220
1218 spin_lock_irq(q->queue_lock); 1221 spin_lock_irq(q->queue_lock);
1219 1222
1220 if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q)) 1223 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1224 where = ELEVATOR_INSERT_FRONT;
1225 goto get_rq;
1226 }
1227
1228 if (elv_queue_empty(q))
1221 goto get_rq; 1229 goto get_rq;
1222 1230
1223 el_ret = elv_merge(q, &req, bio); 1231 el_ret = elv_merge(q, &req, bio);
@@ -1314,7 +1322,10 @@ get_rq:
1314 req->cpu = blk_cpu_to_group(smp_processor_id()); 1322 req->cpu = blk_cpu_to_group(smp_processor_id());
1315 if (queue_should_plug(q) && elv_queue_empty(q)) 1323 if (queue_should_plug(q) && elv_queue_empty(q))
1316 blk_plug_device(q); 1324 blk_plug_device(q);
1317 add_request(q, req); 1325
1326 /* insert the request into the elevator */
1327 drive_stat_acct(req, 1);
1328 __elv_add_request(q, req, where, 0);
1318out: 1329out:
1319 if (unplug || !queue_should_plug(q)) 1330 if (unplug || !queue_should_plug(q))
1320 __generic_unplug_device(q); 1331 __generic_unplug_device(q);
@@ -1514,6 +1525,19 @@ static inline void __generic_make_request(struct bio *bio)
1514 if (bio_check_eod(bio, nr_sectors)) 1525 if (bio_check_eod(bio, nr_sectors))
1515 goto end_io; 1526 goto end_io;
1516 1527
1528 /*
1529 * Filter flush bio's early so that make_request based
1530 * drivers without flush support don't have to worry
1531 * about them.
1532 */
1533 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1534 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1535 if (!nr_sectors) {
1536 err = 0;
1537 goto end_io;
1538 }
1539 }
1540
1517 if ((bio->bi_rw & REQ_DISCARD) && 1541 if ((bio->bi_rw & REQ_DISCARD) &&
1518 (!blk_queue_discard(q) || 1542 (!blk_queue_discard(q) ||
1519 ((bio->bi_rw & REQ_SECURE) && 1543 ((bio->bi_rw & REQ_SECURE) &&
@@ -1522,6 +1546,15 @@ static inline void __generic_make_request(struct bio *bio)
1522 goto end_io; 1546 goto end_io;
1523 } 1547 }
1524 1548
1549 blk_throtl_bio(q, &bio);
1550
1551 /*
1552 * If bio = NULL, bio has been throttled and will be submitted
1553 * later.
1554 */
1555 if (!bio)
1556 break;
1557
1525 trace_block_bio_queue(q, bio); 1558 trace_block_bio_queue(q, bio);
1526 1559
1527 ret = q->make_request_fn(q, bio); 1560 ret = q->make_request_fn(q, bio);
@@ -1612,11 +1645,12 @@ void submit_bio(int rw, struct bio *bio)
1612 1645
1613 if (unlikely(block_dump)) { 1646 if (unlikely(block_dump)) {
1614 char b[BDEVNAME_SIZE]; 1647 char b[BDEVNAME_SIZE];
1615 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", 1648 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1616 current->comm, task_pid_nr(current), 1649 current->comm, task_pid_nr(current),
1617 (rw & WRITE) ? "WRITE" : "READ", 1650 (rw & WRITE) ? "WRITE" : "READ",
1618 (unsigned long long)bio->bi_sector, 1651 (unsigned long long)bio->bi_sector,
1619 bdevname(bio->bi_bdev, b)); 1652 bdevname(bio->bi_bdev, b),
1653 count);
1620 } 1654 }
1621 } 1655 }
1622 1656
@@ -1759,7 +1793,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
1759 int cpu; 1793 int cpu;
1760 1794
1761 cpu = part_stat_lock(); 1795 cpu = part_stat_lock();
1762 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1796 part = req->part;
1763 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1797 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1764 part_stat_unlock(); 1798 part_stat_unlock();
1765 } 1799 }
@@ -1768,18 +1802,18 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
1768static void blk_account_io_done(struct request *req) 1802static void blk_account_io_done(struct request *req)
1769{ 1803{
1770 /* 1804 /*
1771 * Account IO completion. bar_rq isn't accounted as a normal 1805 * Account IO completion. flush_rq isn't accounted as a
1772 * IO on queueing nor completion. Accounting the containing 1806 * normal IO on queueing nor completion. Accounting the
1773 * request is enough. 1807 * containing request is enough.
1774 */ 1808 */
1775 if (blk_do_io_stat(req) && req != &req->q->bar_rq) { 1809 if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
1776 unsigned long duration = jiffies - req->start_time; 1810 unsigned long duration = jiffies - req->start_time;
1777 const int rw = rq_data_dir(req); 1811 const int rw = rq_data_dir(req);
1778 struct hd_struct *part; 1812 struct hd_struct *part;
1779 int cpu; 1813 int cpu;
1780 1814
1781 cpu = part_stat_lock(); 1815 cpu = part_stat_lock();
1782 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1816 part = req->part;
1783 1817
1784 part_stat_inc(cpu, part, ios[rw]); 1818 part_stat_inc(cpu, part, ios[rw]);
1785 part_stat_add(cpu, part, ticks[rw], duration); 1819 part_stat_add(cpu, part, ticks[rw], duration);
@@ -2497,9 +2531,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2497static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2531static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2498{ 2532{
2499 dst->cpu = src->cpu; 2533 dst->cpu = src->cpu;
2500 dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); 2534 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2501 if (src->cmd_flags & REQ_DISCARD)
2502 dst->cmd_flags |= REQ_DISCARD;
2503 dst->cmd_type = src->cmd_type; 2535 dst->cmd_type = src->cmd_type;
2504 dst->__sector = blk_rq_pos(src); 2536 dst->__sector = blk_rq_pos(src);
2505 dst->__data_len = blk_rq_bytes(src); 2537 dst->__data_len = blk_rq_bytes(src);
@@ -2579,6 +2611,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2579} 2611}
2580EXPORT_SYMBOL(kblockd_schedule_work); 2612EXPORT_SYMBOL(kblockd_schedule_work);
2581 2613
2614int kblockd_schedule_delayed_work(struct request_queue *q,
2615 struct delayed_work *dwork, unsigned long delay)
2616{
2617 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2618}
2619EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2620
2582int __init blk_dev_init(void) 2621int __init blk_dev_init(void)
2583{ 2622{
2584 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2623 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e1672f14840e..cf1456a02acd 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -80,6 +80,7 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
80 DECLARE_COMPLETION_ONSTACK(wait); 80 DECLARE_COMPLETION_ONSTACK(wait);
81 char sense[SCSI_SENSE_BUFFERSIZE]; 81 char sense[SCSI_SENSE_BUFFERSIZE];
82 int err = 0; 82 int err = 0;
83 unsigned long hang_check;
83 84
84 /* 85 /*
85 * we need an extra reference to the request, so we can look at 86 * we need an extra reference to the request, so we can look at
@@ -95,7 +96,13 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
95 96
96 rq->end_io_data = &wait; 97 rq->end_io_data = &wait;
97 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); 98 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
98 wait_for_completion(&wait); 99
100 /* Prevent hang_check timer from firing at us during very long I/O */
101 hang_check = sysctl_hung_task_timeout_secs;
102 if (hang_check)
103 while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
104 else
105 wait_for_completion(&wait);
99 106
100 if (rq->errors) 107 if (rq->errors)
101 err = -EIO; 108 err = -EIO;
diff --git a/block/blk-flush.c b/block/blk-flush.c
new file mode 100644
index 000000000000..54b123d6563e
--- /dev/null
+++ b/block/blk-flush.c
@@ -0,0 +1,262 @@
1/*
2 * Functions to sequence FLUSH and FUA writes.
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/gfp.h>
9
10#include "blk.h"
11
12/* FLUSH/FUA sequences */
13enum {
14 QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */
15 QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */
16 QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */
17 QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
18 QUEUE_FSEQ_DONE = (1 << 4),
19};
20
21static struct request *queue_next_fseq(struct request_queue *q);
22
23unsigned blk_flush_cur_seq(struct request_queue *q)
24{
25 if (!q->flush_seq)
26 return 0;
27 return 1 << ffz(q->flush_seq);
28}
29
30static struct request *blk_flush_complete_seq(struct request_queue *q,
31 unsigned seq, int error)
32{
33 struct request *next_rq = NULL;
34
35 if (error && !q->flush_err)
36 q->flush_err = error;
37
38 BUG_ON(q->flush_seq & seq);
39 q->flush_seq |= seq;
40
41 if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
42 /* not complete yet, queue the next flush sequence */
43 next_rq = queue_next_fseq(q);
44 } else {
45 /* complete this flush request */
46 __blk_end_request_all(q->orig_flush_rq, q->flush_err);
47 q->orig_flush_rq = NULL;
48 q->flush_seq = 0;
49
50 /* dispatch the next flush if there's one */
51 if (!list_empty(&q->pending_flushes)) {
52 next_rq = list_entry_rq(q->pending_flushes.next);
53 list_move(&next_rq->queuelist, &q->queue_head);
54 }
55 }
56 return next_rq;
57}
58
59static void blk_flush_complete_seq_end_io(struct request_queue *q,
60 unsigned seq, int error)
61{
62 bool was_empty = elv_queue_empty(q);
63 struct request *next_rq;
64
65 next_rq = blk_flush_complete_seq(q, seq, error);
66
67 /*
68 * Moving a request silently to empty queue_head may stall the
69 * queue. Kick the queue in those cases.
70 */
71 if (was_empty && next_rq)
72 __blk_run_queue(q);
73}
74
75static void pre_flush_end_io(struct request *rq, int error)
76{
77 elv_completed_request(rq->q, rq);
78 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
79}
80
81static void flush_data_end_io(struct request *rq, int error)
82{
83 elv_completed_request(rq->q, rq);
84 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
85}
86
87static void post_flush_end_io(struct request *rq, int error)
88{
89 elv_completed_request(rq->q, rq);
90 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
91}
92
93static void init_flush_request(struct request *rq, struct gendisk *disk)
94{
95 rq->cmd_type = REQ_TYPE_FS;
96 rq->cmd_flags = WRITE_FLUSH;
97 rq->rq_disk = disk;
98}
99
100static struct request *queue_next_fseq(struct request_queue *q)
101{
102 struct request *orig_rq = q->orig_flush_rq;
103 struct request *rq = &q->flush_rq;
104
105 blk_rq_init(q, rq);
106
107 switch (blk_flush_cur_seq(q)) {
108 case QUEUE_FSEQ_PREFLUSH:
109 init_flush_request(rq, orig_rq->rq_disk);
110 rq->end_io = pre_flush_end_io;
111 break;
112 case QUEUE_FSEQ_DATA:
113 init_request_from_bio(rq, orig_rq->bio);
114 /*
115 * orig_rq->rq_disk may be different from
116 * bio->bi_bdev->bd_disk if orig_rq got here through
117 * remapping drivers. Make sure rq->rq_disk points
118 * to the same one as orig_rq.
119 */
120 rq->rq_disk = orig_rq->rq_disk;
121 rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
122 rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
123 rq->end_io = flush_data_end_io;
124 break;
125 case QUEUE_FSEQ_POSTFLUSH:
126 init_flush_request(rq, orig_rq->rq_disk);
127 rq->end_io = post_flush_end_io;
128 break;
129 default:
130 BUG();
131 }
132
133 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
134 return rq;
135}
136
137struct request *blk_do_flush(struct request_queue *q, struct request *rq)
138{
139 unsigned int fflags = q->flush_flags; /* may change, cache it */
140 bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
141 bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
142 bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
143 unsigned skip = 0;
144
145 /*
146 * Special case. If there's data but flush is not necessary,
147 * the request can be issued directly.
148 *
149 * Flush w/o data should be able to be issued directly too but
150 * currently some drivers assume that rq->bio contains
151 * non-zero data if it isn't NULL and empty FLUSH requests
152 * getting here usually have bio's without data.
153 */
154 if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
155 rq->cmd_flags &= ~REQ_FLUSH;
156 if (!has_fua)
157 rq->cmd_flags &= ~REQ_FUA;
158 return rq;
159 }
160
161 /*
162 * Sequenced flushes can't be processed in parallel. If
163 * another one is already in progress, queue for later
164 * processing.
165 */
166 if (q->flush_seq) {
167 list_move_tail(&rq->queuelist, &q->pending_flushes);
168 return NULL;
169 }
170
171 /*
172 * Start a new flush sequence
173 */
174 q->flush_err = 0;
175 q->flush_seq |= QUEUE_FSEQ_STARTED;
176
177 /* adjust FLUSH/FUA of the original request and stash it away */
178 rq->cmd_flags &= ~REQ_FLUSH;
179 if (!has_fua)
180 rq->cmd_flags &= ~REQ_FUA;
181 blk_dequeue_request(rq);
182 q->orig_flush_rq = rq;
183
184 /* skip unneded sequences and return the first one */
185 if (!do_preflush)
186 skip |= QUEUE_FSEQ_PREFLUSH;
187 if (!blk_rq_sectors(rq))
188 skip |= QUEUE_FSEQ_DATA;
189 if (!do_postflush)
190 skip |= QUEUE_FSEQ_POSTFLUSH;
191 return blk_flush_complete_seq(q, skip, 0);
192}
193
194static void bio_end_flush(struct bio *bio, int err)
195{
196 if (err)
197 clear_bit(BIO_UPTODATE, &bio->bi_flags);
198 if (bio->bi_private)
199 complete(bio->bi_private);
200 bio_put(bio);
201}
202
203/**
204 * blkdev_issue_flush - queue a flush
205 * @bdev: blockdev to issue flush for
206 * @gfp_mask: memory allocation flags (for bio_alloc)
207 * @error_sector: error sector
208 *
209 * Description:
210 * Issue a flush for the block device in question. Caller can supply
211 * room for storing the error offset in case of a flush error, if they
212 * wish to. If WAIT flag is not passed then caller may check only what
213 * request was pushed in some internal queue for later handling.
214 */
215int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
216 sector_t *error_sector)
217{
218 DECLARE_COMPLETION_ONSTACK(wait);
219 struct request_queue *q;
220 struct bio *bio;
221 int ret = 0;
222
223 if (bdev->bd_disk == NULL)
224 return -ENXIO;
225
226 q = bdev_get_queue(bdev);
227 if (!q)
228 return -ENXIO;
229
230 /*
231 * some block devices may not have their queue correctly set up here
232 * (e.g. loop device without a backing file) and so issuing a flush
233 * here will panic. Ensure there is a request function before issuing
234 * the flush.
235 */
236 if (!q->make_request_fn)
237 return -ENXIO;
238
239 bio = bio_alloc(gfp_mask, 0);
240 bio->bi_end_io = bio_end_flush;
241 bio->bi_bdev = bdev;
242 bio->bi_private = &wait;
243
244 bio_get(bio);
245 submit_bio(WRITE_FLUSH, bio);
246 wait_for_completion(&wait);
247
248 /*
249 * The driver must store the error location in ->bi_sector, if
250 * it supports it. For non-stacked drivers, this should be
251 * copied from blk_rq_pos(rq).
252 */
253 if (error_sector)
254 *error_sector = bio->bi_sector;
255
256 if (!bio_flagged(bio, BIO_UPTODATE))
257 ret = -EIO;
258
259 bio_put(bio);
260 return ret;
261}
262EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index edce1ef7933d..54bcba6c02a7 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -32,24 +32,37 @@ static struct kmem_cache *integrity_cachep;
32 32
33/** 33/**
34 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements 34 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
35 * @rq: request with integrity metadata attached 35 * @q: request queue
36 * @bio: bio with integrity metadata attached
36 * 37 *
37 * Description: Returns the number of elements required in a 38 * Description: Returns the number of elements required in a
38 * scatterlist corresponding to the integrity metadata in a request. 39 * scatterlist corresponding to the integrity metadata in a bio.
39 */ 40 */
40int blk_rq_count_integrity_sg(struct request *rq) 41int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
41{ 42{
42 struct bio_vec *iv, *ivprv; 43 struct bio_vec *iv, *ivprv = NULL;
43 struct req_iterator iter; 44 unsigned int segments = 0;
44 unsigned int segments; 45 unsigned int seg_size = 0;
46 unsigned int i = 0;
45 47
46 ivprv = NULL; 48 bio_for_each_integrity_vec(iv, bio, i) {
47 segments = 0;
48 49
49 rq_for_each_integrity_segment(iv, rq, iter) { 50 if (ivprv) {
51 if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
52 goto new_segment;
53
54 if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
55 goto new_segment;
50 56
51 if (!ivprv || !BIOVEC_PHYS_MERGEABLE(ivprv, iv)) 57 if (seg_size + iv->bv_len > queue_max_segment_size(q))
58 goto new_segment;
59
60 seg_size += iv->bv_len;
61 } else {
62new_segment:
52 segments++; 63 segments++;
64 seg_size = iv->bv_len;
65 }
53 66
54 ivprv = iv; 67 ivprv = iv;
55 } 68 }
@@ -60,30 +73,34 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
60 73
61/** 74/**
62 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist 75 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
63 * @rq: request with integrity metadata attached 76 * @q: request queue
77 * @bio: bio with integrity metadata attached
64 * @sglist: target scatterlist 78 * @sglist: target scatterlist
65 * 79 *
66 * Description: Map the integrity vectors in request into a 80 * Description: Map the integrity vectors in request into a
67 * scatterlist. The scatterlist must be big enough to hold all 81 * scatterlist. The scatterlist must be big enough to hold all
68 * elements. I.e. sized using blk_rq_count_integrity_sg(). 82 * elements. I.e. sized using blk_rq_count_integrity_sg().
69 */ 83 */
70int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist) 84int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
85 struct scatterlist *sglist)
71{ 86{
72 struct bio_vec *iv, *ivprv; 87 struct bio_vec *iv, *ivprv = NULL;
73 struct req_iterator iter; 88 struct scatterlist *sg = NULL;
74 struct scatterlist *sg; 89 unsigned int segments = 0;
75 unsigned int segments; 90 unsigned int i = 0;
76
77 ivprv = NULL;
78 sg = NULL;
79 segments = 0;
80 91
81 rq_for_each_integrity_segment(iv, rq, iter) { 92 bio_for_each_integrity_vec(iv, bio, i) {
82 93
83 if (ivprv) { 94 if (ivprv) {
84 if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) 95 if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
85 goto new_segment; 96 goto new_segment;
86 97
98 if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
99 goto new_segment;
100
101 if (sg->length + iv->bv_len > queue_max_segment_size(q))
102 goto new_segment;
103
87 sg->length += iv->bv_len; 104 sg->length += iv->bv_len;
88 } else { 105 } else {
89new_segment: 106new_segment:
@@ -162,6 +179,40 @@ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
162} 179}
163EXPORT_SYMBOL(blk_integrity_compare); 180EXPORT_SYMBOL(blk_integrity_compare);
164 181
182int blk_integrity_merge_rq(struct request_queue *q, struct request *req,
183 struct request *next)
184{
185 if (blk_integrity_rq(req) != blk_integrity_rq(next))
186 return -1;
187
188 if (req->nr_integrity_segments + next->nr_integrity_segments >
189 q->limits.max_integrity_segments)
190 return -1;
191
192 return 0;
193}
194EXPORT_SYMBOL(blk_integrity_merge_rq);
195
196int blk_integrity_merge_bio(struct request_queue *q, struct request *req,
197 struct bio *bio)
198{
199 int nr_integrity_segs;
200 struct bio *next = bio->bi_next;
201
202 bio->bi_next = NULL;
203 nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
204 bio->bi_next = next;
205
206 if (req->nr_integrity_segments + nr_integrity_segs >
207 q->limits.max_integrity_segments)
208 return -1;
209
210 req->nr_integrity_segments += nr_integrity_segs;
211
212 return 0;
213}
214EXPORT_SYMBOL(blk_integrity_merge_bio);
215
165struct integrity_sysfs_entry { 216struct integrity_sysfs_entry {
166 struct attribute attr; 217 struct attribute attr;
167 ssize_t (*show)(struct blk_integrity *, char *); 218 ssize_t (*show)(struct blk_integrity *, char *);
@@ -381,7 +432,6 @@ void blk_integrity_unregister(struct gendisk *disk)
381 kobject_uevent(&bi->kobj, KOBJ_REMOVE); 432 kobject_uevent(&bi->kobj, KOBJ_REMOVE);
382 kobject_del(&bi->kobj); 433 kobject_del(&bi->kobj);
383 kobject_put(&bi->kobj); 434 kobject_put(&bi->kobj);
384 kmem_cache_free(integrity_cachep, bi);
385 disk->integrity = NULL; 435 disk->integrity = NULL;
386} 436}
387EXPORT_SYMBOL(blk_integrity_unregister); 437EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index c392029a104e..1a320d2406b0 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -39,8 +39,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
39{ 39{
40 DECLARE_COMPLETION_ONSTACK(wait); 40 DECLARE_COMPLETION_ONSTACK(wait);
41 struct request_queue *q = bdev_get_queue(bdev); 41 struct request_queue *q = bdev_get_queue(bdev);
42 int type = flags & BLKDEV_IFL_BARRIER ? 42 int type = REQ_WRITE | REQ_DISCARD;
43 DISCARD_BARRIER : DISCARD_NOBARRIER;
44 unsigned int max_discard_sectors; 43 unsigned int max_discard_sectors;
45 struct bio *bio; 44 struct bio *bio;
46 int ret = 0; 45 int ret = 0;
@@ -62,10 +61,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
62 max_discard_sectors &= ~(disc_sects - 1); 61 max_discard_sectors &= ~(disc_sects - 1);
63 } 62 }
64 63
65 if (flags & BLKDEV_IFL_SECURE) { 64 if (flags & BLKDEV_DISCARD_SECURE) {
66 if (!blk_queue_secdiscard(q)) 65 if (!blk_queue_secdiscard(q))
67 return -EOPNOTSUPP; 66 return -EOPNOTSUPP;
68 type |= DISCARD_SECURE; 67 type |= REQ_SECURE;
69 } 68 }
70 69
71 while (nr_sects && !ret) { 70 while (nr_sects && !ret) {
@@ -78,8 +77,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
78 bio->bi_sector = sector; 77 bio->bi_sector = sector;
79 bio->bi_end_io = blkdev_discard_end_io; 78 bio->bi_end_io = blkdev_discard_end_io;
80 bio->bi_bdev = bdev; 79 bio->bi_bdev = bdev;
81 if (flags & BLKDEV_IFL_WAIT) 80 bio->bi_private = &wait;
82 bio->bi_private = &wait;
83 81
84 if (nr_sects > max_discard_sectors) { 82 if (nr_sects > max_discard_sectors) {
85 bio->bi_size = max_discard_sectors << 9; 83 bio->bi_size = max_discard_sectors << 9;
@@ -93,8 +91,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
93 bio_get(bio); 91 bio_get(bio);
94 submit_bio(type, bio); 92 submit_bio(type, bio);
95 93
96 if (flags & BLKDEV_IFL_WAIT) 94 wait_for_completion(&wait);
97 wait_for_completion(&wait);
98 95
99 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 96 if (bio_flagged(bio, BIO_EOPNOTSUPP))
100 ret = -EOPNOTSUPP; 97 ret = -EOPNOTSUPP;
@@ -140,7 +137,6 @@ static void bio_batch_end_io(struct bio *bio, int err)
140 * @sector: start sector 137 * @sector: start sector
141 * @nr_sects: number of sectors to write 138 * @nr_sects: number of sectors to write
142 * @gfp_mask: memory allocation flags (for bio_alloc) 139 * @gfp_mask: memory allocation flags (for bio_alloc)
143 * @flags: BLKDEV_IFL_* flags to control behaviour
144 * 140 *
145 * Description: 141 * Description:
146 * Generate and issue number of bios with zerofiled pages. 142 * Generate and issue number of bios with zerofiled pages.
@@ -149,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
149 */ 145 */
150 146
151int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 147int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
152 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) 148 sector_t nr_sects, gfp_t gfp_mask)
153{ 149{
154 int ret; 150 int ret;
155 struct bio *bio; 151 struct bio *bio;
@@ -162,12 +158,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
162 bb.wait = &wait; 158 bb.wait = &wait;
163 bb.end_io = NULL; 159 bb.end_io = NULL;
164 160
165 if (flags & BLKDEV_IFL_BARRIER) {
166 /* issue async barrier before the data */
167 ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
168 if (ret)
169 return ret;
170 }
171submit: 161submit:
172 ret = 0; 162 ret = 0;
173 while (nr_sects != 0) { 163 while (nr_sects != 0) {
@@ -181,8 +171,7 @@ submit:
181 bio->bi_sector = sector; 171 bio->bi_sector = sector;
182 bio->bi_bdev = bdev; 172 bio->bi_bdev = bdev;
183 bio->bi_end_io = bio_batch_end_io; 173 bio->bi_end_io = bio_batch_end_io;
184 if (flags & BLKDEV_IFL_WAIT) 174 bio->bi_private = &bb;
185 bio->bi_private = &bb;
186 175
187 while (nr_sects != 0) { 176 while (nr_sects != 0) {
188 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); 177 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -199,18 +188,10 @@ submit:
199 issued++; 188 issued++;
200 submit_bio(WRITE, bio); 189 submit_bio(WRITE, bio);
201 } 190 }
202 /*
203 * When all data bios are in flight. Send final barrier if requeted.
204 */
205 if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
206 ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
207 flags & BLKDEV_IFL_WAIT);
208
209 191
210 if (flags & BLKDEV_IFL_WAIT) 192 /* Wait for bios in-flight */
211 /* Wait for bios in-flight */ 193 while (issued != atomic_read(&bb.done))
212 while ( issued != atomic_read(&bb.done)) 194 wait_for_completion(&wait);
213 wait_for_completion(&wait);
214 195
215 if (!test_bit(BIO_UPTODATE, &bb.flags)) 196 if (!test_bit(BIO_UPTODATE, &bb.flags))
216 /* One of bios in the batch was completed with error.*/ 197 /* One of bios in the batch was completed with error.*/
diff --git a/block/blk-map.c b/block/blk-map.c
index ade0a08c9099..d4a586d8691e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
54 * direct dma. else, set up kernel bounce buffers 54 * direct dma. else, set up kernel bounce buffers
55 */ 55 */
56 uaddr = (unsigned long) ubuf; 56 uaddr = (unsigned long) ubuf;
57 if (blk_rq_aligned(q, ubuf, len) && !map_data) 57 if (blk_rq_aligned(q, uaddr, len) && !map_data)
58 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask); 58 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
59 else 59 else
60 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask); 60 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
@@ -288,6 +288,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
288 unsigned int len, gfp_t gfp_mask) 288 unsigned int len, gfp_t gfp_mask)
289{ 289{
290 int reading = rq_data_dir(rq) == READ; 290 int reading = rq_data_dir(rq) == READ;
291 unsigned long addr = (unsigned long) kbuf;
291 int do_copy = 0; 292 int do_copy = 0;
292 struct bio *bio; 293 struct bio *bio;
293 int ret; 294 int ret;
@@ -297,7 +298,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
297 if (!len || !kbuf) 298 if (!len || !kbuf)
298 return -EINVAL; 299 return -EINVAL;
299 300
300 do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf); 301 do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
301 if (do_copy) 302 if (do_copy)
302 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); 303 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
303 else 304 else
diff --git a/block/blk-merge.c b/block/blk-merge.c
index eafc94f68d79..0a2fd8a48a38 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -205,12 +205,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
205{ 205{
206 int nr_phys_segs = bio_phys_segments(q, bio); 206 int nr_phys_segs = bio_phys_segments(q, bio);
207 207
208 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) { 208 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
209 req->cmd_flags |= REQ_NOMERGE; 209 goto no_merge;
210 if (req == q->last_merge) 210
211 q->last_merge = NULL; 211 if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
212 return 0; 212 goto no_merge;
213 }
214 213
215 /* 214 /*
216 * This will form the start of a new hw segment. Bump both 215 * This will form the start of a new hw segment. Bump both
@@ -218,6 +217,12 @@ static inline int ll_new_hw_segment(struct request_queue *q,
218 */ 217 */
219 req->nr_phys_segments += nr_phys_segs; 218 req->nr_phys_segments += nr_phys_segs;
220 return 1; 219 return 1;
220
221no_merge:
222 req->cmd_flags |= REQ_NOMERGE;
223 if (req == q->last_merge)
224 q->last_merge = NULL;
225 return 0;
221} 226}
222 227
223int ll_back_merge_fn(struct request_queue *q, struct request *req, 228int ll_back_merge_fn(struct request_queue *q, struct request *req,
@@ -301,6 +306,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
301 if (total_phys_segments > queue_max_segments(q)) 306 if (total_phys_segments > queue_max_segments(q))
302 return 0; 307 return 0;
303 308
309 if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
310 return 0;
311
304 /* Merge is OK... */ 312 /* Merge is OK... */
305 req->nr_phys_segments = total_phys_segments; 313 req->nr_phys_segments = total_phys_segments;
306 return 1; 314 return 1;
@@ -343,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
343 int cpu; 351 int cpu;
344 352
345 cpu = part_stat_lock(); 353 cpu = part_stat_lock();
346 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 354 part = req->part;
347 355
348 part_round_stats(cpu, part); 356 part_round_stats(cpu, part);
349 part_dec_in_flight(part, rq_data_dir(req)); 357 part_dec_in_flight(part, rq_data_dir(req));
@@ -384,9 +392,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
384 || next->special) 392 || next->special)
385 return 0; 393 return 0;
386 394
387 if (blk_integrity_rq(req) != blk_integrity_rq(next))
388 return 0;
389
390 /* 395 /*
391 * If we are allowed to merge, then append bio list 396 * If we are allowed to merge, then append bio list
392 * from next to rq and release next. merge_requests_fn 397 * from next to rq and release next. merge_requests_fn
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a234f4bf1d6f..701859fb9647 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -111,6 +111,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
111void blk_set_default_limits(struct queue_limits *lim) 111void blk_set_default_limits(struct queue_limits *lim)
112{ 112{
113 lim->max_segments = BLK_MAX_SEGMENTS; 113 lim->max_segments = BLK_MAX_SEGMENTS;
114 lim->max_integrity_segments = 0;
114 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 115 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
115 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; 116 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
116 lim->max_sectors = BLK_DEF_MAX_SECTORS; 117 lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -213,7 +214,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
213 */ 214 */
214 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 215 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
215 dma = 1; 216 dma = 1;
216 q->limits.bounce_pfn = max_low_pfn; 217 q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
217#else 218#else
218 if (b_pfn < blk_max_low_pfn) 219 if (b_pfn < blk_max_low_pfn)
219 dma = 1; 220 dma = 1;
@@ -343,7 +344,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
343 * hardware can operate on without reverting to read-modify-write 344 * hardware can operate on without reverting to read-modify-write
344 * operations. 345 * operations.
345 */ 346 */
346void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) 347void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
347{ 348{
348 q->limits.physical_block_size = size; 349 q->limits.physical_block_size = size;
349 350
@@ -455,11 +456,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
455} 456}
456EXPORT_SYMBOL(blk_queue_io_opt); 457EXPORT_SYMBOL(blk_queue_io_opt);
457 458
458/*
459 * Returns the minimum that is _not_ zero, unless both are zero.
460 */
461#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
462
463/** 459/**
464 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers 460 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
465 * @t: the stacking driver (top) 461 * @t: the stacking driver (top)
@@ -514,6 +510,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
514 b->seg_boundary_mask); 510 b->seg_boundary_mask);
515 511
516 t->max_segments = min_not_zero(t->max_segments, b->max_segments); 512 t->max_segments = min_not_zero(t->max_segments, b->max_segments);
513 t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
514 b->max_integrity_segments);
517 515
518 t->max_segment_size = min_not_zero(t->max_segment_size, 516 t->max_segment_size = min_not_zero(t->max_segment_size,
519 b->max_segment_size); 517 b->max_segment_size);
@@ -794,6 +792,26 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
794} 792}
795EXPORT_SYMBOL(blk_queue_update_dma_alignment); 793EXPORT_SYMBOL(blk_queue_update_dma_alignment);
796 794
795/**
796 * blk_queue_flush - configure queue's cache flush capability
797 * @q: the request queue for the device
798 * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
799 *
800 * Tell block layer cache flush capability of @q. If it supports
801 * flushing, REQ_FLUSH should be set. If it supports bypassing
802 * write cache for individual writes, REQ_FUA should be set.
803 */
804void blk_queue_flush(struct request_queue *q, unsigned int flush)
805{
806 WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
807
808 if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
809 flush &= ~REQ_FUA;
810
811 q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
812}
813EXPORT_SYMBOL_GPL(blk_queue_flush);
814
797static int __init blk_settings_init(void) 815static int __init blk_settings_init(void)
798{ 816{
799 blk_max_low_pfn = max_low_pfn - 1; 817 blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0749b89c6885..da8a8a40cd4c 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -112,6 +112,11 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
112 return queue_var_show(queue_max_segments(q), (page)); 112 return queue_var_show(queue_max_segments(q), (page));
113} 113}
114 114
115static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
116{
117 return queue_var_show(q->limits.max_integrity_segments, (page));
118}
119
115static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) 120static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
116{ 121{
117 if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 122 if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
@@ -288,6 +293,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
288 .show = queue_max_segments_show, 293 .show = queue_max_segments_show,
289}; 294};
290 295
296static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
297 .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
298 .show = queue_max_integrity_segments_show,
299};
300
291static struct queue_sysfs_entry queue_max_segment_size_entry = { 301static struct queue_sysfs_entry queue_max_segment_size_entry = {
292 .attr = {.name = "max_segment_size", .mode = S_IRUGO }, 302 .attr = {.name = "max_segment_size", .mode = S_IRUGO },
293 .show = queue_max_segment_size_show, 303 .show = queue_max_segment_size_show,
@@ -375,6 +385,7 @@ static struct attribute *default_attrs[] = {
375 &queue_max_hw_sectors_entry.attr, 385 &queue_max_hw_sectors_entry.attr,
376 &queue_max_sectors_entry.attr, 386 &queue_max_sectors_entry.attr,
377 &queue_max_segments_entry.attr, 387 &queue_max_segments_entry.attr,
388 &queue_max_integrity_segments_entry.attr,
378 &queue_max_segment_size_entry.attr, 389 &queue_max_segment_size_entry.attr,
379 &queue_iosched_entry.attr, 390 &queue_iosched_entry.attr,
380 &queue_hw_sector_size_entry.attr, 391 &queue_hw_sector_size_entry.attr,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
new file mode 100644
index 000000000000..56ad4531b412
--- /dev/null
+++ b/block/blk-throttle.c
@@ -0,0 +1,1123 @@
1/*
2 * Interface for controlling IO bandwidth on a request queue
3 *
4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
5 */
6
7#include <linux/module.h>
8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/bio.h>
11#include <linux/blktrace_api.h>
12#include "blk-cgroup.h"
13
14/* Max dispatch from a group in 1 round */
15static int throtl_grp_quantum = 8;
16
17/* Total max dispatch from all groups in one round */
18static int throtl_quantum = 32;
19
20/* Throttling is performed over 100ms slice and after that slice is renewed */
21static unsigned long throtl_slice = HZ/10; /* 100 ms */
22
/*
 * Service tree of active throttle groups, keyed by each group's next
 * dispatch time.  Caches the leftmost (earliest) node and its dispatch
 * time so the dispatch path can poll the tree cheaply.
 */
23struct throtl_rb_root {
24 struct rb_root rb;
25 struct rb_node *left; /* cached leftmost node; NULL when invalidated */
26 unsigned int count; /* number of groups on the tree */
27 unsigned long min_disptime; /* disptime of leftmost group, in jiffies */
28};
29
/* Compound-literal initializer for an empty service tree */
30#define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
31 .count = 0, .min_disptime = 0}
32
/* Map a service-tree rb_node back to its containing throtl_grp */
33#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
34
/* Per-cgroup, per-queue throttling state */
35struct throtl_grp {
36 /* List of throtl groups on the request queue*/
37 struct hlist_node tg_node;
38
39 /* active throtl group service_tree member */
40 struct rb_node rb_node;
41
42 /*
43 * Dispatch time in jiffies. This is the estimated time when group
44 * will unthrottle and is ready to dispatch more bio. It is used as
45 * key to sort active groups in service tree.
46 */
47 unsigned long disptime;
48
49 struct blkio_group blkg; /* embedded blkio-cgroup group */
50 atomic_t ref; /* refcount: initial ref + one per queued bio */
51 unsigned int flags; /* tg_state_flags bits */
52
53 /* Two lists for READ and WRITE */
54 struct bio_list bio_lists[2];
55
56 /* Number of queued bios on READ and WRITE lists */
57 unsigned int nr_queued[2];
58
59 /* bytes per second rate limits */
60 uint64_t bps[2];
61
62 /* IOPS limits */
63 unsigned int iops[2];
64
65 /* Number of bytes dispatched in current slice */
66 uint64_t bytes_disp[2];
67 /* Number of bio's dispatched in current slice */
68 unsigned int io_disp[2];
69
70 /* When did we start a new slice */
71 unsigned long slice_start[2];
72 unsigned long slice_end[2];
73
74 /* Some throttle limits got updated for the group */
75 bool limits_changed;
76};
77
/* Per-request-queue throttling state, attached at q->td */
78struct throtl_data
79{
80 /* List of throtl groups */
81 struct hlist_head tg_list;
82
83 /* service tree for active throtl groups */
84 struct throtl_rb_root tg_service_tree;
85
86 struct throtl_grp root_tg; /* statically allocated fallback group */
87 struct request_queue *queue; /* owning queue */
88
89 /* Total Number of queued bios on READ and WRITE lists */
90 unsigned int nr_queued[2];
91
92 /*
93 * number of total undestroyed groups
94 */
95 unsigned int nr_undestroyed_grps;
96
97 /* Work for dispatching throttled bios */
98 struct delayed_work throtl_work;
99
100 atomic_t limits_changed; /* pending limit-update notifications */
101};
102
103enum tg_state_flags {
104 THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
105};
106
/*
 * Generate mark/clear/test helpers for a tg_state_flags bit, e.g.
 * THROTL_TG_FNS(on_rr) produces throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr().
 */
107#define THROTL_TG_FNS(name) \
108static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
109{ \
110 (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
111} \
112static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
113{ \
114 (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
115} \
116static inline int throtl_tg_##name(const struct throtl_grp *tg) \
117{ \
118 return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
119}
120
121THROTL_TG_FNS(on_rr);
122
/*
 * Log a blktrace message for a specific throttle group, prefixed with
 * the group's cgroup path.  The expansion deliberately carries no
 * trailing semicolon and no trailing line continuation: the original
 * ended in "; \", which spliced the *next* source line into the macro
 * body (silently swallowing any code placed there) and made the macro
 * unusable in an unbraced if/else.
 */
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args)

/* Log a queue-wide throttling blktrace message */
#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
129
130static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
131{
132 if (blkg)
133 return container_of(blkg, struct throtl_grp, blkg);
134
135 return NULL;
136}
137
138static inline int total_nr_queued(struct throtl_data *td)
139{
140 return (td->nr_queued[0] + td->nr_queued[1]);
141}
142
/* Take an extra reference on @tg and return it, for call chaining */
143static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
144{
145 atomic_inc(&tg->ref);
146 return tg;
147}
148
149static void throtl_put_tg(struct throtl_grp *tg)
150{
151 BUG_ON(atomic_read(&tg->ref) <= 0);
152 if (!atomic_dec_and_test(&tg->ref))
153 return;
154 kfree(tg);
155}
156
/*
 * Look up the throtl_grp for @cgroup on @td, allocating one on first
 * use.  Returns NULL only when allocation fails.  Called under the
 * queue lock with GFP_ATOMIC; rates are seeded from the cgroup config.
 */
157static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
158 struct cgroup *cgroup)
159{
160 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
161 struct throtl_grp *tg = NULL;
162 void *key = td;
163 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
164 unsigned int major, minor;
165
166 /*
167 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
168 * tree of blkg (instead of traversing through hash list all
169 * the time.
170 */
171 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
172
173 /* Fill in device details for root group */
174 if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
175 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
176 tg->blkg.dev = MKDEV(major, minor);
177 goto done;
178 }
179
180 if (tg)
181 goto done;
182
183 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
184 if (!tg)
185 goto done;
186
187 INIT_HLIST_NODE(&tg->tg_node);
188 RB_CLEAR_NODE(&tg->rb_node);
189 bio_list_init(&tg->bio_lists[0]);
190 bio_list_init(&tg->bio_lists[1]);
191
192 /*
193 * Take the initial reference that will be released on destroy
194 * This can be thought of a joint reference by cgroup and
195 * request queue which will be dropped by either request queue
196 * exit or cgroup deletion path depending on who is exiting first.
197 */
198 atomic_set(&tg->ref, 1);
199
 /*
 * NOTE(review): unlike the root-group branch above, this path does not
 * check bdi->dev for NULL before dev_name() — confirm bdi->dev is
 * always set by the time a non-root group is created here.
 */
200 /* Add group onto cgroup list */
201 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
202 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
203 MKDEV(major, minor), BLKIO_POLICY_THROTL);
204
205 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
206 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
207 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
208 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
209
210 hlist_add_head(&tg->tg_node, &td->tg_list);
211 td->nr_undestroyed_grps++;
212done:
213 return tg;
214}
215
216static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
217{
218 struct cgroup *cgroup;
219 struct throtl_grp *tg = NULL;
220
221 rcu_read_lock();
222 cgroup = task_cgroup(current, blkio_subsys_id);
223 tg = throtl_find_alloc_tg(td, cgroup);
224 if (!tg)
225 tg = &td->root_tg;
226 rcu_read_unlock();
227 return tg;
228}
229
230static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
231{
232 /* Service tree is empty */
233 if (!root->count)
234 return NULL;
235
236 if (!root->left)
237 root->left = rb_first(&root->rb);
238
239 if (root->left)
240 return rb_entry_tg(root->left);
241
242 return NULL;
243}
244
/* Erase @n from @root and re-clear it so RB_EMPTY_NODE() holds again */
245static void rb_erase_init(struct rb_node *n, struct rb_root *root)
246{
247 rb_erase(n, root);
248 RB_CLEAR_NODE(n);
249}
250
251static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
252{
253 if (root->left == n)
254 root->left = NULL;
255 rb_erase_init(n, &root->rb);
256 --root->count;
257}
258
259static void update_min_dispatch_time(struct throtl_rb_root *st)
260{
261 struct throtl_grp *tg;
262
263 tg = throtl_rb_first(st);
264 if (!tg)
265 return;
266
267 st->min_disptime = tg->disptime;
268}
269
/*
 * Insert @tg into the service tree ordered by disptime (jiffies-safe
 * comparison via time_before).  If the new node became the leftmost,
 * update the cached leftmost pointer.
 */
270static void
271tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
272{
273 struct rb_node **node = &st->rb.rb_node;
274 struct rb_node *parent = NULL;
275 struct throtl_grp *__tg;
276 unsigned long key = tg->disptime;
277 int left = 1;
278
279 while (*node != NULL) {
280 parent = *node;
281 __tg = rb_entry_tg(parent);
282
283 if (time_before(key, __tg->disptime))
284 node = &parent->rb_left;
285 else {
286 node = &parent->rb_right;
287 left = 0;
288 }
289 }
290
291 if (left)
292 st->left = &tg->rb_node;
293
294 rb_link_node(&tg->rb_node, parent, node);
295 rb_insert_color(&tg->rb_node, &st->rb);
296}
297
/* Put @tg on the service tree and mark it active (caller checked !on_rr) */
298static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
299{
300 struct throtl_rb_root *st = &td->tg_service_tree;
301
302 tg_service_tree_add(st, tg);
303 throtl_mark_tg_on_rr(tg);
304 st->count++;
305}
306
/* Enqueue @tg unless it is already on the service tree */
307static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
308{
309 if (!throtl_tg_on_rr(tg))
310 __throtl_enqueue_tg(td, tg);
311}
312
/* Take @tg off the service tree and clear its active flag */
313static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
314{
315 throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
316 throtl_clear_tg_on_rr(tg);
317}
318
/* Dequeue @tg only if it is currently on the service tree */
319static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
320{
321 if (throtl_tg_on_rr(tg))
322 __throtl_dequeue_tg(td, tg);
323}
324
/*
 * Arm the dispatch worker for the earliest pending group: immediately
 * if its dispatch time already passed, otherwise after the remaining
 * delay.  No-op when nothing is queued.
 */
325static void throtl_schedule_next_dispatch(struct throtl_data *td)
326{
327 struct throtl_rb_root *st = &td->tg_service_tree;
328
329 /*
330 * If there are more bios pending, schedule more work.
331 */
332 if (!total_nr_queued(td))
333 return;
334
335 BUG_ON(!st->count);
336
337 update_min_dispatch_time(st);
338
339 if (time_before_eq(st->min_disptime, jiffies))
340 throtl_schedule_delayed_work(td->queue, 0);
341 else
342 throtl_schedule_delayed_work(td->queue,
343 (st->min_disptime - jiffies));
344}
345
/* Begin a fresh throtl_slice-long accounting window for direction @rw */
346static inline void
347throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
348{
349 tg->bytes_disp[rw] = 0;
350 tg->io_disp[rw] = 0;
351 tg->slice_start[rw] = jiffies;
352 tg->slice_end[rw] = jiffies + throtl_slice;
353 throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
354 rw == READ ? 'R' : 'W', tg->slice_start[rw],
355 tg->slice_end[rw], jiffies);
356}
357
/* Stretch the current slice so it ends at @jiffy_end (rounded up to a
 * throtl_slice boundary); dispatched counters are kept, not reset. */
358static inline void throtl_extend_slice(struct throtl_data *td,
359 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
360{
361 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
362 throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
363 rw == READ ? 'R' : 'W', tg->slice_start[rw],
364 tg->slice_end[rw], jiffies);
365}
366
367/* Determine if previously allocated or extended slice is complete or not */
368static bool
369throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
370{
371 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
372 return 0;
373
374 return 1;
375}
376
/*
 * Give back fully-elapsed slice intervals: subtract the bytes/ios that
 * those intervals entitled the group to and advance slice_start, so a
 * group that went idle is not later credited for time it did not use.
 */
377/* Trim the used slices and adjust slice start accordingly */
378static inline void
379throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
380{
381 unsigned long nr_slices, time_elapsed, io_trim;
382 u64 bytes_trim, tmp;
383
384 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
385
386 /*
387 * If bps are unlimited (-1), then time slice don't get
388 * renewed. Don't try to trim the slice if slice is used. A new
389 * slice will start when appropriate.
390 */
391 if (throtl_slice_used(td, tg, rw))
392 return;
393
394 time_elapsed = jiffies - tg->slice_start[rw];
395
396 nr_slices = time_elapsed / throtl_slice;
397
398 if (!nr_slices)
399 return;
 /* 64-bit multiply then do_div: bps * elapsed-whole-slices / HZ */
400 tmp = tg->bps[rw] * throtl_slice * nr_slices;
401 do_div(tmp, HZ);
402 bytes_trim = tmp;
403
404 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
405
406 if (!bytes_trim && !io_trim)
407 return;
408
409 if (tg->bytes_disp[rw] >= bytes_trim)
410 tg->bytes_disp[rw] -= bytes_trim;
411 else
412 tg->bytes_disp[rw] = 0;
413
414 if (tg->io_disp[rw] >= io_trim)
415 tg->io_disp[rw] -= io_trim;
416 else
417 tg->io_disp[rw] = 0;
418
419 tg->slice_start[rw] += nr_slices * throtl_slice;
420
421 throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
422 " start=%lu end=%lu jiffies=%lu",
423 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
424 tg->slice_start[rw], tg->slice_end[rw], jiffies);
425}
426
/*
 * Check whether one more bio fits in the group's IOPS budget for the
 * current slice.  Returns 1 if it does (*wait = 0), else 0 with *wait
 * set to an approximate number of jiffies until it would fit.
 */
427static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
428 struct bio *bio, unsigned long *wait)
429{
430 bool rw = bio_data_dir(bio);
431 unsigned int io_allowed;
432 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
433 u64 tmp;
434
435 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
436
437 /* Slice has just started. Consider one slice interval */
438 if (!jiffy_elapsed)
439 jiffy_elapsed_rnd = throtl_slice;
440
441 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
442
443 /*
444 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
445 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
446 * will allow dispatch after 1 second and after that slice should
447 * have been trimmed.
448 */
449
450 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
451 do_div(tmp, HZ);
452
 /* Clamp: io_allowed is only 32 bits wide */
453 if (tmp > UINT_MAX)
454 io_allowed = UINT_MAX;
455 else
456 io_allowed = tmp;
457
458 if (tg->io_disp[rw] + 1 <= io_allowed) {
459 if (wait)
460 *wait = 0;
461 return 1;
462 }
463
464 /* Calc approx time to dispatch */
465 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
466
467 if (jiffy_wait > jiffy_elapsed)
468 jiffy_wait = jiffy_wait - jiffy_elapsed;
469 else
470 jiffy_wait = 1;
471
472 if (wait)
473 *wait = jiffy_wait;
474 return 0;
475}
476
/*
 * Check whether @bio fits in the group's bytes-per-second budget for
 * the current slice.  Returns 1 if it does (*wait = 0), else 0 with
 * *wait set to an approximate number of jiffies until it would fit.
 */
477static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
478 struct bio *bio, unsigned long *wait)
479{
480 bool rw = bio_data_dir(bio);
481 u64 bytes_allowed, extra_bytes, tmp;
482 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
483
484 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
485
486 /* Slice has just started. Consider one slice interval */
487 if (!jiffy_elapsed)
488 jiffy_elapsed_rnd = throtl_slice;
489
490 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
491
492 tmp = tg->bps[rw] * jiffy_elapsed_rnd;
493 do_div(tmp, HZ);
494 bytes_allowed = tmp;
495
496 if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
497 if (wait)
498 *wait = 0;
499 return 1;
500 }
501
502 /* Calc approx time to dispatch */
503 extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
504 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
505
506 if (!jiffy_wait)
507 jiffy_wait = 1;
508
509 /*
510 * This wait time is without taking into consideration the rounding
511 * up we did. Add that time also.
512 */
513 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
514 if (wait)
515 *wait = jiffy_wait;
516 return 0;
517}
518
519/*
520 * Returns whether one can dispatch a bio or not. Also returns approx number
521 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
522 */
523static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
524 struct bio *bio, unsigned long *wait)
525{
526 bool rw = bio_data_dir(bio);
527 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
528
529 /*
530 * Currently whole state machine of group depends on first bio
531 * queued in the group bio list. So one should not be calling
532 * this function with a different bio if there are other bios
533 * queued.
534 */
535 BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
536
 /* -1 compares equal to the all-ones "unlimited" sentinel stored in
 * bps (u64) and iops (unsigned int) */
537 /* If tg->bps = -1, then BW is unlimited */
538 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
539 if (wait)
540 *wait = 0;
541 return 1;
542 }
543
544 /*
545 * If previous slice expired, start a new one otherwise renew/extend
546 * existing slice to make sure it is at least throtl_slice interval
547 * long since now.
548 */
549 if (throtl_slice_used(td, tg, rw))
550 throtl_start_new_slice(td, tg, rw);
551 else {
552 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
553 throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
554 }
555
556 if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
557 && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
558 if (wait)
559 *wait = 0;
560 return 1;
561 }
562
563 max_wait = max(bps_wait, iops_wait);
564
565 if (wait)
566 *wait = max_wait;
567
 /* Make sure the slice covers the whole wait period */
568 if (time_before(tg->slice_end[rw], jiffies + max_wait))
569 throtl_extend_slice(td, tg, rw, jiffies + max_wait);
570
571 return 0;
572}
573
/* Account @bio against @tg's slice counters and cgroup dispatch stats */
574static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
575{
576 bool rw = bio_data_dir(bio);
577 bool sync = bio->bi_rw & REQ_SYNC;
578
579 /* Charge the bio to the group */
580 tg->bytes_disp[rw] += bio->bi_size;
581 tg->io_disp[rw]++;
582
583 /*
584 * TODO: This will take blkg->stats_lock. Figure out a way
585 * to avoid this cost.
586 */
587 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
588}
589
/*
 * Queue @bio on @tg's per-direction list, pin the group with a bio
 * reference, bump the queue counters and make sure the group is on the
 * service tree.
 */
590static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
591 struct bio *bio)
592{
593 bool rw = bio_data_dir(bio);
594
595 bio_list_add(&tg->bio_lists[rw], bio);
596 /* Take a bio reference on tg */
597 throtl_ref_get_tg(tg);
598 tg->nr_queued[rw]++;
599 td->nr_queued[rw]++;
600 throtl_enqueue_tg(td, tg);
601}
602
/*
 * Recompute when @tg may next dispatch (min of the waits for its head
 * READ and WRITE bios) and re-position it on the service tree.
 */
603static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
604{
 /* -1 == ULONG_MAX, so an empty direction never wins the min() */
605 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
606 struct bio *bio;
607
608 if ((bio = bio_list_peek(&tg->bio_lists[READ])))
609 tg_may_dispatch(td, tg, bio, &read_wait);
610
611 if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
612 tg_may_dispatch(td, tg, bio, &write_wait);
613
614 min_wait = min(read_wait, write_wait);
615 disptime = jiffies + min_wait;
616
617 /* Update dispatch time */
618 throtl_dequeue_tg(td, tg);
619 tg->disptime = disptime;
620 throtl_enqueue_tg(td, tg);
621}
622
/*
 * Pop the head bio for direction @rw off @tg, charge it, mark it
 * REQ_THROTTLED (so blk_throtl_bio passes it through on resubmission)
 * and collect it on @bl for later submission outside the queue lock.
 */
623static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
624 bool rw, struct bio_list *bl)
625{
626 struct bio *bio;
627
628 bio = bio_list_pop(&tg->bio_lists[rw]);
629 tg->nr_queued[rw]--;
630 /* Drop bio reference on tg */
631 throtl_put_tg(tg);
632
633 BUG_ON(td->nr_queued[rw] <= 0);
634 td->nr_queued[rw]--;
635
636 throtl_charge_bio(tg, bio);
637 bio_list_add(bl, bio);
638 bio->bi_rw |= REQ_THROTTLED;
639
640 throtl_trim_slice(td, tg, rw);
641}
642
643static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
644 struct bio_list *bl)
645{
646 unsigned int nr_reads = 0, nr_writes = 0;
647 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
648 unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
649 struct bio *bio;
650
651 /* Try to dispatch 75% READS and 25% WRITES */
652
653 while ((bio = bio_list_peek(&tg->bio_lists[READ]))
654 && tg_may_dispatch(td, tg, bio, NULL)) {
655
656 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
657 nr_reads++;
658
659 if (nr_reads >= max_nr_reads)
660 break;
661 }
662
663 while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
664 && tg_may_dispatch(td, tg, bio, NULL)) {
665
666 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
667 nr_writes++;
668
669 if (nr_writes >= max_nr_writes)
670 break;
671 }
672
673 return nr_reads + nr_writes;
674}
675
/*
 * Walk the service tree in dispatch-time order, dispatching from each
 * due group; stop at the first group whose time has not come or once
 * throtl_quantum bios have been moved.  Returns bios dispatched.
 */
676static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
677{
678 unsigned int nr_disp = 0;
679 struct throtl_grp *tg;
680 struct throtl_rb_root *st = &td->tg_service_tree;
681
682 while (1) {
683 tg = throtl_rb_first(st);
684
685 if (!tg)
686 break;
687
688 if (time_before(jiffies, tg->disptime))
689 break;
690
691 throtl_dequeue_tg(td, tg);
692
693 nr_disp += throtl_dispatch_tg(td, tg, bl);
694
 /* Still has queued bios: put it back with a fresh disptime */
695 if (tg->nr_queued[0] || tg->nr_queued[1]) {
696 tg_update_disptime(td, tg);
697 throtl_enqueue_tg(td, tg);
698 }
699
700 if (nr_disp >= throtl_quantum)
701 break;
702 }
703
704 return nr_disp;
705}
706
/*
 * Apply any pending cgroup limit updates: re-evaluate the dispatch
 * time of every active group whose limits_changed flag is set, then
 * consume one notification from td->limits_changed.
 */
707static void throtl_process_limit_change(struct throtl_data *td)
708{
709 struct throtl_grp *tg;
710 struct hlist_node *pos, *n;
711
712 /*
713 * Make sure atomic_inc() effects from
714 * throtl_update_blkio_group_read_bps(), group of functions are
715 * visible.
716 * Is this required or smp_mb__after_atomic_inc() was sufficient
717 * after the atomic_inc().
718 */
719 smp_rmb();
720 if (!atomic_read(&td->limits_changed))
721 return;
722
723 throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
724
725 hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
726 /*
727 * Do I need an smp_rmb() here to make sure tg->limits_changed
728 * update is visible. I am relying on smp_rmb() at the
729 * beginning of function and not putting a new one here.
730 */
731
732 if (throtl_tg_on_rr(tg) && tg->limits_changed) {
733 throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
734 " riops=%u wiops=%u", tg->bps[READ],
735 tg->bps[WRITE], tg->iops[READ],
736 tg->iops[WRITE]);
737 tg_update_disptime(td, tg);
738 tg->limits_changed = false;
739 }
740 }
741
742 smp_mb__before_atomic_dec();
743 atomic_dec(&td->limits_changed);
744 smp_mb__after_atomic_dec();
745}
746
747/* Dispatch throttled bios. Should be called without queue lock held. */
748static int throtl_dispatch(struct request_queue *q)
749{
750 struct throtl_data *td = q->td;
751 unsigned int nr_disp = 0;
752 struct bio_list bio_list_on_stack;
753 struct bio *bio;
754
755 spin_lock_irq(q->queue_lock);
756
757 throtl_process_limit_change(td);
758
759 if (!total_nr_queued(td))
760 goto out;
761
762 bio_list_init(&bio_list_on_stack);
763
764 throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
765 total_nr_queued(td), td->nr_queued[READ],
766 td->nr_queued[WRITE]);
767
768 nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
769
770 if (nr_disp)
771 throtl_log(td, "bios disp=%u", nr_disp);
772
773 throtl_schedule_next_dispatch(td);
774out:
775 spin_unlock_irq(q->queue_lock);
776
777 /*
778 * If we dispatched some requests, unplug the queue to make sure
779 * immediate dispatch
780 */
781 if (nr_disp) {
782 while((bio = bio_list_pop(&bio_list_on_stack)))
783 generic_make_request(bio);
784 blk_unplug(q);
785 }
786 return nr_disp;
787}
788
789void blk_throtl_work(struct work_struct *work)
790{
791 struct throtl_data *td = container_of(work, struct throtl_data,
792 throtl_work.work);
793 struct request_queue *q = td->queue;
794
795 throtl_dispatch(q);
796}
797
798/* Call with queue lock held */
799void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
800{
801
802 struct throtl_data *td = q->td;
803 struct delayed_work *dwork = &td->throtl_work;
804
 /* Only worth arming the worker while bios are actually queued */
805 if (total_nr_queued(td) > 0) {
806 /*
807 * We might have a work scheduled to be executed in future.
808 * Cancel that and schedule a new one.
809 */
810 __cancel_delayed_work(dwork);
811 kblockd_schedule_delayed_work(q, dwork, delay);
812 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
813 delay, jiffies);
814 }
815}
816EXPORT_SYMBOL(throtl_schedule_delayed_work);
817
/*
 * Unhash @tg from td->tg_list and drop the creation-time reference.
 * The group is actually freed once any bio references also go away.
 */
818static void
819throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
820{
821 /* Something wrong if we are trying to remove same group twice */
822 BUG_ON(hlist_unhashed(&tg->tg_node));
823
824 hlist_del_init(&tg->tg_node);
825
826 /*
827 * Put the reference taken at the time of creation so that when all
828 * queues are gone, group can be destroyed.
829 */
830 throtl_put_tg(tg);
831 td->nr_undestroyed_grps--;
832}
833
/* Tear down all groups on @td that the cgroup side has not yet claimed */
834static void throtl_release_tgs(struct throtl_data *td)
835{
836 struct hlist_node *pos, *n;
837 struct throtl_grp *tg;
838
839 hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
840 /*
841 * If cgroup removal path got to blk_group first and removed
842 * it from cgroup list, then it will take care of destroying
843 * the throtl group also.
844 */
845 if (!blkiocg_del_blkio_group(&tg->blkg))
846 throtl_destroy_tg(td, tg);
847 }
848}
849
/* Release the per-queue throttle data allocated by blk_throtl_init() */
850static void throtl_td_free(struct throtl_data *td)
851{
852 kfree(td);
853}
854
855/*
856 * Blk cgroup controller notification saying that blkio_group object is being
857 * delinked as associated cgroup object is going away. That also means that
858 * no new IO will come in this group. So get rid of this group as soon as
859 * any pending IO in the group is finished.
860 *
861 * This function is called under rcu_read_lock(). key is the rcu protected
862 * pointer. That means "key" is a valid throtl_data pointer as long as we
863 * hold the rcu read lock.
864 *
865 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
866 * it should not be NULL as even if queue was going away, cgroup deletion
867 * path got to it first.
868 */
869void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
870{
871 unsigned long flags;
872 struct throtl_data *td = key;
873
874 spin_lock_irqsave(td->queue->queue_lock, flags);
875 throtl_destroy_tg(td, tg_of_blkg(blkg));
876 spin_unlock_irqrestore(td->queue->queue_lock, flags);
877}
878
879/*
880 * For all update functions, key should be a valid pointer because these
881 * update functions are called under blkcg_lock, that means, blkg is
882 * valid and in turn key is valid. queue exit path can not race because
883 * of blkcg_lock
884 *
885 * Can not take queue lock in update functions as queue lock under blkcg_lock
886 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
887 */
/* cgroup callback: new READ bytes-per-second limit for @blkg's group */
888static void throtl_update_blkio_group_read_bps(void *key,
889 struct blkio_group *blkg, u64 read_bps)
890{
891 struct throtl_data *td = key;
892
893 tg_of_blkg(blkg)->bps[READ] = read_bps;
894 /* Make sure read_bps is updated before setting limits_changed */
895 smp_wmb();
896 tg_of_blkg(blkg)->limits_changed = true;
897
898 /* Make sure tg->limits_changed is updated before td->limits_changed */
899 smp_mb__before_atomic_inc();
900 atomic_inc(&td->limits_changed);
901 smp_mb__after_atomic_inc();
902
903 /* Schedule a work now to process the limit change */
904 throtl_schedule_delayed_work(td->queue, 0);
905}
906
/* cgroup callback: new WRITE bps limit (barrier dance mirrors the
 * read_bps variant above) */
907static void throtl_update_blkio_group_write_bps(void *key,
908 struct blkio_group *blkg, u64 write_bps)
909{
910 struct throtl_data *td = key;
911
912 tg_of_blkg(blkg)->bps[WRITE] = write_bps;
913 smp_wmb();
914 tg_of_blkg(blkg)->limits_changed = true;
915 smp_mb__before_atomic_inc();
916 atomic_inc(&td->limits_changed);
917 smp_mb__after_atomic_inc();
918 throtl_schedule_delayed_work(td->queue, 0);
919}
920
/* cgroup callback: new READ IOPS limit */
921static void throtl_update_blkio_group_read_iops(void *key,
922 struct blkio_group *blkg, unsigned int read_iops)
923{
924 struct throtl_data *td = key;
925
926 tg_of_blkg(blkg)->iops[READ] = read_iops;
927 smp_wmb();
928 tg_of_blkg(blkg)->limits_changed = true;
929 smp_mb__before_atomic_inc();
930 atomic_inc(&td->limits_changed);
931 smp_mb__after_atomic_inc();
932 throtl_schedule_delayed_work(td->queue, 0);
933}
934
/* cgroup callback: new WRITE IOPS limit */
935static void throtl_update_blkio_group_write_iops(void *key,
936 struct blkio_group *blkg, unsigned int write_iops)
937{
938 struct throtl_data *td = key;
939
940 tg_of_blkg(blkg)->iops[WRITE] = write_iops;
941 smp_wmb();
942 tg_of_blkg(blkg)->limits_changed = true;
943 smp_mb__before_atomic_inc();
944 atomic_inc(&td->limits_changed);
945 smp_mb__after_atomic_inc();
946 throtl_schedule_delayed_work(td->queue, 0);
947}
948
949void throtl_shutdown_timer_wq(struct request_queue *q)
950{
951 struct throtl_data *td = q->td;
952
953 cancel_delayed_work_sync(&td->throtl_work);
954}
955
/* Hooks registered with the blkio-cgroup core for the THROTL policy */
956static struct blkio_policy_type blkio_policy_throtl = {
957 .ops = {
958 .blkio_unlink_group_fn = throtl_unlink_blkio_group,
959 .blkio_update_group_read_bps_fn =
960 throtl_update_blkio_group_read_bps,
961 .blkio_update_group_write_bps_fn =
962 throtl_update_blkio_group_write_bps,
963 .blkio_update_group_read_iops_fn =
964 throtl_update_blkio_group_read_iops,
965 .blkio_update_group_write_iops_fn =
966 throtl_update_blkio_group_write_iops,
967 },
968 .plid = BLKIO_POLICY_THROTL,
969};
970
971int blk_throtl_bio(struct request_queue *q, struct bio **biop)
972{
973 struct throtl_data *td = q->td;
974 struct throtl_grp *tg;
975 struct bio *bio = *biop;
976 bool rw = bio_data_dir(bio), update_disptime = true;
977
978 if (bio->bi_rw & REQ_THROTTLED) {
979 bio->bi_rw &= ~REQ_THROTTLED;
980 return 0;
981 }
982
983 spin_lock_irq(q->queue_lock);
984 tg = throtl_get_tg(td);
985
986 if (tg->nr_queued[rw]) {
987 /*
988 * There is already another bio queued in same dir. No
989 * need to update dispatch time.
990 * Still update the disptime if rate limits on this group
991 * were changed.
992 */
993 if (!tg->limits_changed)
994 update_disptime = false;
995 else
996 tg->limits_changed = false;
997
998 goto queue_bio;
999 }
1000
1001 /* Bio is with-in rate limit of group */
1002 if (tg_may_dispatch(td, tg, bio, NULL)) {
1003 throtl_charge_bio(tg, bio);
1004 goto out;
1005 }
1006
1007queue_bio:
1008 throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
1009 " iodisp=%u iops=%u queued=%d/%d",
1010 rw == READ ? 'R' : 'W',
1011 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1012 tg->io_disp[rw], tg->iops[rw],
1013 tg->nr_queued[READ], tg->nr_queued[WRITE]);
1014
1015 throtl_add_bio_tg(q->td, tg, bio);
1016 *biop = NULL;
1017
1018 if (update_disptime) {
1019 tg_update_disptime(td, tg);
1020 throtl_schedule_next_dispatch(td);
1021 }
1022
1023out:
1024 spin_unlock_irq(q->queue_lock);
1025 return 0;
1026}
1027
/*
 * Allocate and attach per-queue throttle state (q->td), set up the
 * statically-embedded root group with unlimited rates, and register it
 * with the root blkio cgroup.  Returns 0 or -ENOMEM.
 */
1028int blk_throtl_init(struct request_queue *q)
1029{
1030 struct throtl_data *td;
1031 struct throtl_grp *tg;
1032
1033 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1034 if (!td)
1035 return -ENOMEM;
1036
1037 INIT_HLIST_HEAD(&td->tg_list);
1038 td->tg_service_tree = THROTL_RB_ROOT;
1039 atomic_set(&td->limits_changed, 0);
1040
1041 /* Init root group */
1042 tg = &td->root_tg;
1043 INIT_HLIST_NODE(&tg->tg_node);
1044 RB_CLEAR_NODE(&tg->rb_node);
1045 bio_list_init(&tg->bio_lists[0]);
1046 bio_list_init(&tg->bio_lists[1]);
1047
1048 /* Practically unlimited BW */
1049 tg->bps[0] = tg->bps[1] = -1;
1050 tg->iops[0] = tg->iops[1] = -1;
1051
1052 /*
1053 * Set root group reference to 2. One reference will be dropped when
1054 * all groups on tg_list are being deleted during queue exit. Other
1055 * reference will remain there as we don't want to delete this group
1056 * as it is statically allocated and gets destroyed when throtl_data
1057 * goes away.
1058 */
1059 atomic_set(&tg->ref, 2);
1060 hlist_add_head(&tg->tg_node, &td->tg_list);
1061 td->nr_undestroyed_grps++;
1062
1063 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1064
1065 rcu_read_lock();
1066 blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
1067 0, BLKIO_POLICY_THROTL);
1068 rcu_read_unlock();
1069
1070 /* Attach throtl data to request queue */
1071 td->queue = q;
1072 q->td = td;
1073 return 0;
1074}
1075
/*
 * Tear down q->td on queue exit: stop the worker, release all groups
 * under the queue lock, wait out RCU readers if the cgroup side still
 * holds groups, then free the throttle data.
 */
1076void blk_throtl_exit(struct request_queue *q)
1077{
1078 struct throtl_data *td = q->td;
1079 bool wait = false;
1080
1081 BUG_ON(!td);
1082
1083 throtl_shutdown_timer_wq(q);
1084
1085 spin_lock_irq(q->queue_lock);
1086 throtl_release_tgs(td);
1087
1088 /* If there are other groups */
1089 if (td->nr_undestroyed_grps > 0)
1090 wait = true;
1091
1092 spin_unlock_irq(q->queue_lock);
1093
1094 /*
1095 * Wait for tg->blkg->key accessors to exit their grace periods.
1096 * Do this wait only if there are other undestroyed groups out
1097 * there (other than root group). This can happen if cgroup deletion
1098 * path claimed the responsibility of cleaning up a group before
1099 * queue cleanup code get to the group.
1100 *
1101 * Do not call synchronize_rcu() unconditionally as there are drivers
1102 * which create/delete request queue hundreds of times during scan/boot
1103 * and synchronize_rcu() can take significant time and slow down boot.
1104 */
1105 if (wait)
1106 synchronize_rcu();
1107
1108 /*
1109 * Just being safe to make sure after previous flush if some body did
1110 * update limits through cgroup and another work got queued, cancel
1111 * it.
1112 */
1113 throtl_shutdown_timer_wq(q);
1114 throtl_td_free(td);
1115}
1116
/* Module init: register the throttling policy with the blkio core */
1117static int __init throtl_init(void)
1118{
1119 blkio_policy_register(&blkio_policy_throtl);
1120 return 0;
1121}
1122
1123module_init(throtl_init);
diff --git a/block/blk.h b/block/blk.h
index d6b911ac002c..1e675e5ade02 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
51 */ 51 */
52#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) 52#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
53 53
54struct request *blk_do_flush(struct request_queue *q, struct request *rq);
55
54static inline struct request *__elv_next_request(struct request_queue *q) 56static inline struct request *__elv_next_request(struct request_queue *q)
55{ 57{
56 struct request *rq; 58 struct request *rq;
@@ -58,7 +60,11 @@ static inline struct request *__elv_next_request(struct request_queue *q)
58 while (1) { 60 while (1) {
59 while (!list_empty(&q->queue_head)) { 61 while (!list_empty(&q->queue_head)) {
60 rq = list_entry_rq(q->queue_head.next); 62 rq = list_entry_rq(q->queue_head.next);
61 if (blk_do_ordered(q, &rq)) 63 if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
64 rq == &q->flush_rq)
65 return rq;
66 rq = blk_do_flush(q, rq);
67 if (rq)
62 return rq; 68 return rq;
63 } 69 }
64 70
@@ -110,10 +116,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
110 116
111int blk_dev_init(void); 117int blk_dev_init(void);
112 118
113void elv_quiesce_start(struct request_queue *q);
114void elv_quiesce_end(struct request_queue *q);
115
116
117/* 119/*
118 * Return the threshold (number of used requests) at which the queue is 120 * Return the threshold (number of used requests) at which the queue is
119 * considered to be congested. It include a little hysteresis to keep the 121 * considered to be congested. It include a little hysteresis to keep the
@@ -132,14 +134,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
132 return q->nr_congestion_off; 134 return q->nr_congestion_off;
133} 135}
134 136
135#if defined(CONFIG_BLK_DEV_INTEGRITY)
136
137#define rq_for_each_integrity_segment(bvl, _rq, _iter) \
138 __rq_for_each_bio(_iter.bio, _rq) \
139 bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
140
141#endif /* BLK_DEV_INTEGRITY */
142
143static inline int blk_cpu_to_group(int cpu) 137static inline int blk_cpu_to_group(int cpu)
144{ 138{
145 int group = NR_CPUS; 139 int group = NR_CPUS;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9eba291eb6fd..4cd59b0d7c15 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ enum wl_prio_t {
160 BE_WORKLOAD = 0, 160 BE_WORKLOAD = 0,
161 RT_WORKLOAD = 1, 161 RT_WORKLOAD = 1,
162 IDLE_WORKLOAD = 2, 162 IDLE_WORKLOAD = 2,
163 CFQ_PRIO_NR,
163}; 164};
164 165
165/* 166/*
@@ -184,10 +185,19 @@ struct cfq_group {
184 /* number of cfqq currently on this group */ 185 /* number of cfqq currently on this group */
185 int nr_cfqq; 186 int nr_cfqq;
186 187
187 /* Per group busy queus average. Useful for workload slice calc. */
188 unsigned int busy_queues_avg[2];
189 /* 188 /*
190 * rr lists of queues with requests, onle rr for each priority class. 189 * Per group busy queus average. Useful for workload slice calc. We
190 * create the array for each prio class but at run time it is used
191 * only for RT and BE class and slot for IDLE class remains unused.
192 * This is primarily done to avoid confusion and a gcc warning.
193 */
194 unsigned int busy_queues_avg[CFQ_PRIO_NR];
195 /*
196 * rr lists of queues with requests. We maintain service trees for
197 * RT and BE classes. These trees are subdivided in subclasses
198 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
199 * class there is no subclassification and all the cfq queues go on
200 * a single tree service_tree_idle.
191 * Counts are embedded in the cfq_rb_root 201 * Counts are embedded in the cfq_rb_root
192 */ 202 */
193 struct cfq_rb_root service_trees[2][3]; 203 struct cfq_rb_root service_trees[2][3];
@@ -221,7 +231,6 @@ struct cfq_data {
221 enum wl_type_t serving_type; 231 enum wl_type_t serving_type;
222 unsigned long workload_expires; 232 unsigned long workload_expires;
223 struct cfq_group *serving_group; 233 struct cfq_group *serving_group;
224 bool noidle_tree_requires_idle;
225 234
226 /* 235 /*
227 * Each priority tree is sorted by next_request position. These 236 * Each priority tree is sorted by next_request position. These
@@ -977,8 +986,8 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
977 return NULL; 986 return NULL;
978} 987}
979 988
980void 989void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
981cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight) 990 unsigned int weight)
982{ 991{
983 cfqg_of_blkg(blkg)->weight = weight; 992 cfqg_of_blkg(blkg)->weight = weight;
984} 993}
@@ -2180,7 +2189,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2180 slice = max_t(unsigned, slice, CFQ_MIN_TT); 2189 slice = max_t(unsigned, slice, CFQ_MIN_TT);
2181 cfq_log(cfqd, "workload slice:%d", slice); 2190 cfq_log(cfqd, "workload slice:%d", slice);
2182 cfqd->workload_expires = jiffies + slice; 2191 cfqd->workload_expires = jiffies + slice;
2183 cfqd->noidle_tree_requires_idle = false;
2184} 2192}
2185 2193
2186static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) 2194static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
@@ -3177,7 +3185,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3177 if (cfqq->queued[0] + cfqq->queued[1] >= 4) 3185 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3178 cfq_mark_cfqq_deep(cfqq); 3186 cfq_mark_cfqq_deep(cfqq);
3179 3187
3180 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 3188 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3189 enable_idle = 0;
3190 else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3181 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) 3191 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3182 enable_idle = 0; 3192 enable_idle = 0;
3183 else if (sample_valid(cic->ttime_samples)) { 3193 else if (sample_valid(cic->ttime_samples)) {
@@ -3494,17 +3504,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3494 cfq_slice_expired(cfqd, 1); 3504 cfq_slice_expired(cfqd, 1);
3495 else if (sync && cfqq_empty && 3505 else if (sync && cfqq_empty &&
3496 !cfq_close_cooperator(cfqd, cfqq)) { 3506 !cfq_close_cooperator(cfqd, cfqq)) {
3497 cfqd->noidle_tree_requires_idle |= 3507 cfq_arm_slice_timer(cfqd);
3498 !(rq->cmd_flags & REQ_NOIDLE);
3499 /*
3500 * Idling is enabled for SYNC_WORKLOAD.
3501 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
3502 * only if we processed at least one !REQ_NOIDLE request
3503 */
3504 if (cfqd->serving_type == SYNC_WORKLOAD
3505 || cfqd->noidle_tree_requires_idle
3506 || cfqq->cfqg->nr_cfqq == 1)
3507 cfq_arm_slice_timer(cfqd);
3508 } 3508 }
3509 } 3509 }
3510 3510
@@ -4090,6 +4090,7 @@ static struct blkio_policy_type blkio_policy_cfq = {
4090 .blkio_unlink_group_fn = cfq_unlink_blkio_group, 4090 .blkio_unlink_group_fn = cfq_unlink_blkio_group,
4091 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight, 4091 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4092 }, 4092 },
4093 .plid = BLKIO_POLICY_PROP,
4093}; 4094};
4094#else 4095#else
4095static struct blkio_policy_type blkio_policy_cfq; 4096static struct blkio_policy_type blkio_policy_cfq;
diff --git a/block/cfq.h b/block/cfq.h
index 93448e5a2e41..54a6d90f8e8c 100644
--- a/block/cfq.h
+++ b/block/cfq.h
@@ -69,7 +69,7 @@ static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
69 69
70static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 70static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
71 struct blkio_group *blkg, void *key, dev_t dev) { 71 struct blkio_group *blkg, void *key, dev_t dev) {
72 blkiocg_add_blkio_group(blkcg, blkg, key, dev); 72 blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
73} 73}
74 74
75static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) 75static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
diff --git a/block/elevator.c b/block/elevator.c
index 4e11559aa2b0..282e8308f7e2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -617,8 +617,6 @@ void elv_quiesce_end(struct request_queue *q)
617 617
618void elv_insert(struct request_queue *q, struct request *rq, int where) 618void elv_insert(struct request_queue *q, struct request *rq, int where)
619{ 619{
620 struct list_head *pos;
621 unsigned ordseq;
622 int unplug_it = 1; 620 int unplug_it = 1;
623 621
624 trace_block_rq_insert(q, rq); 622 trace_block_rq_insert(q, rq);
@@ -626,9 +624,16 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
626 rq->q = q; 624 rq->q = q;
627 625
628 switch (where) { 626 switch (where) {
627 case ELEVATOR_INSERT_REQUEUE:
628 /*
629 * Most requeues happen because of a busy condition,
630 * don't force unplug of the queue for that case.
631 * Clear unplug_it and fall through.
632 */
633 unplug_it = 0;
634
629 case ELEVATOR_INSERT_FRONT: 635 case ELEVATOR_INSERT_FRONT:
630 rq->cmd_flags |= REQ_SOFTBARRIER; 636 rq->cmd_flags |= REQ_SOFTBARRIER;
631
632 list_add(&rq->queuelist, &q->queue_head); 637 list_add(&rq->queuelist, &q->queue_head);
633 break; 638 break;
634 639
@@ -668,36 +673,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
668 q->elevator->ops->elevator_add_req_fn(q, rq); 673 q->elevator->ops->elevator_add_req_fn(q, rq);
669 break; 674 break;
670 675
671 case ELEVATOR_INSERT_REQUEUE:
672 /*
673 * If ordered flush isn't in progress, we do front
674 * insertion; otherwise, requests should be requeued
675 * in ordseq order.
676 */
677 rq->cmd_flags |= REQ_SOFTBARRIER;
678
679 /*
680 * Most requeues happen because of a busy condition,
681 * don't force unplug of the queue for that case.
682 */
683 unplug_it = 0;
684
685 if (q->ordseq == 0) {
686 list_add(&rq->queuelist, &q->queue_head);
687 break;
688 }
689
690 ordseq = blk_ordered_req_seq(rq);
691
692 list_for_each(pos, &q->queue_head) {
693 struct request *pos_rq = list_entry_rq(pos);
694 if (ordseq <= blk_ordered_req_seq(pos_rq))
695 break;
696 }
697
698 list_add_tail(&rq->queuelist, pos);
699 break;
700
701 default: 676 default:
702 printk(KERN_ERR "%s: bad insertion point %d\n", 677 printk(KERN_ERR "%s: bad insertion point %d\n",
703 __func__, where); 678 __func__, where);
@@ -716,26 +691,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
716void __elv_add_request(struct request_queue *q, struct request *rq, int where, 691void __elv_add_request(struct request_queue *q, struct request *rq, int where,
717 int plug) 692 int plug)
718{ 693{
719 if (q->ordcolor)
720 rq->cmd_flags |= REQ_ORDERED_COLOR;
721
722 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 694 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
723 /* 695 /* barriers are scheduling boundary, update end_sector */
724 * toggle ordered color
725 */
726 if (rq->cmd_flags & REQ_HARDBARRIER)
727 q->ordcolor ^= 1;
728
729 /*
730 * barriers implicitly indicate back insertion
731 */
732 if (where == ELEVATOR_INSERT_SORT)
733 where = ELEVATOR_INSERT_BACK;
734
735 /*
736 * this request is scheduling boundary, update
737 * end_sector
738 */
739 if (rq->cmd_type == REQ_TYPE_FS || 696 if (rq->cmd_type == REQ_TYPE_FS ||
740 (rq->cmd_flags & REQ_DISCARD)) { 697 (rq->cmd_flags & REQ_DISCARD)) {
741 q->end_sector = rq_end_sector(rq); 698 q->end_sector = rq_end_sector(rq);
@@ -855,24 +812,6 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
855 e->ops->elevator_completed_req_fn) 812 e->ops->elevator_completed_req_fn)
856 e->ops->elevator_completed_req_fn(q, rq); 813 e->ops->elevator_completed_req_fn(q, rq);
857 } 814 }
858
859 /*
860 * Check if the queue is waiting for fs requests to be
861 * drained for flush sequence.
862 */
863 if (unlikely(q->ordseq)) {
864 struct request *next = NULL;
865
866 if (!list_empty(&q->queue_head))
867 next = list_entry_rq(q->queue_head.next);
868
869 if (!queue_in_flight(q) &&
870 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
871 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
872 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
873 __blk_run_queue(q);
874 }
875 }
876} 815}
877 816
878#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) 817#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
diff --git a/block/genhd.c b/block/genhd.c
index 59a2db6fecef..a8adf96a4b41 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -22,9 +22,7 @@
22#include "blk.h" 22#include "blk.h"
23 23
24static DEFINE_MUTEX(block_class_lock); 24static DEFINE_MUTEX(block_class_lock);
25#ifndef CONFIG_SYSFS_DEPRECATED
26struct kobject *block_depr; 25struct kobject *block_depr;
27#endif
28 26
29/* for extended dynamic devt allocation, currently only one major is used */ 27/* for extended dynamic devt allocation, currently only one major is used */
30#define MAX_EXT_DEVT (1 << MINORBITS) 28#define MAX_EXT_DEVT (1 << MINORBITS)
@@ -541,13 +539,15 @@ void add_disk(struct gendisk *disk)
541 disk->major = MAJOR(devt); 539 disk->major = MAJOR(devt);
542 disk->first_minor = MINOR(devt); 540 disk->first_minor = MINOR(devt);
543 541
542 /* Register BDI before referencing it from bdev */
543 bdi = &disk->queue->backing_dev_info;
544 bdi_register_dev(bdi, disk_devt(disk));
545
544 blk_register_region(disk_devt(disk), disk->minors, NULL, 546 blk_register_region(disk_devt(disk), disk->minors, NULL,
545 exact_match, exact_lock, disk); 547 exact_match, exact_lock, disk);
546 register_disk(disk); 548 register_disk(disk);
547 blk_register_queue(disk); 549 blk_register_queue(disk);
548 550
549 bdi = &disk->queue->backing_dev_info;
550 bdi_register_dev(bdi, disk_devt(disk));
551 retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, 551 retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
552 "bdi"); 552 "bdi");
553 WARN_ON(retval); 553 WARN_ON(retval);
@@ -642,6 +642,7 @@ void __init printk_all_partitions(void)
642 struct hd_struct *part; 642 struct hd_struct *part;
643 char name_buf[BDEVNAME_SIZE]; 643 char name_buf[BDEVNAME_SIZE];
644 char devt_buf[BDEVT_SIZE]; 644 char devt_buf[BDEVT_SIZE];
645 u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
645 646
646 /* 647 /*
647 * Don't show empty devices or things that have been 648 * Don't show empty devices or things that have been
@@ -660,10 +661,14 @@ void __init printk_all_partitions(void)
660 while ((part = disk_part_iter_next(&piter))) { 661 while ((part = disk_part_iter_next(&piter))) {
661 bool is_part0 = part == &disk->part0; 662 bool is_part0 = part == &disk->part0;
662 663
663 printk("%s%s %10llu %s", is_part0 ? "" : " ", 664 uuid[0] = 0;
665 if (part->info)
666 part_unpack_uuid(part->info->uuid, uuid);
667
668 printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
664 bdevt_str(part_devt(part), devt_buf), 669 bdevt_str(part_devt(part), devt_buf),
665 (unsigned long long)part->nr_sects >> 1, 670 (unsigned long long)part->nr_sects >> 1,
666 disk_name(disk, part->partno, name_buf)); 671 disk_name(disk, part->partno, name_buf), uuid);
667 if (is_part0) { 672 if (is_part0) {
668 if (disk->driverfs_dev != NULL && 673 if (disk->driverfs_dev != NULL &&
669 disk->driverfs_dev->driver != NULL) 674 disk->driverfs_dev->driver != NULL)
@@ -803,10 +808,9 @@ static int __init genhd_device_init(void)
803 808
804 register_blkdev(BLOCK_EXT_MAJOR, "blkext"); 809 register_blkdev(BLOCK_EXT_MAJOR, "blkext");
805 810
806#ifndef CONFIG_SYSFS_DEPRECATED
807 /* create top-level block dir */ 811 /* create top-level block dir */
808 block_depr = kobject_create_and_add("block", NULL); 812 if (!sysfs_deprecated)
809#endif 813 block_depr = kobject_create_and_add("block", NULL);
810 return 0; 814 return 0;
811} 815}
812 816
@@ -925,8 +929,15 @@ static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
925{ 929{
926 struct disk_part_tbl *ptbl = 930 struct disk_part_tbl *ptbl =
927 container_of(head, struct disk_part_tbl, rcu_head); 931 container_of(head, struct disk_part_tbl, rcu_head);
932 struct gendisk *disk = ptbl->disk;
933 struct request_queue *q = disk->queue;
934 unsigned long flags;
928 935
929 kfree(ptbl); 936 kfree(ptbl);
937
938 spin_lock_irqsave(q->queue_lock, flags);
939 elv_quiesce_end(q);
940 spin_unlock_irqrestore(q->queue_lock, flags);
930} 941}
931 942
932/** 943/**
@@ -944,11 +955,17 @@ static void disk_replace_part_tbl(struct gendisk *disk,
944 struct disk_part_tbl *new_ptbl) 955 struct disk_part_tbl *new_ptbl)
945{ 956{
946 struct disk_part_tbl *old_ptbl = disk->part_tbl; 957 struct disk_part_tbl *old_ptbl = disk->part_tbl;
958 struct request_queue *q = disk->queue;
947 959
948 rcu_assign_pointer(disk->part_tbl, new_ptbl); 960 rcu_assign_pointer(disk->part_tbl, new_ptbl);
949 961
950 if (old_ptbl) { 962 if (old_ptbl) {
951 rcu_assign_pointer(old_ptbl->last_lookup, NULL); 963 rcu_assign_pointer(old_ptbl->last_lookup, NULL);
964
965 spin_lock_irq(q->queue_lock);
966 elv_quiesce_start(q);
967 spin_unlock_irq(q->queue_lock);
968
952 call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb); 969 call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
953 } 970 }
954} 971}
@@ -989,6 +1006,7 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
989 return -ENOMEM; 1006 return -ENOMEM;
990 1007
991 new_ptbl->len = target; 1008 new_ptbl->len = target;
1009 new_ptbl->disk = disk;
992 1010
993 for (i = 0; i < len; i++) 1011 for (i = 0; i < len; i++)
994 rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]); 1012 rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
@@ -1004,6 +1022,7 @@ static void disk_release(struct device *dev)
1004 kfree(disk->random); 1022 kfree(disk->random);
1005 disk_replace_part_tbl(disk, NULL); 1023 disk_replace_part_tbl(disk, NULL);
1006 free_part_stats(&disk->part0); 1024 free_part_stats(&disk->part0);
1025 free_part_info(&disk->part0);
1007 kfree(disk); 1026 kfree(disk);
1008} 1027}
1009struct class block_class = { 1028struct class block_class = {
diff --git a/block/ioctl.c b/block/ioctl.c
index d8052f0dabd3..d724ceb1d465 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -62,7 +62,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
62 62
63 /* all seems OK */ 63 /* all seems OK */
64 part = add_partition(disk, partno, start, length, 64 part = add_partition(disk, partno, start, length,
65 ADDPART_FLAG_NONE); 65 ADDPART_FLAG_NONE, NULL);
66 mutex_unlock(&bdev->bd_mutex); 66 mutex_unlock(&bdev->bd_mutex);
67 return IS_ERR(part) ? PTR_ERR(part) : 0; 67 return IS_ERR(part) ? PTR_ERR(part) : 0;
68 case BLKPG_DEL_PARTITION: 68 case BLKPG_DEL_PARTITION:
@@ -116,7 +116,7 @@ static int blkdev_reread_part(struct block_device *bdev)
116static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, 116static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
117 uint64_t len, int secure) 117 uint64_t len, int secure)
118{ 118{
119 unsigned long flags = BLKDEV_IFL_WAIT; 119 unsigned long flags = 0;
120 120
121 if (start & 511) 121 if (start & 511)
122 return -EINVAL; 122 return -EINVAL;
@@ -128,7 +128,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
128 if (start + len > (bdev->bd_inode->i_size >> 9)) 128 if (start + len > (bdev->bd_inode->i_size >> 9))
129 return -EINVAL; 129 return -EINVAL;
130 if (secure) 130 if (secure)
131 flags |= BLKDEV_IFL_SECURE; 131 flags |= BLKDEV_DISCARD_SECURE;
132 return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags); 132 return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
133} 133}
134 134
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 14d18bf81255..d05387d1e14b 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3335,7 +3335,7 @@ void ata_sff_port_init(struct ata_port *ap)
3335 3335
3336int __init ata_sff_init(void) 3336int __init ata_sff_init(void)
3337{ 3337{
3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); 3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3339 if (!ata_sff_wq) 3339 if (!ata_sff_wq)
3340 return -ENOMEM; 3340 return -ENOMEM;
3341 3341
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ef38aff737eb..fd96345bc35c 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -71,7 +71,6 @@ config PREVENT_FIRMWARE_BUILD
71 71
72config FW_LOADER 72config FW_LOADER
73 tristate "Userspace firmware loading support" if EMBEDDED 73 tristate "Userspace firmware loading support" if EMBEDDED
74 depends on HOTPLUG
75 default y 74 default y
76 ---help--- 75 ---help---
77 This option is provided for the case where no in-kernel-tree modules 76 This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index c12c7f2f2a6f..5f51c3b4451e 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -19,7 +19,5 @@ obj-$(CONFIG_MODULES) += module.o
19endif 19endif
20obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o 20obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
21 21
22ifeq ($(CONFIG_DEBUG_DRIVER),y) 22ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
23EXTRA_CFLAGS += -DDEBUG
24endif
25 23
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index eb1b7fa20dce..33c270a64db7 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -440,22 +440,6 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev)
440 } 440 }
441} 441}
442 442
443#ifdef CONFIG_SYSFS_DEPRECATED
444static int make_deprecated_bus_links(struct device *dev)
445{
446 return sysfs_create_link(&dev->kobj,
447 &dev->bus->p->subsys.kobj, "bus");
448}
449
450static void remove_deprecated_bus_links(struct device *dev)
451{
452 sysfs_remove_link(&dev->kobj, "bus");
453}
454#else
455static inline int make_deprecated_bus_links(struct device *dev) { return 0; }
456static inline void remove_deprecated_bus_links(struct device *dev) { }
457#endif
458
459/** 443/**
460 * bus_add_device - add device to bus 444 * bus_add_device - add device to bus
461 * @dev: device being added 445 * @dev: device being added
@@ -482,15 +466,10 @@ int bus_add_device(struct device *dev)
482 &dev->bus->p->subsys.kobj, "subsystem"); 466 &dev->bus->p->subsys.kobj, "subsystem");
483 if (error) 467 if (error)
484 goto out_subsys; 468 goto out_subsys;
485 error = make_deprecated_bus_links(dev);
486 if (error)
487 goto out_deprecated;
488 klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); 469 klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
489 } 470 }
490 return 0; 471 return 0;
491 472
492out_deprecated:
493 sysfs_remove_link(&dev->kobj, "subsystem");
494out_subsys: 473out_subsys:
495 sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); 474 sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
496out_id: 475out_id:
@@ -530,7 +509,6 @@ void bus_remove_device(struct device *dev)
530{ 509{
531 if (dev->bus) { 510 if (dev->bus) {
532 sysfs_remove_link(&dev->kobj, "subsystem"); 511 sysfs_remove_link(&dev->kobj, "subsystem");
533 remove_deprecated_bus_links(dev);
534 sysfs_remove_link(&dev->bus->p->devices_kset->kobj, 512 sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
535 dev_name(dev)); 513 dev_name(dev));
536 device_remove_attrs(dev->bus, dev); 514 device_remove_attrs(dev->bus, dev);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8e231d05b400..9c63a5687d69 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -184,9 +184,9 @@ int __class_register(struct class *cls, struct lock_class_key *key)
184 if (!cls->dev_kobj) 184 if (!cls->dev_kobj)
185 cls->dev_kobj = sysfs_dev_char_kobj; 185 cls->dev_kobj = sysfs_dev_char_kobj;
186 186
187#if defined(CONFIG_SYSFS_DEPRECATED) && defined(CONFIG_BLOCK) 187#if defined(CONFIG_BLOCK)
188 /* let the block class directory show up in the root of sysfs */ 188 /* let the block class directory show up in the root of sysfs */
189 if (cls != &block_class) 189 if (!sysfs_deprecated || cls != &block_class)
190 cp->class_subsys.kobj.kset = class_kset; 190 cp->class_subsys.kobj.kset = class_kset;
191#else 191#else
192 cp->class_subsys.kobj.kset = class_kset; 192 cp->class_subsys.kobj.kset = class_kset;
@@ -276,25 +276,6 @@ void class_destroy(struct class *cls)
276 class_unregister(cls); 276 class_unregister(cls);
277} 277}
278 278
279#ifdef CONFIG_SYSFS_DEPRECATED
280char *make_class_name(const char *name, struct kobject *kobj)
281{
282 char *class_name;
283 int size;
284
285 size = strlen(name) + strlen(kobject_name(kobj)) + 2;
286
287 class_name = kmalloc(size, GFP_KERNEL);
288 if (!class_name)
289 return NULL;
290
291 strcpy(class_name, name);
292 strcat(class_name, ":");
293 strcat(class_name, kobject_name(kobj));
294 return class_name;
295}
296#endif
297
298/** 279/**
299 * class_dev_iter_init - initialize class device iterator 280 * class_dev_iter_init - initialize class device iterator
300 * @iter: class iterator to initialize 281 * @iter: class iterator to initialize
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d1b2c9adc271..2cb49a93b1e6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,6 +26,19 @@
26#include "base.h" 26#include "base.h"
27#include "power/power.h" 27#include "power/power.h"
28 28
29#ifdef CONFIG_SYSFS_DEPRECATED
30#ifdef CONFIG_SYSFS_DEPRECATED_V2
31long sysfs_deprecated = 1;
32#else
33long sysfs_deprecated = 0;
34#endif
35static __init int sysfs_deprecated_setup(char *arg)
36{
37 return strict_strtol(arg, 10, &sysfs_deprecated);
38}
39early_param("sysfs.deprecated", sysfs_deprecated_setup);
40#endif
41
29int (*platform_notify)(struct device *dev) = NULL; 42int (*platform_notify)(struct device *dev) = NULL;
30int (*platform_notify_remove)(struct device *dev) = NULL; 43int (*platform_notify_remove)(struct device *dev) = NULL;
31static struct kobject *dev_kobj; 44static struct kobject *dev_kobj;
@@ -203,37 +216,6 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
203 if (dev->driver) 216 if (dev->driver)
204 add_uevent_var(env, "DRIVER=%s", dev->driver->name); 217 add_uevent_var(env, "DRIVER=%s", dev->driver->name);
205 218
206#ifdef CONFIG_SYSFS_DEPRECATED
207 if (dev->class) {
208 struct device *parent = dev->parent;
209
210 /* find first bus device in parent chain */
211 while (parent && !parent->bus)
212 parent = parent->parent;
213 if (parent && parent->bus) {
214 const char *path;
215
216 path = kobject_get_path(&parent->kobj, GFP_KERNEL);
217 if (path) {
218 add_uevent_var(env, "PHYSDEVPATH=%s", path);
219 kfree(path);
220 }
221
222 add_uevent_var(env, "PHYSDEVBUS=%s", parent->bus->name);
223
224 if (parent->driver)
225 add_uevent_var(env, "PHYSDEVDRIVER=%s",
226 parent->driver->name);
227 }
228 } else if (dev->bus) {
229 add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
230
231 if (dev->driver)
232 add_uevent_var(env, "PHYSDEVDRIVER=%s",
233 dev->driver->name);
234 }
235#endif
236
237 /* have the bus specific function add its stuff */ 219 /* have the bus specific function add its stuff */
238 if (dev->bus && dev->bus->uevent) { 220 if (dev->bus && dev->bus->uevent) {
239 retval = dev->bus->uevent(dev, env); 221 retval = dev->bus->uevent(dev, env);
@@ -578,24 +560,6 @@ void device_initialize(struct device *dev)
578 set_dev_node(dev, -1); 560 set_dev_node(dev, -1);
579} 561}
580 562
581#ifdef CONFIG_SYSFS_DEPRECATED
582static struct kobject *get_device_parent(struct device *dev,
583 struct device *parent)
584{
585 /* class devices without a parent live in /sys/class/<classname>/ */
586 if (dev->class && (!parent || parent->class != dev->class))
587 return &dev->class->p->class_subsys.kobj;
588 /* all other devices keep their parent */
589 else if (parent)
590 return &parent->kobj;
591
592 return NULL;
593}
594
595static inline void cleanup_device_parent(struct device *dev) {}
596static inline void cleanup_glue_dir(struct device *dev,
597 struct kobject *glue_dir) {}
598#else
599static struct kobject *virtual_device_parent(struct device *dev) 563static struct kobject *virtual_device_parent(struct device *dev)
600{ 564{
601 static struct kobject *virtual_dir = NULL; 565 static struct kobject *virtual_dir = NULL;
@@ -666,6 +630,15 @@ static struct kobject *get_device_parent(struct device *dev,
666 struct kobject *parent_kobj; 630 struct kobject *parent_kobj;
667 struct kobject *k; 631 struct kobject *k;
668 632
633#ifdef CONFIG_BLOCK
634 /* block disks show up in /sys/block */
635 if (sysfs_deprecated && dev->class == &block_class) {
636 if (parent && parent->class == &block_class)
637 return &parent->kobj;
638 return &block_class.p->class_subsys.kobj;
639 }
640#endif
641
669 /* 642 /*
670 * If we have no parent, we live in "virtual". 643 * If we have no parent, we live in "virtual".
671 * Class-devices with a non class-device as parent, live 644 * Class-devices with a non class-device as parent, live
@@ -719,7 +692,6 @@ static void cleanup_device_parent(struct device *dev)
719{ 692{
720 cleanup_glue_dir(dev, dev->kobj.parent); 693 cleanup_glue_dir(dev, dev->kobj.parent);
721} 694}
722#endif
723 695
724static void setup_parent(struct device *dev, struct device *parent) 696static void setup_parent(struct device *dev, struct device *parent)
725{ 697{
@@ -742,70 +714,29 @@ static int device_add_class_symlinks(struct device *dev)
742 if (error) 714 if (error)
743 goto out; 715 goto out;
744 716
745#ifdef CONFIG_SYSFS_DEPRECATED
746 /* stacked class devices need a symlink in the class directory */
747 if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
748 device_is_not_partition(dev)) {
749 error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
750 &dev->kobj, dev_name(dev));
751 if (error)
752 goto out_subsys;
753 }
754
755 if (dev->parent && device_is_not_partition(dev)) { 717 if (dev->parent && device_is_not_partition(dev)) {
756 struct device *parent = dev->parent; 718 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
757 char *class_name;
758
759 /*
760 * stacked class devices have the 'device' link
761 * pointing to the bus device instead of the parent
762 */
763 while (parent->class && !parent->bus && parent->parent)
764 parent = parent->parent;
765
766 error = sysfs_create_link(&dev->kobj,
767 &parent->kobj,
768 "device"); 719 "device");
769 if (error) 720 if (error)
770 goto out_busid; 721 goto out_subsys;
771
772 class_name = make_class_name(dev->class->name,
773 &dev->kobj);
774 if (class_name)
775 error = sysfs_create_link(&dev->parent->kobj,
776 &dev->kobj, class_name);
777 kfree(class_name);
778 if (error)
779 goto out_device;
780 } 722 }
781 return 0;
782 723
783out_device: 724#ifdef CONFIG_BLOCK
784 if (dev->parent && device_is_not_partition(dev)) 725 /* /sys/block has directories and does not need symlinks */
785 sysfs_remove_link(&dev->kobj, "device"); 726 if (sysfs_deprecated && dev->class == &block_class)
786out_busid: 727 return 0;
787 if (dev->kobj.parent != &dev->class->p->class_subsys.kobj && 728#endif
788 device_is_not_partition(dev)) 729
789 sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
790 dev_name(dev));
791#else
792 /* link in the class directory pointing to the device */ 730 /* link in the class directory pointing to the device */
793 error = sysfs_create_link(&dev->class->p->class_subsys.kobj, 731 error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
794 &dev->kobj, dev_name(dev)); 732 &dev->kobj, dev_name(dev));
795 if (error) 733 if (error)
796 goto out_subsys; 734 goto out_device;
797 735
798 if (dev->parent && device_is_not_partition(dev)) {
799 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
800 "device");
801 if (error)
802 goto out_busid;
803 }
804 return 0; 736 return 0;
805 737
806out_busid: 738out_device:
807 sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev)); 739 sysfs_remove_link(&dev->kobj, "device");
808#endif
809 740
810out_subsys: 741out_subsys:
811 sysfs_remove_link(&dev->kobj, "subsystem"); 742 sysfs_remove_link(&dev->kobj, "subsystem");
@@ -818,30 +749,14 @@ static void device_remove_class_symlinks(struct device *dev)
818 if (!dev->class) 749 if (!dev->class)
819 return; 750 return;
820 751
821#ifdef CONFIG_SYSFS_DEPRECATED
822 if (dev->parent && device_is_not_partition(dev)) {
823 char *class_name;
824
825 class_name = make_class_name(dev->class->name, &dev->kobj);
826 if (class_name) {
827 sysfs_remove_link(&dev->parent->kobj, class_name);
828 kfree(class_name);
829 }
830 sysfs_remove_link(&dev->kobj, "device");
831 }
832
833 if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
834 device_is_not_partition(dev))
835 sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
836 dev_name(dev));
837#else
838 if (dev->parent && device_is_not_partition(dev)) 752 if (dev->parent && device_is_not_partition(dev))
839 sysfs_remove_link(&dev->kobj, "device"); 753 sysfs_remove_link(&dev->kobj, "device");
840
841 sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
842#endif
843
844 sysfs_remove_link(&dev->kobj, "subsystem"); 754 sysfs_remove_link(&dev->kobj, "subsystem");
755#ifdef CONFIG_BLOCK
756 if (sysfs_deprecated && dev->class == &block_class)
757 return;
758#endif
759 sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
845} 760}
846 761
847/** 762/**
@@ -1613,41 +1528,23 @@ int device_rename(struct device *dev, const char *new_name)
1613 pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), 1528 pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
1614 __func__, new_name); 1529 __func__, new_name);
1615 1530
1616#ifdef CONFIG_SYSFS_DEPRECATED
1617 if ((dev->class) && (dev->parent))
1618 old_class_name = make_class_name(dev->class->name, &dev->kobj);
1619#endif
1620
1621 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 1531 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
1622 if (!old_device_name) { 1532 if (!old_device_name) {
1623 error = -ENOMEM; 1533 error = -ENOMEM;
1624 goto out; 1534 goto out;
1625 } 1535 }
1626 1536
1627#ifndef CONFIG_SYSFS_DEPRECATED
1628 if (dev->class) { 1537 if (dev->class) {
1629 error = sysfs_rename_link(&dev->class->p->class_subsys.kobj, 1538 error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
1630 &dev->kobj, old_device_name, new_name); 1539 &dev->kobj, old_device_name, new_name);
1631 if (error) 1540 if (error)
1632 goto out; 1541 goto out;
1633 } 1542 }
1634#endif 1543
1635 error = kobject_rename(&dev->kobj, new_name); 1544 error = kobject_rename(&dev->kobj, new_name);
1636 if (error) 1545 if (error)
1637 goto out; 1546 goto out;
1638 1547
1639#ifdef CONFIG_SYSFS_DEPRECATED
1640 if (old_class_name) {
1641 new_class_name = make_class_name(dev->class->name, &dev->kobj);
1642 if (new_class_name) {
1643 error = sysfs_rename_link(&dev->parent->kobj,
1644 &dev->kobj,
1645 old_class_name,
1646 new_class_name);
1647 }
1648 }
1649#endif
1650
1651out: 1548out:
1652 put_device(dev); 1549 put_device(dev);
1653 1550
@@ -1664,40 +1561,13 @@ static int device_move_class_links(struct device *dev,
1664 struct device *new_parent) 1561 struct device *new_parent)
1665{ 1562{
1666 int error = 0; 1563 int error = 0;
1667#ifdef CONFIG_SYSFS_DEPRECATED
1668 char *class_name;
1669 1564
1670 class_name = make_class_name(dev->class->name, &dev->kobj);
1671 if (!class_name) {
1672 error = -ENOMEM;
1673 goto out;
1674 }
1675 if (old_parent) {
1676 sysfs_remove_link(&dev->kobj, "device");
1677 sysfs_remove_link(&old_parent->kobj, class_name);
1678 }
1679 if (new_parent) {
1680 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
1681 "device");
1682 if (error)
1683 goto out;
1684 error = sysfs_create_link(&new_parent->kobj, &dev->kobj,
1685 class_name);
1686 if (error)
1687 sysfs_remove_link(&dev->kobj, "device");
1688 } else
1689 error = 0;
1690out:
1691 kfree(class_name);
1692 return error;
1693#else
1694 if (old_parent) 1565 if (old_parent)
1695 sysfs_remove_link(&dev->kobj, "device"); 1566 sysfs_remove_link(&dev->kobj, "device");
1696 if (new_parent) 1567 if (new_parent)
1697 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 1568 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
1698 "device"); 1569 "device");
1699 return error; 1570 return error;
1700#endif
1701} 1571}
1702 1572
1703/** 1573/**
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 933442f40321..cafeaaf0428f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -27,6 +27,8 @@
27#include <asm/atomic.h> 27#include <asm/atomic.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30static DEFINE_MUTEX(mem_sysfs_mutex);
31
30#define MEMORY_CLASS_NAME "memory" 32#define MEMORY_CLASS_NAME "memory"
31 33
32static struct sysdev_class memory_sysdev_class = { 34static struct sysdev_class memory_sysdev_class = {
@@ -435,6 +437,45 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
435 return 0; 437 return 0;
436} 438}
437 439
440struct memory_block *find_memory_block_hinted(struct mem_section *section,
441 struct memory_block *hint)
442{
443 struct kobject *kobj;
444 struct sys_device *sysdev;
445 struct memory_block *mem;
446 char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
447
448 kobj = hint ? &hint->sysdev.kobj : NULL;
449
450 /*
451 * This only works because we know that section == sysdev->id
452 * slightly redundant with sysdev_register()
453 */
454 sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section));
455
456 kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
457 if (!kobj)
458 return NULL;
459
460 sysdev = container_of(kobj, struct sys_device, kobj);
461 mem = container_of(sysdev, struct memory_block, sysdev);
462
463 return mem;
464}
465
466/*
467 * For now, we have a linear search to go find the appropriate
468 * memory_block corresponding to a particular phys_index. If
469 * this gets to be a real problem, we can always use a radix
470 * tree or something here.
471 *
472 * This could be made generic for all sysdev classes.
473 */
474struct memory_block *find_memory_block(struct mem_section *section)
475{
476 return find_memory_block_hinted(section, NULL);
477}
478
438static int add_memory_block(int nid, struct mem_section *section, 479static int add_memory_block(int nid, struct mem_section *section,
439 unsigned long state, enum mem_add_context context) 480 unsigned long state, enum mem_add_context context)
440{ 481{
@@ -445,8 +486,11 @@ static int add_memory_block(int nid, struct mem_section *section,
445 if (!mem) 486 if (!mem)
446 return -ENOMEM; 487 return -ENOMEM;
447 488
489 mutex_lock(&mem_sysfs_mutex);
490
448 mem->phys_index = __section_nr(section); 491 mem->phys_index = __section_nr(section);
449 mem->state = state; 492 mem->state = state;
493 mem->section_count++;
450 mutex_init(&mem->state_mutex); 494 mutex_init(&mem->state_mutex);
451 start_pfn = section_nr_to_pfn(mem->phys_index); 495 start_pfn = section_nr_to_pfn(mem->phys_index);
452 mem->phys_device = arch_get_memory_phys_device(start_pfn); 496 mem->phys_device = arch_get_memory_phys_device(start_pfn);
@@ -465,53 +509,29 @@ static int add_memory_block(int nid, struct mem_section *section,
465 ret = register_mem_sect_under_node(mem, nid); 509 ret = register_mem_sect_under_node(mem, nid);
466 } 510 }
467 511
512 mutex_unlock(&mem_sysfs_mutex);
468 return ret; 513 return ret;
469} 514}
470 515
471/*
472 * For now, we have a linear search to go find the appropriate
473 * memory_block corresponding to a particular phys_index. If
474 * this gets to be a real problem, we can always use a radix
475 * tree or something here.
476 *
477 * This could be made generic for all sysdev classes.
478 */
479struct memory_block *find_memory_block(struct mem_section *section)
480{
481 struct kobject *kobj;
482 struct sys_device *sysdev;
483 struct memory_block *mem;
484 char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
485
486 /*
487 * This only works because we know that section == sysdev->id
488 * slightly redundant with sysdev_register()
489 */
490 sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section));
491
492 kobj = kset_find_obj(&memory_sysdev_class.kset, name);
493 if (!kobj)
494 return NULL;
495
496 sysdev = container_of(kobj, struct sys_device, kobj);
497 mem = container_of(sysdev, struct memory_block, sysdev);
498
499 return mem;
500}
501
502int remove_memory_block(unsigned long node_id, struct mem_section *section, 516int remove_memory_block(unsigned long node_id, struct mem_section *section,
503 int phys_device) 517 int phys_device)
504{ 518{
505 struct memory_block *mem; 519 struct memory_block *mem;
506 520
521 mutex_lock(&mem_sysfs_mutex);
507 mem = find_memory_block(section); 522 mem = find_memory_block(section);
508 unregister_mem_sect_under_nodes(mem);
509 mem_remove_simple_file(mem, phys_index);
510 mem_remove_simple_file(mem, state);
511 mem_remove_simple_file(mem, phys_device);
512 mem_remove_simple_file(mem, removable);
513 unregister_memory(mem, section);
514 523
524 mem->section_count--;
525 if (mem->section_count == 0) {
526 unregister_mem_sect_under_nodes(mem);
527 mem_remove_simple_file(mem, phys_index);
528 mem_remove_simple_file(mem, state);
529 mem_remove_simple_file(mem, phys_device);
530 mem_remove_simple_file(mem, removable);
531 unregister_memory(mem, section);
532 }
533
534 mutex_unlock(&mem_sysfs_mutex);
515 return 0; 535 return 0;
516} 536}
517 537
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 2872e86837b2..ee53558b452f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -409,25 +409,27 @@ static int link_mem_sections(int nid)
409 unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; 409 unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
410 unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; 410 unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
411 unsigned long pfn; 411 unsigned long pfn;
412 struct memory_block *mem_blk = NULL;
412 int err = 0; 413 int err = 0;
413 414
414 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 415 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
415 unsigned long section_nr = pfn_to_section_nr(pfn); 416 unsigned long section_nr = pfn_to_section_nr(pfn);
416 struct mem_section *mem_sect; 417 struct mem_section *mem_sect;
417 struct memory_block *mem_blk;
418 int ret; 418 int ret;
419 419
420 if (!present_section_nr(section_nr)) 420 if (!present_section_nr(section_nr))
421 continue; 421 continue;
422 mem_sect = __nr_to_section(section_nr); 422 mem_sect = __nr_to_section(section_nr);
423 mem_blk = find_memory_block(mem_sect); 423 mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
424 ret = register_mem_sect_under_node(mem_blk, nid); 424 ret = register_mem_sect_under_node(mem_blk, nid);
425 if (!err) 425 if (!err)
426 err = ret; 426 err = ret;
427 427
428 /* discard ref obtained in find_memory_block() */ 428 /* discard ref obtained in find_memory_block() */
429 kobject_put(&mem_blk->sysdev.kobj);
430 } 429 }
430
431 if (mem_blk)
432 kobject_put(&mem_blk->sysdev.kobj);
431 return err; 433 return err;
432} 434}
433 435
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c6c933f58102..3966e62ad019 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -192,6 +192,9 @@ int platform_device_add_resources(struct platform_device *pdev,
192{ 192{
193 struct resource *r; 193 struct resource *r;
194 194
195 if (!res)
196 return 0;
197
195 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); 198 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
196 if (r) { 199 if (r) {
197 pdev->resource = r; 200 pdev->resource = r;
@@ -215,8 +218,12 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources);
215int platform_device_add_data(struct platform_device *pdev, const void *data, 218int platform_device_add_data(struct platform_device *pdev, const void *data,
216 size_t size) 219 size_t size)
217{ 220{
218 void *d = kmemdup(data, size, GFP_KERNEL); 221 void *d;
222
223 if (!data)
224 return 0;
219 225
226 d = kmemdup(data, size, GFP_KERNEL);
220 if (d) { 227 if (d) {
221 pdev->dev.platform_data = d; 228 pdev->dev.platform_data = d;
222 return 0; 229 return 0;
@@ -373,17 +380,13 @@ struct platform_device *__init_or_module platform_device_register_resndata(
373 380
374 pdev->dev.parent = parent; 381 pdev->dev.parent = parent;
375 382
376 if (res) { 383 ret = platform_device_add_resources(pdev, res, num);
377 ret = platform_device_add_resources(pdev, res, num); 384 if (ret)
378 if (ret) 385 goto err;
379 goto err;
380 }
381 386
382 if (data) { 387 ret = platform_device_add_data(pdev, data, size);
383 ret = platform_device_add_data(pdev, data, size); 388 if (ret)
384 if (ret) 389 goto err;
385 goto err;
386 }
387 390
388 ret = platform_device_add(pdev); 391 ret = platform_device_add(pdev);
389 if (ret) { 392 if (ret) {
@@ -488,12 +491,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
488 * if the probe was successful, and make sure any forced probes of 491 * if the probe was successful, and make sure any forced probes of
489 * new devices fail. 492 * new devices fail.
490 */ 493 */
491 spin_lock(&platform_bus_type.p->klist_drivers.k_lock); 494 spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
492 drv->probe = NULL; 495 drv->probe = NULL;
493 if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) 496 if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
494 retval = -ENODEV; 497 retval = -ENODEV;
495 drv->driver.probe = platform_drv_probe_fail; 498 drv->driver.probe = platform_drv_probe_fail;
496 spin_unlock(&platform_bus_type.p->klist_drivers.k_lock); 499 spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
497 500
498 if (code != retval) 501 if (code != retval)
499 platform_driver_unregister(drv); 502 platform_driver_unregister(drv);
@@ -530,17 +533,13 @@ struct platform_device * __init_or_module platform_create_bundle(
530 goto err_out; 533 goto err_out;
531 } 534 }
532 535
533 if (res) { 536 error = platform_device_add_resources(pdev, res, n_res);
534 error = platform_device_add_resources(pdev, res, n_res); 537 if (error)
535 if (error) 538 goto err_pdev_put;
536 goto err_pdev_put;
537 }
538 539
539 if (data) { 540 error = platform_device_add_data(pdev, data, size);
540 error = platform_device_add_data(pdev, data, size); 541 if (error)
541 if (error) 542 goto err_pdev_put;
542 goto err_pdev_put;
543 }
544 543
545 error = platform_device_add(pdev); 544 error = platform_device_add(pdev);
546 if (error) 545 if (error)
@@ -976,6 +975,41 @@ struct bus_type platform_bus_type = {
976}; 975};
977EXPORT_SYMBOL_GPL(platform_bus_type); 976EXPORT_SYMBOL_GPL(platform_bus_type);
978 977
978/**
979 * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops
980 *
981 * This function can be used by platform code to get the current
982 * set of dev_pm_ops functions used by the platform_bus_type.
983 */
984const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
985{
986 return platform_bus_type.pm;
987}
988
989/**
990 * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
991 *
992 * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
993 *
994 * Platform code can override the dev_pm_ops methods of
995 * platform_bus_type by using this function. It is expected that
996 * platform code will first do a platform_bus_get_pm_ops(), then
997 * kmemdup it, then customize selected methods and pass a pointer to
998 * the new struct dev_pm_ops to this function.
999 *
1000 * Since platform-specific code is customizing methods for *all*
1001 * devices (not just platform-specific devices) it is expected that
1002 * any custom overrides of these functions will keep existing behavior
1003 * and simply extend it. For example, any customization of the
1004 * runtime PM methods should continue to call the pm_generic_*
1005 * functions as the default ones do in addition to the
1006 * platform-specific behavior.
1007 */
1008void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
1009{
1010 platform_bus_type.pm = pm;
1011}
1012
979int __init platform_bus_init(void) 1013int __init platform_bus_init(void)
980{ 1014{
981 int error; 1015 int error;
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 9354dc10a363..1667aaf4fde6 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -432,13 +432,13 @@ int sysdev_suspend(pm_message_t state)
432 /* resume current sysdev */ 432 /* resume current sysdev */
433cls_driver: 433cls_driver:
434 drv = NULL; 434 drv = NULL;
435 printk(KERN_ERR "Class suspend failed for %s\n", 435 printk(KERN_ERR "Class suspend failed for %s: %d\n",
436 kobject_name(&sysdev->kobj)); 436 kobject_name(&sysdev->kobj), ret);
437 437
438aux_driver: 438aux_driver:
439 if (drv) 439 if (drv)
440 printk(KERN_ERR "Class driver suspend failed for %s\n", 440 printk(KERN_ERR "Class driver suspend failed for %s: %d\n",
441 kobject_name(&sysdev->kobj)); 441 kobject_name(&sysdev->kobj), ret);
442 list_for_each_entry(err_drv, &cls->drivers, entry) { 442 list_for_each_entry(err_drv, &cls->drivers, entry) {
443 if (err_drv == drv) 443 if (err_drv == drv)
444 break; 444 break;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4b852c962266..a1725e6488d3 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -115,8 +115,6 @@ static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it does
115module_param(fd_def_df0, ulong, 0); 115module_param(fd_def_df0, ulong, 0);
116MODULE_LICENSE("GPL"); 116MODULE_LICENSE("GPL");
117 117
118static struct request_queue *floppy_queue;
119
120/* 118/*
121 * Macros 119 * Macros
122 */ 120 */
@@ -165,6 +163,7 @@ static volatile int selected = -1; /* currently selected drive */
165static int writepending; 163static int writepending;
166static int writefromint; 164static int writefromint;
167static char *raw_buf; 165static char *raw_buf;
166static int fdc_queue;
168 167
169static DEFINE_SPINLOCK(amiflop_lock); 168static DEFINE_SPINLOCK(amiflop_lock);
170 169
@@ -1335,6 +1334,42 @@ static int get_track(int drive, int track)
1335 return -1; 1334 return -1;
1336} 1335}
1337 1336
1337/*
1338 * Round-robin between our available drives, doing one request from each
1339 */
1340static struct request *set_next_request(void)
1341{
1342 struct request_queue *q;
1343 int cnt = FD_MAX_UNITS;
1344 struct request *rq;
1345
1346 /* Find next queue we can dispatch from */
1347 fdc_queue = fdc_queue + 1;
1348 if (fdc_queue == FD_MAX_UNITS)
1349 fdc_queue = 0;
1350
1351 for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) {
1352
1353 if (unit[fdc_queue].type->code == FD_NODRIVE) {
1354 if (++fdc_queue == FD_MAX_UNITS)
1355 fdc_queue = 0;
1356 continue;
1357 }
1358
1359 q = unit[fdc_queue].gendisk->queue;
1360 if (q) {
1361 rq = blk_fetch_request(q);
1362 if (rq)
1363 break;
1364 }
1365
1366 if (++fdc_queue == FD_MAX_UNITS)
1367 fdc_queue = 0;
1368 }
1369
1370 return rq;
1371}
1372
1338static void redo_fd_request(void) 1373static void redo_fd_request(void)
1339{ 1374{
1340 struct request *rq; 1375 struct request *rq;
@@ -1346,7 +1381,7 @@ static void redo_fd_request(void)
1346 int err; 1381 int err;
1347 1382
1348next_req: 1383next_req:
1349 rq = blk_fetch_request(floppy_queue); 1384 rq = set_next_request();
1350 if (!rq) { 1385 if (!rq) {
1351 /* Nothing left to do */ 1386 /* Nothing left to do */
1352 return; 1387 return;
@@ -1683,6 +1718,13 @@ static int __init fd_probe_drives(void)
1683 continue; 1718 continue;
1684 } 1719 }
1685 unit[drive].gendisk = disk; 1720 unit[drive].gendisk = disk;
1721
1722 disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
1723 if (!disk->queue) {
1724 unit[drive].type->code = FD_NODRIVE;
1725 continue;
1726 }
1727
1686 drives++; 1728 drives++;
1687 if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { 1729 if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
1688 printk("no mem for "); 1730 printk("no mem for ");
@@ -1696,7 +1738,6 @@ static int __init fd_probe_drives(void)
1696 disk->fops = &floppy_fops; 1738 disk->fops = &floppy_fops;
1697 sprintf(disk->disk_name, "fd%d", drive); 1739 sprintf(disk->disk_name, "fd%d", drive);
1698 disk->private_data = &unit[drive]; 1740 disk->private_data = &unit[drive];
1699 disk->queue = floppy_queue;
1700 set_capacity(disk, 880*2); 1741 set_capacity(disk, 880*2);
1701 add_disk(disk); 1742 add_disk(disk);
1702 } 1743 }
@@ -1744,11 +1785,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
1744 goto out_irq2; 1785 goto out_irq2;
1745 } 1786 }
1746 1787
1747 ret = -ENOMEM;
1748 floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock);
1749 if (!floppy_queue)
1750 goto out_queue;
1751
1752 ret = -ENODEV; 1788 ret = -ENODEV;
1753 if (fd_probe_drives() < 1) /* No usable drives */ 1789 if (fd_probe_drives() < 1) /* No usable drives */
1754 goto out_probe; 1790 goto out_probe;
@@ -1792,8 +1828,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
1792 return 0; 1828 return 0;
1793 1829
1794out_probe: 1830out_probe:
1795 blk_cleanup_queue(floppy_queue);
1796out_queue:
1797 free_irq(IRQ_AMIGA_CIAA_TB, NULL); 1831 free_irq(IRQ_AMIGA_CIAA_TB, NULL);
1798out_irq2: 1832out_irq2:
1799 free_irq(IRQ_AMIGA_DSKBLK, NULL); 1833 free_irq(IRQ_AMIGA_DSKBLK, NULL);
@@ -1811,9 +1845,12 @@ static int __exit amiga_floppy_remove(struct platform_device *pdev)
1811 1845
1812 for( i = 0; i < FD_MAX_UNITS; i++) { 1846 for( i = 0; i < FD_MAX_UNITS; i++) {
1813 if (unit[i].type->code != FD_NODRIVE) { 1847 if (unit[i].type->code != FD_NODRIVE) {
1848 struct request_queue *q = unit[i].gendisk->queue;
1814 del_gendisk(unit[i].gendisk); 1849 del_gendisk(unit[i].gendisk);
1815 put_disk(unit[i].gendisk); 1850 put_disk(unit[i].gendisk);
1816 kfree(unit[i].trackbuf); 1851 kfree(unit[i].trackbuf);
1852 if (q)
1853 blk_cleanup_queue(q);
1817 } 1854 }
1818 } 1855 }
1819 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 1856 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
@@ -1821,7 +1858,6 @@ static int __exit amiga_floppy_remove(struct platform_device *pdev)
1821 free_irq(IRQ_AMIGA_DSKBLK, NULL); 1858 free_irq(IRQ_AMIGA_DSKBLK, NULL);
1822 custom.dmacon = DMAF_DISK; /* disable DMA */ 1859 custom.dmacon = DMAF_DISK; /* disable DMA */
1823 amiga_chip_free(raw_buf); 1860 amiga_chip_free(raw_buf);
1824 blk_cleanup_queue(floppy_queue);
1825 unregister_blkdev(FLOPPY_MAJOR, "fd"); 1861 unregister_blkdev(FLOPPY_MAJOR, "fd");
1826} 1862}
1827#endif 1863#endif
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 8c021bb7a991..4e4cc6c828cb 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -80,8 +80,8 @@
80#undef DEBUG 80#undef DEBUG
81 81
82static DEFINE_MUTEX(ataflop_mutex); 82static DEFINE_MUTEX(ataflop_mutex);
83static struct request_queue *floppy_queue;
84static struct request *fd_request; 83static struct request *fd_request;
84static int fdc_queue;
85 85
86/* Disk types: DD, HD, ED */ 86/* Disk types: DD, HD, ED */
87static struct atari_disk_type { 87static struct atari_disk_type {
@@ -1392,6 +1392,29 @@ static void setup_req_params( int drive )
1392 ReqTrack, ReqSector, (unsigned long)ReqData )); 1392 ReqTrack, ReqSector, (unsigned long)ReqData ));
1393} 1393}
1394 1394
1395/*
1396 * Round-robin between our available drives, doing one request from each
1397 */
1398static struct request *set_next_request(void)
1399{
1400 struct request_queue *q;
1401 int old_pos = fdc_queue;
1402 struct request *rq;
1403
1404 do {
1405 q = unit[fdc_queue].disk->queue;
1406 if (++fdc_queue == FD_MAX_UNITS)
1407 fdc_queue = 0;
1408 if (q) {
1409 rq = blk_fetch_request(q);
1410 if (rq)
1411 break;
1412 }
1413 } while (fdc_queue != old_pos);
1414
1415 return rq;
1416}
1417
1395 1418
1396static void redo_fd_request(void) 1419static void redo_fd_request(void)
1397{ 1420{
@@ -1406,7 +1429,7 @@ static void redo_fd_request(void)
1406 1429
1407repeat: 1430repeat:
1408 if (!fd_request) { 1431 if (!fd_request) {
1409 fd_request = blk_fetch_request(floppy_queue); 1432 fd_request = set_next_request();
1410 if (!fd_request) 1433 if (!fd_request)
1411 goto the_end; 1434 goto the_end;
1412 } 1435 }
@@ -1933,10 +1956,6 @@ static int __init atari_floppy_init (void)
1933 PhysTrackBuffer = virt_to_phys(TrackBuffer); 1956 PhysTrackBuffer = virt_to_phys(TrackBuffer);
1934 BufferDrive = BufferSide = BufferTrack = -1; 1957 BufferDrive = BufferSide = BufferTrack = -1;
1935 1958
1936 floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock);
1937 if (!floppy_queue)
1938 goto Enomem;
1939
1940 for (i = 0; i < FD_MAX_UNITS; i++) { 1959 for (i = 0; i < FD_MAX_UNITS; i++) {
1941 unit[i].track = -1; 1960 unit[i].track = -1;
1942 unit[i].flags = 0; 1961 unit[i].flags = 0;
@@ -1945,7 +1964,10 @@ static int __init atari_floppy_init (void)
1945 sprintf(unit[i].disk->disk_name, "fd%d", i); 1964 sprintf(unit[i].disk->disk_name, "fd%d", i);
1946 unit[i].disk->fops = &floppy_fops; 1965 unit[i].disk->fops = &floppy_fops;
1947 unit[i].disk->private_data = &unit[i]; 1966 unit[i].disk->private_data = &unit[i];
1948 unit[i].disk->queue = floppy_queue; 1967 unit[i].disk->queue = blk_init_queue(do_fd_request,
1968 &ataflop_lock);
1969 if (!unit[i].disk->queue)
1970 goto Enomem;
1949 set_capacity(unit[i].disk, MAX_DISK_SIZE * 2); 1971 set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
1950 add_disk(unit[i].disk); 1972 add_disk(unit[i].disk);
1951 } 1973 }
@@ -1960,10 +1982,14 @@ static int __init atari_floppy_init (void)
1960 1982
1961 return 0; 1983 return 0;
1962Enomem: 1984Enomem:
1963 while (i--) 1985 while (i--) {
1986 struct request_queue *q = unit[i].disk->queue;
1987
1964 put_disk(unit[i].disk); 1988 put_disk(unit[i].disk);
1965 if (floppy_queue) 1989 if (q)
1966 blk_cleanup_queue(floppy_queue); 1990 blk_cleanup_queue(q);
1991 }
1992
1967 unregister_blkdev(FLOPPY_MAJOR, "fd"); 1993 unregister_blkdev(FLOPPY_MAJOR, "fd");
1968 return -ENOMEM; 1994 return -ENOMEM;
1969} 1995}
@@ -2012,12 +2038,14 @@ static void __exit atari_floppy_exit(void)
2012 int i; 2038 int i;
2013 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 2039 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
2014 for (i = 0; i < FD_MAX_UNITS; i++) { 2040 for (i = 0; i < FD_MAX_UNITS; i++) {
2041 struct request_queue *q = unit[i].disk->queue;
2042
2015 del_gendisk(unit[i].disk); 2043 del_gendisk(unit[i].disk);
2016 put_disk(unit[i].disk); 2044 put_disk(unit[i].disk);
2045 blk_cleanup_queue(q);
2017 } 2046 }
2018 unregister_blkdev(FLOPPY_MAJOR, "fd"); 2047 unregister_blkdev(FLOPPY_MAJOR, "fd");
2019 2048
2020 blk_cleanup_queue(floppy_queue);
2021 del_timer_sync(&fd_timer); 2049 del_timer_sync(&fd_timer);
2022 atari_stram_free( DMABuffer ); 2050 atari_stram_free( DMABuffer );
2023} 2051}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 82bfd5bb4a97..b7f51e4594f8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -483,7 +483,6 @@ static struct brd_device *brd_alloc(int i)
483 if (!brd->brd_queue) 483 if (!brd->brd_queue)
484 goto out_free_dev; 484 goto out_free_dev;
485 blk_queue_make_request(brd->brd_queue, brd_make_request); 485 blk_queue_make_request(brd->brd_queue, brd_make_request);
486 blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
487 blk_queue_max_hw_sectors(brd->brd_queue, 1024); 486 blk_queue_max_hw_sectors(brd->brd_queue, 1024);
488 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 487 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
489 488
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c484c96e22a6..f09e6df15aa7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -105,11 +105,12 @@ static const struct pci_device_id cciss_pci_device_id[] = {
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, 110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, 111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, 112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
113 {0,} 114 {0,}
114}; 115};
115 116
@@ -149,11 +150,12 @@ static struct board_type products[] = {
149 {0x3249103C, "Smart Array P812", &SA5_access}, 150 {0x3249103C, "Smart Array P812", &SA5_access},
150 {0x324A103C, "Smart Array P712m", &SA5_access}, 151 {0x324A103C, "Smart Array P712m", &SA5_access},
151 {0x324B103C, "Smart Array P711m", &SA5_access}, 152 {0x324B103C, "Smart Array P711m", &SA5_access},
152 {0x3250103C, "Smart Array", &SA5_access}, 153 {0x3350103C, "Smart Array", &SA5_access},
153 {0x3251103C, "Smart Array", &SA5_access}, 154 {0x3351103C, "Smart Array", &SA5_access},
154 {0x3252103C, "Smart Array", &SA5_access}, 155 {0x3352103C, "Smart Array", &SA5_access},
155 {0x3253103C, "Smart Array", &SA5_access}, 156 {0x3353103C, "Smart Array", &SA5_access},
156 {0x3254103C, "Smart Array", &SA5_access}, 157 {0x3354103C, "Smart Array", &SA5_access},
158 {0x3355103C, "Smart Array", &SA5_access},
157}; 159};
158 160
159/* How long to wait (in milliseconds) for board to go into simple mode */ 161/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -1232,470 +1234,452 @@ static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
1232 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 1234 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
1233 (void)check_for_unit_attention(h, c); 1235 (void)check_for_unit_attention(h, c);
1234} 1236}
1235/* 1237
1236 * ioctl 1238static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
1237 */
1238static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1239 unsigned int cmd, unsigned long arg)
1240{ 1239{
1241 struct gendisk *disk = bdev->bd_disk; 1240 cciss_pci_info_struct pciinfo;
1242 ctlr_info_t *h = get_host(disk);
1243 drive_info_struct *drv = get_drv(disk);
1244 void __user *argp = (void __user *)arg;
1245 1241
1246 dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", 1242 if (!argp)
1247 cmd, arg); 1243 return -EINVAL;
1248 switch (cmd) { 1244 pciinfo.domain = pci_domain_nr(h->pdev->bus);
1249 case CCISS_GETPCIINFO: 1245 pciinfo.bus = h->pdev->bus->number;
1250 { 1246 pciinfo.dev_fn = h->pdev->devfn;
1251 cciss_pci_info_struct pciinfo; 1247 pciinfo.board_id = h->board_id;
1252 1248 if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
1253 if (!arg) 1249 return -EFAULT;
1254 return -EINVAL; 1250 return 0;
1255 pciinfo.domain = pci_domain_nr(h->pdev->bus); 1251}
1256 pciinfo.bus = h->pdev->bus->number;
1257 pciinfo.dev_fn = h->pdev->devfn;
1258 pciinfo.board_id = h->board_id;
1259 if (copy_to_user
1260 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
1261 return -EFAULT;
1262 return 0;
1263 }
1264 case CCISS_GETINTINFO:
1265 {
1266 cciss_coalint_struct intinfo;
1267 if (!arg)
1268 return -EINVAL;
1269 intinfo.delay =
1270 readl(&h->cfgtable->HostWrite.CoalIntDelay);
1271 intinfo.count =
1272 readl(&h->cfgtable->HostWrite.CoalIntCount);
1273 if (copy_to_user
1274 (argp, &intinfo, sizeof(cciss_coalint_struct)))
1275 return -EFAULT;
1276 return 0;
1277 }
1278 case CCISS_SETINTINFO:
1279 {
1280 cciss_coalint_struct intinfo;
1281 unsigned long flags;
1282 int i;
1283
1284 if (!arg)
1285 return -EINVAL;
1286 if (!capable(CAP_SYS_ADMIN))
1287 return -EPERM;
1288 if (copy_from_user
1289 (&intinfo, argp, sizeof(cciss_coalint_struct)))
1290 return -EFAULT;
1291 if ((intinfo.delay == 0) && (intinfo.count == 0))
1292 return -EINVAL;
1293 spin_lock_irqsave(&h->lock, flags);
1294 /* Update the field, and then ring the doorbell */
1295 writel(intinfo.delay,
1296 &(h->cfgtable->HostWrite.CoalIntDelay));
1297 writel(intinfo.count,
1298 &(h->cfgtable->HostWrite.CoalIntCount));
1299 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
1300
1301 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
1302 if (!(readl(h->vaddr + SA5_DOORBELL)
1303 & CFGTBL_ChangeReq))
1304 break;
1305 /* delay and try again */
1306 udelay(1000);
1307 }
1308 spin_unlock_irqrestore(&h->lock, flags);
1309 if (i >= MAX_IOCTL_CONFIG_WAIT)
1310 return -EAGAIN;
1311 return 0;
1312 }
1313 case CCISS_GETNODENAME:
1314 {
1315 NodeName_type NodeName;
1316 int i;
1317
1318 if (!arg)
1319 return -EINVAL;
1320 for (i = 0; i < 16; i++)
1321 NodeName[i] =
1322 readb(&h->cfgtable->ServerName[i]);
1323 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1324 return -EFAULT;
1325 return 0;
1326 }
1327 case CCISS_SETNODENAME:
1328 {
1329 NodeName_type NodeName;
1330 unsigned long flags;
1331 int i;
1332 1252
1333 if (!arg) 1253static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
1334 return -EINVAL; 1254{
1335 if (!capable(CAP_SYS_ADMIN)) 1255 cciss_coalint_struct intinfo;
1336 return -EPERM;
1337 1256
1338 if (copy_from_user 1257 if (!argp)
1339 (NodeName, argp, sizeof(NodeName_type))) 1258 return -EINVAL;
1340 return -EFAULT; 1259 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
1260 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
1261 if (copy_to_user
1262 (argp, &intinfo, sizeof(cciss_coalint_struct)))
1263 return -EFAULT;
1264 return 0;
1265}
1341 1266
1342 spin_lock_irqsave(&h->lock, flags); 1267static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
1268{
1269 cciss_coalint_struct intinfo;
1270 unsigned long flags;
1271 int i;
1343 1272
1344 /* Update the field, and then ring the doorbell */ 1273 if (!argp)
1345 for (i = 0; i < 16; i++) 1274 return -EINVAL;
1346 writeb(NodeName[i], 1275 if (!capable(CAP_SYS_ADMIN))
1347 &h->cfgtable->ServerName[i]); 1276 return -EPERM;
1277 if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
1278 return -EFAULT;
1279 if ((intinfo.delay == 0) && (intinfo.count == 0))
1280 return -EINVAL;
1281 spin_lock_irqsave(&h->lock, flags);
1282 /* Update the field, and then ring the doorbell */
1283 writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
1284 writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
1285 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
1348 1286
1349 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 1287 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
1288 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
1289 break;
1290 udelay(1000); /* delay and try again */
1291 }
1292 spin_unlock_irqrestore(&h->lock, flags);
1293 if (i >= MAX_IOCTL_CONFIG_WAIT)
1294 return -EAGAIN;
1295 return 0;
1296}
1350 1297
1351 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { 1298static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
1352 if (!(readl(h->vaddr + SA5_DOORBELL) 1299{
1353 & CFGTBL_ChangeReq)) 1300 NodeName_type NodeName;
1354 break; 1301 int i;
1355 /* delay and try again */
1356 udelay(1000);
1357 }
1358 spin_unlock_irqrestore(&h->lock, flags);
1359 if (i >= MAX_IOCTL_CONFIG_WAIT)
1360 return -EAGAIN;
1361 return 0;
1362 }
1363 1302
1364 case CCISS_GETHEARTBEAT: 1303 if (!argp)
1365 { 1304 return -EINVAL;
1366 Heartbeat_type heartbeat; 1305 for (i = 0; i < 16; i++)
1367 1306 NodeName[i] = readb(&h->cfgtable->ServerName[i]);
1368 if (!arg) 1307 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1369 return -EINVAL; 1308 return -EFAULT;
1370 heartbeat = readl(&h->cfgtable->HeartBeat); 1309 return 0;
1371 if (copy_to_user 1310}
1372 (argp, &heartbeat, sizeof(Heartbeat_type)))
1373 return -EFAULT;
1374 return 0;
1375 }
1376 case CCISS_GETBUSTYPES:
1377 {
1378 BusTypes_type BusTypes;
1379
1380 if (!arg)
1381 return -EINVAL;
1382 BusTypes = readl(&h->cfgtable->BusTypes);
1383 if (copy_to_user
1384 (argp, &BusTypes, sizeof(BusTypes_type)))
1385 return -EFAULT;
1386 return 0;
1387 }
1388 case CCISS_GETFIRMVER:
1389 {
1390 FirmwareVer_type firmware;
1391 1311
1392 if (!arg) 1312static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
1393 return -EINVAL; 1313{
1394 memcpy(firmware, h->firm_ver, 4); 1314 NodeName_type NodeName;
1315 unsigned long flags;
1316 int i;
1395 1317
1396 if (copy_to_user 1318 if (!argp)
1397 (argp, firmware, sizeof(FirmwareVer_type))) 1319 return -EINVAL;
1398 return -EFAULT; 1320 if (!capable(CAP_SYS_ADMIN))
1399 return 0; 1321 return -EPERM;
1400 } 1322 if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
1401 case CCISS_GETDRIVVER: 1323 return -EFAULT;
1402 { 1324 spin_lock_irqsave(&h->lock, flags);
1403 DriverVer_type DriverVer = DRIVER_VERSION; 1325 /* Update the field, and then ring the doorbell */
1326 for (i = 0; i < 16; i++)
1327 writeb(NodeName[i], &h->cfgtable->ServerName[i]);
1328 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
1329 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
1330 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
1331 break;
1332 udelay(1000); /* delay and try again */
1333 }
1334 spin_unlock_irqrestore(&h->lock, flags);
1335 if (i >= MAX_IOCTL_CONFIG_WAIT)
1336 return -EAGAIN;
1337 return 0;
1338}
1404 1339
1405 if (!arg) 1340static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1406 return -EINVAL; 1341{
1342 Heartbeat_type heartbeat;
1407 1343
1408 if (copy_to_user 1344 if (!argp)
1409 (argp, &DriverVer, sizeof(DriverVer_type))) 1345 return -EINVAL;
1410 return -EFAULT; 1346 heartbeat = readl(&h->cfgtable->HeartBeat);
1411 return 0; 1347 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
1412 } 1348 return -EFAULT;
1349 return 0;
1350}
1413 1351
1414 case CCISS_DEREGDISK: 1352static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
1415 case CCISS_REGNEWD: 1353{
1416 case CCISS_REVALIDVOLS: 1354 BusTypes_type BusTypes;
1417 return rebuild_lun_table(h, 0, 1); 1355
1356 if (!argp)
1357 return -EINVAL;
1358 BusTypes = readl(&h->cfgtable->BusTypes);
1359 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
1360 return -EFAULT;
1361 return 0;
1362}
1418 1363
1419 case CCISS_GETLUNINFO:{ 1364static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
1420 LogvolInfo_struct luninfo; 1365{
1366 FirmwareVer_type firmware;
1421 1367
1422 memcpy(&luninfo.LunID, drv->LunID, 1368 if (!argp)
1423 sizeof(luninfo.LunID)); 1369 return -EINVAL;
1424 luninfo.num_opens = drv->usage_count; 1370 memcpy(firmware, h->firm_ver, 4);
1425 luninfo.num_parts = 0; 1371
1426 if (copy_to_user(argp, &luninfo, 1372 if (copy_to_user
1427 sizeof(LogvolInfo_struct))) 1373 (argp, firmware, sizeof(FirmwareVer_type)))
1428 return -EFAULT; 1374 return -EFAULT;
1429 return 0; 1375 return 0;
1376}
1377
1378static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
1379{
1380 DriverVer_type DriverVer = DRIVER_VERSION;
1381
1382 if (!argp)
1383 return -EINVAL;
1384 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
1385 return -EFAULT;
1386 return 0;
1387}
1388
1389static int cciss_getluninfo(ctlr_info_t *h,
1390 struct gendisk *disk, void __user *argp)
1391{
1392 LogvolInfo_struct luninfo;
1393 drive_info_struct *drv = get_drv(disk);
1394
1395 if (!argp)
1396 return -EINVAL;
1397 memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
1398 luninfo.num_opens = drv->usage_count;
1399 luninfo.num_parts = 0;
1400 if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
1401 return -EFAULT;
1402 return 0;
1403}
1404
1405static int cciss_passthru(ctlr_info_t *h, void __user *argp)
1406{
1407 IOCTL_Command_struct iocommand;
1408 CommandList_struct *c;
1409 char *buff = NULL;
1410 u64bit temp64;
1411 DECLARE_COMPLETION_ONSTACK(wait);
1412
1413 if (!argp)
1414 return -EINVAL;
1415
1416 if (!capable(CAP_SYS_RAWIO))
1417 return -EPERM;
1418
1419 if (copy_from_user
1420 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
1421 return -EFAULT;
1422 if ((iocommand.buf_size < 1) &&
1423 (iocommand.Request.Type.Direction != XFER_NONE)) {
1424 return -EINVAL;
1425 }
1426 if (iocommand.buf_size > 0) {
1427 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
1428 if (buff == NULL)
1429 return -EFAULT;
1430 }
1431 if (iocommand.Request.Type.Direction == XFER_WRITE) {
1432 /* Copy the data into the buffer we created */
1433 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
1434 kfree(buff);
1435 return -EFAULT;
1430 } 1436 }
1431 case CCISS_PASSTHRU: 1437 } else {
1432 { 1438 memset(buff, 0, iocommand.buf_size);
1433 IOCTL_Command_struct iocommand; 1439 }
1434 CommandList_struct *c; 1440 c = cmd_special_alloc(h);
1435 char *buff = NULL; 1441 if (!c) {
1436 u64bit temp64; 1442 kfree(buff);
1437 DECLARE_COMPLETION_ONSTACK(wait); 1443 return -ENOMEM;
1438 1444 }
1439 if (!arg) 1445 /* Fill in the command type */
1440 return -EINVAL; 1446 c->cmd_type = CMD_IOCTL_PEND;
1441 1447 /* Fill in Command Header */
1442 if (!capable(CAP_SYS_RAWIO)) 1448 c->Header.ReplyQueue = 0; /* unused in simple mode */
1443 return -EPERM; 1449 if (iocommand.buf_size > 0) { /* buffer to fill */
1444 1450 c->Header.SGList = 1;
1445 if (copy_from_user 1451 c->Header.SGTotal = 1;
1446 (&iocommand, argp, sizeof(IOCTL_Command_struct))) 1452 } else { /* no buffers to fill */
1447 return -EFAULT; 1453 c->Header.SGList = 0;
1448 if ((iocommand.buf_size < 1) && 1454 c->Header.SGTotal = 0;
1449 (iocommand.Request.Type.Direction != XFER_NONE)) { 1455 }
1450 return -EINVAL; 1456 c->Header.LUN = iocommand.LUN_info;
1451 } 1457 /* use the kernel address the cmd block for tag */
1452#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */ 1458 c->Header.Tag.lower = c->busaddr;
1453 /* Check kmalloc limits */
1454 if (iocommand.buf_size > 128000)
1455 return -EINVAL;
1456#endif
1457 if (iocommand.buf_size > 0) {
1458 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
1459 if (buff == NULL)
1460 return -EFAULT;
1461 }
1462 if (iocommand.Request.Type.Direction == XFER_WRITE) {
1463 /* Copy the data into the buffer we created */
1464 if (copy_from_user
1465 (buff, iocommand.buf, iocommand.buf_size)) {
1466 kfree(buff);
1467 return -EFAULT;
1468 }
1469 } else {
1470 memset(buff, 0, iocommand.buf_size);
1471 }
1472 c = cmd_special_alloc(h);
1473 if (!c) {
1474 kfree(buff);
1475 return -ENOMEM;
1476 }
1477 /* Fill in the command type */
1478 c->cmd_type = CMD_IOCTL_PEND;
1479 /* Fill in Command Header */
1480 c->Header.ReplyQueue = 0; /* unused in simple mode */
1481 if (iocommand.buf_size > 0) /* buffer to fill */
1482 {
1483 c->Header.SGList = 1;
1484 c->Header.SGTotal = 1;
1485 } else /* no buffers to fill */
1486 {
1487 c->Header.SGList = 0;
1488 c->Header.SGTotal = 0;
1489 }
1490 c->Header.LUN = iocommand.LUN_info;
1491 /* use the kernel address the cmd block for tag */
1492 c->Header.Tag.lower = c->busaddr;
1493
1494 /* Fill in Request block */
1495 c->Request = iocommand.Request;
1496
1497 /* Fill in the scatter gather information */
1498 if (iocommand.buf_size > 0) {
1499 temp64.val = pci_map_single(h->pdev, buff,
1500 iocommand.buf_size,
1501 PCI_DMA_BIDIRECTIONAL);
1502 c->SG[0].Addr.lower = temp64.val32.lower;
1503 c->SG[0].Addr.upper = temp64.val32.upper;
1504 c->SG[0].Len = iocommand.buf_size;
1505 c->SG[0].Ext = 0; /* we are not chaining */
1506 }
1507 c->waiting = &wait;
1508 1459
1509 enqueue_cmd_and_start_io(h, c); 1460 /* Fill in Request block */
1510 wait_for_completion(&wait); 1461 c->Request = iocommand.Request;
1511 1462
1512 /* unlock the buffers from DMA */ 1463 /* Fill in the scatter gather information */
1513 temp64.val32.lower = c->SG[0].Addr.lower; 1464 if (iocommand.buf_size > 0) {
1514 temp64.val32.upper = c->SG[0].Addr.upper; 1465 temp64.val = pci_map_single(h->pdev, buff,
1515 pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, 1466 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
1516 iocommand.buf_size, 1467 c->SG[0].Addr.lower = temp64.val32.lower;
1517 PCI_DMA_BIDIRECTIONAL); 1468 c->SG[0].Addr.upper = temp64.val32.upper;
1469 c->SG[0].Len = iocommand.buf_size;
1470 c->SG[0].Ext = 0; /* we are not chaining */
1471 }
1472 c->waiting = &wait;
1518 1473
1519 check_ioctl_unit_attention(h, c); 1474 enqueue_cmd_and_start_io(h, c);
1475 wait_for_completion(&wait);
1520 1476
1521 /* Copy the error information out */ 1477 /* unlock the buffers from DMA */
1522 iocommand.error_info = *(c->err_info); 1478 temp64.val32.lower = c->SG[0].Addr.lower;
1523 if (copy_to_user 1479 temp64.val32.upper = c->SG[0].Addr.upper;
1524 (argp, &iocommand, sizeof(IOCTL_Command_struct))) { 1480 pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
1525 kfree(buff); 1481 PCI_DMA_BIDIRECTIONAL);
1526 cmd_special_free(h, c); 1482 check_ioctl_unit_attention(h, c);
1527 return -EFAULT; 1483
1528 } 1484 /* Copy the error information out */
1485 iocommand.error_info = *(c->err_info);
1486 if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1487 kfree(buff);
1488 cmd_special_free(h, c);
1489 return -EFAULT;
1490 }
1529 1491
1530 if (iocommand.Request.Type.Direction == XFER_READ) { 1492 if (iocommand.Request.Type.Direction == XFER_READ) {
1531 /* Copy the data out of the buffer we created */ 1493 /* Copy the data out of the buffer we created */
1532 if (copy_to_user 1494 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
1533 (iocommand.buf, buff, iocommand.buf_size)) {
1534 kfree(buff);
1535 cmd_special_free(h, c);
1536 return -EFAULT;
1537 }
1538 }
1539 kfree(buff); 1495 kfree(buff);
1540 cmd_special_free(h, c); 1496 cmd_special_free(h, c);
1541 return 0; 1497 return -EFAULT;
1542 } 1498 }
1543 case CCISS_BIG_PASSTHRU:{ 1499 }
1544 BIG_IOCTL_Command_struct *ioc; 1500 kfree(buff);
1545 CommandList_struct *c; 1501 cmd_special_free(h, c);
1546 unsigned char **buff = NULL; 1502 return 0;
1547 int *buff_size = NULL; 1503}
1548 u64bit temp64; 1504
1549 BYTE sg_used = 0; 1505static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
1550 int status = 0; 1506{
1551 int i; 1507 BIG_IOCTL_Command_struct *ioc;
1552 DECLARE_COMPLETION_ONSTACK(wait); 1508 CommandList_struct *c;
1553 __u32 left; 1509 unsigned char **buff = NULL;
1554 __u32 sz; 1510 int *buff_size = NULL;
1555 BYTE __user *data_ptr; 1511 u64bit temp64;
1556 1512 BYTE sg_used = 0;
1557 if (!arg) 1513 int status = 0;
1558 return -EINVAL; 1514 int i;
1559 if (!capable(CAP_SYS_RAWIO)) 1515 DECLARE_COMPLETION_ONSTACK(wait);
1560 return -EPERM; 1516 __u32 left;
1561 ioc = (BIG_IOCTL_Command_struct *) 1517 __u32 sz;
1562 kmalloc(sizeof(*ioc), GFP_KERNEL); 1518 BYTE __user *data_ptr;
1563 if (!ioc) { 1519
1564 status = -ENOMEM; 1520 if (!argp)
1565 goto cleanup1; 1521 return -EINVAL;
1566 } 1522 if (!capable(CAP_SYS_RAWIO))
1567 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 1523 return -EPERM;
1524 ioc = (BIG_IOCTL_Command_struct *)
1525 kmalloc(sizeof(*ioc), GFP_KERNEL);
1526 if (!ioc) {
1527 status = -ENOMEM;
1528 goto cleanup1;
1529 }
1530 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1531 status = -EFAULT;
1532 goto cleanup1;
1533 }
1534 if ((ioc->buf_size < 1) &&
1535 (ioc->Request.Type.Direction != XFER_NONE)) {
1536 status = -EINVAL;
1537 goto cleanup1;
1538 }
1539 /* Check kmalloc limits using all SGs */
1540 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1541 status = -EINVAL;
1542 goto cleanup1;
1543 }
1544 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1545 status = -EINVAL;
1546 goto cleanup1;
1547 }
1548 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1549 if (!buff) {
1550 status = -ENOMEM;
1551 goto cleanup1;
1552 }
1553 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
1554 if (!buff_size) {
1555 status = -ENOMEM;
1556 goto cleanup1;
1557 }
1558 left = ioc->buf_size;
1559 data_ptr = ioc->buf;
1560 while (left) {
1561 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1562 buff_size[sg_used] = sz;
1563 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1564 if (buff[sg_used] == NULL) {
1565 status = -ENOMEM;
1566 goto cleanup1;
1567 }
1568 if (ioc->Request.Type.Direction == XFER_WRITE) {
1569 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1568 status = -EFAULT; 1570 status = -EFAULT;
1569 goto cleanup1; 1571 goto cleanup1;
1570 } 1572 }
1571 if ((ioc->buf_size < 1) && 1573 } else {
1572 (ioc->Request.Type.Direction != XFER_NONE)) { 1574 memset(buff[sg_used], 0, sz);
1573 status = -EINVAL; 1575 }
1574 goto cleanup1; 1576 left -= sz;
1575 } 1577 data_ptr += sz;
1576 /* Check kmalloc limits using all SGs */ 1578 sg_used++;
1577 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 1579 }
1578 status = -EINVAL; 1580 c = cmd_special_alloc(h);
1579 goto cleanup1; 1581 if (!c) {
1580 } 1582 status = -ENOMEM;
1581 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 1583 goto cleanup1;
1582 status = -EINVAL; 1584 }
1583 goto cleanup1; 1585 c->cmd_type = CMD_IOCTL_PEND;
1584 } 1586 c->Header.ReplyQueue = 0;
1585 buff = 1587 c->Header.SGList = sg_used;
1586 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 1588 c->Header.SGTotal = sg_used;
1587 if (!buff) { 1589 c->Header.LUN = ioc->LUN_info;
1588 status = -ENOMEM; 1590 c->Header.Tag.lower = c->busaddr;
1589 goto cleanup1;
1590 }
1591 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1592 GFP_KERNEL);
1593 if (!buff_size) {
1594 status = -ENOMEM;
1595 goto cleanup1;
1596 }
1597 left = ioc->buf_size;
1598 data_ptr = ioc->buf;
1599 while (left) {
1600 sz = (left >
1601 ioc->malloc_size) ? ioc->
1602 malloc_size : left;
1603 buff_size[sg_used] = sz;
1604 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1605 if (buff[sg_used] == NULL) {
1606 status = -ENOMEM;
1607 goto cleanup1;
1608 }
1609 if (ioc->Request.Type.Direction == XFER_WRITE) {
1610 if (copy_from_user
1611 (buff[sg_used], data_ptr, sz)) {
1612 status = -EFAULT;
1613 goto cleanup1;
1614 }
1615 } else {
1616 memset(buff[sg_used], 0, sz);
1617 }
1618 left -= sz;
1619 data_ptr += sz;
1620 sg_used++;
1621 }
1622 c = cmd_special_alloc(h);
1623 if (!c) {
1624 status = -ENOMEM;
1625 goto cleanup1;
1626 }
1627 c->cmd_type = CMD_IOCTL_PEND;
1628 c->Header.ReplyQueue = 0;
1629 1591
1630 if (ioc->buf_size > 0) { 1592 c->Request = ioc->Request;
1631 c->Header.SGList = sg_used; 1593 for (i = 0; i < sg_used; i++) {
1632 c->Header.SGTotal = sg_used; 1594 temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
1633 } else { 1595 PCI_DMA_BIDIRECTIONAL);
1634 c->Header.SGList = 0; 1596 c->SG[i].Addr.lower = temp64.val32.lower;
1635 c->Header.SGTotal = 0; 1597 c->SG[i].Addr.upper = temp64.val32.upper;
1636 } 1598 c->SG[i].Len = buff_size[i];
1637 c->Header.LUN = ioc->LUN_info; 1599 c->SG[i].Ext = 0; /* we are not chaining */
1638 c->Header.Tag.lower = c->busaddr; 1600 }
1639 1601 c->waiting = &wait;
1640 c->Request = ioc->Request; 1602 enqueue_cmd_and_start_io(h, c);
1641 if (ioc->buf_size > 0) { 1603 wait_for_completion(&wait);
1642 for (i = 0; i < sg_used; i++) { 1604 /* unlock the buffers from DMA */
1643 temp64.val = 1605 for (i = 0; i < sg_used; i++) {
1644 pci_map_single(h->pdev, buff[i], 1606 temp64.val32.lower = c->SG[i].Addr.lower;
1645 buff_size[i], 1607 temp64.val32.upper = c->SG[i].Addr.upper;
1646 PCI_DMA_BIDIRECTIONAL); 1608 pci_unmap_single(h->pdev,
1647 c->SG[i].Addr.lower = 1609 (dma_addr_t) temp64.val, buff_size[i],
1648 temp64.val32.lower; 1610 PCI_DMA_BIDIRECTIONAL);
1649 c->SG[i].Addr.upper = 1611 }
1650 temp64.val32.upper; 1612 check_ioctl_unit_attention(h, c);
1651 c->SG[i].Len = buff_size[i]; 1613 /* Copy the error information out */
1652 c->SG[i].Ext = 0; /* we are not chaining */ 1614 ioc->error_info = *(c->err_info);
1653 } 1615 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1654 } 1616 cmd_special_free(h, c);
1655 c->waiting = &wait; 1617 status = -EFAULT;
1656 enqueue_cmd_and_start_io(h, c); 1618 goto cleanup1;
1657 wait_for_completion(&wait); 1619 }
1658 /* unlock the buffers from DMA */ 1620 if (ioc->Request.Type.Direction == XFER_READ) {
1659 for (i = 0; i < sg_used; i++) { 1621 /* Copy the data out of the buffer we created */
1660 temp64.val32.lower = c->SG[i].Addr.lower; 1622 BYTE __user *ptr = ioc->buf;
1661 temp64.val32.upper = c->SG[i].Addr.upper; 1623 for (i = 0; i < sg_used; i++) {
1662 pci_unmap_single(h->pdev, 1624 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1663 (dma_addr_t) temp64.val, buff_size[i],
1664 PCI_DMA_BIDIRECTIONAL);
1665 }
1666 check_ioctl_unit_attention(h, c);
1667 /* Copy the error information out */
1668 ioc->error_info = *(c->err_info);
1669 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1670 cmd_special_free(h, c); 1625 cmd_special_free(h, c);
1671 status = -EFAULT; 1626 status = -EFAULT;
1672 goto cleanup1; 1627 goto cleanup1;
1673 } 1628 }
1674 if (ioc->Request.Type.Direction == XFER_READ) { 1629 ptr += buff_size[i];
1675 /* Copy the data out of the buffer we created */
1676 BYTE __user *ptr = ioc->buf;
1677 for (i = 0; i < sg_used; i++) {
1678 if (copy_to_user
1679 (ptr, buff[i], buff_size[i])) {
1680 cmd_special_free(h, c);
1681 status = -EFAULT;
1682 goto cleanup1;
1683 }
1684 ptr += buff_size[i];
1685 }
1686 }
1687 cmd_special_free(h, c);
1688 status = 0;
1689 cleanup1:
1690 if (buff) {
1691 for (i = 0; i < sg_used; i++)
1692 kfree(buff[i]);
1693 kfree(buff);
1694 }
1695 kfree(buff_size);
1696 kfree(ioc);
1697 return status;
1698 } 1630 }
1631 }
1632 cmd_special_free(h, c);
1633 status = 0;
1634cleanup1:
1635 if (buff) {
1636 for (i = 0; i < sg_used; i++)
1637 kfree(buff[i]);
1638 kfree(buff);
1639 }
1640 kfree(buff_size);
1641 kfree(ioc);
1642 return status;
1643}
1644
1645static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1646 unsigned int cmd, unsigned long arg)
1647{
1648 struct gendisk *disk = bdev->bd_disk;
1649 ctlr_info_t *h = get_host(disk);
1650 void __user *argp = (void __user *)arg;
1651
1652 dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
1653 cmd, arg);
1654 switch (cmd) {
1655 case CCISS_GETPCIINFO:
1656 return cciss_getpciinfo(h, argp);
1657 case CCISS_GETINTINFO:
1658 return cciss_getintinfo(h, argp);
1659 case CCISS_SETINTINFO:
1660 return cciss_setintinfo(h, argp);
1661 case CCISS_GETNODENAME:
1662 return cciss_getnodename(h, argp);
1663 case CCISS_SETNODENAME:
1664 return cciss_setnodename(h, argp);
1665 case CCISS_GETHEARTBEAT:
1666 return cciss_getheartbeat(h, argp);
1667 case CCISS_GETBUSTYPES:
1668 return cciss_getbustypes(h, argp);
1669 case CCISS_GETFIRMVER:
1670 return cciss_getfirmver(h, argp);
1671 case CCISS_GETDRIVVER:
1672 return cciss_getdrivver(h, argp);
1673 case CCISS_DEREGDISK:
1674 case CCISS_REGNEWD:
1675 case CCISS_REVALIDVOLS:
1676 return rebuild_lun_table(h, 0, 1);
1677 case CCISS_GETLUNINFO:
1678 return cciss_getluninfo(h, disk, argp);
1679 case CCISS_PASSTHRU:
1680 return cciss_passthru(h, argp);
1681 case CCISS_BIG_PASSTHRU:
1682 return cciss_bigpassthru(h, argp);
1699 1683
1700 /* scsi_cmd_ioctl handles these, below, though some are not */ 1684 /* scsi_cmd_ioctl handles these, below, though some are not */
1701 /* very meaningful for cciss. SG_IO is the main one people want. */ 1685 /* very meaningful for cciss. SG_IO is the main one people want. */
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 9400845d602e..ac04ef97eac2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -965,29 +965,30 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
965 * ok, (capacity & 7) != 0 sometimes, but who cares... 965 * ok, (capacity & 7) != 0 sometimes, but who cares...
966 * we count rs_{total,left} in bits, not sectors. 966 * we count rs_{total,left} in bits, not sectors.
967 */ 967 */
968 spin_lock_irqsave(&mdev->al_lock, flags);
969 count = drbd_bm_clear_bits(mdev, sbnr, ebnr); 968 count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
970 if (count) { 969 if (count && get_ldev(mdev)) {
971 /* we need the lock for drbd_try_clear_on_disk_bm */ 970 unsigned long now = jiffies;
972 if (jiffies - mdev->rs_mark_time > HZ*10) { 971 unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
973 /* should be rolling marks, 972 int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
974 * but we estimate only anyways. */ 973 if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
975 if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) && 974 unsigned long tw = drbd_bm_total_weight(mdev);
975 if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
976 mdev->state.conn != C_PAUSED_SYNC_T && 976 mdev->state.conn != C_PAUSED_SYNC_T &&
977 mdev->state.conn != C_PAUSED_SYNC_S) { 977 mdev->state.conn != C_PAUSED_SYNC_S) {
978 mdev->rs_mark_time = jiffies; 978 mdev->rs_mark_time[next] = now;
979 mdev->rs_mark_left = drbd_bm_total_weight(mdev); 979 mdev->rs_mark_left[next] = tw;
980 mdev->rs_last_mark = next;
980 } 981 }
981 } 982 }
982 if (get_ldev(mdev)) { 983 spin_lock_irqsave(&mdev->al_lock, flags);
983 drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); 984 drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
984 put_ldev(mdev); 985 spin_unlock_irqrestore(&mdev->al_lock, flags);
985 } 986
986 /* just wake_up unconditional now, various lc_chaged(), 987 /* just wake_up unconditional now, various lc_chaged(),
987 * lc_put() in drbd_try_clear_on_disk_bm(). */ 988 * lc_put() in drbd_try_clear_on_disk_bm(). */
988 wake_up = 1; 989 wake_up = 1;
990 put_ldev(mdev);
989 } 991 }
990 spin_unlock_irqrestore(&mdev->al_lock, flags);
991 if (wake_up) 992 if (wake_up)
992 wake_up(&mdev->al_wait); 993 wake_up(&mdev->al_wait);
993} 994}
@@ -1118,7 +1119,7 @@ static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
1118 * @mdev: DRBD device. 1119 * @mdev: DRBD device.
1119 * @sector: The sector number. 1120 * @sector: The sector number.
1120 * 1121 *
1121 * This functions sleeps on al_wait. Returns 1 on success, 0 if interrupted. 1122 * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
1122 */ 1123 */
1123int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) 1124int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1124{ 1125{
@@ -1129,10 +1130,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1129 sig = wait_event_interruptible(mdev->al_wait, 1130 sig = wait_event_interruptible(mdev->al_wait,
1130 (bm_ext = _bme_get(mdev, enr))); 1131 (bm_ext = _bme_get(mdev, enr)));
1131 if (sig) 1132 if (sig)
1132 return 0; 1133 return -EINTR;
1133 1134
1134 if (test_bit(BME_LOCKED, &bm_ext->flags)) 1135 if (test_bit(BME_LOCKED, &bm_ext->flags))
1135 return 1; 1136 return 0;
1136 1137
1137 for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { 1138 for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
1138 sig = wait_event_interruptible(mdev->al_wait, 1139 sig = wait_event_interruptible(mdev->al_wait,
@@ -1145,13 +1146,11 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1145 wake_up(&mdev->al_wait); 1146 wake_up(&mdev->al_wait);
1146 } 1147 }
1147 spin_unlock_irq(&mdev->al_lock); 1148 spin_unlock_irq(&mdev->al_lock);
1148 return 0; 1149 return -EINTR;
1149 } 1150 }
1150 } 1151 }
1151
1152 set_bit(BME_LOCKED, &bm_ext->flags); 1152 set_bit(BME_LOCKED, &bm_ext->flags);
1153 1153 return 0;
1154 return 1;
1155} 1154}
1156 1155
1157/** 1156/**
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index e3f88d6e1412..fd42832f785b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -569,7 +569,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
569 * 569 *
570 * maybe bm_set should be atomic_t ? 570 * maybe bm_set should be atomic_t ?
571 */ 571 */
572static unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev) 572unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
573{ 573{
574 struct drbd_bitmap *b = mdev->bitmap; 574 struct drbd_bitmap *b = mdev->bitmap;
575 unsigned long s; 575 unsigned long s;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 352441b0f92f..9bdcf4393c0a 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -337,13 +337,25 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
337 * NOTE that the payload starts at a long aligned offset, 337 * NOTE that the payload starts at a long aligned offset,
338 * regardless of 32 or 64 bit arch! 338 * regardless of 32 or 64 bit arch!
339 */ 339 */
340struct p_header { 340struct p_header80 {
341 u32 magic; 341 u32 magic;
342 u16 command; 342 u16 command;
343 u16 length; /* bytes of data after this header */ 343 u16 length; /* bytes of data after this header */
344 u8 payload[0]; 344 u8 payload[0];
345} __packed; 345} __packed;
346/* 8 bytes. packet FIXED for the next century! */ 346
347/* Header for big packets, Used for data packets exceeding 64kB */
348struct p_header95 {
349 u16 magic; /* use DRBD_MAGIC_BIG here */
350 u16 command;
351 u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */
352 u8 payload[0];
353} __packed;
354
355union p_header {
356 struct p_header80 h80;
357 struct p_header95 h95;
358};
347 359
348/* 360/*
349 * short commands, packets without payload, plain p_header: 361 * short commands, packets without payload, plain p_header:
@@ -362,12 +374,16 @@ struct p_header {
362 */ 374 */
363 375
364/* these defines must not be changed without changing the protocol version */ 376/* these defines must not be changed without changing the protocol version */
365#define DP_HARDBARRIER 1 377#define DP_HARDBARRIER 1 /* depricated */
366#define DP_RW_SYNC 2 378#define DP_RW_SYNC 2 /* equals REQ_SYNC */
367#define DP_MAY_SET_IN_SYNC 4 379#define DP_MAY_SET_IN_SYNC 4
380#define DP_UNPLUG 8 /* equals REQ_UNPLUG */
381#define DP_FUA 16 /* equals REQ_FUA */
382#define DP_FLUSH 32 /* equals REQ_FLUSH */
383#define DP_DISCARD 64 /* equals REQ_DISCARD */
368 384
369struct p_data { 385struct p_data {
370 struct p_header head; 386 union p_header head;
371 u64 sector; /* 64 bits sector number */ 387 u64 sector; /* 64 bits sector number */
372 u64 block_id; /* to identify the request in protocol B&C */ 388 u64 block_id; /* to identify the request in protocol B&C */
373 u32 seq_num; 389 u32 seq_num;
@@ -383,7 +399,7 @@ struct p_data {
383 * P_DATA_REQUEST, P_RS_DATA_REQUEST 399 * P_DATA_REQUEST, P_RS_DATA_REQUEST
384 */ 400 */
385struct p_block_ack { 401struct p_block_ack {
386 struct p_header head; 402 struct p_header80 head;
387 u64 sector; 403 u64 sector;
388 u64 block_id; 404 u64 block_id;
389 u32 blksize; 405 u32 blksize;
@@ -392,7 +408,7 @@ struct p_block_ack {
392 408
393 409
394struct p_block_req { 410struct p_block_req {
395 struct p_header head; 411 struct p_header80 head;
396 u64 sector; 412 u64 sector;
397 u64 block_id; 413 u64 block_id;
398 u32 blksize; 414 u32 blksize;
@@ -409,7 +425,7 @@ struct p_block_req {
409 */ 425 */
410 426
411struct p_handshake { 427struct p_handshake {
412 struct p_header head; /* 8 bytes */ 428 struct p_header80 head; /* 8 bytes */
413 u32 protocol_min; 429 u32 protocol_min;
414 u32 feature_flags; 430 u32 feature_flags;
415 u32 protocol_max; 431 u32 protocol_max;
@@ -424,19 +440,19 @@ struct p_handshake {
424/* 80 bytes, FIXED for the next century */ 440/* 80 bytes, FIXED for the next century */
425 441
426struct p_barrier { 442struct p_barrier {
427 struct p_header head; 443 struct p_header80 head;
428 u32 barrier; /* barrier number _handle_ only */ 444 u32 barrier; /* barrier number _handle_ only */
429 u32 pad; /* to multiple of 8 Byte */ 445 u32 pad; /* to multiple of 8 Byte */
430} __packed; 446} __packed;
431 447
432struct p_barrier_ack { 448struct p_barrier_ack {
433 struct p_header head; 449 struct p_header80 head;
434 u32 barrier; 450 u32 barrier;
435 u32 set_size; 451 u32 set_size;
436} __packed; 452} __packed;
437 453
438struct p_rs_param { 454struct p_rs_param {
439 struct p_header head; 455 struct p_header80 head;
440 u32 rate; 456 u32 rate;
441 457
442 /* Since protocol version 88 and higher. */ 458 /* Since protocol version 88 and higher. */
@@ -444,20 +460,31 @@ struct p_rs_param {
444} __packed; 460} __packed;
445 461
446struct p_rs_param_89 { 462struct p_rs_param_89 {
447 struct p_header head; 463 struct p_header80 head;
448 u32 rate; 464 u32 rate;
449 /* protocol version 89: */ 465 /* protocol version 89: */
450 char verify_alg[SHARED_SECRET_MAX]; 466 char verify_alg[SHARED_SECRET_MAX];
451 char csums_alg[SHARED_SECRET_MAX]; 467 char csums_alg[SHARED_SECRET_MAX];
452} __packed; 468} __packed;
453 469
470struct p_rs_param_95 {
471 struct p_header80 head;
472 u32 rate;
473 char verify_alg[SHARED_SECRET_MAX];
474 char csums_alg[SHARED_SECRET_MAX];
475 u32 c_plan_ahead;
476 u32 c_delay_target;
477 u32 c_fill_target;
478 u32 c_max_rate;
479} __packed;
480
454enum drbd_conn_flags { 481enum drbd_conn_flags {
455 CF_WANT_LOSE = 1, 482 CF_WANT_LOSE = 1,
456 CF_DRY_RUN = 2, 483 CF_DRY_RUN = 2,
457}; 484};
458 485
459struct p_protocol { 486struct p_protocol {
460 struct p_header head; 487 struct p_header80 head;
461 u32 protocol; 488 u32 protocol;
462 u32 after_sb_0p; 489 u32 after_sb_0p;
463 u32 after_sb_1p; 490 u32 after_sb_1p;
@@ -471,17 +498,17 @@ struct p_protocol {
471} __packed; 498} __packed;
472 499
473struct p_uuids { 500struct p_uuids {
474 struct p_header head; 501 struct p_header80 head;
475 u64 uuid[UI_EXTENDED_SIZE]; 502 u64 uuid[UI_EXTENDED_SIZE];
476} __packed; 503} __packed;
477 504
478struct p_rs_uuid { 505struct p_rs_uuid {
479 struct p_header head; 506 struct p_header80 head;
480 u64 uuid; 507 u64 uuid;
481} __packed; 508} __packed;
482 509
483struct p_sizes { 510struct p_sizes {
484 struct p_header head; 511 struct p_header80 head;
485 u64 d_size; /* size of disk */ 512 u64 d_size; /* size of disk */
486 u64 u_size; /* user requested size */ 513 u64 u_size; /* user requested size */
487 u64 c_size; /* current exported size */ 514 u64 c_size; /* current exported size */
@@ -491,18 +518,18 @@ struct p_sizes {
491} __packed; 518} __packed;
492 519
493struct p_state { 520struct p_state {
494 struct p_header head; 521 struct p_header80 head;
495 u32 state; 522 u32 state;
496} __packed; 523} __packed;
497 524
498struct p_req_state { 525struct p_req_state {
499 struct p_header head; 526 struct p_header80 head;
500 u32 mask; 527 u32 mask;
501 u32 val; 528 u32 val;
502} __packed; 529} __packed;
503 530
504struct p_req_state_reply { 531struct p_req_state_reply {
505 struct p_header head; 532 struct p_header80 head;
506 u32 retcode; 533 u32 retcode;
507} __packed; 534} __packed;
508 535
@@ -517,7 +544,7 @@ struct p_drbd06_param {
517} __packed; 544} __packed;
518 545
519struct p_discard { 546struct p_discard {
520 struct p_header head; 547 struct p_header80 head;
521 u64 block_id; 548 u64 block_id;
522 u32 seq_num; 549 u32 seq_num;
523 u32 pad; 550 u32 pad;
@@ -533,7 +560,7 @@ enum drbd_bitmap_code {
533}; 560};
534 561
535struct p_compressed_bm { 562struct p_compressed_bm {
536 struct p_header head; 563 struct p_header80 head;
537 /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code 564 /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
538 * (encoding & 0x80): polarity (set/unset) of first runlength 565 * (encoding & 0x80): polarity (set/unset) of first runlength
539 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits 566 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
@@ -544,10 +571,10 @@ struct p_compressed_bm {
544 u8 code[0]; 571 u8 code[0];
545} __packed; 572} __packed;
546 573
547struct p_delay_probe { 574struct p_delay_probe93 {
548 struct p_header head; 575 struct p_header80 head;
549 u32 seq_num; /* sequence number to match the two probe packets */ 576 u32 seq_num; /* sequence number to match the two probe packets */
550 u32 offset; /* usecs the probe got sent after the reference time point */ 577 u32 offset; /* usecs the probe got sent after the reference time point */
551} __packed; 578} __packed;
552 579
553/* DCBP: Drbd Compressed Bitmap Packet ... */ 580/* DCBP: Drbd Compressed Bitmap Packet ... */
@@ -594,7 +621,7 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
594 * so we need to use the fixed size 4KiB page size 621 * so we need to use the fixed size 4KiB page size
595 * most architechtures have used for a long time. 622 * most architechtures have used for a long time.
596 */ 623 */
597#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header)) 624#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
598#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long)) 625#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
599#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm)) 626#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
600#if (PAGE_SIZE < 4096) 627#if (PAGE_SIZE < 4096)
@@ -603,13 +630,14 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
603#endif 630#endif
604 631
605union p_polymorph { 632union p_polymorph {
606 struct p_header header; 633 union p_header header;
607 struct p_handshake handshake; 634 struct p_handshake handshake;
608 struct p_data data; 635 struct p_data data;
609 struct p_block_ack block_ack; 636 struct p_block_ack block_ack;
610 struct p_barrier barrier; 637 struct p_barrier barrier;
611 struct p_barrier_ack barrier_ack; 638 struct p_barrier_ack barrier_ack;
612 struct p_rs_param_89 rs_param_89; 639 struct p_rs_param_89 rs_param_89;
640 struct p_rs_param_95 rs_param_95;
613 struct p_protocol protocol; 641 struct p_protocol protocol;
614 struct p_sizes sizes; 642 struct p_sizes sizes;
615 struct p_uuids uuids; 643 struct p_uuids uuids;
@@ -617,6 +645,8 @@ union p_polymorph {
617 struct p_req_state req_state; 645 struct p_req_state req_state;
618 struct p_req_state_reply req_state_reply; 646 struct p_req_state_reply req_state_reply;
619 struct p_block_req block_req; 647 struct p_block_req block_req;
648 struct p_delay_probe93 delay_probe93;
649 struct p_rs_uuid rs_uuid;
620} __packed; 650} __packed;
621 651
622/**********************************************************************/ 652/**********************************************************************/
@@ -697,7 +727,7 @@ struct drbd_tl_epoch {
697 struct list_head requests; /* requests before */ 727 struct list_head requests; /* requests before */
698 struct drbd_tl_epoch *next; /* pointer to the next barrier */ 728 struct drbd_tl_epoch *next; /* pointer to the next barrier */
699 unsigned int br_number; /* the barriers identifier. */ 729 unsigned int br_number; /* the barriers identifier. */
700 int n_req; /* number of requests attached before this barrier */ 730 int n_writes; /* number of requests attached before this barrier */
701}; 731};
702 732
703struct drbd_request; 733struct drbd_request;
@@ -747,7 +777,7 @@ struct digest_info {
747struct drbd_epoch_entry { 777struct drbd_epoch_entry {
748 struct drbd_work w; 778 struct drbd_work w;
749 struct hlist_node colision; 779 struct hlist_node colision;
750 struct drbd_epoch *epoch; 780 struct drbd_epoch *epoch; /* for writes */
751 struct drbd_conf *mdev; 781 struct drbd_conf *mdev;
752 struct page *pages; 782 struct page *pages;
753 atomic_t pending_bios; 783 atomic_t pending_bios;
@@ -755,7 +785,10 @@ struct drbd_epoch_entry {
755 /* see comments on ee flag bits below */ 785 /* see comments on ee flag bits below */
756 unsigned long flags; 786 unsigned long flags;
757 sector_t sector; 787 sector_t sector;
758 u64 block_id; 788 union {
789 u64 block_id;
790 struct digest_info *digest;
791 };
759}; 792};
760 793
761/* ee flag bits. 794/* ee flag bits.
@@ -781,12 +814,16 @@ enum {
781 * if any of those fail, we set this flag atomically 814 * if any of those fail, we set this flag atomically
782 * from the endio callback */ 815 * from the endio callback */
783 __EE_WAS_ERROR, 816 __EE_WAS_ERROR,
817
818 /* This ee has a pointer to a digest instead of a block id */
819 __EE_HAS_DIGEST,
784}; 820};
785#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) 821#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
786#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) 822#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
787#define EE_IS_BARRIER (1<<__EE_IS_BARRIER) 823#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
788#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) 824#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
789#define EE_WAS_ERROR (1<<__EE_WAS_ERROR) 825#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
826#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
790 827
791/* global flag bits */ 828/* global flag bits */
792enum { 829enum {
@@ -794,7 +831,6 @@ enum {
794 SIGNAL_ASENDER, /* whether asender wants to be interrupted */ 831 SIGNAL_ASENDER, /* whether asender wants to be interrupted */
795 SEND_PING, /* whether asender should send a ping asap */ 832 SEND_PING, /* whether asender should send a ping asap */
796 833
797 STOP_SYNC_TIMER, /* tell timer to cancel itself */
798 UNPLUG_QUEUED, /* only relevant with kernel 2.4 */ 834 UNPLUG_QUEUED, /* only relevant with kernel 2.4 */
799 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ 835 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
800 MD_DIRTY, /* current uuids and flags not yet on disk */ 836 MD_DIRTY, /* current uuids and flags not yet on disk */
@@ -816,6 +852,7 @@ enum {
816 BITMAP_IO, /* suspend application io; 852 BITMAP_IO, /* suspend application io;
817 once no more io in flight, start bitmap io */ 853 once no more io in flight, start bitmap io */
818 BITMAP_IO_QUEUED, /* Started bitmap IO */ 854 BITMAP_IO_QUEUED, /* Started bitmap IO */
855 GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */
819 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ 856 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
820 NET_CONGESTED, /* The data socket is congested */ 857 NET_CONGESTED, /* The data socket is congested */
821 858
@@ -829,6 +866,8 @@ enum {
829 * the peer, if it changed there as well. */ 866 * the peer, if it changed there as well. */
830 CONN_DRY_RUN, /* Expect disconnect after resync handshake. */ 867 CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
831 GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ 868 GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
869 NEW_CUR_UUID, /* Create new current UUID when thawing IO */
870 AL_SUSPENDED, /* Activity logging is currently suspended. */
832}; 871};
833 872
834struct drbd_bitmap; /* opaque for drbd_conf */ 873struct drbd_bitmap; /* opaque for drbd_conf */
@@ -838,10 +877,6 @@ struct drbd_bitmap; /* opaque for drbd_conf */
838 877
839/* THINK maybe we actually want to use the default "event/%s" worker threads 878/* THINK maybe we actually want to use the default "event/%s" worker threads
840 * or similar in linux 2.6, which uses per cpu data and threads. 879 * or similar in linux 2.6, which uses per cpu data and threads.
841 *
842 * To be general, this might need a spin_lock member.
843 * For now, please use the mdev->req_lock to protect list_head,
844 * see drbd_queue_work below.
845 */ 880 */
846struct drbd_work_queue { 881struct drbd_work_queue {
847 struct list_head q; 882 struct list_head q;
@@ -915,6 +950,12 @@ enum write_ordering_e {
915 WO_bio_barrier 950 WO_bio_barrier
916}; 951};
917 952
953struct fifo_buffer {
954 int *values;
955 unsigned int head_index;
956 unsigned int size;
957};
958
918struct drbd_conf { 959struct drbd_conf {
919 /* things that are stored as / read from meta data on disk */ 960 /* things that are stored as / read from meta data on disk */
920 unsigned long flags; 961 unsigned long flags;
@@ -936,9 +977,16 @@ struct drbd_conf {
936 unsigned int ko_count; 977 unsigned int ko_count;
937 struct drbd_work resync_work, 978 struct drbd_work resync_work,
938 unplug_work, 979 unplug_work,
980 go_diskless,
939 md_sync_work; 981 md_sync_work;
940 struct timer_list resync_timer; 982 struct timer_list resync_timer;
941 struct timer_list md_sync_timer; 983 struct timer_list md_sync_timer;
984#ifdef DRBD_DEBUG_MD_SYNC
985 struct {
986 unsigned int line;
987 const char* func;
988 } last_md_mark_dirty;
989#endif
942 990
943 /* Used after attach while negotiating new disk state. */ 991 /* Used after attach while negotiating new disk state. */
944 union drbd_state new_state_tmp; 992 union drbd_state new_state_tmp;
@@ -946,6 +994,7 @@ struct drbd_conf {
946 union drbd_state state; 994 union drbd_state state;
947 wait_queue_head_t misc_wait; 995 wait_queue_head_t misc_wait;
948 wait_queue_head_t state_wait; /* upon each state change. */ 996 wait_queue_head_t state_wait; /* upon each state change. */
997 wait_queue_head_t net_cnt_wait;
949 unsigned int send_cnt; 998 unsigned int send_cnt;
950 unsigned int recv_cnt; 999 unsigned int recv_cnt;
951 unsigned int read_cnt; 1000 unsigned int read_cnt;
@@ -974,12 +1023,16 @@ struct drbd_conf {
974 unsigned long rs_start; 1023 unsigned long rs_start;
975 /* cumulated time in PausedSyncX state [unit jiffies] */ 1024 /* cumulated time in PausedSyncX state [unit jiffies] */
976 unsigned long rs_paused; 1025 unsigned long rs_paused;
1026 /* skipped because csum was equal [unit BM_BLOCK_SIZE] */
1027 unsigned long rs_same_csum;
1028#define DRBD_SYNC_MARKS 8
1029#define DRBD_SYNC_MARK_STEP (3*HZ)
977 /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */ 1030 /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
978 unsigned long rs_mark_left; 1031 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
979 /* marks's time [unit jiffies] */ 1032 /* marks's time [unit jiffies] */
980 unsigned long rs_mark_time; 1033 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
981 /* skipped because csum was equeal [unit BM_BLOCK_SIZE] */ 1034 /* current index into rs_mark_{left,time} */
982 unsigned long rs_same_csum; 1035 int rs_last_mark;
983 1036
984 /* where does the admin want us to start? (sector) */ 1037 /* where does the admin want us to start? (sector) */
985 sector_t ov_start_sector; 1038 sector_t ov_start_sector;
@@ -1012,10 +1065,10 @@ struct drbd_conf {
1012 spinlock_t epoch_lock; 1065 spinlock_t epoch_lock;
1013 unsigned int epochs; 1066 unsigned int epochs;
1014 enum write_ordering_e write_ordering; 1067 enum write_ordering_e write_ordering;
1015 struct list_head active_ee; /* IO in progress */ 1068 struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
1016 struct list_head sync_ee; /* IO in progress */ 1069 struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
1017 struct list_head done_ee; /* send ack */ 1070 struct list_head done_ee; /* send ack */
1018 struct list_head read_ee; /* IO in progress */ 1071 struct list_head read_ee; /* IO in progress (any read) */
1019 struct list_head net_ee; /* zero-copy network send in progress */ 1072 struct list_head net_ee; /* zero-copy network send in progress */
1020 struct hlist_head *ee_hash; /* is proteced by req_lock! */ 1073 struct hlist_head *ee_hash; /* is proteced by req_lock! */
1021 unsigned int ee_hash_s; 1074 unsigned int ee_hash_s;
@@ -1026,7 +1079,8 @@ struct drbd_conf {
1026 int next_barrier_nr; 1079 int next_barrier_nr;
1027 struct hlist_head *app_reads_hash; /* is proteced by req_lock */ 1080 struct hlist_head *app_reads_hash; /* is proteced by req_lock */
1028 struct list_head resync_reads; 1081 struct list_head resync_reads;
1029 atomic_t pp_in_use; 1082 atomic_t pp_in_use; /* allocated from page pool */
1083 atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
1030 wait_queue_head_t ee_wait; 1084 wait_queue_head_t ee_wait;
1031 struct page *md_io_page; /* one page buffer for md_io */ 1085 struct page *md_io_page; /* one page buffer for md_io */
1032 struct page *md_io_tmpp; /* for logical_block_size != 512 */ 1086 struct page *md_io_tmpp; /* for logical_block_size != 512 */
@@ -1054,6 +1108,15 @@ struct drbd_conf {
1054 u64 ed_uuid; /* UUID of the exposed data */ 1108 u64 ed_uuid; /* UUID of the exposed data */
1055 struct mutex state_mutex; 1109 struct mutex state_mutex;
1056 char congestion_reason; /* Why we where congested... */ 1110 char congestion_reason; /* Why we where congested... */
1111 atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
1112 atomic_t rs_sect_ev; /* for submitted resync data rate, both */
1113 int rs_last_sect_ev; /* counter to compare with */
1114 int rs_last_events; /* counter of read or write "events" (unit sectors)
1115 * on the lower level device when we last looked. */
1116 int c_sync_rate; /* current resync rate after syncer throttle magic */
1117 struct fifo_buffer rs_plan_s; /* correction values of resync planer */
1118 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
1119 int rs_planed; /* resync sectors already planed */
1057}; 1120};
1058 1121
1059static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1122static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1138,6 +1201,8 @@ extern void drbd_free_resources(struct drbd_conf *mdev);
1138extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, 1201extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
1139 unsigned int set_size); 1202 unsigned int set_size);
1140extern void tl_clear(struct drbd_conf *mdev); 1203extern void tl_clear(struct drbd_conf *mdev);
1204enum drbd_req_event;
1205extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
1141extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *); 1206extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
1142extern void drbd_free_sock(struct drbd_conf *mdev); 1207extern void drbd_free_sock(struct drbd_conf *mdev);
1143extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, 1208extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
@@ -1150,12 +1215,12 @@ extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_f
1150extern int _drbd_send_state(struct drbd_conf *mdev); 1215extern int _drbd_send_state(struct drbd_conf *mdev);
1151extern int drbd_send_state(struct drbd_conf *mdev); 1216extern int drbd_send_state(struct drbd_conf *mdev);
1152extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, 1217extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1153 enum drbd_packets cmd, struct p_header *h, 1218 enum drbd_packets cmd, struct p_header80 *h,
1154 size_t size, unsigned msg_flags); 1219 size_t size, unsigned msg_flags);
1155#define USE_DATA_SOCKET 1 1220#define USE_DATA_SOCKET 1
1156#define USE_META_SOCKET 0 1221#define USE_META_SOCKET 0
1157extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, 1222extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1158 enum drbd_packets cmd, struct p_header *h, 1223 enum drbd_packets cmd, struct p_header80 *h,
1159 size_t size); 1224 size_t size);
1160extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, 1225extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
1161 char *data, size_t size); 1226 char *data, size_t size);
@@ -1167,7 +1232,7 @@ extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
1167extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd, 1232extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
1168 struct p_block_req *rp); 1233 struct p_block_req *rp);
1169extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, 1234extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
1170 struct p_data *dp); 1235 struct p_data *dp, int data_size);
1171extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, 1236extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
1172 sector_t sector, int blksize, u64 block_id); 1237 sector_t sector, int blksize, u64 block_id);
1173extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, 1238extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
@@ -1201,7 +1266,13 @@ extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
1201extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local); 1266extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
1202extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local); 1267extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
1203extern int drbd_md_test_flag(struct drbd_backing_dev *, int); 1268extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1269#ifndef DRBD_DEBUG_MD_SYNC
1204extern void drbd_md_mark_dirty(struct drbd_conf *mdev); 1270extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
1271#else
1272#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
1273extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
1274 unsigned int line, const char *func);
1275#endif
1205extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, 1276extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
1206 int (*io_fn)(struct drbd_conf *), 1277 int (*io_fn)(struct drbd_conf *),
1207 void (*done)(struct drbd_conf *, int), 1278 void (*done)(struct drbd_conf *, int),
@@ -1209,6 +1280,7 @@ extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
1209extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); 1280extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1210extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 1281extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1211extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); 1282extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
1283extern void drbd_go_diskless(struct drbd_conf *mdev);
1212 1284
1213 1285
1214/* Meta data layout 1286/* Meta data layout
@@ -1264,6 +1336,8 @@ struct bm_extent {
1264 * Bit 1 ==> local node thinks this block needs to be synced. 1336 * Bit 1 ==> local node thinks this block needs to be synced.
1265 */ 1337 */
1266 1338
1339#define SLEEP_TIME (HZ/10)
1340
1267#define BM_BLOCK_SHIFT 12 /* 4k per bit */ 1341#define BM_BLOCK_SHIFT 12 /* 4k per bit */
1268#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT) 1342#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1269/* (9+3) : 512 bytes @ 8 bits; representing 16M storage 1343/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
@@ -1335,11 +1409,13 @@ struct bm_extent {
1335#endif 1409#endif
1336 1410
1337/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables. 1411/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
1338 * With a value of 6 all IO in one 32K block make it to the same slot of the 1412 * With a value of 8 all IO in one 128K block make it to the same slot of the
1339 * hash table. */ 1413 * hash table. */
1340#define HT_SHIFT 6 1414#define HT_SHIFT 8
1341#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) 1415#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
1342 1416
1417#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
1418
1343/* Number of elements in the app_reads_hash */ 1419/* Number of elements in the app_reads_hash */
1344#define APP_R_HSIZE 15 1420#define APP_R_HSIZE 15
1345 1421
@@ -1369,6 +1445,7 @@ extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_
1369/* bm_find_next variants for use while you hold drbd_bm_lock() */ 1445/* bm_find_next variants for use while you hold drbd_bm_lock() */
1370extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1446extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
1371extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo); 1447extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
1448extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
1372extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev); 1449extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
1373extern int drbd_bm_rs_done(struct drbd_conf *mdev); 1450extern int drbd_bm_rs_done(struct drbd_conf *mdev);
1374/* for receive_bitmap */ 1451/* for receive_bitmap */
@@ -1421,7 +1498,8 @@ extern void resync_after_online_grow(struct drbd_conf *);
1421extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1498extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
1422extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, 1499extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
1423 int force); 1500 int force);
1424enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); 1501extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
1502extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
1425extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); 1503extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
1426 1504
1427/* drbd_worker.c */ 1505/* drbd_worker.c */
@@ -1467,10 +1545,12 @@ extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
1467extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); 1545extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
1468extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); 1546extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
1469extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); 1547extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
1548extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
1470 1549
1471extern void resync_timer_fn(unsigned long data); 1550extern void resync_timer_fn(unsigned long data);
1472 1551
1473/* drbd_receiver.c */ 1552/* drbd_receiver.c */
1553extern int drbd_rs_should_slow_down(struct drbd_conf *mdev);
1474extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, 1554extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1475 const unsigned rw, const int fault_type); 1555 const unsigned rw, const int fault_type);
1476extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); 1556extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
@@ -1479,7 +1559,10 @@ extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
1479 sector_t sector, 1559 sector_t sector,
1480 unsigned int data_size, 1560 unsigned int data_size,
1481 gfp_t gfp_mask) __must_hold(local); 1561 gfp_t gfp_mask) __must_hold(local);
1482extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e); 1562extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1563 int is_net);
1564#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
1565#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
1483extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev, 1566extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
1484 struct list_head *head); 1567 struct list_head *head);
1485extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, 1568extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
@@ -1487,6 +1570,7 @@ extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
1487extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled); 1570extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
1488extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed); 1571extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
1489extern void drbd_flush_workqueue(struct drbd_conf *mdev); 1572extern void drbd_flush_workqueue(struct drbd_conf *mdev);
1573extern void drbd_free_tl_hash(struct drbd_conf *mdev);
1490 1574
1491/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to 1575/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
1492 * mess with get_fs/set_fs, we know we are KERNEL_DS always. */ 1576 * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
@@ -1600,6 +1684,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
1600#define susp_MASK 1 1684#define susp_MASK 1
1601#define user_isp_MASK 1 1685#define user_isp_MASK 1
1602#define aftr_isp_MASK 1 1686#define aftr_isp_MASK 1
1687#define susp_nod_MASK 1
1688#define susp_fen_MASK 1
1603 1689
1604#define NS(T, S) \ 1690#define NS(T, S) \
1605 ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \ 1691 ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
@@ -1856,13 +1942,6 @@ static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
1856} 1942}
1857 1943
1858static inline void 1944static inline void
1859_drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1860{
1861 list_add_tail(&w->list, &q->q);
1862 up(&q->s);
1863}
1864
1865static inline void
1866drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w) 1945drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
1867{ 1946{
1868 unsigned long flags; 1947 unsigned long flags;
@@ -1899,19 +1978,19 @@ static inline void request_ping(struct drbd_conf *mdev)
1899static inline int drbd_send_short_cmd(struct drbd_conf *mdev, 1978static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
1900 enum drbd_packets cmd) 1979 enum drbd_packets cmd)
1901{ 1980{
1902 struct p_header h; 1981 struct p_header80 h;
1903 return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h)); 1982 return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
1904} 1983}
1905 1984
1906static inline int drbd_send_ping(struct drbd_conf *mdev) 1985static inline int drbd_send_ping(struct drbd_conf *mdev)
1907{ 1986{
1908 struct p_header h; 1987 struct p_header80 h;
1909 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h)); 1988 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
1910} 1989}
1911 1990
1912static inline int drbd_send_ping_ack(struct drbd_conf *mdev) 1991static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
1913{ 1992{
1914 struct p_header h; 1993 struct p_header80 h;
1915 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h)); 1994 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
1916} 1995}
1917 1996
@@ -2013,7 +2092,7 @@ static inline void inc_unacked(struct drbd_conf *mdev)
2013static inline void put_net_conf(struct drbd_conf *mdev) 2092static inline void put_net_conf(struct drbd_conf *mdev)
2014{ 2093{
2015 if (atomic_dec_and_test(&mdev->net_cnt)) 2094 if (atomic_dec_and_test(&mdev->net_cnt))
2016 wake_up(&mdev->misc_wait); 2095 wake_up(&mdev->net_cnt_wait);
2017} 2096}
2018 2097
2019/** 2098/**
@@ -2044,10 +2123,14 @@ static inline int get_net_conf(struct drbd_conf *mdev)
2044 2123
2045static inline void put_ldev(struct drbd_conf *mdev) 2124static inline void put_ldev(struct drbd_conf *mdev)
2046{ 2125{
2126 int i = atomic_dec_return(&mdev->local_cnt);
2047 __release(local); 2127 __release(local);
2048 if (atomic_dec_and_test(&mdev->local_cnt)) 2128 D_ASSERT(i >= 0);
2129 if (i == 0) {
2130 if (mdev->state.disk == D_FAILED)
2131 drbd_go_diskless(mdev);
2049 wake_up(&mdev->misc_wait); 2132 wake_up(&mdev->misc_wait);
2050 D_ASSERT(atomic_read(&mdev->local_cnt) >= 0); 2133 }
2051} 2134}
2052 2135
2053#ifndef __CHECKER__ 2136#ifndef __CHECKER__
@@ -2179,11 +2262,16 @@ static inline int drbd_state_is_stable(union drbd_state s)
2179 return 1; 2262 return 1;
2180} 2263}
2181 2264
2265static inline int is_susp(union drbd_state s)
2266{
2267 return s.susp || s.susp_nod || s.susp_fen;
2268}
2269
2182static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) 2270static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
2183{ 2271{
2184 int mxb = drbd_get_max_buffers(mdev); 2272 int mxb = drbd_get_max_buffers(mdev);
2185 2273
2186 if (mdev->state.susp) 2274 if (is_susp(mdev->state))
2187 return 0; 2275 return 0;
2188 if (test_bit(SUSPEND_IO, &mdev->flags)) 2276 if (test_bit(SUSPEND_IO, &mdev->flags))
2189 return 0; 2277 return 0;
@@ -2321,8 +2409,7 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
2321 if (test_bit(MD_NO_BARRIER, &mdev->flags)) 2409 if (test_bit(MD_NO_BARRIER, &mdev->flags))
2322 return; 2410 return;
2323 2411
2324 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL, 2412 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2325 BLKDEV_IFL_WAIT);
2326 if (r) { 2413 if (r) {
2327 set_bit(MD_NO_BARRIER, &mdev->flags); 2414 set_bit(MD_NO_BARRIER, &mdev->flags);
2328 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); 2415 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e4b56119866e..c5dfe6486cf3 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -78,6 +78,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused); 78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data); 79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); 80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 82
82MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " 83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
83 "Lars Ellenberg <lars@linbit.com>"); 84 "Lars Ellenberg <lars@linbit.com>");
@@ -200,7 +201,7 @@ static int tl_init(struct drbd_conf *mdev)
200 INIT_LIST_HEAD(&b->w.list); 201 INIT_LIST_HEAD(&b->w.list);
201 b->next = NULL; 202 b->next = NULL;
202 b->br_number = 4711; 203 b->br_number = 4711;
203 b->n_req = 0; 204 b->n_writes = 0;
204 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ 205 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
205 206
206 mdev->oldest_tle = b; 207 mdev->oldest_tle = b;
@@ -241,7 +242,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
241 INIT_LIST_HEAD(&new->w.list); 242 INIT_LIST_HEAD(&new->w.list);
242 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ 243 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
243 new->next = NULL; 244 new->next = NULL;
244 new->n_req = 0; 245 new->n_writes = 0;
245 246
246 newest_before = mdev->newest_tle; 247 newest_before = mdev->newest_tle;
247 /* never send a barrier number == 0, because that is special-cased 248 /* never send a barrier number == 0, because that is special-cased
@@ -285,9 +286,9 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
285 barrier_nr, b->br_number); 286 barrier_nr, b->br_number);
286 goto bail; 287 goto bail;
287 } 288 }
288 if (b->n_req != set_size) { 289 if (b->n_writes != set_size) {
289 dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n", 290 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
290 barrier_nr, set_size, b->n_req); 291 barrier_nr, set_size, b->n_writes);
291 goto bail; 292 goto bail;
292 } 293 }
293 294
@@ -334,6 +335,82 @@ bail:
334 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 335 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
335} 336}
336 337
338/**
339 * _tl_restart() - Walks the transfer log, and applies an action to all requests
340 * @mdev: DRBD device.
341 * @what: The action/event to perform with all request objects
342 *
343 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
344 * restart_frozen_disk_io.
345 */
346static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
347{
348 struct drbd_tl_epoch *b, *tmp, **pn;
349 struct list_head *le, *tle, carry_reads;
350 struct drbd_request *req;
351 int rv, n_writes, n_reads;
352
353 b = mdev->oldest_tle;
354 pn = &mdev->oldest_tle;
355 while (b) {
356 n_writes = 0;
357 n_reads = 0;
358 INIT_LIST_HEAD(&carry_reads);
359 list_for_each_safe(le, tle, &b->requests) {
360 req = list_entry(le, struct drbd_request, tl_requests);
361 rv = _req_mod(req, what);
362
363 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
364 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
365 }
366 tmp = b->next;
367
368 if (n_writes) {
369 if (what == resend) {
370 b->n_writes = n_writes;
371 if (b->w.cb == NULL) {
372 b->w.cb = w_send_barrier;
373 inc_ap_pending(mdev);
374 set_bit(CREATE_BARRIER, &mdev->flags);
375 }
376
377 drbd_queue_work(&mdev->data.work, &b->w);
378 }
379 pn = &b->next;
380 } else {
381 if (n_reads)
382 list_add(&carry_reads, &b->requests);
383 /* there could still be requests on that ring list,
384 * in case local io is still pending */
385 list_del(&b->requests);
386
387 /* dec_ap_pending corresponding to queue_barrier.
388 * the newest barrier may not have been queued yet,
389 * in which case w.cb is still NULL. */
390 if (b->w.cb != NULL)
391 dec_ap_pending(mdev);
392
393 if (b == mdev->newest_tle) {
394 /* recycle, but reinit! */
395 D_ASSERT(tmp == NULL);
396 INIT_LIST_HEAD(&b->requests);
397 list_splice(&carry_reads, &b->requests);
398 INIT_LIST_HEAD(&b->w.list);
399 b->w.cb = NULL;
400 b->br_number = net_random();
401 b->n_writes = 0;
402
403 *pn = b;
404 break;
405 }
406 *pn = tmp;
407 kfree(b);
408 }
409 b = tmp;
410 list_splice(&carry_reads, &b->requests);
411 }
412}
413
337 414
338/** 415/**
339 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL 416 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
@@ -345,48 +422,12 @@ bail:
345 */ 422 */
346void tl_clear(struct drbd_conf *mdev) 423void tl_clear(struct drbd_conf *mdev)
347{ 424{
348 struct drbd_tl_epoch *b, *tmp;
349 struct list_head *le, *tle; 425 struct list_head *le, *tle;
350 struct drbd_request *r; 426 struct drbd_request *r;
351 int new_initial_bnr = net_random();
352 427
353 spin_lock_irq(&mdev->req_lock); 428 spin_lock_irq(&mdev->req_lock);
354 429
355 b = mdev->oldest_tle; 430 _tl_restart(mdev, connection_lost_while_pending);
356 while (b) {
357 list_for_each_safe(le, tle, &b->requests) {
358 r = list_entry(le, struct drbd_request, tl_requests);
359 /* It would be nice to complete outside of spinlock.
360 * But this is easier for now. */
361 _req_mod(r, connection_lost_while_pending);
362 }
363 tmp = b->next;
364
365 /* there could still be requests on that ring list,
366 * in case local io is still pending */
367 list_del(&b->requests);
368
369 /* dec_ap_pending corresponding to queue_barrier.
370 * the newest barrier may not have been queued yet,
371 * in which case w.cb is still NULL. */
372 if (b->w.cb != NULL)
373 dec_ap_pending(mdev);
374
375 if (b == mdev->newest_tle) {
376 /* recycle, but reinit! */
377 D_ASSERT(tmp == NULL);
378 INIT_LIST_HEAD(&b->requests);
379 INIT_LIST_HEAD(&b->w.list);
380 b->w.cb = NULL;
381 b->br_number = new_initial_bnr;
382 b->n_req = 0;
383
384 mdev->oldest_tle = b;
385 break;
386 }
387 kfree(b);
388 b = tmp;
389 }
390 431
391 /* we expect this list to be empty. */ 432 /* we expect this list to be empty. */
392 D_ASSERT(list_empty(&mdev->out_of_sequence_requests)); 433 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
@@ -402,6 +443,15 @@ void tl_clear(struct drbd_conf *mdev)
402 /* ensure bit indicating barrier is required is clear */ 443 /* ensure bit indicating barrier is required is clear */
403 clear_bit(CREATE_BARRIER, &mdev->flags); 444 clear_bit(CREATE_BARRIER, &mdev->flags);
404 445
446 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
447
448 spin_unlock_irq(&mdev->req_lock);
449}
450
451void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
452{
453 spin_lock_irq(&mdev->req_lock);
454 _tl_restart(mdev, what);
405 spin_unlock_irq(&mdev->req_lock); 455 spin_unlock_irq(&mdev->req_lock);
406} 456}
407 457
@@ -456,7 +506,7 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
456static int is_valid_state_transition(struct drbd_conf *, 506static int is_valid_state_transition(struct drbd_conf *,
457 union drbd_state, union drbd_state); 507 union drbd_state, union drbd_state);
458static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, 508static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
459 union drbd_state ns, int *warn_sync_abort); 509 union drbd_state ns, const char **warn_sync_abort);
460int drbd_send_state_req(struct drbd_conf *, 510int drbd_send_state_req(struct drbd_conf *,
461 union drbd_state, union drbd_state); 511 union drbd_state, union drbd_state);
462 512
@@ -606,7 +656,7 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
606 drbd_role_str(ns.peer), 656 drbd_role_str(ns.peer),
607 drbd_disk_str(ns.disk), 657 drbd_disk_str(ns.disk),
608 drbd_disk_str(ns.pdsk), 658 drbd_disk_str(ns.pdsk),
609 ns.susp ? 's' : 'r', 659 is_susp(ns) ? 's' : 'r',
610 ns.aftr_isp ? 'a' : '-', 660 ns.aftr_isp ? 'a' : '-',
611 ns.peer_isp ? 'p' : '-', 661 ns.peer_isp ? 'p' : '-',
612 ns.user_isp ? 'u' : '-' 662 ns.user_isp ? 'u' : '-'
@@ -764,7 +814,7 @@ static int is_valid_state_transition(struct drbd_conf *mdev,
764 * to D_UNKNOWN. This rule and many more along those lines are in this function. 814 * to D_UNKNOWN. This rule and many more along those lines are in this function.
765 */ 815 */
766static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, 816static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
767 union drbd_state ns, int *warn_sync_abort) 817 union drbd_state ns, const char **warn_sync_abort)
768{ 818{
769 enum drbd_fencing_p fp; 819 enum drbd_fencing_p fp;
770 820
@@ -779,9 +829,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
779 os.conn <= C_DISCONNECTING) 829 os.conn <= C_DISCONNECTING)
780 ns.conn = os.conn; 830 ns.conn = os.conn;
781 831
782 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */ 832 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
833 * If you try to go into some Sync* state, that shall fail (elsewhere). */
783 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN && 834 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
784 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING) 835 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
785 ns.conn = os.conn; 836 ns.conn = os.conn;
786 837
787 /* After C_DISCONNECTING only C_STANDALONE may follow */ 838 /* After C_DISCONNECTING only C_STANDALONE may follow */
@@ -799,14 +850,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
799 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY) 850 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
800 ns.aftr_isp = 0; 851 ns.aftr_isp = 0;
801 852
802 if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
803 ns.pdsk = D_UNKNOWN;
804
805 /* Abort resync if a disk fails/detaches */ 853 /* Abort resync if a disk fails/detaches */
806 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED && 854 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
807 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) { 855 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
808 if (warn_sync_abort) 856 if (warn_sync_abort)
809 *warn_sync_abort = 1; 857 *warn_sync_abort =
858 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
859 "Online-verify" : "Resync";
810 ns.conn = C_CONNECTED; 860 ns.conn = C_CONNECTED;
811 } 861 }
812 862
@@ -877,7 +927,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
877 if (fp == FP_STONITH && 927 if (fp == FP_STONITH &&
878 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && 928 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
879 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) 929 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
880 ns.susp = 1; 930 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
931
932 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
933 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
934 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
935 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
881 936
882 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { 937 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
883 if (ns.conn == C_SYNC_SOURCE) 938 if (ns.conn == C_SYNC_SOURCE)
@@ -913,6 +968,12 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
913 } 968 }
914} 969}
915 970
971static void drbd_resume_al(struct drbd_conf *mdev)
972{
973 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
974 dev_info(DEV, "Resumed AL updates\n");
975}
976
916/** 977/**
917 * __drbd_set_state() - Set a new DRBD state 978 * __drbd_set_state() - Set a new DRBD state
918 * @mdev: DRBD device. 979 * @mdev: DRBD device.
@@ -928,7 +989,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
928{ 989{
929 union drbd_state os; 990 union drbd_state os;
930 int rv = SS_SUCCESS; 991 int rv = SS_SUCCESS;
931 int warn_sync_abort = 0; 992 const char *warn_sync_abort = NULL;
932 struct after_state_chg_work *ascw; 993 struct after_state_chg_work *ascw;
933 994
934 os = mdev->state; 995 os = mdev->state;
@@ -947,14 +1008,8 @@ int __drbd_set_state(struct drbd_conf *mdev,
947 /* If the old state was illegal as well, then let 1008 /* If the old state was illegal as well, then let
948 this happen...*/ 1009 this happen...*/
949 1010
950 if (is_valid_state(mdev, os) == rv) { 1011 if (is_valid_state(mdev, os) == rv)
951 dev_err(DEV, "Considering state change from bad state. "
952 "Error would be: '%s'\n",
953 drbd_set_st_err_str(rv));
954 print_st(mdev, "old", os);
955 print_st(mdev, "new", ns);
956 rv = is_valid_state_transition(mdev, ns, os); 1012 rv = is_valid_state_transition(mdev, ns, os);
957 }
958 } else 1013 } else
959 rv = is_valid_state_transition(mdev, ns, os); 1014 rv = is_valid_state_transition(mdev, ns, os);
960 } 1015 }
@@ -966,7 +1021,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
966 } 1021 }
967 1022
968 if (warn_sync_abort) 1023 if (warn_sync_abort)
969 dev_warn(DEV, "Resync aborted.\n"); 1024 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
970 1025
971 { 1026 {
972 char *pbp, pb[300]; 1027 char *pbp, pb[300];
@@ -977,7 +1032,10 @@ int __drbd_set_state(struct drbd_conf *mdev,
977 PSC(conn); 1032 PSC(conn);
978 PSC(disk); 1033 PSC(disk);
979 PSC(pdsk); 1034 PSC(pdsk);
980 PSC(susp); 1035 if (is_susp(ns) != is_susp(os))
1036 pbp += sprintf(pbp, "susp( %s -> %s ) ",
1037 drbd_susp_str(is_susp(os)),
1038 drbd_susp_str(is_susp(ns)));
981 PSC(aftr_isp); 1039 PSC(aftr_isp);
982 PSC(peer_isp); 1040 PSC(peer_isp);
983 PSC(user_isp); 1041 PSC(user_isp);
@@ -1002,12 +1060,6 @@ int __drbd_set_state(struct drbd_conf *mdev,
1002 wake_up(&mdev->misc_wait); 1060 wake_up(&mdev->misc_wait);
1003 wake_up(&mdev->state_wait); 1061 wake_up(&mdev->state_wait);
1004 1062
1005 /* post-state-change actions */
1006 if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) {
1007 set_bit(STOP_SYNC_TIMER, &mdev->flags);
1008 mod_timer(&mdev->resync_timer, jiffies);
1009 }
1010
1011 /* aborted verify run. log the last position */ 1063 /* aborted verify run. log the last position */
1012 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && 1064 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1013 ns.conn < C_CONNECTED) { 1065 ns.conn < C_CONNECTED) {
@@ -1020,41 +1072,42 @@ int __drbd_set_state(struct drbd_conf *mdev,
1020 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) && 1072 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1021 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) { 1073 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1022 dev_info(DEV, "Syncer continues.\n"); 1074 dev_info(DEV, "Syncer continues.\n");
1023 mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time; 1075 mdev->rs_paused += (long)jiffies
1024 if (ns.conn == C_SYNC_TARGET) { 1076 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1025 if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags)) 1077 if (ns.conn == C_SYNC_TARGET)
1026 mod_timer(&mdev->resync_timer, jiffies); 1078 mod_timer(&mdev->resync_timer, jiffies);
1027 /* This if (!test_bit) is only needed for the case
1028 that a device that has ceased to used its timer,
1029 i.e. it is already in drbd_resync_finished() gets
1030 paused and resumed. */
1031 }
1032 } 1079 }
1033 1080
1034 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) && 1081 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1035 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) { 1082 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1036 dev_info(DEV, "Resync suspended\n"); 1083 dev_info(DEV, "Resync suspended\n");
1037 mdev->rs_mark_time = jiffies; 1084 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1038 if (ns.conn == C_PAUSED_SYNC_T)
1039 set_bit(STOP_SYNC_TIMER, &mdev->flags);
1040 } 1085 }
1041 1086
1042 if (os.conn == C_CONNECTED && 1087 if (os.conn == C_CONNECTED &&
1043 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) { 1088 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1089 unsigned long now = jiffies;
1090 int i;
1091
1044 mdev->ov_position = 0; 1092 mdev->ov_position = 0;
1045 mdev->rs_total = 1093 mdev->rs_total = drbd_bm_bits(mdev);
1046 mdev->rs_mark_left = drbd_bm_bits(mdev);
1047 if (mdev->agreed_pro_version >= 90) 1094 if (mdev->agreed_pro_version >= 90)
1048 set_ov_position(mdev, ns.conn); 1095 set_ov_position(mdev, ns.conn);
1049 else 1096 else
1050 mdev->ov_start_sector = 0; 1097 mdev->ov_start_sector = 0;
1051 mdev->ov_left = mdev->rs_total 1098 mdev->ov_left = mdev->rs_total
1052 - BM_SECT_TO_BIT(mdev->ov_position); 1099 - BM_SECT_TO_BIT(mdev->ov_position);
1053 mdev->rs_start = 1100 mdev->rs_start = now;
1054 mdev->rs_mark_time = jiffies; 1101 mdev->rs_last_events = 0;
1102 mdev->rs_last_sect_ev = 0;
1055 mdev->ov_last_oos_size = 0; 1103 mdev->ov_last_oos_size = 0;
1056 mdev->ov_last_oos_start = 0; 1104 mdev->ov_last_oos_start = 0;
1057 1105
1106 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1107 mdev->rs_mark_left[i] = mdev->rs_total;
1108 mdev->rs_mark_time[i] = now;
1109 }
1110
1058 if (ns.conn == C_VERIFY_S) { 1111 if (ns.conn == C_VERIFY_S) {
1059 dev_info(DEV, "Starting Online Verify from sector %llu\n", 1112 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1060 (unsigned long long)mdev->ov_position); 1113 (unsigned long long)mdev->ov_position);
@@ -1107,6 +1160,10 @@ int __drbd_set_state(struct drbd_conf *mdev,
1107 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) 1160 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1108 drbd_thread_restart_nowait(&mdev->receiver); 1161 drbd_thread_restart_nowait(&mdev->receiver);
1109 1162
1163 /* Resume AL writing if we get a connection */
1164 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1165 drbd_resume_al(mdev);
1166
1110 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); 1167 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1111 if (ascw) { 1168 if (ascw) {
1112 ascw->os = os; 1169 ascw->os = os;
@@ -1165,6 +1222,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1165 union drbd_state ns, enum chg_state_flags flags) 1222 union drbd_state ns, enum chg_state_flags flags)
1166{ 1223{
1167 enum drbd_fencing_p fp; 1224 enum drbd_fencing_p fp;
1225 enum drbd_req_event what = nothing;
1226 union drbd_state nsm = (union drbd_state){ .i = -1 };
1168 1227
1169 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { 1228 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1170 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1229 clear_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1188,17 +1247,49 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1188 /* Here we have the actions that are performed after a 1247 /* Here we have the actions that are performed after a
1189 state change. This function might sleep */ 1248 state change. This function might sleep */
1190 1249
1191 if (fp == FP_STONITH && ns.susp) { 1250 nsm.i = -1;
1192 /* case1: The outdate peer handler is successful: 1251 if (ns.susp_nod) {
1193 * case2: The connection was established again: */ 1252 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1194 if ((os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) || 1253 if (ns.conn == C_CONNECTED)
1195 (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) { 1254 what = resend, nsm.susp_nod = 0;
1255 else /* ns.conn > C_CONNECTED */
1256 dev_err(DEV, "Unexpected Resynd going on!\n");
1257 }
1258
1259 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1260 what = restart_frozen_disk_io, nsm.susp_nod = 0;
1261
1262 }
1263
1264 if (ns.susp_fen) {
1265 /* case1: The outdate peer handler is successful: */
1266 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1196 tl_clear(mdev); 1267 tl_clear(mdev);
1268 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1269 drbd_uuid_new_current(mdev);
1270 clear_bit(NEW_CUR_UUID, &mdev->flags);
1271 drbd_md_sync(mdev);
1272 }
1197 spin_lock_irq(&mdev->req_lock); 1273 spin_lock_irq(&mdev->req_lock);
1198 _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL); 1274 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1199 spin_unlock_irq(&mdev->req_lock); 1275 spin_unlock_irq(&mdev->req_lock);
1200 } 1276 }
1277 /* case2: The connection was established again: */
1278 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1279 clear_bit(NEW_CUR_UUID, &mdev->flags);
1280 what = resend;
1281 nsm.susp_fen = 0;
1282 }
1283 }
1284
1285 if (what != nothing) {
1286 spin_lock_irq(&mdev->req_lock);
1287 _tl_restart(mdev, what);
1288 nsm.i &= mdev->state.i;
1289 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1290 spin_unlock_irq(&mdev->req_lock);
1201 } 1291 }
1292
1202 /* Do not change the order of the if above and the two below... */ 1293 /* Do not change the order of the if above and the two below... */
1203 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ 1294 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1204 drbd_send_uuids(mdev); 1295 drbd_send_uuids(mdev);
@@ -1217,16 +1308,22 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1217 if (get_ldev(mdev)) { 1308 if (get_ldev(mdev)) {
1218 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && 1309 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1219 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { 1310 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1220 drbd_uuid_new_current(mdev); 1311 if (is_susp(mdev->state)) {
1221 drbd_send_uuids(mdev); 1312 set_bit(NEW_CUR_UUID, &mdev->flags);
1313 } else {
1314 drbd_uuid_new_current(mdev);
1315 drbd_send_uuids(mdev);
1316 }
1222 } 1317 }
1223 put_ldev(mdev); 1318 put_ldev(mdev);
1224 } 1319 }
1225 } 1320 }
1226 1321
1227 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) { 1322 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1228 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) 1323 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1229 drbd_uuid_new_current(mdev); 1324 drbd_uuid_new_current(mdev);
1325 drbd_send_uuids(mdev);
1326 }
1230 1327
1231 /* D_DISKLESS Peer becomes secondary */ 1328 /* D_DISKLESS Peer becomes secondary */
1232 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) 1329 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1268,42 +1365,51 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1268 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) 1365 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1269 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); 1366 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1270 1367
1368 /* first half of local IO error */
1271 if (os.disk > D_FAILED && ns.disk == D_FAILED) { 1369 if (os.disk > D_FAILED && ns.disk == D_FAILED) {
1272 enum drbd_io_error_p eh; 1370 enum drbd_io_error_p eh = EP_PASS_ON;
1371
1372 if (drbd_send_state(mdev))
1373 dev_warn(DEV, "Notified peer that my disk is broken.\n");
1374 else
1375 dev_err(DEV, "Sending state for drbd_io_error() failed\n");
1376
1377 drbd_rs_cancel_all(mdev);
1273 1378
1274 eh = EP_PASS_ON;
1275 if (get_ldev_if_state(mdev, D_FAILED)) { 1379 if (get_ldev_if_state(mdev, D_FAILED)) {
1276 eh = mdev->ldev->dc.on_io_error; 1380 eh = mdev->ldev->dc.on_io_error;
1277 put_ldev(mdev); 1381 put_ldev(mdev);
1278 } 1382 }
1383 if (eh == EP_CALL_HELPER)
1384 drbd_khelper(mdev, "local-io-error");
1385 }
1279 1386
1280 drbd_rs_cancel_all(mdev); 1387
1281 /* since get_ldev() only works as long as disk>=D_INCONSISTENT, 1388 /* second half of local IO error handling,
1282 and it is D_DISKLESS here, local_cnt can only go down, it can 1389 * after local_cnt references have reached zero: */
1283 not increase... It will reach zero */ 1390 if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
1284 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1285 mdev->rs_total = 0; 1391 mdev->rs_total = 0;
1286 mdev->rs_failed = 0; 1392 mdev->rs_failed = 0;
1287 atomic_set(&mdev->rs_pending_cnt, 0); 1393 atomic_set(&mdev->rs_pending_cnt, 0);
1288
1289 spin_lock_irq(&mdev->req_lock);
1290 _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
1291 spin_unlock_irq(&mdev->req_lock);
1292
1293 if (eh == EP_CALL_HELPER)
1294 drbd_khelper(mdev, "local-io-error");
1295 } 1394 }
1296 1395
1297 if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) { 1396 if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
1397 /* We must still be diskless,
1398 * re-attach has to be serialized with this! */
1399 if (mdev->state.disk != D_DISKLESS)
1400 dev_err(DEV,
1401 "ASSERT FAILED: disk is %s while going diskless\n",
1402 drbd_disk_str(mdev->state.disk));
1403
1404 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
1405 * will inc/dec it frequently. Since we became D_DISKLESS, no
1406 * one has touched the protected members anymore, though, so we
1407 * are safe to free them here. */
1408 if (drbd_send_state(mdev))
1409 dev_warn(DEV, "Notified peer that I detached my disk.\n");
1410 else
1411 dev_err(DEV, "Sending state for detach failed\n");
1298 1412
1299 if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ {
1300 if (drbd_send_state(mdev))
1301 dev_warn(DEV, "Notified peer that my disk is broken.\n");
1302 else
1303 dev_err(DEV, "Sending state in drbd_io_error() failed\n");
1304 }
1305
1306 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1307 lc_destroy(mdev->resync); 1413 lc_destroy(mdev->resync);
1308 mdev->resync = NULL; 1414 mdev->resync = NULL;
1309 lc_destroy(mdev->act_log); 1415 lc_destroy(mdev->act_log);
@@ -1312,8 +1418,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1312 drbd_free_bc(mdev->ldev); 1418 drbd_free_bc(mdev->ldev);
1313 mdev->ldev = NULL;); 1419 mdev->ldev = NULL;);
1314 1420
1315 if (mdev->md_io_tmpp) 1421 if (mdev->md_io_tmpp) {
1316 __free_page(mdev->md_io_tmpp); 1422 __free_page(mdev->md_io_tmpp);
1423 mdev->md_io_tmpp = NULL;
1424 }
1317 } 1425 }
1318 1426
1319 /* Disks got bigger while they were detached */ 1427 /* Disks got bigger while they were detached */
@@ -1329,6 +1437,15 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1329 (os.user_isp && !ns.user_isp)) 1437 (os.user_isp && !ns.user_isp))
1330 resume_next_sg(mdev); 1438 resume_next_sg(mdev);
1331 1439
1440 /* sync target done with resync. Explicitly notify peer, even though
1441 * it should (at least for non-empty resyncs) already know itself. */
1442 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1443 drbd_send_state(mdev);
1444
1445 /* free tl_hash if we Got thawed and are C_STANDALONE */
1446 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1447 drbd_free_tl_hash(mdev);
1448
1332 /* Upon network connection, we need to start the receiver */ 1449 /* Upon network connection, we need to start the receiver */
1333 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED) 1450 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1334 drbd_thread_start(&mdev->receiver); 1451 drbd_thread_start(&mdev->receiver);
@@ -1555,7 +1672,7 @@ void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1555 1672
1556/* the appropriate socket mutex must be held already */ 1673/* the appropriate socket mutex must be held already */
1557int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, 1674int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1558 enum drbd_packets cmd, struct p_header *h, 1675 enum drbd_packets cmd, struct p_header80 *h,
1559 size_t size, unsigned msg_flags) 1676 size_t size, unsigned msg_flags)
1560{ 1677{
1561 int sent, ok; 1678 int sent, ok;
@@ -1565,7 +1682,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1565 1682
1566 h->magic = BE_DRBD_MAGIC; 1683 h->magic = BE_DRBD_MAGIC;
1567 h->command = cpu_to_be16(cmd); 1684 h->command = cpu_to_be16(cmd);
1568 h->length = cpu_to_be16(size-sizeof(struct p_header)); 1685 h->length = cpu_to_be16(size-sizeof(struct p_header80));
1569 1686
1570 sent = drbd_send(mdev, sock, h, size, msg_flags); 1687 sent = drbd_send(mdev, sock, h, size, msg_flags);
1571 1688
@@ -1580,7 +1697,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1580 * when we hold the appropriate socket mutex. 1697 * when we hold the appropriate socket mutex.
1581 */ 1698 */
1582int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, 1699int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1583 enum drbd_packets cmd, struct p_header *h, size_t size) 1700 enum drbd_packets cmd, struct p_header80 *h, size_t size)
1584{ 1701{
1585 int ok = 0; 1702 int ok = 0;
1586 struct socket *sock; 1703 struct socket *sock;
@@ -1608,7 +1725,7 @@ int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1608int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data, 1725int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1609 size_t size) 1726 size_t size)
1610{ 1727{
1611 struct p_header h; 1728 struct p_header80 h;
1612 int ok; 1729 int ok;
1613 1730
1614 h.magic = BE_DRBD_MAGIC; 1731 h.magic = BE_DRBD_MAGIC;
@@ -1630,7 +1747,7 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1630 1747
1631int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) 1748int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1632{ 1749{
1633 struct p_rs_param_89 *p; 1750 struct p_rs_param_95 *p;
1634 struct socket *sock; 1751 struct socket *sock;
1635 int size, rv; 1752 int size, rv;
1636 const int apv = mdev->agreed_pro_version; 1753 const int apv = mdev->agreed_pro_version;
@@ -1638,7 +1755,8 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1638 size = apv <= 87 ? sizeof(struct p_rs_param) 1755 size = apv <= 87 ? sizeof(struct p_rs_param)
1639 : apv == 88 ? sizeof(struct p_rs_param) 1756 : apv == 88 ? sizeof(struct p_rs_param)
1640 + strlen(mdev->sync_conf.verify_alg) + 1 1757 + strlen(mdev->sync_conf.verify_alg) + 1
1641 : /* 89 */ sizeof(struct p_rs_param_89); 1758 : apv <= 94 ? sizeof(struct p_rs_param_89)
1759 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1642 1760
1643 /* used from admin command context and receiver/worker context. 1761 /* used from admin command context and receiver/worker context.
1644 * to avoid kmalloc, grab the socket right here, 1762 * to avoid kmalloc, grab the socket right here,
@@ -1649,12 +1767,16 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1649 if (likely(sock != NULL)) { 1767 if (likely(sock != NULL)) {
1650 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM; 1768 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1651 1769
1652 p = &mdev->data.sbuf.rs_param_89; 1770 p = &mdev->data.sbuf.rs_param_95;
1653 1771
1654 /* initialize verify_alg and csums_alg */ 1772 /* initialize verify_alg and csums_alg */
1655 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 1773 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1656 1774
1657 p->rate = cpu_to_be32(sc->rate); 1775 p->rate = cpu_to_be32(sc->rate);
1776 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1777 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1778 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1779 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1658 1780
1659 if (apv >= 88) 1781 if (apv >= 88)
1660 strcpy(p->verify_alg, mdev->sync_conf.verify_alg); 1782 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
@@ -1710,7 +1832,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
1710 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); 1832 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1711 1833
1712 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL, 1834 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1713 (struct p_header *)p, size); 1835 (struct p_header80 *)p, size);
1714 kfree(p); 1836 kfree(p);
1715 return rv; 1837 return rv;
1716} 1838}
@@ -1736,7 +1858,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1736 put_ldev(mdev); 1858 put_ldev(mdev);
1737 1859
1738 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, 1860 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1739 (struct p_header *)&p, sizeof(p)); 1861 (struct p_header80 *)&p, sizeof(p));
1740} 1862}
1741 1863
1742int drbd_send_uuids(struct drbd_conf *mdev) 1864int drbd_send_uuids(struct drbd_conf *mdev)
@@ -1757,7 +1879,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1757 p.uuid = cpu_to_be64(val); 1879 p.uuid = cpu_to_be64(val);
1758 1880
1759 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, 1881 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1760 (struct p_header *)&p, sizeof(p)); 1882 (struct p_header80 *)&p, sizeof(p));
1761} 1883}
1762 1884
1763int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) 1885int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
@@ -1787,7 +1909,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
1787 p.dds_flags = cpu_to_be16(flags); 1909 p.dds_flags = cpu_to_be16(flags);
1788 1910
1789 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, 1911 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
1790 (struct p_header *)&p, sizeof(p)); 1912 (struct p_header80 *)&p, sizeof(p));
1791 return ok; 1913 return ok;
1792} 1914}
1793 1915
@@ -1812,7 +1934,7 @@ int drbd_send_state(struct drbd_conf *mdev)
1812 1934
1813 if (likely(sock != NULL)) { 1935 if (likely(sock != NULL)) {
1814 ok = _drbd_send_cmd(mdev, sock, P_STATE, 1936 ok = _drbd_send_cmd(mdev, sock, P_STATE,
1815 (struct p_header *)&p, sizeof(p), 0); 1937 (struct p_header80 *)&p, sizeof(p), 0);
1816 } 1938 }
1817 1939
1818 mutex_unlock(&mdev->data.mutex); 1940 mutex_unlock(&mdev->data.mutex);
@@ -1830,7 +1952,7 @@ int drbd_send_state_req(struct drbd_conf *mdev,
1830 p.val = cpu_to_be32(val.i); 1952 p.val = cpu_to_be32(val.i);
1831 1953
1832 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, 1954 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
1833 (struct p_header *)&p, sizeof(p)); 1955 (struct p_header80 *)&p, sizeof(p));
1834} 1956}
1835 1957
1836int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) 1958int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
@@ -1840,7 +1962,7 @@ int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
1840 p.retcode = cpu_to_be32(retcode); 1962 p.retcode = cpu_to_be32(retcode);
1841 1963
1842 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, 1964 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
1843 (struct p_header *)&p, sizeof(p)); 1965 (struct p_header80 *)&p, sizeof(p));
1844} 1966}
1845 1967
1846int fill_bitmap_rle_bits(struct drbd_conf *mdev, 1968int fill_bitmap_rle_bits(struct drbd_conf *mdev,
@@ -1939,7 +2061,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1939 2061
1940enum { OK, FAILED, DONE } 2062enum { OK, FAILED, DONE }
1941send_bitmap_rle_or_plain(struct drbd_conf *mdev, 2063send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1942 struct p_header *h, struct bm_xfer_ctx *c) 2064 struct p_header80 *h, struct bm_xfer_ctx *c)
1943{ 2065{
1944 struct p_compressed_bm *p = (void*)h; 2066 struct p_compressed_bm *p = (void*)h;
1945 unsigned long num_words; 2067 unsigned long num_words;
@@ -1969,12 +2091,12 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1969 if (len) 2091 if (len)
1970 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload); 2092 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
1971 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP, 2093 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
1972 h, sizeof(struct p_header) + len, 0); 2094 h, sizeof(struct p_header80) + len, 0);
1973 c->word_offset += num_words; 2095 c->word_offset += num_words;
1974 c->bit_offset = c->word_offset * BITS_PER_LONG; 2096 c->bit_offset = c->word_offset * BITS_PER_LONG;
1975 2097
1976 c->packets[1]++; 2098 c->packets[1]++;
1977 c->bytes[1] += sizeof(struct p_header) + len; 2099 c->bytes[1] += sizeof(struct p_header80) + len;
1978 2100
1979 if (c->bit_offset > c->bm_bits) 2101 if (c->bit_offset > c->bm_bits)
1980 c->bit_offset = c->bm_bits; 2102 c->bit_offset = c->bm_bits;
@@ -1990,14 +2112,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1990int _drbd_send_bitmap(struct drbd_conf *mdev) 2112int _drbd_send_bitmap(struct drbd_conf *mdev)
1991{ 2113{
1992 struct bm_xfer_ctx c; 2114 struct bm_xfer_ctx c;
1993 struct p_header *p; 2115 struct p_header80 *p;
1994 int ret; 2116 int ret;
1995 2117
1996 ERR_IF(!mdev->bitmap) return FALSE; 2118 ERR_IF(!mdev->bitmap) return FALSE;
1997 2119
1998 /* maybe we should use some per thread scratch page, 2120 /* maybe we should use some per thread scratch page,
1999 * and allocate that during initial device creation? */ 2121 * and allocate that during initial device creation? */
2000 p = (struct p_header *) __get_free_page(GFP_NOIO); 2122 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2001 if (!p) { 2123 if (!p) {
2002 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); 2124 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2003 return FALSE; 2125 return FALSE;
@@ -2055,7 +2177,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2055 if (mdev->state.conn < C_CONNECTED) 2177 if (mdev->state.conn < C_CONNECTED)
2056 return FALSE; 2178 return FALSE;
2057 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, 2179 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2058 (struct p_header *)&p, sizeof(p)); 2180 (struct p_header80 *)&p, sizeof(p));
2059 return ok; 2181 return ok;
2060} 2182}
2061 2183
@@ -2083,17 +2205,18 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2083 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) 2205 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2084 return FALSE; 2206 return FALSE;
2085 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, 2207 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2086 (struct p_header *)&p, sizeof(p)); 2208 (struct p_header80 *)&p, sizeof(p));
2087 return ok; 2209 return ok;
2088} 2210}
2089 2211
2212/* dp->sector and dp->block_id already/still in network byte order,
2213 * data_size is payload size according to dp->head,
2214 * and may need to be corrected for digest size. */
2090int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, 2215int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2091 struct p_data *dp) 2216 struct p_data *dp, int data_size)
2092{ 2217{
2093 const int header_size = sizeof(struct p_data) 2218 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2094 - sizeof(struct p_header); 2219 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2095 int data_size = ((struct p_header *)dp)->length - header_size;
2096
2097 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size), 2220 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2098 dp->block_id); 2221 dp->block_id);
2099} 2222}
@@ -2141,7 +2264,7 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2141 p.blksize = cpu_to_be32(size); 2264 p.blksize = cpu_to_be32(size);
2142 2265
2143 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, 2266 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2144 (struct p_header *)&p, sizeof(p)); 2267 (struct p_header80 *)&p, sizeof(p));
2145 return ok; 2268 return ok;
2146} 2269}
2147 2270
@@ -2159,7 +2282,7 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev,
2159 2282
2160 p.head.magic = BE_DRBD_MAGIC; 2283 p.head.magic = BE_DRBD_MAGIC;
2161 p.head.command = cpu_to_be16(cmd); 2284 p.head.command = cpu_to_be16(cmd);
2162 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size); 2285 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2163 2286
2164 mutex_lock(&mdev->data.mutex); 2287 mutex_lock(&mdev->data.mutex);
2165 2288
@@ -2181,7 +2304,7 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2181 p.blksize = cpu_to_be32(size); 2304 p.blksize = cpu_to_be32(size);
2182 2305
2183 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, 2306 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2184 (struct p_header *)&p, sizeof(p)); 2307 (struct p_header80 *)&p, sizeof(p));
2185 return ok; 2308 return ok;
2186} 2309}
2187 2310
@@ -2333,6 +2456,18 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2333 return 1; 2456 return 1;
2334} 2457}
2335 2458
2459static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2460{
2461 if (mdev->agreed_pro_version >= 95)
2462 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2463 (bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
2464 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2465 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2466 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2467 else
2468 return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
2469}
2470
2336/* Used to send write requests 2471/* Used to send write requests
2337 * R_PRIMARY -> Peer (P_DATA) 2472 * R_PRIMARY -> Peer (P_DATA)
2338 */ 2473 */
@@ -2350,30 +2485,25 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2350 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? 2485 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2351 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; 2486 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2352 2487
2353 p.head.magic = BE_DRBD_MAGIC; 2488 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2354 p.head.command = cpu_to_be16(P_DATA); 2489 p.head.h80.magic = BE_DRBD_MAGIC;
2355 p.head.length = 2490 p.head.h80.command = cpu_to_be16(P_DATA);
2356 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size); 2491 p.head.h80.length =
2492 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2493 } else {
2494 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2495 p.head.h95.command = cpu_to_be16(P_DATA);
2496 p.head.h95.length =
2497 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2498 }
2357 2499
2358 p.sector = cpu_to_be64(req->sector); 2500 p.sector = cpu_to_be64(req->sector);
2359 p.block_id = (unsigned long)req; 2501 p.block_id = (unsigned long)req;
2360 p.seq_num = cpu_to_be32(req->seq_num = 2502 p.seq_num = cpu_to_be32(req->seq_num =
2361 atomic_add_return(1, &mdev->packet_seq)); 2503 atomic_add_return(1, &mdev->packet_seq));
2362 dp_flags = 0;
2363 2504
2364 /* NOTE: no need to check if barriers supported here as we would 2505 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2365 * not pass the test in make_request_common in that case 2506
2366 */
2367 if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
2368 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
2369 /* dp_flags |= DP_HARDBARRIER; */
2370 }
2371 if (req->master_bio->bi_rw & REQ_SYNC)
2372 dp_flags |= DP_RW_SYNC;
2373 /* for now handle SYNCIO and UNPLUG
2374 * as if they still were one and the same flag */
2375 if (req->master_bio->bi_rw & REQ_UNPLUG)
2376 dp_flags |= DP_RW_SYNC;
2377 if (mdev->state.conn >= C_SYNC_SOURCE && 2507 if (mdev->state.conn >= C_SYNC_SOURCE &&
2378 mdev->state.conn <= C_PAUSED_SYNC_T) 2508 mdev->state.conn <= C_PAUSED_SYNC_T)
2379 dp_flags |= DP_MAY_SET_IN_SYNC; 2509 dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -2414,10 +2544,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2414 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? 2544 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2415 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; 2545 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2416 2546
2417 p.head.magic = BE_DRBD_MAGIC; 2547 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2418 p.head.command = cpu_to_be16(cmd); 2548 p.head.h80.magic = BE_DRBD_MAGIC;
2419 p.head.length = 2549 p.head.h80.command = cpu_to_be16(cmd);
2420 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size); 2550 p.head.h80.length =
2551 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2552 } else {
2553 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2554 p.head.h95.command = cpu_to_be16(cmd);
2555 p.head.h95.length =
2556 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2557 }
2421 2558
2422 p.sector = cpu_to_be64(e->sector); 2559 p.sector = cpu_to_be64(e->sector);
2423 p.block_id = e->block_id; 2560 p.block_id = e->block_id;
@@ -2430,8 +2567,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2430 if (!drbd_get_data_sock(mdev)) 2567 if (!drbd_get_data_sock(mdev))
2431 return 0; 2568 return 0;
2432 2569
2433 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, 2570 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2434 sizeof(p), dgs ? MSG_MORE : 0);
2435 if (ok && dgs) { 2571 if (ok && dgs) {
2436 dgb = mdev->int_dig_out; 2572 dgb = mdev->int_dig_out;
2437 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); 2573 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
@@ -2606,7 +2742,13 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
2606 /* .verify_alg = */ {}, 0, 2742 /* .verify_alg = */ {}, 0,
2607 /* .cpu_mask = */ {}, 0, 2743 /* .cpu_mask = */ {}, 0,
2608 /* .csums_alg = */ {}, 0, 2744 /* .csums_alg = */ {}, 0,
2609 /* .use_rle = */ 0 2745 /* .use_rle = */ 0,
2746 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2747 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2748 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2749 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
2750 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2751 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
2610 }; 2752 };
2611 2753
2612 /* Have to use that way, because the layout differs between 2754 /* Have to use that way, because the layout differs between
@@ -2617,7 +2759,9 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
2617 .conn = C_STANDALONE, 2759 .conn = C_STANDALONE,
2618 .disk = D_DISKLESS, 2760 .disk = D_DISKLESS,
2619 .pdsk = D_UNKNOWN, 2761 .pdsk = D_UNKNOWN,
2620 .susp = 0 2762 .susp = 0,
2763 .susp_nod = 0,
2764 .susp_fen = 0
2621 } }; 2765 } };
2622} 2766}
2623 2767
@@ -2641,6 +2785,9 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2641 atomic_set(&mdev->net_cnt, 0); 2785 atomic_set(&mdev->net_cnt, 0);
2642 atomic_set(&mdev->packet_seq, 0); 2786 atomic_set(&mdev->packet_seq, 0);
2643 atomic_set(&mdev->pp_in_use, 0); 2787 atomic_set(&mdev->pp_in_use, 0);
2788 atomic_set(&mdev->pp_in_use_by_net, 0);
2789 atomic_set(&mdev->rs_sect_in, 0);
2790 atomic_set(&mdev->rs_sect_ev, 0);
2644 2791
2645 mutex_init(&mdev->md_io_mutex); 2792 mutex_init(&mdev->md_io_mutex);
2646 mutex_init(&mdev->data.mutex); 2793 mutex_init(&mdev->data.mutex);
@@ -2667,11 +2814,13 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2667 INIT_LIST_HEAD(&mdev->meta.work.q); 2814 INIT_LIST_HEAD(&mdev->meta.work.q);
2668 INIT_LIST_HEAD(&mdev->resync_work.list); 2815 INIT_LIST_HEAD(&mdev->resync_work.list);
2669 INIT_LIST_HEAD(&mdev->unplug_work.list); 2816 INIT_LIST_HEAD(&mdev->unplug_work.list);
2817 INIT_LIST_HEAD(&mdev->go_diskless.list);
2670 INIT_LIST_HEAD(&mdev->md_sync_work.list); 2818 INIT_LIST_HEAD(&mdev->md_sync_work.list);
2671 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 2819 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2672 2820
2673 mdev->resync_work.cb = w_resync_inactive; 2821 mdev->resync_work.cb = w_resync_inactive;
2674 mdev->unplug_work.cb = w_send_write_hint; 2822 mdev->unplug_work.cb = w_send_write_hint;
2823 mdev->go_diskless.cb = w_go_diskless;
2675 mdev->md_sync_work.cb = w_md_sync; 2824 mdev->md_sync_work.cb = w_md_sync;
2676 mdev->bm_io_work.w.cb = w_bitmap_io; 2825 mdev->bm_io_work.w.cb = w_bitmap_io;
2677 init_timer(&mdev->resync_timer); 2826 init_timer(&mdev->resync_timer);
@@ -2683,6 +2832,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2683 2832
2684 init_waitqueue_head(&mdev->misc_wait); 2833 init_waitqueue_head(&mdev->misc_wait);
2685 init_waitqueue_head(&mdev->state_wait); 2834 init_waitqueue_head(&mdev->state_wait);
2835 init_waitqueue_head(&mdev->net_cnt_wait);
2686 init_waitqueue_head(&mdev->ee_wait); 2836 init_waitqueue_head(&mdev->ee_wait);
2687 init_waitqueue_head(&mdev->al_wait); 2837 init_waitqueue_head(&mdev->al_wait);
2688 init_waitqueue_head(&mdev->seq_wait); 2838 init_waitqueue_head(&mdev->seq_wait);
@@ -2698,6 +2848,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2698 2848
2699void drbd_mdev_cleanup(struct drbd_conf *mdev) 2849void drbd_mdev_cleanup(struct drbd_conf *mdev)
2700{ 2850{
2851 int i;
2701 if (mdev->receiver.t_state != None) 2852 if (mdev->receiver.t_state != None)
2702 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", 2853 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2703 mdev->receiver.t_state); 2854 mdev->receiver.t_state);
@@ -2714,9 +2865,13 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2714 mdev->p_size = 2865 mdev->p_size =
2715 mdev->rs_start = 2866 mdev->rs_start =
2716 mdev->rs_total = 2867 mdev->rs_total =
2717 mdev->rs_failed = 2868 mdev->rs_failed = 0;
2718 mdev->rs_mark_left = 2869 mdev->rs_last_events = 0;
2719 mdev->rs_mark_time = 0; 2870 mdev->rs_last_sect_ev = 0;
2871 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2872 mdev->rs_mark_left[i] = 0;
2873 mdev->rs_mark_time[i] = 0;
2874 }
2720 D_ASSERT(mdev->net_conf == NULL); 2875 D_ASSERT(mdev->net_conf == NULL);
2721 2876
2722 drbd_set_my_capacity(mdev, 0); 2877 drbd_set_my_capacity(mdev, 0);
@@ -2727,6 +2882,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2727 } 2882 }
2728 2883
2729 drbd_free_resources(mdev); 2884 drbd_free_resources(mdev);
2885 clear_bit(AL_SUSPENDED, &mdev->flags);
2730 2886
2731 /* 2887 /*
2732 * currently we drbd_init_ee only on module load, so 2888 * currently we drbd_init_ee only on module load, so
@@ -2742,6 +2898,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2742 D_ASSERT(list_empty(&mdev->meta.work.q)); 2898 D_ASSERT(list_empty(&mdev->meta.work.q));
2743 D_ASSERT(list_empty(&mdev->resync_work.list)); 2899 D_ASSERT(list_empty(&mdev->resync_work.list));
2744 D_ASSERT(list_empty(&mdev->unplug_work.list)); 2900 D_ASSERT(list_empty(&mdev->unplug_work.list));
2901 D_ASSERT(list_empty(&mdev->go_diskless.list));
2745 2902
2746} 2903}
2747 2904
@@ -3281,9 +3438,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
3281 sector_t sector; 3438 sector_t sector;
3282 int i; 3439 int i;
3283 3440
3441 del_timer(&mdev->md_sync_timer);
3442 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3284 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) 3443 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3285 return; 3444 return;
3286 del_timer(&mdev->md_sync_timer);
3287 3445
3288 /* We use here D_FAILED and not D_ATTACHING because we try to write 3446 /* We use here D_FAILED and not D_ATTACHING because we try to write
3289 * metadata even if we detach due to a disk failure! */ 3447 * metadata even if we detach due to a disk failure! */
@@ -3311,12 +3469,9 @@ void drbd_md_sync(struct drbd_conf *mdev)
3311 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); 3469 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3312 sector = mdev->ldev->md.md_offset; 3470 sector = mdev->ldev->md.md_offset;
3313 3471
3314 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { 3472 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3315 clear_bit(MD_DIRTY, &mdev->flags);
3316 } else {
3317 /* this was a try anyways ... */ 3473 /* this was a try anyways ... */
3318 dev_err(DEV, "meta data update failed!\n"); 3474 dev_err(DEV, "meta data update failed!\n");
3319
3320 drbd_chk_io_error(mdev, 1, TRUE); 3475 drbd_chk_io_error(mdev, 1, TRUE);
3321 } 3476 }
3322 3477
@@ -3403,6 +3558,28 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3403 return rv; 3558 return rv;
3404} 3559}
3405 3560
3561static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3562{
3563 static char *uuid_str[UI_EXTENDED_SIZE] = {
3564 [UI_CURRENT] = "CURRENT",
3565 [UI_BITMAP] = "BITMAP",
3566 [UI_HISTORY_START] = "HISTORY_START",
3567 [UI_HISTORY_END] = "HISTORY_END",
3568 [UI_SIZE] = "SIZE",
3569 [UI_FLAGS] = "FLAGS",
3570 };
3571
3572 if (index >= UI_EXTENDED_SIZE) {
3573 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3574 return;
3575 }
3576
3577 dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3578 uuid_str[index],
3579 (unsigned long long)mdev->ldev->md.uuid[index]);
3580}
3581
3582
3406/** 3583/**
3407 * drbd_md_mark_dirty() - Mark meta data super block as dirty 3584 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3408 * @mdev: DRBD device. 3585 * @mdev: DRBD device.
@@ -3411,19 +3588,31 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3411 * the meta-data super block. This function sets MD_DIRTY, and starts a 3588 * the meta-data super block. This function sets MD_DIRTY, and starts a
3412 * timer that ensures that within five seconds you have to call drbd_md_sync(). 3589 * timer that ensures that within five seconds you have to call drbd_md_sync().
3413 */ 3590 */
3591#ifdef DEBUG
3592void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3593{
3594 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3595 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3596 mdev->last_md_mark_dirty.line = line;
3597 mdev->last_md_mark_dirty.func = func;
3598 }
3599}
3600#else
3414void drbd_md_mark_dirty(struct drbd_conf *mdev) 3601void drbd_md_mark_dirty(struct drbd_conf *mdev)
3415{ 3602{
3416 set_bit(MD_DIRTY, &mdev->flags); 3603 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3417 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); 3604 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3418} 3605}
3419 3606#endif
3420 3607
3421static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) 3608static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3422{ 3609{
3423 int i; 3610 int i;
3424 3611
3425 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) 3612 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3426 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; 3613 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3614 debug_drbd_uuid(mdev, i+1);
3615 }
3427} 3616}
3428 3617
3429void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3618void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
@@ -3438,6 +3627,7 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3438 } 3627 }
3439 3628
3440 mdev->ldev->md.uuid[idx] = val; 3629 mdev->ldev->md.uuid[idx] = val;
3630 debug_drbd_uuid(mdev, idx);
3441 drbd_md_mark_dirty(mdev); 3631 drbd_md_mark_dirty(mdev);
3442} 3632}
3443 3633
@@ -3447,6 +3637,7 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3447 if (mdev->ldev->md.uuid[idx]) { 3637 if (mdev->ldev->md.uuid[idx]) {
3448 drbd_uuid_move_history(mdev); 3638 drbd_uuid_move_history(mdev);
3449 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; 3639 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3640 debug_drbd_uuid(mdev, UI_HISTORY_START);
3450 } 3641 }
3451 _drbd_uuid_set(mdev, idx, val); 3642 _drbd_uuid_set(mdev, idx, val);
3452} 3643}
@@ -3465,6 +3656,7 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3465 dev_info(DEV, "Creating new current UUID\n"); 3656 dev_info(DEV, "Creating new current UUID\n");
3466 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); 3657 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3467 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; 3658 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3659 debug_drbd_uuid(mdev, UI_BITMAP);
3468 3660
3469 get_random_bytes(&val, sizeof(u64)); 3661 get_random_bytes(&val, sizeof(u64));
3470 _drbd_uuid_set(mdev, UI_CURRENT, val); 3662 _drbd_uuid_set(mdev, UI_CURRENT, val);
@@ -3479,6 +3671,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3479 drbd_uuid_move_history(mdev); 3671 drbd_uuid_move_history(mdev);
3480 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; 3672 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3481 mdev->ldev->md.uuid[UI_BITMAP] = 0; 3673 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3674 debug_drbd_uuid(mdev, UI_HISTORY_START);
3675 debug_drbd_uuid(mdev, UI_BITMAP);
3482 } else { 3676 } else {
3483 if (mdev->ldev->md.uuid[UI_BITMAP]) 3677 if (mdev->ldev->md.uuid[UI_BITMAP])
3484 dev_warn(DEV, "bm UUID already set"); 3678 dev_warn(DEV, "bm UUID already set");
@@ -3486,6 +3680,7 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3486 mdev->ldev->md.uuid[UI_BITMAP] = val; 3680 mdev->ldev->md.uuid[UI_BITMAP] = val;
3487 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); 3681 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3488 3682
3683 debug_drbd_uuid(mdev, UI_BITMAP);
3489 } 3684 }
3490 drbd_md_mark_dirty(mdev); 3685 drbd_md_mark_dirty(mdev);
3491} 3686}
@@ -3528,6 +3723,7 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3528{ 3723{
3529 int rv = -EIO; 3724 int rv = -EIO;
3530 3725
3726 drbd_resume_al(mdev);
3531 if (get_ldev_if_state(mdev, D_ATTACHING)) { 3727 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3532 drbd_bm_clear_all(mdev); 3728 drbd_bm_clear_all(mdev);
3533 rv = drbd_bm_write(mdev); 3729 rv = drbd_bm_write(mdev);
@@ -3560,6 +3756,32 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3560 return 1; 3756 return 1;
3561} 3757}
3562 3758
3759static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3760{
3761 D_ASSERT(mdev->state.disk == D_FAILED);
3762 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3763 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3764 * the protected members anymore, though, so in the after_state_ch work
3765 * it will be safe to free them. */
3766 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3767 /* We need to wait for return of references checked out while we still
3768 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
3769 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
3770
3771 clear_bit(GO_DISKLESS, &mdev->flags);
3772 return 1;
3773}
3774
3775void drbd_go_diskless(struct drbd_conf *mdev)
3776{
3777 D_ASSERT(mdev->state.disk == D_FAILED);
3778 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3779 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3780 /* don't drbd_queue_work_front,
3781 * we need to serialize with the after_state_ch work
3782 * of the -> D_FAILED transition. */
3783}
3784
3563/** 3785/**
3564 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap 3786 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3565 * @mdev: DRBD device. 3787 * @mdev: DRBD device.
@@ -3656,8 +3878,11 @@ static void md_sync_timer_fn(unsigned long data)
3656static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused) 3878static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3657{ 3879{
3658 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); 3880 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3881#ifdef DEBUG
3882 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3883 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3884#endif
3659 drbd_md_sync(mdev); 3885 drbd_md_sync(mdev);
3660
3661 return 1; 3886 return 1;
3662} 3887}
3663 3888
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 73131c5ae339..87925e97e613 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -33,10 +33,13 @@
33#include <linux/blkpg.h> 33#include <linux/blkpg.h>
34#include <linux/cpumask.h> 34#include <linux/cpumask.h>
35#include "drbd_int.h" 35#include "drbd_int.h"
36#include "drbd_req.h"
36#include "drbd_wrappers.h" 37#include "drbd_wrappers.h"
37#include <asm/unaligned.h> 38#include <asm/unaligned.h>
38#include <linux/drbd_tag_magic.h> 39#include <linux/drbd_tag_magic.h>
39#include <linux/drbd_limits.h> 40#include <linux/drbd_limits.h>
41#include <linux/compiler.h>
42#include <linux/kthread.h>
40 43
41static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); 44static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
42static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); 45static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
@@ -169,6 +172,10 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
169 put_net_conf(mdev); 172 put_net_conf(mdev);
170 } 173 }
171 174
175 /* The helper may take some time.
176 * write out any unsynced meta data changes now */
177 drbd_md_sync(mdev);
178
172 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 179 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
173 180
174 drbd_bcast_ev_helper(mdev, cmd); 181 drbd_bcast_ev_helper(mdev, cmd);
@@ -202,12 +209,10 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
202 put_ldev(mdev); 209 put_ldev(mdev);
203 } else { 210 } else {
204 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 211 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
205 return mdev->state.pdsk; 212 nps = mdev->state.pdsk;
213 goto out;
206 } 214 }
207 215
208 if (fp == FP_STONITH)
209 _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);
210
211 r = drbd_khelper(mdev, "fence-peer"); 216 r = drbd_khelper(mdev, "fence-peer");
212 217
213 switch ((r>>8) & 0xff) { 218 switch ((r>>8) & 0xff) {
@@ -252,9 +257,36 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
252 257
253 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 258 dev_info(DEV, "fence-peer helper returned %d (%s)\n",
254 (r>>8) & 0xff, ex_to_string); 259 (r>>8) & 0xff, ex_to_string);
260
261out:
262 if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
263 /* The handler was not successful... unfreeze here, the
264 state engine can not unfreeze... */
265 _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
266 }
267
255 return nps; 268 return nps;
256} 269}
257 270
271static int _try_outdate_peer_async(void *data)
272{
273 struct drbd_conf *mdev = (struct drbd_conf *)data;
274 enum drbd_disk_state nps;
275
276 nps = drbd_try_outdate_peer(mdev);
277 drbd_request_state(mdev, NS(pdsk, nps));
278
279 return 0;
280}
281
282void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
283{
284 struct task_struct *opa;
285
286 opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
287 if (IS_ERR(opa))
288 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
289}
258 290
259int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 291int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
260{ 292{
@@ -394,6 +426,39 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
394 return r; 426 return r;
395} 427}
396 428
429static struct drbd_conf *ensure_mdev(int minor, int create)
430{
431 struct drbd_conf *mdev;
432
433 if (minor >= minor_count)
434 return NULL;
435
436 mdev = minor_to_mdev(minor);
437
438 if (!mdev && create) {
439 struct gendisk *disk = NULL;
440 mdev = drbd_new_device(minor);
441
442 spin_lock_irq(&drbd_pp_lock);
443 if (minor_table[minor] == NULL) {
444 minor_table[minor] = mdev;
445 disk = mdev->vdisk;
446 mdev = NULL;
447 } /* else: we lost the race */
448 spin_unlock_irq(&drbd_pp_lock);
449
450 if (disk) /* we won the race above */
451 /* in case we ever add a drbd_delete_device(),
452 * don't forget the del_gendisk! */
453 add_disk(disk);
454 else /* we lost the race above */
455 drbd_free_mdev(mdev);
456
457 mdev = minor_to_mdev(minor);
458 }
459
460 return mdev;
461}
397 462
398static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 463static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
399 struct drbd_nl_cfg_reply *reply) 464 struct drbd_nl_cfg_reply *reply)
@@ -494,6 +559,8 @@ char *ppsize(char *buf, unsigned long long size)
494void drbd_suspend_io(struct drbd_conf *mdev) 559void drbd_suspend_io(struct drbd_conf *mdev)
495{ 560{
496 set_bit(SUSPEND_IO, &mdev->flags); 561 set_bit(SUSPEND_IO, &mdev->flags);
562 if (is_susp(mdev->state))
563 return;
497 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 564 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
498} 565}
499 566
@@ -713,9 +780,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
713 blk_queue_segment_boundary(q, PAGE_SIZE-1); 780 blk_queue_segment_boundary(q, PAGE_SIZE-1);
714 blk_stack_limits(&q->limits, &b->limits, 0); 781 blk_stack_limits(&q->limits, &b->limits, 0);
715 782
716 if (b->merge_bvec_fn)
717 dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
718 b->merge_bvec_fn);
719 dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); 783 dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));
720 784
721 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 785 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
@@ -729,14 +793,16 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
729/* serialize deconfig (worker exiting, doing cleanup) 793/* serialize deconfig (worker exiting, doing cleanup)
730 * and reconfig (drbdsetup disk, drbdsetup net) 794 * and reconfig (drbdsetup disk, drbdsetup net)
731 * 795 *
732 * wait for a potentially exiting worker, then restart it, 796 * Wait for a potentially exiting worker, then restart it,
733 * or start a new one. 797 * or start a new one. Flush any pending work, there may still be an
798 * after_state_change queued.
734 */ 799 */
735static void drbd_reconfig_start(struct drbd_conf *mdev) 800static void drbd_reconfig_start(struct drbd_conf *mdev)
736{ 801{
737 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); 802 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
738 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); 803 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
739 drbd_thread_start(&mdev->worker); 804 drbd_thread_start(&mdev->worker);
805 drbd_flush_workqueue(mdev);
740} 806}
741 807
742/* if still unconfigured, stops worker again. 808/* if still unconfigured, stops worker again.
@@ -756,6 +822,29 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
756 wake_up(&mdev->state_wait); 822 wake_up(&mdev->state_wait);
757} 823}
758 824
825/* Make sure IO is suspended before calling this function(). */
826static void drbd_suspend_al(struct drbd_conf *mdev)
827{
828 int s = 0;
829
830 if (lc_try_lock(mdev->act_log)) {
831 drbd_al_shrink(mdev);
832 lc_unlock(mdev->act_log);
833 } else {
834 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
835 return;
836 }
837
838 spin_lock_irq(&mdev->req_lock);
839 if (mdev->state.conn < C_CONNECTED)
840 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
841
842 spin_unlock_irq(&mdev->req_lock);
843
844 if (s)
845 dev_info(DEV, "Suspended AL updates\n");
846}
847
759/* does always return 0; 848/* does always return 0;
760 * interesting return code is in reply->ret_code */ 849 * interesting return code is in reply->ret_code */
761static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 850static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -769,6 +858,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
769 struct inode *inode, *inode2; 858 struct inode *inode, *inode2;
770 struct lru_cache *resync_lru = NULL; 859 struct lru_cache *resync_lru = NULL;
771 union drbd_state ns, os; 860 union drbd_state ns, os;
861 unsigned int max_seg_s;
772 int rv; 862 int rv;
773 int cp_discovered = 0; 863 int cp_discovered = 0;
774 int logical_block_size; 864 int logical_block_size;
@@ -803,6 +893,15 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
803 goto fail; 893 goto fail;
804 } 894 }
805 895
896 if (get_net_conf(mdev)) {
897 int prot = mdev->net_conf->wire_protocol;
898 put_net_conf(mdev);
899 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
900 retcode = ERR_STONITH_AND_PROT_A;
901 goto fail;
902 }
903 }
904
806 nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); 905 nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
807 if (IS_ERR(nbc->lo_file)) { 906 if (IS_ERR(nbc->lo_file)) {
808 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, 907 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
@@ -924,7 +1023,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
924 1023
925 drbd_suspend_io(mdev); 1024 drbd_suspend_io(mdev);
926 /* also wait for the last barrier ack. */ 1025 /* also wait for the last barrier ack. */
927 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); 1026 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
928 /* and for any other previously queued work */ 1027 /* and for any other previously queued work */
929 drbd_flush_workqueue(mdev); 1028 drbd_flush_workqueue(mdev);
930 1029
@@ -1021,7 +1120,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1021 else 1120 else
1022 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1121 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1023 1122
1024 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) { 1123 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1124 !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
1025 set_bit(CRASHED_PRIMARY, &mdev->flags); 1125 set_bit(CRASHED_PRIMARY, &mdev->flags);
1026 cp_discovered = 1; 1126 cp_discovered = 1;
1027 } 1127 }
@@ -1031,7 +1131,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1031 mdev->read_cnt = 0; 1131 mdev->read_cnt = 0;
1032 mdev->writ_cnt = 0; 1132 mdev->writ_cnt = 0;
1033 1133
1034 drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); 1134 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
1135 if (mdev->state.conn == C_CONNECTED) {
1136 /* We are Primary, Connected, and now attach a new local
1137 * backing store. We must not increase the user visible maximum
1138 * bio size on this device to something the peer may not be
1139 * able to handle. */
1140 if (mdev->agreed_pro_version < 94)
1141 max_seg_s = queue_max_segment_size(mdev->rq_queue);
1142 else if (mdev->agreed_pro_version == 94)
1143 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
1144 /* else: drbd 8.3.9 and later, stay with default */
1145 }
1146
1147 drbd_setup_queue_param(mdev, max_seg_s);
1035 1148
1036 /* If I am currently not R_PRIMARY, 1149 /* If I am currently not R_PRIMARY,
1037 * but meta data primary indicator is set, 1150 * but meta data primary indicator is set,
@@ -1079,6 +1192,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1079 drbd_al_to_on_disk_bm(mdev); 1192 drbd_al_to_on_disk_bm(mdev);
1080 } 1193 }
1081 1194
1195 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1196 drbd_suspend_al(mdev); /* IO is still suspended here... */
1197
1082 spin_lock_irq(&mdev->req_lock); 1198 spin_lock_irq(&mdev->req_lock);
1083 os = mdev->state; 1199 os = mdev->state;
1084 ns.i = os.i; 1200 ns.i = os.i;
@@ -1235,7 +1351,16 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1235 && (new_conf->wire_protocol != DRBD_PROT_C)) { 1351 && (new_conf->wire_protocol != DRBD_PROT_C)) {
1236 retcode = ERR_NOT_PROTO_C; 1352 retcode = ERR_NOT_PROTO_C;
1237 goto fail; 1353 goto fail;
1238 }; 1354 }
1355
1356 if (get_ldev(mdev)) {
1357 enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1358 put_ldev(mdev);
1359 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1360 retcode = ERR_STONITH_AND_PROT_A;
1361 goto fail;
1362 }
1363 }
1239 1364
1240 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { 1365 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1241 retcode = ERR_DISCARD; 1366 retcode = ERR_DISCARD;
@@ -1350,6 +1475,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1350 } 1475 }
1351 } 1476 }
1352 1477
1478 drbd_flush_workqueue(mdev);
1353 spin_lock_irq(&mdev->req_lock); 1479 spin_lock_irq(&mdev->req_lock);
1354 if (mdev->net_conf != NULL) { 1480 if (mdev->net_conf != NULL) {
1355 retcode = ERR_NET_CONFIGURED; 1481 retcode = ERR_NET_CONFIGURED;
@@ -1388,10 +1514,9 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1388 mdev->int_dig_out=int_dig_out; 1514 mdev->int_dig_out=int_dig_out;
1389 mdev->int_dig_in=int_dig_in; 1515 mdev->int_dig_in=int_dig_in;
1390 mdev->int_dig_vv=int_dig_vv; 1516 mdev->int_dig_vv=int_dig_vv;
1517 retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
1391 spin_unlock_irq(&mdev->req_lock); 1518 spin_unlock_irq(&mdev->req_lock);
1392 1519
1393 retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);
1394
1395 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1520 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1396 reply->ret_code = retcode; 1521 reply->ret_code = retcode;
1397 drbd_reconfig_done(mdev); 1522 drbd_reconfig_done(mdev);
@@ -1546,6 +1671,8 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1546 struct crypto_hash *csums_tfm = NULL; 1671 struct crypto_hash *csums_tfm = NULL;
1547 struct syncer_conf sc; 1672 struct syncer_conf sc;
1548 cpumask_var_t new_cpu_mask; 1673 cpumask_var_t new_cpu_mask;
1674 int *rs_plan_s = NULL;
1675 int fifo_size;
1549 1676
1550 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { 1677 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
1551 retcode = ERR_NOMEM; 1678 retcode = ERR_NOMEM;
@@ -1557,6 +1684,12 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1557 sc.rate = DRBD_RATE_DEF; 1684 sc.rate = DRBD_RATE_DEF;
1558 sc.after = DRBD_AFTER_DEF; 1685 sc.after = DRBD_AFTER_DEF;
1559 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1686 sc.al_extents = DRBD_AL_EXTENTS_DEF;
1687 sc.on_no_data = DRBD_ON_NO_DATA_DEF;
1688 sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
1689 sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
1690 sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
1691 sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
1692 sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
1560 } else 1693 } else
1561 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1694 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
1562 1695
@@ -1634,6 +1767,12 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1634 } 1767 }
1635#undef AL_MAX 1768#undef AL_MAX
1636 1769
1770 /* to avoid spurious errors when configuring minors before configuring
1771 * the minors they depend on: if necessary, first create the minor we
1772 * depend on */
1773 if (sc.after >= 0)
1774 ensure_mdev(sc.after, 1);
1775
1637 /* most sanity checks done, try to assign the new sync-after 1776 /* most sanity checks done, try to assign the new sync-after
1638 * dependency. need to hold the global lock in there, 1777 * dependency. need to hold the global lock in there,
1639 * to avoid a race in the dependency loop check. */ 1778 * to avoid a race in the dependency loop check. */
@@ -1641,6 +1780,16 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1641 if (retcode != NO_ERROR) 1780 if (retcode != NO_ERROR)
1642 goto fail; 1781 goto fail;
1643 1782
1783 fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1784 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1785 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1786 if (!rs_plan_s) {
1787 dev_err(DEV, "kmalloc of fifo_buffer failed");
1788 retcode = ERR_NOMEM;
1789 goto fail;
1790 }
1791 }
1792
1644 /* ok, assign the rest of it as well. 1793 /* ok, assign the rest of it as well.
1645 * lock against receive_SyncParam() */ 1794 * lock against receive_SyncParam() */
1646 spin_lock(&mdev->peer_seq_lock); 1795 spin_lock(&mdev->peer_seq_lock);
@@ -1657,6 +1806,15 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1657 mdev->verify_tfm = verify_tfm; 1806 mdev->verify_tfm = verify_tfm;
1658 verify_tfm = NULL; 1807 verify_tfm = NULL;
1659 } 1808 }
1809
1810 if (fifo_size != mdev->rs_plan_s.size) {
1811 kfree(mdev->rs_plan_s.values);
1812 mdev->rs_plan_s.values = rs_plan_s;
1813 mdev->rs_plan_s.size = fifo_size;
1814 mdev->rs_planed = 0;
1815 rs_plan_s = NULL;
1816 }
1817
1660 spin_unlock(&mdev->peer_seq_lock); 1818 spin_unlock(&mdev->peer_seq_lock);
1661 1819
1662 if (get_ldev(mdev)) { 1820 if (get_ldev(mdev)) {
@@ -1688,6 +1846,7 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1688 1846
1689 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1847 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1690fail: 1848fail:
1849 kfree(rs_plan_s);
1691 free_cpumask_var(new_cpu_mask); 1850 free_cpumask_var(new_cpu_mask);
1692 crypto_free_hash(csums_tfm); 1851 crypto_free_hash(csums_tfm);
1693 crypto_free_hash(verify_tfm); 1852 crypto_free_hash(verify_tfm);
@@ -1721,12 +1880,38 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
1721 return 0; 1880 return 0;
1722} 1881}
1723 1882
1883static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
1884{
1885 int rv;
1886
1887 rv = drbd_bmio_set_n_write(mdev);
1888 drbd_suspend_al(mdev);
1889 return rv;
1890}
1891
1724static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1892static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1725 struct drbd_nl_cfg_reply *reply) 1893 struct drbd_nl_cfg_reply *reply)
1726{ 1894{
1895 int retcode;
1727 1896
1728 reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); 1897 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
1898
1899 if (retcode < SS_SUCCESS) {
1900 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
1901 /* The peer will get a resync upon connect anyways. Just make that
1902 into a full resync. */
1903 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
1904 if (retcode >= SS_SUCCESS) {
1905 /* open coded drbd_bitmap_io() */
1906 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
1907 "set_n_write from invalidate_peer"))
1908 retcode = ERR_IO_MD_DISK;
1909 }
1910 } else
1911 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
1912 }
1729 1913
1914 reply->ret_code = retcode;
1730 return 0; 1915 return 0;
1731} 1916}
1732 1917
@@ -1765,7 +1950,21 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
1765static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1950static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1766 struct drbd_nl_cfg_reply *reply) 1951 struct drbd_nl_cfg_reply *reply)
1767{ 1952{
1768 reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); 1953 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1954 drbd_uuid_new_current(mdev);
1955 clear_bit(NEW_CUR_UUID, &mdev->flags);
1956 drbd_md_sync(mdev);
1957 }
1958 drbd_suspend_io(mdev);
1959 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
1960 if (reply->ret_code == SS_SUCCESS) {
1961 if (mdev->state.conn < C_CONNECTED)
1962 tl_clear(mdev);
1963 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
1964 tl_restart(mdev, fail_frozen_disk_io);
1965 }
1966 drbd_resume_io(mdev);
1967
1769 return 0; 1968 return 0;
1770} 1969}
1771 1970
@@ -1941,40 +2140,6 @@ out:
1941 return 0; 2140 return 0;
1942} 2141}
1943 2142
1944static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
1945{
1946 struct drbd_conf *mdev;
1947
1948 if (nlp->drbd_minor >= minor_count)
1949 return NULL;
1950
1951 mdev = minor_to_mdev(nlp->drbd_minor);
1952
1953 if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
1954 struct gendisk *disk = NULL;
1955 mdev = drbd_new_device(nlp->drbd_minor);
1956
1957 spin_lock_irq(&drbd_pp_lock);
1958 if (minor_table[nlp->drbd_minor] == NULL) {
1959 minor_table[nlp->drbd_minor] = mdev;
1960 disk = mdev->vdisk;
1961 mdev = NULL;
1962 } /* else: we lost the race */
1963 spin_unlock_irq(&drbd_pp_lock);
1964
1965 if (disk) /* we won the race above */
1966 /* in case we ever add a drbd_delete_device(),
1967 * don't forget the del_gendisk! */
1968 add_disk(disk);
1969 else /* we lost the race above */
1970 drbd_free_mdev(mdev);
1971
1972 mdev = minor_to_mdev(nlp->drbd_minor);
1973 }
1974
1975 return mdev;
1976}
1977
1978struct cn_handler_struct { 2143struct cn_handler_struct {
1979 int (*function)(struct drbd_conf *, 2144 int (*function)(struct drbd_conf *,
1980 struct drbd_nl_cfg_req *, 2145 struct drbd_nl_cfg_req *,
@@ -2035,7 +2200,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
2035 goto fail; 2200 goto fail;
2036 } 2201 }
2037 2202
2038 mdev = ensure_mdev(nlp); 2203 mdev = ensure_mdev(nlp->drbd_minor,
2204 (nlp->flags & DRBD_NL_CREATE_DEVICE));
2039 if (!mdev) { 2205 if (!mdev) {
2040 retcode = ERR_MINOR_INVALID; 2206 retcode = ERR_MINOR_INVALID;
2041 goto fail; 2207 goto fail;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be3374b68460..ad325c5d0ce1 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -57,6 +57,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
57 unsigned long db, dt, dbdt, rt, rs_left; 57 unsigned long db, dt, dbdt, rt, rs_left;
58 unsigned int res; 58 unsigned int res;
59 int i, x, y; 59 int i, x, y;
60 int stalled = 0;
60 61
61 drbd_get_syncer_progress(mdev, &rs_left, &res); 62 drbd_get_syncer_progress(mdev, &rs_left, &res);
62 63
@@ -90,18 +91,17 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
90 * db: blocks written from mark until now 91 * db: blocks written from mark until now
91 * rt: remaining time 92 * rt: remaining time
92 */ 93 */
93 dt = (jiffies - mdev->rs_mark_time) / HZ; 94 /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is
94 95 * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
95 if (dt > 20) { 96 * least DRBD_SYNC_MARK_STEP time before it will be modified. */
96 /* if we made no update to rs_mark_time for too long, 97 i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
97 * we are stalled. show that. */ 98 dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
98 seq_printf(seq, "stalled\n"); 99 if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
99 return; 100 stalled = 1;
100 }
101 101
102 if (!dt) 102 if (!dt)
103 dt++; 103 dt++;
104 db = mdev->rs_mark_left - rs_left; 104 db = mdev->rs_mark_left[i] - rs_left;
105 rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ 105 rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
106 106
107 seq_printf(seq, "finish: %lu:%02lu:%02lu", 107 seq_printf(seq, "finish: %lu:%02lu:%02lu",
@@ -118,7 +118,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
118 /* mean speed since syncer started 118 /* mean speed since syncer started
119 * we do account for PausedSync periods */ 119 * we do account for PausedSync periods */
120 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; 120 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
121 if (dt <= 0) 121 if (dt == 0)
122 dt = 1; 122 dt = 1;
123 db = mdev->rs_total - rs_left; 123 db = mdev->rs_total - rs_left;
124 dbdt = Bit2KB(db/dt); 124 dbdt = Bit2KB(db/dt);
@@ -128,7 +128,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
128 else 128 else
129 seq_printf(seq, " (%ld)", dbdt); 129 seq_printf(seq, " (%ld)", dbdt);
130 130
131 seq_printf(seq, " K/sec\n"); 131 if (mdev->state.conn == C_SYNC_TARGET) {
132 if (mdev->c_sync_rate > 1000)
133 seq_printf(seq, " want: %d,%03d",
134 mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
135 else
136 seq_printf(seq, " want: %d", mdev->c_sync_rate);
137 }
138 seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
132} 139}
133 140
134static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) 141static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
@@ -196,7 +203,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
196 seq_printf(seq, "%2d: cs:Unconfigured\n", i); 203 seq_printf(seq, "%2d: cs:Unconfigured\n", i);
197 } else { 204 } else {
198 seq_printf(seq, 205 seq_printf(seq,
199 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c\n" 206 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
200 " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " 207 " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
201 "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c", 208 "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
202 i, sn, 209 i, sn,
@@ -206,11 +213,12 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
206 drbd_disk_str(mdev->state.pdsk), 213 drbd_disk_str(mdev->state.pdsk),
207 (mdev->net_conf == NULL ? ' ' : 214 (mdev->net_conf == NULL ? ' ' :
208 (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')), 215 (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')),
209 mdev->state.susp ? 's' : 'r', 216 is_susp(mdev->state) ? 's' : 'r',
210 mdev->state.aftr_isp ? 'a' : '-', 217 mdev->state.aftr_isp ? 'a' : '-',
211 mdev->state.peer_isp ? 'p' : '-', 218 mdev->state.peer_isp ? 'p' : '-',
212 mdev->state.user_isp ? 'u' : '-', 219 mdev->state.user_isp ? 'u' : '-',
213 mdev->congestion_reason ?: '-', 220 mdev->congestion_reason ?: '-',
221 test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
214 mdev->send_cnt/2, 222 mdev->send_cnt/2,
215 mdev->recv_cnt/2, 223 mdev->recv_cnt/2,
216 mdev->writ_cnt/2, 224 mdev->writ_cnt/2,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 081522d3c742..efd6169acf2f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -241,7 +241,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
241 spin_unlock_irq(&mdev->req_lock); 241 spin_unlock_irq(&mdev->req_lock);
242 242
243 list_for_each_entry_safe(e, t, &reclaimed, w.list) 243 list_for_each_entry_safe(e, t, &reclaimed, w.list)
244 drbd_free_ee(mdev, e); 244 drbd_free_net_ee(mdev, e);
245} 245}
246 246
247/** 247/**
@@ -298,9 +298,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
298 * Is also used from inside an other spin_lock_irq(&mdev->req_lock); 298 * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
299 * Either links the page chain back to the global pool, 299 * Either links the page chain back to the global pool,
300 * or returns all pages to the system. */ 300 * or returns all pages to the system. */
301static void drbd_pp_free(struct drbd_conf *mdev, struct page *page) 301static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
302{ 302{
303 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
303 int i; 304 int i;
305
304 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) 306 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
305 i = page_chain_free(page); 307 i = page_chain_free(page);
306 else { 308 else {
@@ -311,10 +313,10 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
311 drbd_pp_vacant += i; 313 drbd_pp_vacant += i;
312 spin_unlock(&drbd_pp_lock); 314 spin_unlock(&drbd_pp_lock);
313 } 315 }
314 atomic_sub(i, &mdev->pp_in_use); 316 i = atomic_sub_return(i, a);
315 i = atomic_read(&mdev->pp_in_use);
316 if (i < 0) 317 if (i < 0)
317 dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); 318 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
319 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
318 wake_up(&drbd_pp_wait); 320 wake_up(&drbd_pp_wait);
319} 321}
320 322
@@ -365,7 +367,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
365 e->size = data_size; 367 e->size = data_size;
366 e->flags = 0; 368 e->flags = 0;
367 e->sector = sector; 369 e->sector = sector;
368 e->sector = sector;
369 e->block_id = id; 370 e->block_id = id;
370 371
371 return e; 372 return e;
@@ -375,9 +376,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
375 return NULL; 376 return NULL;
376} 377}
377 378
378void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) 379void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
379{ 380{
380 drbd_pp_free(mdev, e->pages); 381 if (e->flags & EE_HAS_DIGEST)
382 kfree(e->digest);
383 drbd_pp_free(mdev, e->pages, is_net);
381 D_ASSERT(atomic_read(&e->pending_bios) == 0); 384 D_ASSERT(atomic_read(&e->pending_bios) == 0);
382 D_ASSERT(hlist_unhashed(&e->colision)); 385 D_ASSERT(hlist_unhashed(&e->colision));
383 mempool_free(e, drbd_ee_mempool); 386 mempool_free(e, drbd_ee_mempool);
@@ -388,13 +391,14 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
388 LIST_HEAD(work_list); 391 LIST_HEAD(work_list);
389 struct drbd_epoch_entry *e, *t; 392 struct drbd_epoch_entry *e, *t;
390 int count = 0; 393 int count = 0;
394 int is_net = list == &mdev->net_ee;
391 395
392 spin_lock_irq(&mdev->req_lock); 396 spin_lock_irq(&mdev->req_lock);
393 list_splice_init(list, &work_list); 397 list_splice_init(list, &work_list);
394 spin_unlock_irq(&mdev->req_lock); 398 spin_unlock_irq(&mdev->req_lock);
395 399
396 list_for_each_entry_safe(e, t, &work_list, w.list) { 400 list_for_each_entry_safe(e, t, &work_list, w.list) {
397 drbd_free_ee(mdev, e); 401 drbd_free_some_ee(mdev, e, is_net);
398 count++; 402 count++;
399 } 403 }
400 return count; 404 return count;
@@ -423,7 +427,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
423 spin_unlock_irq(&mdev->req_lock); 427 spin_unlock_irq(&mdev->req_lock);
424 428
425 list_for_each_entry_safe(e, t, &reclaimed, w.list) 429 list_for_each_entry_safe(e, t, &reclaimed, w.list)
426 drbd_free_ee(mdev, e); 430 drbd_free_net_ee(mdev, e);
427 431
428 /* possible callbacks here: 432 /* possible callbacks here:
429 * e_end_block, and e_end_resync_block, e_send_discard_ack. 433 * e_end_block, and e_end_resync_block, e_send_discard_ack.
@@ -719,14 +723,14 @@ out:
719static int drbd_send_fp(struct drbd_conf *mdev, 723static int drbd_send_fp(struct drbd_conf *mdev,
720 struct socket *sock, enum drbd_packets cmd) 724 struct socket *sock, enum drbd_packets cmd)
721{ 725{
722 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header; 726 struct p_header80 *h = &mdev->data.sbuf.header.h80;
723 727
724 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0); 728 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
725} 729}
726 730
727static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock) 731static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
728{ 732{
729 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header; 733 struct p_header80 *h = &mdev->data.rbuf.header.h80;
730 int rr; 734 int rr;
731 735
732 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0); 736 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
@@ -776,9 +780,6 @@ static int drbd_connect(struct drbd_conf *mdev)
776 780
777 D_ASSERT(!mdev->data.socket); 781 D_ASSERT(!mdev->data.socket);
778 782
779 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
780 dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
781
782 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS) 783 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
783 return -2; 784 return -2;
784 785
@@ -927,6 +928,11 @@ retry:
927 928
928 drbd_thread_start(&mdev->asender); 929 drbd_thread_start(&mdev->asender);
929 930
931 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
932 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
933 put_ldev(mdev);
934 }
935
930 if (!drbd_send_protocol(mdev)) 936 if (!drbd_send_protocol(mdev))
931 return -1; 937 return -1;
932 drbd_send_sync_param(mdev, &mdev->sync_conf); 938 drbd_send_sync_param(mdev, &mdev->sync_conf);
@@ -946,22 +952,28 @@ out_release_sockets:
946 return -1; 952 return -1;
947} 953}
948 954
949static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h) 955static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
950{ 956{
957 union p_header *h = &mdev->data.rbuf.header;
951 int r; 958 int r;
952 959
953 r = drbd_recv(mdev, h, sizeof(*h)); 960 r = drbd_recv(mdev, h, sizeof(*h));
954
955 if (unlikely(r != sizeof(*h))) { 961 if (unlikely(r != sizeof(*h))) {
956 dev_err(DEV, "short read expecting header on sock: r=%d\n", r); 962 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
957 return FALSE; 963 return FALSE;
958 }; 964 }
959 h->command = be16_to_cpu(h->command); 965
960 h->length = be16_to_cpu(h->length); 966 if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
961 if (unlikely(h->magic != BE_DRBD_MAGIC)) { 967 *cmd = be16_to_cpu(h->h80.command);
962 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n", 968 *packet_size = be16_to_cpu(h->h80.length);
963 (long)be32_to_cpu(h->magic), 969 } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
964 h->command, h->length); 970 *cmd = be16_to_cpu(h->h95.command);
971 *packet_size = be32_to_cpu(h->h95.length);
972 } else {
973 dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
974 be32_to_cpu(h->h80.magic),
975 be16_to_cpu(h->h80.command),
976 be16_to_cpu(h->h80.length));
965 return FALSE; 977 return FALSE;
966 } 978 }
967 mdev->last_received = jiffies; 979 mdev->last_received = jiffies;
@@ -975,7 +987,7 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
975 987
976 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { 988 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
977 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, 989 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
978 NULL, BLKDEV_IFL_WAIT); 990 NULL);
979 if (rv) { 991 if (rv) {
980 dev_err(DEV, "local disk flush failed with status %d\n", rv); 992 dev_err(DEV, "local disk flush failed with status %d\n", rv);
981 /* would rather check on EOPNOTSUPP, but that is not reliable. 993 /* would rather check on EOPNOTSUPP, but that is not reliable.
@@ -1268,17 +1280,12 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
1268 return 1; 1280 return 1;
1269} 1281}
1270 1282
1271static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h) 1283static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1272{ 1284{
1273 int rv, issue_flush; 1285 int rv, issue_flush;
1274 struct p_barrier *p = (struct p_barrier *)h; 1286 struct p_barrier *p = &mdev->data.rbuf.barrier;
1275 struct drbd_epoch *epoch; 1287 struct drbd_epoch *epoch;
1276 1288
1277 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1278
1279 rv = drbd_recv(mdev, h->payload, h->length);
1280 ERR_IF(rv != h->length) return FALSE;
1281
1282 inc_unacked(mdev); 1289 inc_unacked(mdev);
1283 1290
1284 if (mdev->net_conf->wire_protocol != DRBD_PROT_C) 1291 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
@@ -1457,7 +1464,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1457 data_size -= rr; 1464 data_size -= rr;
1458 } 1465 }
1459 kunmap(page); 1466 kunmap(page);
1460 drbd_pp_free(mdev, page); 1467 drbd_pp_free(mdev, page, 0);
1461 return rv; 1468 return rv;
1462} 1469}
1463 1470
@@ -1562,30 +1569,29 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
1562 list_add(&e->w.list, &mdev->sync_ee); 1569 list_add(&e->w.list, &mdev->sync_ee);
1563 spin_unlock_irq(&mdev->req_lock); 1570 spin_unlock_irq(&mdev->req_lock);
1564 1571
1572 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1565 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 1573 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1566 return TRUE; 1574 return TRUE;
1567 1575
1576 /* drbd_submit_ee currently fails for one reason only:
1577 * not being able to allocate enough bios.
1578 * Is dropping the connection going to help? */
1579 spin_lock_irq(&mdev->req_lock);
1580 list_del(&e->w.list);
1581 spin_unlock_irq(&mdev->req_lock);
1582
1568 drbd_free_ee(mdev, e); 1583 drbd_free_ee(mdev, e);
1569fail: 1584fail:
1570 put_ldev(mdev); 1585 put_ldev(mdev);
1571 return FALSE; 1586 return FALSE;
1572} 1587}
1573 1588
1574static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) 1589static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1575{ 1590{
1576 struct drbd_request *req; 1591 struct drbd_request *req;
1577 sector_t sector; 1592 sector_t sector;
1578 unsigned int header_size, data_size;
1579 int ok; 1593 int ok;
1580 struct p_data *p = (struct p_data *)h; 1594 struct p_data *p = &mdev->data.rbuf.data;
1581
1582 header_size = sizeof(*p) - sizeof(*h);
1583 data_size = h->length - header_size;
1584
1585 ERR_IF(data_size == 0) return FALSE;
1586
1587 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1588 return FALSE;
1589 1595
1590 sector = be64_to_cpu(p->sector); 1596 sector = be64_to_cpu(p->sector);
1591 1597
@@ -1611,20 +1617,11 @@ static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1611 return ok; 1617 return ok;
1612} 1618}
1613 1619
1614static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h) 1620static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1615{ 1621{
1616 sector_t sector; 1622 sector_t sector;
1617 unsigned int header_size, data_size;
1618 int ok; 1623 int ok;
1619 struct p_data *p = (struct p_data *)h; 1624 struct p_data *p = &mdev->data.rbuf.data;
1620
1621 header_size = sizeof(*p) - sizeof(*h);
1622 data_size = h->length - header_size;
1623
1624 ERR_IF(data_size == 0) return FALSE;
1625
1626 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1627 return FALSE;
1628 1625
1629 sector = be64_to_cpu(p->sector); 1626 sector = be64_to_cpu(p->sector);
1630 D_ASSERT(p->block_id == ID_SYNCER); 1627 D_ASSERT(p->block_id == ID_SYNCER);
@@ -1640,9 +1637,11 @@ static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1640 1637
1641 ok = drbd_drain_block(mdev, data_size); 1638 ok = drbd_drain_block(mdev, data_size);
1642 1639
1643 drbd_send_ack_dp(mdev, P_NEG_ACK, p); 1640 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1644 } 1641 }
1645 1642
1643 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1644
1646 return ok; 1645 return ok;
1647} 1646}
1648 1647
@@ -1765,24 +1764,27 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1765 return ret; 1764 return ret;
1766} 1765}
1767 1766
1767static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1768{
1769 if (mdev->agreed_pro_version >= 95)
1770 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1771 (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1772 (dpf & DP_FUA ? REQ_FUA : 0) |
1773 (dpf & DP_FLUSH ? REQ_FUA : 0) |
1774 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1775 else
1776 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1777}
1778
1768/* mirrored write */ 1779/* mirrored write */
1769static int receive_Data(struct drbd_conf *mdev, struct p_header *h) 1780static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1770{ 1781{
1771 sector_t sector; 1782 sector_t sector;
1772 struct drbd_epoch_entry *e; 1783 struct drbd_epoch_entry *e;
1773 struct p_data *p = (struct p_data *)h; 1784 struct p_data *p = &mdev->data.rbuf.data;
1774 int header_size, data_size;
1775 int rw = WRITE; 1785 int rw = WRITE;
1776 u32 dp_flags; 1786 u32 dp_flags;
1777 1787
1778 header_size = sizeof(*p) - sizeof(*h);
1779 data_size = h->length - header_size;
1780
1781 ERR_IF(data_size == 0) return FALSE;
1782
1783 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1784 return FALSE;
1785
1786 if (!get_ldev(mdev)) { 1788 if (!get_ldev(mdev)) {
1787 if (__ratelimit(&drbd_ratelimit_state)) 1789 if (__ratelimit(&drbd_ratelimit_state))
1788 dev_err(DEV, "Can not write mirrored data block " 1790 dev_err(DEV, "Can not write mirrored data block "
@@ -1792,7 +1794,7 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1792 mdev->peer_seq++; 1794 mdev->peer_seq++;
1793 spin_unlock(&mdev->peer_seq_lock); 1795 spin_unlock(&mdev->peer_seq_lock);
1794 1796
1795 drbd_send_ack_dp(mdev, P_NEG_ACK, p); 1797 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1796 atomic_inc(&mdev->current_epoch->epoch_size); 1798 atomic_inc(&mdev->current_epoch->epoch_size);
1797 return drbd_drain_block(mdev, data_size); 1799 return drbd_drain_block(mdev, data_size);
1798 } 1800 }
@@ -1839,12 +1841,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1839 spin_unlock(&mdev->epoch_lock); 1841 spin_unlock(&mdev->epoch_lock);
1840 1842
1841 dp_flags = be32_to_cpu(p->dp_flags); 1843 dp_flags = be32_to_cpu(p->dp_flags);
1842 if (dp_flags & DP_HARDBARRIER) { 1844 rw |= write_flags_to_bio(mdev, dp_flags);
1843 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); 1845
1844 /* rw |= REQ_HARDBARRIER; */
1845 }
1846 if (dp_flags & DP_RW_SYNC)
1847 rw |= REQ_SYNC | REQ_UNPLUG;
1848 if (dp_flags & DP_MAY_SET_IN_SYNC) 1846 if (dp_flags & DP_MAY_SET_IN_SYNC)
1849 e->flags |= EE_MAY_SET_IN_SYNC; 1847 e->flags |= EE_MAY_SET_IN_SYNC;
1850 1848
@@ -2007,6 +2005,16 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
2007 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) 2005 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2008 return TRUE; 2006 return TRUE;
2009 2007
2008 /* drbd_submit_ee currently fails for one reason only:
2009 * not being able to allocate enough bios.
2010 * Is dropping the connection going to help? */
2011 spin_lock_irq(&mdev->req_lock);
2012 list_del(&e->w.list);
2013 hlist_del_init(&e->colision);
2014 spin_unlock_irq(&mdev->req_lock);
2015 if (e->flags & EE_CALL_AL_COMPLETE_IO)
2016 drbd_al_complete_io(mdev, e->sector);
2017
2010out_interrupted: 2018out_interrupted:
2011 /* yes, the epoch_size now is imbalanced. 2019 /* yes, the epoch_size now is imbalanced.
2012 * but we drop the connection anyways, so we don't have a chance to 2020 * but we drop the connection anyways, so we don't have a chance to
@@ -2016,20 +2024,64 @@ out_interrupted:
2016 return FALSE; 2024 return FALSE;
2017} 2025}
2018 2026
2019static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) 2027/* We may throttle resync, if the lower device seems to be busy,
2028 * and current sync rate is above c_min_rate.
2029 *
2030 * To decide whether or not the lower device is busy, we use a scheme similar
2031 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2032 * (more than 64 sectors) of activity we cannot account for with our own resync
2033 * activity, it obviously is "busy".
2034 *
2035 * The current sync rate used here uses only the most recent two step marks,
2036 * to have a short time average so we can react faster.
2037 */
2038int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2039{
2040 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2041 unsigned long db, dt, dbdt;
2042 int curr_events;
2043 int throttle = 0;
2044
2045 /* feature disabled? */
2046 if (mdev->sync_conf.c_min_rate == 0)
2047 return 0;
2048
2049 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2050 (int)part_stat_read(&disk->part0, sectors[1]) -
2051 atomic_read(&mdev->rs_sect_ev);
2052 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2053 unsigned long rs_left;
2054 int i;
2055
2056 mdev->rs_last_events = curr_events;
2057
2058 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2059 * approx. */
2060 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2061 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2062
2063 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2064 if (!dt)
2065 dt++;
2066 db = mdev->rs_mark_left[i] - rs_left;
2067 dbdt = Bit2KB(db/dt);
2068
2069 if (dbdt > mdev->sync_conf.c_min_rate)
2070 throttle = 1;
2071 }
2072 return throttle;
2073}
2074
2075
2076static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
2020{ 2077{
2021 sector_t sector; 2078 sector_t sector;
2022 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 2079 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2023 struct drbd_epoch_entry *e; 2080 struct drbd_epoch_entry *e;
2024 struct digest_info *di = NULL; 2081 struct digest_info *di = NULL;
2025 int size, digest_size; 2082 int size, verb;
2026 unsigned int fault_type; 2083 unsigned int fault_type;
2027 struct p_block_req *p = 2084 struct p_block_req *p = &mdev->data.rbuf.block_req;
2028 (struct p_block_req *)h;
2029 const int brps = sizeof(*p)-sizeof(*h);
2030
2031 if (drbd_recv(mdev, h->payload, brps) != brps)
2032 return FALSE;
2033 2085
2034 sector = be64_to_cpu(p->sector); 2086 sector = be64_to_cpu(p->sector);
2035 size = be32_to_cpu(p->blksize); 2087 size = be32_to_cpu(p->blksize);
@@ -2046,12 +2098,31 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2046 } 2098 }
2047 2099
2048 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { 2100 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2049 if (__ratelimit(&drbd_ratelimit_state)) 2101 verb = 1;
2102 switch (cmd) {
2103 case P_DATA_REQUEST:
2104 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2105 break;
2106 case P_RS_DATA_REQUEST:
2107 case P_CSUM_RS_REQUEST:
2108 case P_OV_REQUEST:
2109 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2110 break;
2111 case P_OV_REPLY:
2112 verb = 0;
2113 dec_rs_pending(mdev);
2114 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2115 break;
2116 default:
2117 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2118 cmdname(cmd));
2119 }
2120 if (verb && __ratelimit(&drbd_ratelimit_state))
2050 dev_err(DEV, "Can not satisfy peer's read request, " 2121 dev_err(DEV, "Can not satisfy peer's read request, "
2051 "no local data.\n"); 2122 "no local data.\n");
2052 drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY : 2123
2053 P_NEG_RS_DREPLY , p); 2124 /* drain possibly payload */
2054 return drbd_drain_block(mdev, h->length - brps); 2125 return drbd_drain_block(mdev, digest_size);
2055 } 2126 }
2056 2127
2057 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD 2128 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
@@ -2063,31 +2134,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2063 return FALSE; 2134 return FALSE;
2064 } 2135 }
2065 2136
2066 switch (h->command) { 2137 switch (cmd) {
2067 case P_DATA_REQUEST: 2138 case P_DATA_REQUEST:
2068 e->w.cb = w_e_end_data_req; 2139 e->w.cb = w_e_end_data_req;
2069 fault_type = DRBD_FAULT_DT_RD; 2140 fault_type = DRBD_FAULT_DT_RD;
2070 break; 2141 /* application IO, don't drbd_rs_begin_io */
2142 goto submit;
2143
2071 case P_RS_DATA_REQUEST: 2144 case P_RS_DATA_REQUEST:
2072 e->w.cb = w_e_end_rsdata_req; 2145 e->w.cb = w_e_end_rsdata_req;
2073 fault_type = DRBD_FAULT_RS_RD; 2146 fault_type = DRBD_FAULT_RS_RD;
2074 /* Eventually this should become asynchronously. Currently it
2075 * blocks the whole receiver just to delay the reading of a
2076 * resync data block.
2077 * the drbd_work_queue mechanism is made for this...
2078 */
2079 if (!drbd_rs_begin_io(mdev, sector)) {
2080 /* we have been interrupted,
2081 * probably connection lost! */
2082 D_ASSERT(signal_pending(current));
2083 goto out_free_e;
2084 }
2085 break; 2147 break;
2086 2148
2087 case P_OV_REPLY: 2149 case P_OV_REPLY:
2088 case P_CSUM_RS_REQUEST: 2150 case P_CSUM_RS_REQUEST:
2089 fault_type = DRBD_FAULT_RS_RD; 2151 fault_type = DRBD_FAULT_RS_RD;
2090 digest_size = h->length - brps ;
2091 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO); 2152 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2092 if (!di) 2153 if (!di)
2093 goto out_free_e; 2154 goto out_free_e;
@@ -2095,31 +2156,25 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2095 di->digest_size = digest_size; 2156 di->digest_size = digest_size;
2096 di->digest = (((char *)di)+sizeof(struct digest_info)); 2157 di->digest = (((char *)di)+sizeof(struct digest_info));
2097 2158
2159 e->digest = di;
2160 e->flags |= EE_HAS_DIGEST;
2161
2098 if (drbd_recv(mdev, di->digest, digest_size) != digest_size) 2162 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2099 goto out_free_e; 2163 goto out_free_e;
2100 2164
2101 e->block_id = (u64)(unsigned long)di; 2165 if (cmd == P_CSUM_RS_REQUEST) {
2102 if (h->command == P_CSUM_RS_REQUEST) {
2103 D_ASSERT(mdev->agreed_pro_version >= 89); 2166 D_ASSERT(mdev->agreed_pro_version >= 89);
2104 e->w.cb = w_e_end_csum_rs_req; 2167 e->w.cb = w_e_end_csum_rs_req;
2105 } else if (h->command == P_OV_REPLY) { 2168 } else if (cmd == P_OV_REPLY) {
2106 e->w.cb = w_e_end_ov_reply; 2169 e->w.cb = w_e_end_ov_reply;
2107 dec_rs_pending(mdev); 2170 dec_rs_pending(mdev);
2108 break; 2171 /* drbd_rs_begin_io done when we sent this request,
2109 } 2172 * but accounting still needs to be done. */
2110 2173 goto submit_for_resync;
2111 if (!drbd_rs_begin_io(mdev, sector)) {
2112 /* we have been interrupted, probably connection lost! */
2113 D_ASSERT(signal_pending(current));
2114 goto out_free_e;
2115 } 2174 }
2116 break; 2175 break;
2117 2176
2118 case P_OV_REQUEST: 2177 case P_OV_REQUEST:
2119 if (mdev->state.conn >= C_CONNECTED &&
2120 mdev->state.conn != C_VERIFY_T)
2121 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2122 drbd_conn_str(mdev->state.conn));
2123 if (mdev->ov_start_sector == ~(sector_t)0 && 2178 if (mdev->ov_start_sector == ~(sector_t)0 &&
2124 mdev->agreed_pro_version >= 90) { 2179 mdev->agreed_pro_version >= 90) {
2125 mdev->ov_start_sector = sector; 2180 mdev->ov_start_sector = sector;
@@ -2130,37 +2185,63 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2130 } 2185 }
2131 e->w.cb = w_e_end_ov_req; 2186 e->w.cb = w_e_end_ov_req;
2132 fault_type = DRBD_FAULT_RS_RD; 2187 fault_type = DRBD_FAULT_RS_RD;
2133 /* Eventually this should become asynchronous. Currently it
2134 * blocks the whole receiver just to delay the reading of a
2135 * resync data block.
2136 * the drbd_work_queue mechanism is made for this...
2137 */
2138 if (!drbd_rs_begin_io(mdev, sector)) {
2139 /* we have been interrupted,
2140 * probably connection lost! */
2141 D_ASSERT(signal_pending(current));
2142 goto out_free_e;
2143 }
2144 break; 2188 break;
2145 2189
2146
2147 default: 2190 default:
2148 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", 2191 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2149 cmdname(h->command)); 2192 cmdname(cmd));
2150 fault_type = DRBD_FAULT_MAX; 2193 fault_type = DRBD_FAULT_MAX;
2194 goto out_free_e;
2151 } 2195 }
2152 2196
2153 spin_lock_irq(&mdev->req_lock); 2197 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2154 list_add(&e->w.list, &mdev->read_ee); 2198 * wrt the receiver, but it is not as straightforward as it may seem.
2155 spin_unlock_irq(&mdev->req_lock); 2199 * Various places in the resync start and stop logic assume resync
2200 * requests are processed in order, requeuing this on the worker thread
2201 * introduces a bunch of new code for synchronization between threads.
2202 *
2203 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2204 * "forever", throttling after drbd_rs_begin_io will lock that extent
2205 * for application writes for the same time. For now, just throttle
2206 * here, where the rest of the code expects the receiver to sleep for
2207 * a while, anyways.
2208 */
2209
2210 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2211 * this defers syncer requests for some time, before letting at least
2212 * on request through. The resync controller on the receiving side
2213 * will adapt to the incoming rate accordingly.
2214 *
2215 * We cannot throttle here if remote is Primary/SyncTarget:
2216 * we would also throttle its application reads.
2217 * In that case, throttling is done on the SyncTarget only.
2218 */
2219 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2220 msleep(100);
2221 if (drbd_rs_begin_io(mdev, e->sector))
2222 goto out_free_e;
2156 2223
2224submit_for_resync:
2225 atomic_add(size >> 9, &mdev->rs_sect_ev);
2226
2227submit:
2157 inc_unacked(mdev); 2228 inc_unacked(mdev);
2229 spin_lock_irq(&mdev->req_lock);
2230 list_add_tail(&e->w.list, &mdev->read_ee);
2231 spin_unlock_irq(&mdev->req_lock);
2158 2232
2159 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 2233 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2160 return TRUE; 2234 return TRUE;
2161 2235
2236 /* drbd_submit_ee currently fails for one reason only:
2237 * not being able to allocate enough bios.
2238 * Is dropping the connection going to help? */
2239 spin_lock_irq(&mdev->req_lock);
2240 list_del(&e->w.list);
2241 spin_unlock_irq(&mdev->req_lock);
2242 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2243
2162out_free_e: 2244out_free_e:
2163 kfree(di);
2164 put_ldev(mdev); 2245 put_ldev(mdev);
2165 drbd_free_ee(mdev, e); 2246 drbd_free_ee(mdev, e);
2166 return FALSE; 2247 return FALSE;
@@ -2699,20 +2780,13 @@ static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2699 return 1; 2780 return 1;
2700} 2781}
2701 2782
2702static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) 2783static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2703{ 2784{
2704 struct p_protocol *p = (struct p_protocol *)h; 2785 struct p_protocol *p = &mdev->data.rbuf.protocol;
2705 int header_size, data_size;
2706 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2786 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2707 int p_want_lose, p_two_primaries, cf; 2787 int p_want_lose, p_two_primaries, cf;
2708 char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2788 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2709 2789
2710 header_size = sizeof(*p) - sizeof(*h);
2711 data_size = h->length - header_size;
2712
2713 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2714 return FALSE;
2715
2716 p_proto = be32_to_cpu(p->protocol); 2790 p_proto = be32_to_cpu(p->protocol);
2717 p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2791 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2718 p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2792 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
@@ -2805,39 +2879,46 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2805 return tfm; 2879 return tfm;
2806} 2880}
2807 2881
2808static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h) 2882static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2809{ 2883{
2810 int ok = TRUE; 2884 int ok = TRUE;
2811 struct p_rs_param_89 *p = (struct p_rs_param_89 *)h; 2885 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2812 unsigned int header_size, data_size, exp_max_sz; 2886 unsigned int header_size, data_size, exp_max_sz;
2813 struct crypto_hash *verify_tfm = NULL; 2887 struct crypto_hash *verify_tfm = NULL;
2814 struct crypto_hash *csums_tfm = NULL; 2888 struct crypto_hash *csums_tfm = NULL;
2815 const int apv = mdev->agreed_pro_version; 2889 const int apv = mdev->agreed_pro_version;
2890 int *rs_plan_s = NULL;
2891 int fifo_size = 0;
2816 2892
2817 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 2893 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2818 : apv == 88 ? sizeof(struct p_rs_param) 2894 : apv == 88 ? sizeof(struct p_rs_param)
2819 + SHARED_SECRET_MAX 2895 + SHARED_SECRET_MAX
2820 : /* 89 */ sizeof(struct p_rs_param_89); 2896 : apv <= 94 ? sizeof(struct p_rs_param_89)
2897 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2821 2898
2822 if (h->length > exp_max_sz) { 2899 if (packet_size > exp_max_sz) {
2823 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 2900 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2824 h->length, exp_max_sz); 2901 packet_size, exp_max_sz);
2825 return FALSE; 2902 return FALSE;
2826 } 2903 }
2827 2904
2828 if (apv <= 88) { 2905 if (apv <= 88) {
2829 header_size = sizeof(struct p_rs_param) - sizeof(*h); 2906 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2830 data_size = h->length - header_size; 2907 data_size = packet_size - header_size;
2831 } else /* apv >= 89 */ { 2908 } else if (apv <= 94) {
2832 header_size = sizeof(struct p_rs_param_89) - sizeof(*h); 2909 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2833 data_size = h->length - header_size; 2910 data_size = packet_size - header_size;
2911 D_ASSERT(data_size == 0);
2912 } else {
2913 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2914 data_size = packet_size - header_size;
2834 D_ASSERT(data_size == 0); 2915 D_ASSERT(data_size == 0);
2835 } 2916 }
2836 2917
2837 /* initialize verify_alg and csums_alg */ 2918 /* initialize verify_alg and csums_alg */
2838 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2919 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2839 2920
2840 if (drbd_recv(mdev, h->payload, header_size) != header_size) 2921 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2841 return FALSE; 2922 return FALSE;
2842 2923
2843 mdev->sync_conf.rate = be32_to_cpu(p->rate); 2924 mdev->sync_conf.rate = be32_to_cpu(p->rate);
@@ -2896,6 +2977,22 @@ static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2896 } 2977 }
2897 } 2978 }
2898 2979
2980 if (apv > 94) {
2981 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2982 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2983 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2984 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2985 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2986
2987 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2988 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2989 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2990 if (!rs_plan_s) {
2991 dev_err(DEV, "kmalloc of fifo_buffer failed");
2992 goto disconnect;
2993 }
2994 }
2995 }
2899 2996
2900 spin_lock(&mdev->peer_seq_lock); 2997 spin_lock(&mdev->peer_seq_lock);
2901 /* lock against drbd_nl_syncer_conf() */ 2998 /* lock against drbd_nl_syncer_conf() */
@@ -2913,6 +3010,12 @@ static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2913 mdev->csums_tfm = csums_tfm; 3010 mdev->csums_tfm = csums_tfm;
2914 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 3011 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2915 } 3012 }
3013 if (fifo_size != mdev->rs_plan_s.size) {
3014 kfree(mdev->rs_plan_s.values);
3015 mdev->rs_plan_s.values = rs_plan_s;
3016 mdev->rs_plan_s.size = fifo_size;
3017 mdev->rs_planed = 0;
3018 }
2916 spin_unlock(&mdev->peer_seq_lock); 3019 spin_unlock(&mdev->peer_seq_lock);
2917 } 3020 }
2918 3021
@@ -2946,19 +3049,15 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
2946 (unsigned long long)a, (unsigned long long)b); 3049 (unsigned long long)a, (unsigned long long)b);
2947} 3050}
2948 3051
2949static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) 3052static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2950{ 3053{
2951 struct p_sizes *p = (struct p_sizes *)h; 3054 struct p_sizes *p = &mdev->data.rbuf.sizes;
2952 enum determine_dev_size dd = unchanged; 3055 enum determine_dev_size dd = unchanged;
2953 unsigned int max_seg_s; 3056 unsigned int max_seg_s;
2954 sector_t p_size, p_usize, my_usize; 3057 sector_t p_size, p_usize, my_usize;
2955 int ldsc = 0; /* local disk size changed */ 3058 int ldsc = 0; /* local disk size changed */
2956 enum dds_flags ddsf; 3059 enum dds_flags ddsf;
2957 3060
2958 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2959 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2960 return FALSE;
2961
2962 p_size = be64_to_cpu(p->d_size); 3061 p_size = be64_to_cpu(p->d_size);
2963 p_usize = be64_to_cpu(p->u_size); 3062 p_usize = be64_to_cpu(p->u_size);
2964 3063
@@ -2972,7 +3071,6 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2972 * we still need to figure out whether we accept that. */ 3071 * we still need to figure out whether we accept that. */
2973 mdev->p_size = p_size; 3072 mdev->p_size = p_size;
2974 3073
2975#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
2976 if (get_ldev(mdev)) { 3074 if (get_ldev(mdev)) {
2977 warn_if_differ_considerably(mdev, "lower level device sizes", 3075 warn_if_differ_considerably(mdev, "lower level device sizes",
2978 p_size, drbd_get_max_capacity(mdev->ldev)); 3076 p_size, drbd_get_max_capacity(mdev->ldev));
@@ -3029,6 +3127,8 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
3029 3127
3030 if (mdev->agreed_pro_version < 94) 3128 if (mdev->agreed_pro_version < 94)
3031 max_seg_s = be32_to_cpu(p->max_segment_size); 3129 max_seg_s = be32_to_cpu(p->max_segment_size);
3130 else if (mdev->agreed_pro_version == 94)
3131 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
3032 else /* drbd 8.3.8 onwards */ 3132 else /* drbd 8.3.8 onwards */
3033 max_seg_s = DRBD_MAX_SEGMENT_SIZE; 3133 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3034 3134
@@ -3062,16 +3162,12 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
3062 return TRUE; 3162 return TRUE;
3063} 3163}
3064 3164
3065static int receive_uuids(struct drbd_conf *mdev, struct p_header *h) 3165static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3066{ 3166{
3067 struct p_uuids *p = (struct p_uuids *)h; 3167 struct p_uuids *p = &mdev->data.rbuf.uuids;
3068 u64 *p_uuid; 3168 u64 *p_uuid;
3069 int i; 3169 int i;
3070 3170
3071 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3072 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3073 return FALSE;
3074
3075 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3171 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3076 3172
3077 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 3173 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
@@ -3107,6 +3203,11 @@ static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
3107 drbd_md_sync(mdev); 3203 drbd_md_sync(mdev);
3108 } 3204 }
3109 put_ldev(mdev); 3205 put_ldev(mdev);
3206 } else if (mdev->state.disk < D_INCONSISTENT &&
3207 mdev->state.role == R_PRIMARY) {
3208 /* I am a diskless primary, the peer just created a new current UUID
3209 for me. */
3210 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3110 } 3211 }
3111 3212
3112 /* Before we test for the disk state, we should wait until an eventually 3213 /* Before we test for the disk state, we should wait until an eventually
@@ -3150,16 +3251,12 @@ static union drbd_state convert_state(union drbd_state ps)
3150 return ms; 3251 return ms;
3151} 3252}
3152 3253
3153static int receive_req_state(struct drbd_conf *mdev, struct p_header *h) 3254static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3154{ 3255{
3155 struct p_req_state *p = (struct p_req_state *)h; 3256 struct p_req_state *p = &mdev->data.rbuf.req_state;
3156 union drbd_state mask, val; 3257 union drbd_state mask, val;
3157 int rv; 3258 int rv;
3158 3259
3159 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3160 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3161 return FALSE;
3162
3163 mask.i = be32_to_cpu(p->mask); 3260 mask.i = be32_to_cpu(p->mask);
3164 val.i = be32_to_cpu(p->val); 3261 val.i = be32_to_cpu(p->val);
3165 3262
@@ -3180,20 +3277,14 @@ static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3180 return TRUE; 3277 return TRUE;
3181} 3278}
3182 3279
3183static int receive_state(struct drbd_conf *mdev, struct p_header *h) 3280static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3184{ 3281{
3185 struct p_state *p = (struct p_state *)h; 3282 struct p_state *p = &mdev->data.rbuf.state;
3186 enum drbd_conns nconn, oconn; 3283 union drbd_state os, ns, peer_state;
3187 union drbd_state ns, peer_state;
3188 enum drbd_disk_state real_peer_disk; 3284 enum drbd_disk_state real_peer_disk;
3285 enum chg_state_flags cs_flags;
3189 int rv; 3286 int rv;
3190 3287
3191 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3192 return FALSE;
3193
3194 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3195 return FALSE;
3196
3197 peer_state.i = be32_to_cpu(p->state); 3288 peer_state.i = be32_to_cpu(p->state);
3198 3289
3199 real_peer_disk = peer_state.disk; 3290 real_peer_disk = peer_state.disk;
@@ -3204,38 +3295,72 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3204 3295
3205 spin_lock_irq(&mdev->req_lock); 3296 spin_lock_irq(&mdev->req_lock);
3206 retry: 3297 retry:
3207 oconn = nconn = mdev->state.conn; 3298 os = ns = mdev->state;
3208 spin_unlock_irq(&mdev->req_lock); 3299 spin_unlock_irq(&mdev->req_lock);
3209 3300
3210 if (nconn == C_WF_REPORT_PARAMS) 3301 /* peer says his disk is uptodate, while we think it is inconsistent,
3211 nconn = C_CONNECTED; 3302 * and this happens while we think we have a sync going on. */
3303 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3304 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3305 /* If we are (becoming) SyncSource, but peer is still in sync
3306 * preparation, ignore its uptodate-ness to avoid flapping, it
3307 * will change to inconsistent once the peer reaches active
3308 * syncing states.
3309 * It may have changed syncer-paused flags, however, so we
3310 * cannot ignore this completely. */
3311 if (peer_state.conn > C_CONNECTED &&
3312 peer_state.conn < C_SYNC_SOURCE)
3313 real_peer_disk = D_INCONSISTENT;
3314
3315 /* if peer_state changes to connected at the same time,
3316 * it explicitly notifies us that it finished resync.
3317 * Maybe we should finish it up, too? */
3318 else if (os.conn >= C_SYNC_SOURCE &&
3319 peer_state.conn == C_CONNECTED) {
3320 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3321 drbd_resync_finished(mdev);
3322 return TRUE;
3323 }
3324 }
3325
3326 /* peer says his disk is inconsistent, while we think it is uptodate,
3327 * and this happens while the peer still thinks we have a sync going on,
3328 * but we think we are already done with the sync.
3329 * We ignore this to avoid flapping pdsk.
3330 * This should not happen, if the peer is a recent version of drbd. */
3331 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3332 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3333 real_peer_disk = D_UP_TO_DATE;
3334
3335 if (ns.conn == C_WF_REPORT_PARAMS)
3336 ns.conn = C_CONNECTED;
3212 3337
3213 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3338 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3214 get_ldev_if_state(mdev, D_NEGOTIATING)) { 3339 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3215 int cr; /* consider resync */ 3340 int cr; /* consider resync */
3216 3341
3217 /* if we established a new connection */ 3342 /* if we established a new connection */
3218 cr = (oconn < C_CONNECTED); 3343 cr = (os.conn < C_CONNECTED);
3219 /* if we had an established connection 3344 /* if we had an established connection
3220 * and one of the nodes newly attaches a disk */ 3345 * and one of the nodes newly attaches a disk */
3221 cr |= (oconn == C_CONNECTED && 3346 cr |= (os.conn == C_CONNECTED &&
3222 (peer_state.disk == D_NEGOTIATING || 3347 (peer_state.disk == D_NEGOTIATING ||
3223 mdev->state.disk == D_NEGOTIATING)); 3348 os.disk == D_NEGOTIATING));
3224 /* if we have both been inconsistent, and the peer has been 3349 /* if we have both been inconsistent, and the peer has been
3225 * forced to be UpToDate with --overwrite-data */ 3350 * forced to be UpToDate with --overwrite-data */
3226 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); 3351 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3227 /* if we had been plain connected, and the admin requested to 3352 /* if we had been plain connected, and the admin requested to
3228 * start a sync by "invalidate" or "invalidate-remote" */ 3353 * start a sync by "invalidate" or "invalidate-remote" */
3229 cr |= (oconn == C_CONNECTED && 3354 cr |= (os.conn == C_CONNECTED &&
3230 (peer_state.conn >= C_STARTING_SYNC_S && 3355 (peer_state.conn >= C_STARTING_SYNC_S &&
3231 peer_state.conn <= C_WF_BITMAP_T)); 3356 peer_state.conn <= C_WF_BITMAP_T));
3232 3357
3233 if (cr) 3358 if (cr)
3234 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); 3359 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3235 3360
3236 put_ldev(mdev); 3361 put_ldev(mdev);
3237 if (nconn == C_MASK) { 3362 if (ns.conn == C_MASK) {
3238 nconn = C_CONNECTED; 3363 ns.conn = C_CONNECTED;
3239 if (mdev->state.disk == D_NEGOTIATING) { 3364 if (mdev->state.disk == D_NEGOTIATING) {
3240 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3365 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3241 } else if (peer_state.disk == D_NEGOTIATING) { 3366 } else if (peer_state.disk == D_NEGOTIATING) {
@@ -3245,7 +3370,7 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3245 } else { 3370 } else {
3246 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) 3371 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3247 return FALSE; 3372 return FALSE;
3248 D_ASSERT(oconn == C_WF_REPORT_PARAMS); 3373 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3249 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3374 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3250 return FALSE; 3375 return FALSE;
3251 } 3376 }
@@ -3253,18 +3378,28 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3253 } 3378 }
3254 3379
3255 spin_lock_irq(&mdev->req_lock); 3380 spin_lock_irq(&mdev->req_lock);
3256 if (mdev->state.conn != oconn) 3381 if (mdev->state.i != os.i)
3257 goto retry; 3382 goto retry;
3258 clear_bit(CONSIDER_RESYNC, &mdev->flags); 3383 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3259 ns.i = mdev->state.i;
3260 ns.conn = nconn;
3261 ns.peer = peer_state.role; 3384 ns.peer = peer_state.role;
3262 ns.pdsk = real_peer_disk; 3385 ns.pdsk = real_peer_disk;
3263 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 3386 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3264 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) 3387 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3265 ns.disk = mdev->new_state_tmp.disk; 3388 ns.disk = mdev->new_state_tmp.disk;
3266 3389 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3267 rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL); 3390 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3391 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3392 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3393 for temporal network outages! */
3394 spin_unlock_irq(&mdev->req_lock);
3395 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3396 tl_clear(mdev);
3397 drbd_uuid_new_current(mdev);
3398 clear_bit(NEW_CUR_UUID, &mdev->flags);
3399 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3400 return FALSE;
3401 }
3402 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3268 ns = mdev->state; 3403 ns = mdev->state;
3269 spin_unlock_irq(&mdev->req_lock); 3404 spin_unlock_irq(&mdev->req_lock);
3270 3405
@@ -3273,8 +3408,8 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3273 return FALSE; 3408 return FALSE;
3274 } 3409 }
3275 3410
3276 if (oconn > C_WF_REPORT_PARAMS) { 3411 if (os.conn > C_WF_REPORT_PARAMS) {
3277 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED && 3412 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3278 peer_state.disk != D_NEGOTIATING ) { 3413 peer_state.disk != D_NEGOTIATING ) {
3279 /* we want resync, peer has not yet decided to sync... */ 3414 /* we want resync, peer has not yet decided to sync... */
3280 /* Nowadays only used when forcing a node into primary role and 3415 /* Nowadays only used when forcing a node into primary role and
@@ -3291,9 +3426,9 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3291 return TRUE; 3426 return TRUE;
3292} 3427}
3293 3428
3294static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h) 3429static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3295{ 3430{
3296 struct p_rs_uuid *p = (struct p_rs_uuid *)h; 3431 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3297 3432
3298 wait_event(mdev->misc_wait, 3433 wait_event(mdev->misc_wait,
3299 mdev->state.conn == C_WF_SYNC_UUID || 3434 mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3302,10 +3437,6 @@ static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3302 3437
3303 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ 3438 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3304 3439
3305 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3306 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3307 return FALSE;
3308
3309 /* Here the _drbd_uuid_ functions are right, current should 3440 /* Here the _drbd_uuid_ functions are right, current should
3310 _not_ be rotated into the history */ 3441 _not_ be rotated into the history */
3311 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 3442 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -3324,14 +3455,14 @@ static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3324enum receive_bitmap_ret { OK, DONE, FAILED }; 3455enum receive_bitmap_ret { OK, DONE, FAILED };
3325 3456
3326static enum receive_bitmap_ret 3457static enum receive_bitmap_ret
3327receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h, 3458receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3328 unsigned long *buffer, struct bm_xfer_ctx *c) 3459 unsigned long *buffer, struct bm_xfer_ctx *c)
3329{ 3460{
3330 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3461 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3331 unsigned want = num_words * sizeof(long); 3462 unsigned want = num_words * sizeof(long);
3332 3463
3333 if (want != h->length) { 3464 if (want != data_size) {
3334 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length); 3465 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3335 return FAILED; 3466 return FAILED;
3336 } 3467 }
3337 if (want == 0) 3468 if (want == 0)
@@ -3360,7 +3491,7 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
3360 u64 tmp; 3491 u64 tmp;
3361 unsigned long s = c->bit_offset; 3492 unsigned long s = c->bit_offset;
3362 unsigned long e; 3493 unsigned long e;
3363 int len = p->head.length - (sizeof(*p) - sizeof(p->head)); 3494 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3364 int toggle = DCBP_get_start(p); 3495 int toggle = DCBP_get_start(p);
3365 int have; 3496 int have;
3366 int bits; 3497 int bits;
@@ -3429,7 +3560,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3429 const char *direction, struct bm_xfer_ctx *c) 3560 const char *direction, struct bm_xfer_ctx *c)
3430{ 3561{
3431 /* what would it take to transfer it "plaintext" */ 3562 /* what would it take to transfer it "plaintext" */
3432 unsigned plain = sizeof(struct p_header) * 3563 unsigned plain = sizeof(struct p_header80) *
3433 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1) 3564 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3434 + c->bm_words * sizeof(long); 3565 + c->bm_words * sizeof(long);
3435 unsigned total = c->bytes[0] + c->bytes[1]; 3566 unsigned total = c->bytes[0] + c->bytes[1];
@@ -3467,12 +3598,13 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3467 in order to be agnostic to the 32 vs 64 bits issue. 3598 in order to be agnostic to the 32 vs 64 bits issue.
3468 3599
3469 returns 0 on failure, 1 if we successfully received it. */ 3600 returns 0 on failure, 1 if we successfully received it. */
3470static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h) 3601static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3471{ 3602{
3472 struct bm_xfer_ctx c; 3603 struct bm_xfer_ctx c;
3473 void *buffer; 3604 void *buffer;
3474 enum receive_bitmap_ret ret; 3605 enum receive_bitmap_ret ret;
3475 int ok = FALSE; 3606 int ok = FALSE;
3607 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3476 3608
3477 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 3609 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3478 3610
@@ -3492,39 +3624,39 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3492 }; 3624 };
3493 3625
3494 do { 3626 do {
3495 if (h->command == P_BITMAP) { 3627 if (cmd == P_BITMAP) {
3496 ret = receive_bitmap_plain(mdev, h, buffer, &c); 3628 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3497 } else if (h->command == P_COMPRESSED_BITMAP) { 3629 } else if (cmd == P_COMPRESSED_BITMAP) {
3498 /* MAYBE: sanity check that we speak proto >= 90, 3630 /* MAYBE: sanity check that we speak proto >= 90,
3499 * and the feature is enabled! */ 3631 * and the feature is enabled! */
3500 struct p_compressed_bm *p; 3632 struct p_compressed_bm *p;
3501 3633
3502 if (h->length > BM_PACKET_PAYLOAD_BYTES) { 3634 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3503 dev_err(DEV, "ReportCBitmap packet too large\n"); 3635 dev_err(DEV, "ReportCBitmap packet too large\n");
3504 goto out; 3636 goto out;
3505 } 3637 }
3506 /* use the page buff */ 3638 /* use the page buff */
3507 p = buffer; 3639 p = buffer;
3508 memcpy(p, h, sizeof(*h)); 3640 memcpy(p, h, sizeof(*h));
3509 if (drbd_recv(mdev, p->head.payload, h->length) != h->length) 3641 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3510 goto out; 3642 goto out;
3511 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) { 3643 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3512 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length); 3644 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3513 return FAILED; 3645 return FAILED;
3514 } 3646 }
3515 ret = decode_bitmap_c(mdev, p, &c); 3647 ret = decode_bitmap_c(mdev, p, &c);
3516 } else { 3648 } else {
3517 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command); 3649 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3518 goto out; 3650 goto out;
3519 } 3651 }
3520 3652
3521 c.packets[h->command == P_BITMAP]++; 3653 c.packets[cmd == P_BITMAP]++;
3522 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length; 3654 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3523 3655
3524 if (ret != OK) 3656 if (ret != OK)
3525 break; 3657 break;
3526 3658
3527 if (!drbd_recv_header(mdev, h)) 3659 if (!drbd_recv_header(mdev, &cmd, &data_size))
3528 goto out; 3660 goto out;
3529 } while (ret == OK); 3661 } while (ret == OK);
3530 if (ret == FAILED) 3662 if (ret == FAILED)
@@ -3555,17 +3687,16 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3555 return ok; 3687 return ok;
3556} 3688}
3557 3689
3558static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent) 3690static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3559{ 3691{
3560 /* TODO zero copy sink :) */ 3692 /* TODO zero copy sink :) */
3561 static char sink[128]; 3693 static char sink[128];
3562 int size, want, r; 3694 int size, want, r;
3563 3695
3564 if (!silent) 3696 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3565 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 3697 cmd, data_size);
3566 h->command, h->length);
3567 3698
3568 size = h->length; 3699 size = data_size;
3569 while (size > 0) { 3700 while (size > 0) {
3570 want = min_t(int, size, sizeof(sink)); 3701 want = min_t(int, size, sizeof(sink));
3571 r = drbd_recv(mdev, sink, want); 3702 r = drbd_recv(mdev, sink, want);
@@ -3575,17 +3706,7 @@ static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
3575 return size == 0; 3706 return size == 0;
3576} 3707}
3577 3708
3578static int receive_skip(struct drbd_conf *mdev, struct p_header *h) 3709static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3579{
3580 return receive_skip_(mdev, h, 0);
3581}
3582
3583static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
3584{
3585 return receive_skip_(mdev, h, 1);
3586}
3587
3588static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3589{ 3710{
3590 if (mdev->state.disk >= D_INCONSISTENT) 3711 if (mdev->state.disk >= D_INCONSISTENT)
3591 drbd_kick_lo(mdev); 3712 drbd_kick_lo(mdev);
@@ -3597,108 +3718,94 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3597 return TRUE; 3718 return TRUE;
3598} 3719}
3599 3720
3600typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *); 3721typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3601 3722
3602static drbd_cmd_handler_f drbd_default_handler[] = { 3723struct data_cmd {
3603 [P_DATA] = receive_Data, 3724 int expect_payload;
3604 [P_DATA_REPLY] = receive_DataReply, 3725 size_t pkt_size;
3605 [P_RS_DATA_REPLY] = receive_RSDataReply, 3726 drbd_cmd_handler_f function;
3606 [P_BARRIER] = receive_Barrier, 3727};
3607 [P_BITMAP] = receive_bitmap, 3728
3608 [P_COMPRESSED_BITMAP] = receive_bitmap, 3729static struct data_cmd drbd_cmd_handler[] = {
3609 [P_UNPLUG_REMOTE] = receive_UnplugRemote, 3730 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3610 [P_DATA_REQUEST] = receive_DataRequest, 3731 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3611 [P_RS_DATA_REQUEST] = receive_DataRequest, 3732 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3612 [P_SYNC_PARAM] = receive_SyncParam, 3733 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3613 [P_SYNC_PARAM89] = receive_SyncParam, 3734 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3614 [P_PROTOCOL] = receive_protocol, 3735 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3615 [P_UUIDS] = receive_uuids, 3736 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3616 [P_SIZES] = receive_sizes, 3737 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3617 [P_STATE] = receive_state, 3738 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3618 [P_STATE_CHG_REQ] = receive_req_state, 3739 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3619 [P_SYNC_UUID] = receive_sync_uuid, 3740 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3620 [P_OV_REQUEST] = receive_DataRequest, 3741 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3621 [P_OV_REPLY] = receive_DataRequest, 3742 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3622 [P_CSUM_RS_REQUEST] = receive_DataRequest, 3743 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3623 [P_DELAY_PROBE] = receive_skip_silent, 3744 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3745 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3746 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3747 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3748 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3749 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3750 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3624 /* anything missing from this table is in 3751 /* anything missing from this table is in
3625 * the asender_tbl, see get_asender_cmd */ 3752 * the asender_tbl, see get_asender_cmd */
3626 [P_MAX_CMD] = NULL, 3753 [P_MAX_CMD] = { 0, 0, NULL },
3627}; 3754};
3628 3755
3629static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler; 3756/* All handler functions that expect a sub-header get that sub-heder in
3630static drbd_cmd_handler_f *drbd_opt_cmd_handler; 3757 mdev->data.rbuf.header.head.payload.
3758
3759 Usually in mdev->data.rbuf.header.head the callback can find the usual
3760 p_header, but they may not rely on that. Since there is also p_header95 !
3761 */
3631 3762
3632static void drbdd(struct drbd_conf *mdev) 3763static void drbdd(struct drbd_conf *mdev)
3633{ 3764{
3634 drbd_cmd_handler_f handler; 3765 union p_header *header = &mdev->data.rbuf.header;
3635 struct p_header *header = &mdev->data.rbuf.header; 3766 unsigned int packet_size;
3767 enum drbd_packets cmd;
3768 size_t shs; /* sub header size */
3769 int rv;
3636 3770
3637 while (get_t_state(&mdev->receiver) == Running) { 3771 while (get_t_state(&mdev->receiver) == Running) {
3638 drbd_thread_current_set_cpu(mdev); 3772 drbd_thread_current_set_cpu(mdev);
3639 if (!drbd_recv_header(mdev, header)) { 3773 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3640 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3774 goto err_out;
3641 break;
3642 }
3643 3775
3644 if (header->command < P_MAX_CMD) 3776 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3645 handler = drbd_cmd_handler[header->command]; 3777 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3646 else if (P_MAY_IGNORE < header->command 3778 goto err_out;
3647 && header->command < P_MAX_OPT_CMD) 3779 }
3648 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3649 else if (header->command > P_MAX_OPT_CMD)
3650 handler = receive_skip;
3651 else
3652 handler = NULL;
3653 3780
3654 if (unlikely(!handler)) { 3781 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3655 dev_err(DEV, "unknown packet type %d, l: %d!\n", 3782 rv = drbd_recv(mdev, &header->h80.payload, shs);
3656 header->command, header->length); 3783 if (unlikely(rv != shs)) {
3657 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3784 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3658 break; 3785 goto err_out;
3659 } 3786 }
3660 if (unlikely(!handler(mdev, header))) { 3787
3661 dev_err(DEV, "error receiving %s, l: %d!\n", 3788 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3662 cmdname(header->command), header->length); 3789 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3663 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3790 goto err_out;
3664 break;
3665 } 3791 }
3666 }
3667}
3668 3792
3669static void drbd_fail_pending_reads(struct drbd_conf *mdev) 3793 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3670{
3671 struct hlist_head *slot;
3672 struct hlist_node *pos;
3673 struct hlist_node *tmp;
3674 struct drbd_request *req;
3675 int i;
3676 3794
3677 /* 3795 if (unlikely(!rv)) {
3678 * Application READ requests 3796 dev_err(DEV, "error receiving %s, l: %d!\n",
3679 */ 3797 cmdname(cmd), packet_size);
3680 spin_lock_irq(&mdev->req_lock); 3798 goto err_out;
3681 for (i = 0; i < APP_R_HSIZE; i++) {
3682 slot = mdev->app_reads_hash+i;
3683 hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3684 /* it may (but should not any longer!)
3685 * be on the work queue; if that assert triggers,
3686 * we need to also grab the
3687 * spin_lock_irq(&mdev->data.work.q_lock);
3688 * and list_del_init here. */
3689 D_ASSERT(list_empty(&req->w.list));
3690 /* It would be nice to complete outside of spinlock.
3691 * But this is easier for now. */
3692 _req_mod(req, connection_lost_while_pending);
3693 } 3799 }
3694 } 3800 }
3695 for (i = 0; i < APP_R_HSIZE; i++)
3696 if (!hlist_empty(mdev->app_reads_hash+i))
3697 dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3698 "%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3699 3801
3700 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); 3802 if (0) {
3701 spin_unlock_irq(&mdev->req_lock); 3803 err_out:
3804 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3805 }
3806 /* If we leave here, we probably want to update at least the
3807 * "Connected" indicator on stable storage. Do so explicitly here. */
3808 drbd_md_sync(mdev);
3702} 3809}
3703 3810
3704void drbd_flush_workqueue(struct drbd_conf *mdev) 3811void drbd_flush_workqueue(struct drbd_conf *mdev)
@@ -3711,6 +3818,36 @@ void drbd_flush_workqueue(struct drbd_conf *mdev)
3711 wait_for_completion(&barr.done); 3818 wait_for_completion(&barr.done);
3712} 3819}
3713 3820
3821void drbd_free_tl_hash(struct drbd_conf *mdev)
3822{
3823 struct hlist_head *h;
3824
3825 spin_lock_irq(&mdev->req_lock);
3826
3827 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3828 spin_unlock_irq(&mdev->req_lock);
3829 return;
3830 }
3831 /* paranoia code */
3832 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3833 if (h->first)
3834 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3835 (int)(h - mdev->ee_hash), h->first);
3836 kfree(mdev->ee_hash);
3837 mdev->ee_hash = NULL;
3838 mdev->ee_hash_s = 0;
3839
3840 /* paranoia code */
3841 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3842 if (h->first)
3843 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3844 (int)(h - mdev->tl_hash), h->first);
3845 kfree(mdev->tl_hash);
3846 mdev->tl_hash = NULL;
3847 mdev->tl_hash_s = 0;
3848 spin_unlock_irq(&mdev->req_lock);
3849}
3850
3714static void drbd_disconnect(struct drbd_conf *mdev) 3851static void drbd_disconnect(struct drbd_conf *mdev)
3715{ 3852{
3716 enum drbd_fencing_p fp; 3853 enum drbd_fencing_p fp;
@@ -3728,6 +3865,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3728 drbd_thread_stop(&mdev->asender); 3865 drbd_thread_stop(&mdev->asender);
3729 drbd_free_sock(mdev); 3866 drbd_free_sock(mdev);
3730 3867
3868 /* wait for current activity to cease. */
3731 spin_lock_irq(&mdev->req_lock); 3869 spin_lock_irq(&mdev->req_lock);
3732 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3870 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3733 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); 3871 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
@@ -3752,7 +3890,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3752 3890
3753 /* make sure syncer is stopped and w_resume_next_sg queued */ 3891 /* make sure syncer is stopped and w_resume_next_sg queued */
3754 del_timer_sync(&mdev->resync_timer); 3892 del_timer_sync(&mdev->resync_timer);
3755 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3756 resync_timer_fn((unsigned long)mdev); 3893 resync_timer_fn((unsigned long)mdev);
3757 3894
3758 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 3895 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
@@ -3767,11 +3904,9 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3767 kfree(mdev->p_uuid); 3904 kfree(mdev->p_uuid);
3768 mdev->p_uuid = NULL; 3905 mdev->p_uuid = NULL;
3769 3906
3770 if (!mdev->state.susp) 3907 if (!is_susp(mdev->state))
3771 tl_clear(mdev); 3908 tl_clear(mdev);
3772 3909
3773 drbd_fail_pending_reads(mdev);
3774
3775 dev_info(DEV, "Connection closed\n"); 3910 dev_info(DEV, "Connection closed\n");
3776 3911
3777 drbd_md_sync(mdev); 3912 drbd_md_sync(mdev);
@@ -3782,12 +3917,8 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3782 put_ldev(mdev); 3917 put_ldev(mdev);
3783 } 3918 }
3784 3919
3785 if (mdev->state.role == R_PRIMARY) { 3920 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3786 if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) { 3921 drbd_try_outdate_peer_async(mdev);
3787 enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3788 drbd_request_state(mdev, NS(pdsk, nps));
3789 }
3790 }
3791 3922
3792 spin_lock_irq(&mdev->req_lock); 3923 spin_lock_irq(&mdev->req_lock);
3793 os = mdev->state; 3924 os = mdev->state;
@@ -3800,32 +3931,14 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3800 spin_unlock_irq(&mdev->req_lock); 3931 spin_unlock_irq(&mdev->req_lock);
3801 3932
3802 if (os.conn == C_DISCONNECTING) { 3933 if (os.conn == C_DISCONNECTING) {
3803 struct hlist_head *h; 3934 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3804 wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3805 3935
3806 /* we must not free the tl_hash 3936 if (!is_susp(mdev->state)) {
3807 * while application io is still on the fly */ 3937 /* we must not free the tl_hash
3808 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0); 3938 * while application io is still on the fly */
3809 3939 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3810 spin_lock_irq(&mdev->req_lock); 3940 drbd_free_tl_hash(mdev);
3811 /* paranoia code */ 3941 }
3812 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3813 if (h->first)
3814 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3815 (int)(h - mdev->ee_hash), h->first);
3816 kfree(mdev->ee_hash);
3817 mdev->ee_hash = NULL;
3818 mdev->ee_hash_s = 0;
3819
3820 /* paranoia code */
3821 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3822 if (h->first)
3823 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3824 (int)(h - mdev->tl_hash), h->first);
3825 kfree(mdev->tl_hash);
3826 mdev->tl_hash = NULL;
3827 mdev->tl_hash_s = 0;
3828 spin_unlock_irq(&mdev->req_lock);
3829 3942
3830 crypto_free_hash(mdev->cram_hmac_tfm); 3943 crypto_free_hash(mdev->cram_hmac_tfm);
3831 mdev->cram_hmac_tfm = NULL; 3944 mdev->cram_hmac_tfm = NULL;
@@ -3845,6 +3958,9 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3845 i = drbd_release_ee(mdev, &mdev->net_ee); 3958 i = drbd_release_ee(mdev, &mdev->net_ee);
3846 if (i) 3959 if (i)
3847 dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 3960 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3961 i = atomic_read(&mdev->pp_in_use_by_net);
3962 if (i)
3963 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3848 i = atomic_read(&mdev->pp_in_use); 3964 i = atomic_read(&mdev->pp_in_use);
3849 if (i) 3965 if (i)
3850 dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 3966 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
@@ -3888,7 +4004,7 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
3888 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 4004 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3889 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 4005 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3890 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, 4006 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3891 (struct p_header *)p, sizeof(*p), 0 ); 4007 (struct p_header80 *)p, sizeof(*p), 0 );
3892 mutex_unlock(&mdev->data.mutex); 4008 mutex_unlock(&mdev->data.mutex);
3893 return ok; 4009 return ok;
3894} 4010}
@@ -3904,27 +4020,28 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
3904{ 4020{
3905 /* ASSERT current == mdev->receiver ... */ 4021 /* ASSERT current == mdev->receiver ... */
3906 struct p_handshake *p = &mdev->data.rbuf.handshake; 4022 struct p_handshake *p = &mdev->data.rbuf.handshake;
3907 const int expect = sizeof(struct p_handshake) 4023 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3908 -sizeof(struct p_header); 4024 unsigned int length;
4025 enum drbd_packets cmd;
3909 int rv; 4026 int rv;
3910 4027
3911 rv = drbd_send_handshake(mdev); 4028 rv = drbd_send_handshake(mdev);
3912 if (!rv) 4029 if (!rv)
3913 return 0; 4030 return 0;
3914 4031
3915 rv = drbd_recv_header(mdev, &p->head); 4032 rv = drbd_recv_header(mdev, &cmd, &length);
3916 if (!rv) 4033 if (!rv)
3917 return 0; 4034 return 0;
3918 4035
3919 if (p->head.command != P_HAND_SHAKE) { 4036 if (cmd != P_HAND_SHAKE) {
3920 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n", 4037 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3921 cmdname(p->head.command), p->head.command); 4038 cmdname(cmd), cmd);
3922 return -1; 4039 return -1;
3923 } 4040 }
3924 4041
3925 if (p->head.length != expect) { 4042 if (length != expect) {
3926 dev_err(DEV, "expected HandShake length: %u, received: %u\n", 4043 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3927 expect, p->head.length); 4044 expect, length);
3928 return -1; 4045 return -1;
3929 } 4046 }
3930 4047
@@ -3982,10 +4099,11 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3982 char *response = NULL; 4099 char *response = NULL;
3983 char *right_response = NULL; 4100 char *right_response = NULL;
3984 char *peers_ch = NULL; 4101 char *peers_ch = NULL;
3985 struct p_header p;
3986 unsigned int key_len = strlen(mdev->net_conf->shared_secret); 4102 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3987 unsigned int resp_size; 4103 unsigned int resp_size;
3988 struct hash_desc desc; 4104 struct hash_desc desc;
4105 enum drbd_packets cmd;
4106 unsigned int length;
3989 int rv; 4107 int rv;
3990 4108
3991 desc.tfm = mdev->cram_hmac_tfm; 4109 desc.tfm = mdev->cram_hmac_tfm;
@@ -4005,33 +4123,33 @@ static int drbd_do_auth(struct drbd_conf *mdev)
4005 if (!rv) 4123 if (!rv)
4006 goto fail; 4124 goto fail;
4007 4125
4008 rv = drbd_recv_header(mdev, &p); 4126 rv = drbd_recv_header(mdev, &cmd, &length);
4009 if (!rv) 4127 if (!rv)
4010 goto fail; 4128 goto fail;
4011 4129
4012 if (p.command != P_AUTH_CHALLENGE) { 4130 if (cmd != P_AUTH_CHALLENGE) {
4013 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", 4131 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4014 cmdname(p.command), p.command); 4132 cmdname(cmd), cmd);
4015 rv = 0; 4133 rv = 0;
4016 goto fail; 4134 goto fail;
4017 } 4135 }
4018 4136
4019 if (p.length > CHALLENGE_LEN*2) { 4137 if (length > CHALLENGE_LEN * 2) {
4020 dev_err(DEV, "expected AuthChallenge payload too big.\n"); 4138 dev_err(DEV, "expected AuthChallenge payload too big.\n");
4021 rv = -1; 4139 rv = -1;
4022 goto fail; 4140 goto fail;
4023 } 4141 }
4024 4142
4025 peers_ch = kmalloc(p.length, GFP_NOIO); 4143 peers_ch = kmalloc(length, GFP_NOIO);
4026 if (peers_ch == NULL) { 4144 if (peers_ch == NULL) {
4027 dev_err(DEV, "kmalloc of peers_ch failed\n"); 4145 dev_err(DEV, "kmalloc of peers_ch failed\n");
4028 rv = -1; 4146 rv = -1;
4029 goto fail; 4147 goto fail;
4030 } 4148 }
4031 4149
4032 rv = drbd_recv(mdev, peers_ch, p.length); 4150 rv = drbd_recv(mdev, peers_ch, length);
4033 4151
4034 if (rv != p.length) { 4152 if (rv != length) {
4035 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); 4153 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4036 rv = 0; 4154 rv = 0;
4037 goto fail; 4155 goto fail;
@@ -4046,7 +4164,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
4046 } 4164 }
4047 4165
4048 sg_init_table(&sg, 1); 4166 sg_init_table(&sg, 1);
4049 sg_set_buf(&sg, peers_ch, p.length); 4167 sg_set_buf(&sg, peers_ch, length);
4050 4168
4051 rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4169 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4052 if (rv) { 4170 if (rv) {
@@ -4059,18 +4177,18 @@ static int drbd_do_auth(struct drbd_conf *mdev)
4059 if (!rv) 4177 if (!rv)
4060 goto fail; 4178 goto fail;
4061 4179
4062 rv = drbd_recv_header(mdev, &p); 4180 rv = drbd_recv_header(mdev, &cmd, &length);
4063 if (!rv) 4181 if (!rv)
4064 goto fail; 4182 goto fail;
4065 4183
4066 if (p.command != P_AUTH_RESPONSE) { 4184 if (cmd != P_AUTH_RESPONSE) {
4067 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", 4185 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4068 cmdname(p.command), p.command); 4186 cmdname(cmd), cmd);
4069 rv = 0; 4187 rv = 0;
4070 goto fail; 4188 goto fail;
4071 } 4189 }
4072 4190
4073 if (p.length != resp_size) { 4191 if (length != resp_size) {
4074 dev_err(DEV, "expected AuthResponse payload of wrong size\n"); 4192 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4075 rv = 0; 4193 rv = 0;
4076 goto fail; 4194 goto fail;
@@ -4155,7 +4273,7 @@ int drbdd_init(struct drbd_thread *thi)
4155 4273
4156/* ********* acknowledge sender ******** */ 4274/* ********* acknowledge sender ******** */
4157 4275
4158static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h) 4276static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4159{ 4277{
4160 struct p_req_state_reply *p = (struct p_req_state_reply *)h; 4278 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4161 4279
@@ -4173,13 +4291,13 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4173 return TRUE; 4291 return TRUE;
4174} 4292}
4175 4293
4176static int got_Ping(struct drbd_conf *mdev, struct p_header *h) 4294static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4177{ 4295{
4178 return drbd_send_ping_ack(mdev); 4296 return drbd_send_ping_ack(mdev);
4179 4297
4180} 4298}
4181 4299
4182static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) 4300static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4183{ 4301{
4184 /* restore idle timeout */ 4302 /* restore idle timeout */
4185 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; 4303 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
@@ -4189,7 +4307,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4189 return TRUE; 4307 return TRUE;
4190} 4308}
4191 4309
4192static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h) 4310static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4193{ 4311{
4194 struct p_block_ack *p = (struct p_block_ack *)h; 4312 struct p_block_ack *p = (struct p_block_ack *)h;
4195 sector_t sector = be64_to_cpu(p->sector); 4313 sector_t sector = be64_to_cpu(p->sector);
@@ -4199,11 +4317,15 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4199 4317
4200 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4318 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4201 4319
4202 drbd_rs_complete_io(mdev, sector); 4320 if (get_ldev(mdev)) {
4203 drbd_set_in_sync(mdev, sector, blksize); 4321 drbd_rs_complete_io(mdev, sector);
4204 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ 4322 drbd_set_in_sync(mdev, sector, blksize);
4205 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); 4323 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4324 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4325 put_ldev(mdev);
4326 }
4206 dec_rs_pending(mdev); 4327 dec_rs_pending(mdev);
4328 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4207 4329
4208 return TRUE; 4330 return TRUE;
4209} 4331}
@@ -4259,7 +4381,7 @@ static int validate_req_change_req_state(struct drbd_conf *mdev,
4259 return TRUE; 4381 return TRUE;
4260} 4382}
4261 4383
4262static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h) 4384static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4263{ 4385{
4264 struct p_block_ack *p = (struct p_block_ack *)h; 4386 struct p_block_ack *p = (struct p_block_ack *)h;
4265 sector_t sector = be64_to_cpu(p->sector); 4387 sector_t sector = be64_to_cpu(p->sector);
@@ -4299,7 +4421,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4299 _ack_id_to_req, __func__ , what); 4421 _ack_id_to_req, __func__ , what);
4300} 4422}
4301 4423
4302static int got_NegAck(struct drbd_conf *mdev, struct p_header *h) 4424static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4303{ 4425{
4304 struct p_block_ack *p = (struct p_block_ack *)h; 4426 struct p_block_ack *p = (struct p_block_ack *)h;
4305 sector_t sector = be64_to_cpu(p->sector); 4427 sector_t sector = be64_to_cpu(p->sector);
@@ -4319,7 +4441,7 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4319 _ack_id_to_req, __func__ , neg_acked); 4441 _ack_id_to_req, __func__ , neg_acked);
4320} 4442}
4321 4443
4322static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h) 4444static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4323{ 4445{
4324 struct p_block_ack *p = (struct p_block_ack *)h; 4446 struct p_block_ack *p = (struct p_block_ack *)h;
4325 sector_t sector = be64_to_cpu(p->sector); 4447 sector_t sector = be64_to_cpu(p->sector);
@@ -4332,7 +4454,7 @@ static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4332 _ar_id_to_req, __func__ , neg_acked); 4454 _ar_id_to_req, __func__ , neg_acked);
4333} 4455}
4334 4456
4335static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h) 4457static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4336{ 4458{
4337 sector_t sector; 4459 sector_t sector;
4338 int size; 4460 int size;
@@ -4354,7 +4476,7 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4354 return TRUE; 4476 return TRUE;
4355} 4477}
4356 4478
4357static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h) 4479static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4358{ 4480{
4359 struct p_barrier_ack *p = (struct p_barrier_ack *)h; 4481 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4360 4482
@@ -4363,7 +4485,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4363 return TRUE; 4485 return TRUE;
4364} 4486}
4365 4487
4366static int got_OVResult(struct drbd_conf *mdev, struct p_header *h) 4488static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4367{ 4489{
4368 struct p_block_ack *p = (struct p_block_ack *)h; 4490 struct p_block_ack *p = (struct p_block_ack *)h;
4369 struct drbd_work *w; 4491 struct drbd_work *w;
@@ -4380,6 +4502,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4380 else 4502 else
4381 ov_oos_print(mdev); 4503 ov_oos_print(mdev);
4382 4504
4505 if (!get_ldev(mdev))
4506 return TRUE;
4507
4383 drbd_rs_complete_io(mdev, sector); 4508 drbd_rs_complete_io(mdev, sector);
4384 dec_rs_pending(mdev); 4509 dec_rs_pending(mdev);
4385 4510
@@ -4394,18 +4519,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4394 drbd_resync_finished(mdev); 4519 drbd_resync_finished(mdev);
4395 } 4520 }
4396 } 4521 }
4522 put_ldev(mdev);
4397 return TRUE; 4523 return TRUE;
4398} 4524}
4399 4525
4400static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h) 4526static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
4401{ 4527{
4402 /* IGNORE */
4403 return TRUE; 4528 return TRUE;
4404} 4529}
4405 4530
4406struct asender_cmd { 4531struct asender_cmd {
4407 size_t pkt_size; 4532 size_t pkt_size;
4408 int (*process)(struct drbd_conf *mdev, struct p_header *h); 4533 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
4409}; 4534};
4410 4535
4411static struct asender_cmd *get_asender_cmd(int cmd) 4536static struct asender_cmd *get_asender_cmd(int cmd)
@@ -4414,8 +4539,8 @@ static struct asender_cmd *get_asender_cmd(int cmd)
4414 /* anything missing from this table is in 4539 /* anything missing from this table is in
4415 * the drbd_cmd_handler (drbd_default_handler) table, 4540 * the drbd_cmd_handler (drbd_default_handler) table,
4416 * see the beginning of drbdd() */ 4541 * see the beginning of drbdd() */
4417 [P_PING] = { sizeof(struct p_header), got_Ping }, 4542 [P_PING] = { sizeof(struct p_header80), got_Ping },
4418 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck }, 4543 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
4419 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4544 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4420 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4545 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4421 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4546 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
@@ -4427,7 +4552,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
4427 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, 4552 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4428 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 4553 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4429 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 4554 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4430 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m }, 4555 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
4431 [P_MAX_CMD] = { 0, NULL }, 4556 [P_MAX_CMD] = { 0, NULL },
4432 }; 4557 };
4433 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) 4558 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
@@ -4438,13 +4563,13 @@ static struct asender_cmd *get_asender_cmd(int cmd)
4438int drbd_asender(struct drbd_thread *thi) 4563int drbd_asender(struct drbd_thread *thi)
4439{ 4564{
4440 struct drbd_conf *mdev = thi->mdev; 4565 struct drbd_conf *mdev = thi->mdev;
4441 struct p_header *h = &mdev->meta.rbuf.header; 4566 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
4442 struct asender_cmd *cmd = NULL; 4567 struct asender_cmd *cmd = NULL;
4443 4568
4444 int rv, len; 4569 int rv, len;
4445 void *buf = h; 4570 void *buf = h;
4446 int received = 0; 4571 int received = 0;
4447 int expect = sizeof(struct p_header); 4572 int expect = sizeof(struct p_header80);
4448 int empty; 4573 int empty;
4449 4574
4450 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); 4575 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
@@ -4468,10 +4593,8 @@ int drbd_asender(struct drbd_thread *thi)
4468 while (1) { 4593 while (1) {
4469 clear_bit(SIGNAL_ASENDER, &mdev->flags); 4594 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4470 flush_signals(current); 4595 flush_signals(current);
4471 if (!drbd_process_done_ee(mdev)) { 4596 if (!drbd_process_done_ee(mdev))
4472 dev_err(DEV, "process_done_ee() = NOT_OK\n");
4473 goto reconnect; 4597 goto reconnect;
4474 }
4475 /* to avoid race with newly queued ACKs */ 4598 /* to avoid race with newly queued ACKs */
4476 set_bit(SIGNAL_ASENDER, &mdev->flags); 4599 set_bit(SIGNAL_ASENDER, &mdev->flags);
4477 spin_lock_irq(&mdev->req_lock); 4600 spin_lock_irq(&mdev->req_lock);
@@ -4530,21 +4653,23 @@ int drbd_asender(struct drbd_thread *thi)
4530 4653
4531 if (received == expect && cmd == NULL) { 4654 if (received == expect && cmd == NULL) {
4532 if (unlikely(h->magic != BE_DRBD_MAGIC)) { 4655 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4533 dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n", 4656 dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4534 (long)be32_to_cpu(h->magic), 4657 be32_to_cpu(h->magic),
4535 h->command, h->length); 4658 be16_to_cpu(h->command),
4659 be16_to_cpu(h->length));
4536 goto reconnect; 4660 goto reconnect;
4537 } 4661 }
4538 cmd = get_asender_cmd(be16_to_cpu(h->command)); 4662 cmd = get_asender_cmd(be16_to_cpu(h->command));
4539 len = be16_to_cpu(h->length); 4663 len = be16_to_cpu(h->length);
4540 if (unlikely(cmd == NULL)) { 4664 if (unlikely(cmd == NULL)) {
4541 dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n", 4665 dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4542 (long)be32_to_cpu(h->magic), 4666 be32_to_cpu(h->magic),
4543 h->command, h->length); 4667 be16_to_cpu(h->command),
4668 be16_to_cpu(h->length));
4544 goto disconnect; 4669 goto disconnect;
4545 } 4670 }
4546 expect = cmd->pkt_size; 4671 expect = cmd->pkt_size;
4547 ERR_IF(len != expect-sizeof(struct p_header)) 4672 ERR_IF(len != expect-sizeof(struct p_header80))
4548 goto reconnect; 4673 goto reconnect;
4549 } 4674 }
4550 if (received == expect) { 4675 if (received == expect) {
@@ -4554,7 +4679,7 @@ int drbd_asender(struct drbd_thread *thi)
4554 4679
4555 buf = h; 4680 buf = h;
4556 received = 0; 4681 received = 0;
4557 expect = sizeof(struct p_header); 4682 expect = sizeof(struct p_header80);
4558 cmd = NULL; 4683 cmd = NULL;
4559 } 4684 }
4560 } 4685 }
@@ -4562,10 +4687,12 @@ int drbd_asender(struct drbd_thread *thi)
4562 if (0) { 4687 if (0) {
4563reconnect: 4688reconnect:
4564 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE)); 4689 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4690 drbd_md_sync(mdev);
4565 } 4691 }
4566 if (0) { 4692 if (0) {
4567disconnect: 4693disconnect:
4568 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4694 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4695 drbd_md_sync(mdev);
4569 } 4696 }
4570 clear_bit(SIGNAL_ASENDER, &mdev->flags); 4697 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4571 4698
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f761d98a4e90..9e91a2545fc8 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -59,17 +59,19 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
59static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw) 59static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
60{ 60{
61 const unsigned long s = req->rq_state; 61 const unsigned long s = req->rq_state;
62
63 /* remove it from the transfer log.
64 * well, only if it had been there in the first
65 * place... if it had not (local only or conflicting
66 * and never sent), it should still be "empty" as
67 * initialized in drbd_req_new(), so we can list_del() it
68 * here unconditionally */
69 list_del(&req->tl_requests);
70
62 /* if it was a write, we may have to set the corresponding 71 /* if it was a write, we may have to set the corresponding
63 * bit(s) out-of-sync first. If it had a local part, we need to 72 * bit(s) out-of-sync first. If it had a local part, we need to
64 * release the reference to the activity log. */ 73 * release the reference to the activity log. */
65 if (rw == WRITE) { 74 if (rw == WRITE) {
66 /* remove it from the transfer log.
67 * well, only if it had been there in the first
68 * place... if it had not (local only or conflicting
69 * and never sent), it should still be "empty" as
70 * initialized in drbd_req_new(), so we can list_del() it
71 * here unconditionally */
72 list_del(&req->tl_requests);
73 /* Set out-of-sync unless both OK flags are set 75 /* Set out-of-sync unless both OK flags are set
74 * (local only or remote failed). 76 * (local only or remote failed).
75 * Other places where we set out-of-sync: 77 * Other places where we set out-of-sync:
@@ -92,7 +94,8 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
92 */ 94 */
93 if (s & RQ_LOCAL_MASK) { 95 if (s & RQ_LOCAL_MASK) {
94 if (get_ldev_if_state(mdev, D_FAILED)) { 96 if (get_ldev_if_state(mdev, D_FAILED)) {
95 drbd_al_complete_io(mdev, req->sector); 97 if (s & RQ_IN_ACT_LOG)
98 drbd_al_complete_io(mdev, req->sector);
96 put_ldev(mdev); 99 put_ldev(mdev);
97 } else if (__ratelimit(&drbd_ratelimit_state)) { 100 } else if (__ratelimit(&drbd_ratelimit_state)) {
98 dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), " 101 dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
@@ -280,6 +283,14 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
280 * protocol A or B, barrier ack still pending... */ 283 * protocol A or B, barrier ack still pending... */
281} 284}
282 285
286static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
287{
288 struct drbd_conf *mdev = req->mdev;
289
290 if (!is_susp(mdev->state))
291 _req_may_be_done(req, m);
292}
293
283/* 294/*
284 * checks whether there was an overlapping request 295 * checks whether there was an overlapping request
285 * or ee already registered. 296 * or ee already registered.
@@ -380,10 +391,11 @@ out_conflict:
380 * and it enforces that we have to think in a very structured manner 391 * and it enforces that we have to think in a very structured manner
381 * about the "events" that may happen to a request during its life time ... 392 * about the "events" that may happen to a request during its life time ...
382 */ 393 */
383void __req_mod(struct drbd_request *req, enum drbd_req_event what, 394int __req_mod(struct drbd_request *req, enum drbd_req_event what,
384 struct bio_and_error *m) 395 struct bio_and_error *m)
385{ 396{
386 struct drbd_conf *mdev = req->mdev; 397 struct drbd_conf *mdev = req->mdev;
398 int rv = 0;
387 m->bio = NULL; 399 m->bio = NULL;
388 400
389 switch (what) { 401 switch (what) {
@@ -420,7 +432,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
420 req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); 432 req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
421 req->rq_state &= ~RQ_LOCAL_PENDING; 433 req->rq_state &= ~RQ_LOCAL_PENDING;
422 434
423 _req_may_be_done(req, m); 435 _req_may_be_done_not_susp(req, m);
424 put_ldev(mdev); 436 put_ldev(mdev);
425 break; 437 break;
426 438
@@ -429,7 +441,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
429 req->rq_state &= ~RQ_LOCAL_PENDING; 441 req->rq_state &= ~RQ_LOCAL_PENDING;
430 442
431 __drbd_chk_io_error(mdev, FALSE); 443 __drbd_chk_io_error(mdev, FALSE);
432 _req_may_be_done(req, m); 444 _req_may_be_done_not_susp(req, m);
433 put_ldev(mdev); 445 put_ldev(mdev);
434 break; 446 break;
435 447
@@ -437,7 +449,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
437 /* it is legal to fail READA */ 449 /* it is legal to fail READA */
438 req->rq_state |= RQ_LOCAL_COMPLETED; 450 req->rq_state |= RQ_LOCAL_COMPLETED;
439 req->rq_state &= ~RQ_LOCAL_PENDING; 451 req->rq_state &= ~RQ_LOCAL_PENDING;
440 _req_may_be_done(req, m); 452 _req_may_be_done_not_susp(req, m);
441 put_ldev(mdev); 453 put_ldev(mdev);
442 break; 454 break;
443 455
@@ -455,7 +467,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
455 /* no point in retrying if there is no good remote data, 467 /* no point in retrying if there is no good remote data,
456 * or we have no connection. */ 468 * or we have no connection. */
457 if (mdev->state.pdsk != D_UP_TO_DATE) { 469 if (mdev->state.pdsk != D_UP_TO_DATE) {
458 _req_may_be_done(req, m); 470 _req_may_be_done_not_susp(req, m);
459 break; 471 break;
460 } 472 }
461 473
@@ -517,11 +529,9 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
517 D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0); 529 D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
518 530
519 req->epoch = mdev->newest_tle->br_number; 531 req->epoch = mdev->newest_tle->br_number;
520 list_add_tail(&req->tl_requests,
521 &mdev->newest_tle->requests);
522 532
523 /* increment size of current epoch */ 533 /* increment size of current epoch */
524 mdev->newest_tle->n_req++; 534 mdev->newest_tle->n_writes++;
525 535
526 /* queue work item to send data */ 536 /* queue work item to send data */
527 D_ASSERT(req->rq_state & RQ_NET_PENDING); 537 D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -530,7 +540,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
530 drbd_queue_work(&mdev->data.work, &req->w); 540 drbd_queue_work(&mdev->data.work, &req->w);
531 541
532 /* close the epoch, in case it outgrew the limit */ 542 /* close the epoch, in case it outgrew the limit */
533 if (mdev->newest_tle->n_req >= mdev->net_conf->max_epoch_size) 543 if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
534 queue_barrier(mdev); 544 queue_barrier(mdev);
535 545
536 break; 546 break;
@@ -543,7 +553,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
543 req->rq_state &= ~RQ_NET_QUEUED; 553 req->rq_state &= ~RQ_NET_QUEUED;
544 /* if we did it right, tl_clear should be scheduled only after 554 /* if we did it right, tl_clear should be scheduled only after
545 * this, so this should not be necessary! */ 555 * this, so this should not be necessary! */
546 _req_may_be_done(req, m); 556 _req_may_be_done_not_susp(req, m);
547 break; 557 break;
548 558
549 case handed_over_to_network: 559 case handed_over_to_network:
@@ -568,7 +578,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
568 * "completed_ok" events came in, once we return from 578 * "completed_ok" events came in, once we return from
569 * _drbd_send_zc_bio (drbd_send_dblock), we have to check 579 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
570 * whether it is done already, and end it. */ 580 * whether it is done already, and end it. */
571 _req_may_be_done(req, m); 581 _req_may_be_done_not_susp(req, m);
572 break; 582 break;
573 583
574 case read_retry_remote_canceled: 584 case read_retry_remote_canceled:
@@ -584,7 +594,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
584 /* if it is still queued, we may not complete it here. 594 /* if it is still queued, we may not complete it here.
585 * it will be canceled soon. */ 595 * it will be canceled soon. */
586 if (!(req->rq_state & RQ_NET_QUEUED)) 596 if (!(req->rq_state & RQ_NET_QUEUED))
587 _req_may_be_done(req, m); 597 _req_may_be_done(req, m); /* Allowed while state.susp */
588 break; 598 break;
589 599
590 case write_acked_by_peer_and_sis: 600 case write_acked_by_peer_and_sis:
@@ -619,7 +629,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
619 D_ASSERT(req->rq_state & RQ_NET_PENDING); 629 D_ASSERT(req->rq_state & RQ_NET_PENDING);
620 dec_ap_pending(mdev); 630 dec_ap_pending(mdev);
621 req->rq_state &= ~RQ_NET_PENDING; 631 req->rq_state &= ~RQ_NET_PENDING;
622 _req_may_be_done(req, m); 632 _req_may_be_done_not_susp(req, m);
623 break; 633 break;
624 634
625 case neg_acked: 635 case neg_acked:
@@ -629,11 +639,50 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
629 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); 639 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
630 640
631 req->rq_state |= RQ_NET_DONE; 641 req->rq_state |= RQ_NET_DONE;
632 _req_may_be_done(req, m); 642 _req_may_be_done_not_susp(req, m);
633 /* else: done by handed_over_to_network */ 643 /* else: done by handed_over_to_network */
634 break; 644 break;
635 645
646 case fail_frozen_disk_io:
647 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
648 break;
649
650 _req_may_be_done(req, m); /* Allowed while state.susp */
651 break;
652
653 case restart_frozen_disk_io:
654 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
655 break;
656
657 req->rq_state &= ~RQ_LOCAL_COMPLETED;
658
659 rv = MR_READ;
660 if (bio_data_dir(req->master_bio) == WRITE)
661 rv = MR_WRITE;
662
663 get_ldev(mdev);
664 req->w.cb = w_restart_disk_io;
665 drbd_queue_work(&mdev->data.work, &req->w);
666 break;
667
668 case resend:
669 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
670 before the connection loss (B&C only); only P_BARRIER_ACK was missing.
671 Trowing them out of the TL here by pretending we got a BARRIER_ACK
672 We ensure that the peer was not rebooted */
673 if (!(req->rq_state & RQ_NET_OK)) {
674 if (req->w.cb) {
675 drbd_queue_work(&mdev->data.work, &req->w);
676 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
677 }
678 break;
679 }
680 /* else, fall through to barrier_acked */
681
636 case barrier_acked: 682 case barrier_acked:
683 if (!(req->rq_state & RQ_WRITE))
684 break;
685
637 if (req->rq_state & RQ_NET_PENDING) { 686 if (req->rq_state & RQ_NET_PENDING) {
638 /* barrier came in before all requests have been acked. 687 /* barrier came in before all requests have been acked.
639 * this is bad, because if the connection is lost now, 688 * this is bad, because if the connection is lost now,
@@ -643,7 +692,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
643 } 692 }
644 D_ASSERT(req->rq_state & RQ_NET_SENT); 693 D_ASSERT(req->rq_state & RQ_NET_SENT);
645 req->rq_state |= RQ_NET_DONE; 694 req->rq_state |= RQ_NET_DONE;
646 _req_may_be_done(req, m); 695 _req_may_be_done(req, m); /* Allowed while state.susp */
647 break; 696 break;
648 697
649 case data_received: 698 case data_received:
@@ -651,9 +700,11 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
651 dec_ap_pending(mdev); 700 dec_ap_pending(mdev);
652 req->rq_state &= ~RQ_NET_PENDING; 701 req->rq_state &= ~RQ_NET_PENDING;
653 req->rq_state |= (RQ_NET_OK|RQ_NET_DONE); 702 req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
654 _req_may_be_done(req, m); 703 _req_may_be_done_not_susp(req, m);
655 break; 704 break;
656 }; 705 };
706
707 return rv;
657} 708}
658 709
659/* we may do a local read if: 710/* we may do a local read if:
@@ -752,14 +803,16 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
752 * resync extent to finish, and, if necessary, pulls in the target 803 * resync extent to finish, and, if necessary, pulls in the target
753 * extent into the activity log, which involves further disk io because 804 * extent into the activity log, which involves further disk io because
754 * of transactional on-disk meta data updates. */ 805 * of transactional on-disk meta data updates. */
755 if (rw == WRITE && local) 806 if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
807 req->rq_state |= RQ_IN_ACT_LOG;
756 drbd_al_begin_io(mdev, sector); 808 drbd_al_begin_io(mdev, sector);
809 }
757 810
758 remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || 811 remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
759 (mdev->state.pdsk == D_INCONSISTENT && 812 (mdev->state.pdsk == D_INCONSISTENT &&
760 mdev->state.conn >= C_CONNECTED)); 813 mdev->state.conn >= C_CONNECTED));
761 814
762 if (!(local || remote) && !mdev->state.susp) { 815 if (!(local || remote) && !is_susp(mdev->state)) {
763 dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); 816 dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
764 goto fail_free_complete; 817 goto fail_free_complete;
765 } 818 }
@@ -785,7 +838,7 @@ allocate_barrier:
785 /* GOOD, everything prepared, grab the spin_lock */ 838 /* GOOD, everything prepared, grab the spin_lock */
786 spin_lock_irq(&mdev->req_lock); 839 spin_lock_irq(&mdev->req_lock);
787 840
788 if (mdev->state.susp) { 841 if (is_susp(mdev->state)) {
789 /* If we got suspended, use the retry mechanism of 842 /* If we got suspended, use the retry mechanism of
790 generic_make_request() to restart processing of this 843 generic_make_request() to restart processing of this
791 bio. In the next call to drbd_make_request_26 844 bio. In the next call to drbd_make_request_26
@@ -867,30 +920,10 @@ allocate_barrier:
867 /* check this request on the collision detection hash tables. 920 /* check this request on the collision detection hash tables.
868 * if we have a conflict, just complete it here. 921 * if we have a conflict, just complete it here.
869 * THINK do we want to check reads, too? (I don't think so...) */ 922 * THINK do we want to check reads, too? (I don't think so...) */
870 if (rw == WRITE && _req_conflicts(req)) { 923 if (rw == WRITE && _req_conflicts(req))
871 /* this is a conflicting request. 924 goto fail_conflicting;
872 * even though it may have been only _partially_ 925
873 * overlapping with one of the currently pending requests, 926 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
874 * without even submitting or sending it, we will
875 * pretend that it was successfully served right now.
876 */
877 if (local) {
878 bio_put(req->private_bio);
879 req->private_bio = NULL;
880 drbd_al_complete_io(mdev, req->sector);
881 put_ldev(mdev);
882 local = 0;
883 }
884 if (remote)
885 dec_ap_pending(mdev);
886 _drbd_end_io_acct(mdev, req);
887 /* THINK: do we want to fail it (-EIO), or pretend success? */
888 bio_endio(req->master_bio, 0);
889 req->master_bio = NULL;
890 dec_ap_bio(mdev);
891 drbd_req_free(req);
892 remote = 0;
893 }
894 927
895 /* NOTE remote first: to get the concurrent write detection right, 928 /* NOTE remote first: to get the concurrent write detection right,
896 * we must register the request before start of local IO. */ 929 * we must register the request before start of local IO. */
@@ -923,6 +956,21 @@ allocate_barrier:
923 956
924 return 0; 957 return 0;
925 958
959fail_conflicting:
960 /* this is a conflicting request.
961 * even though it may have been only _partially_
962 * overlapping with one of the currently pending requests,
963 * without even submitting or sending it, we will
964 * pretend that it was successfully served right now.
965 */
966 _drbd_end_io_acct(mdev, req);
967 spin_unlock_irq(&mdev->req_lock);
968 if (remote)
969 dec_ap_pending(mdev);
970 /* THINK: do we want to fail it (-EIO), or pretend success?
971 * this pretends success. */
972 err = 0;
973
926fail_free_complete: 974fail_free_complete:
927 if (rw == WRITE && local) 975 if (rw == WRITE && local)
928 drbd_al_complete_io(mdev, sector); 976 drbd_al_complete_io(mdev, sector);
@@ -961,21 +1009,6 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
961 return 1; 1009 return 1;
962 } 1010 }
963 1011
964 /*
965 * Paranoia: we might have been primary, but sync target, or
966 * even diskless, then lost the connection.
967 * This should have been handled (panic? suspend?) somewhere
968 * else. But maybe it was not, so check again here.
969 * Caution: as long as we do not have a read/write lock on mdev,
970 * to serialize state changes, this is racy, since we may lose
971 * the connection *after* we test for the cstate.
972 */
973 if (mdev->state.disk < D_UP_TO_DATE && mdev->state.pdsk < D_UP_TO_DATE) {
974 if (__ratelimit(&drbd_ratelimit_state))
975 dev_err(DEV, "Sorry, I have no access to good data anymore.\n");
976 return 1;
977 }
978
979 return 0; 1012 return 0;
980} 1013}
981 1014
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 02d575d24518..181ea0364822 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -104,6 +104,9 @@ enum drbd_req_event {
104 read_ahead_completed_with_error, 104 read_ahead_completed_with_error,
105 write_completed_with_error, 105 write_completed_with_error,
106 completed_ok, 106 completed_ok,
107 resend,
108 fail_frozen_disk_io,
109 restart_frozen_disk_io,
107 nothing, /* for tracing only */ 110 nothing, /* for tracing only */
108}; 111};
109 112
@@ -183,6 +186,12 @@ enum drbd_req_state_bits {
183 186
184 /* keep this last, its for the RQ_NET_MASK */ 187 /* keep this last, its for the RQ_NET_MASK */
185 __RQ_NET_MAX, 188 __RQ_NET_MAX,
189
190 /* Set when this is a write, clear for a read */
191 __RQ_WRITE,
192
193 /* Should call drbd_al_complete_io() for this request... */
194 __RQ_IN_ACT_LOG,
186}; 195};
187 196
188#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING) 197#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
@@ -201,6 +210,16 @@ enum drbd_req_state_bits {
201/* 0x1f8 */ 210/* 0x1f8 */
202#define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) 211#define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)
203 212
213#define RQ_WRITE (1UL << __RQ_WRITE)
214#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
215
216/* For waking up the frozen transfer log mod_req() has to return if the request
217 should be counted in the epoch object*/
218#define MR_WRITE_SHIFT 0
219#define MR_WRITE (1 << MR_WRITE_SHIFT)
220#define MR_READ_SHIFT 1
221#define MR_READ (1 << MR_READ_SHIFT)
222
204/* epoch entries */ 223/* epoch entries */
205static inline 224static inline
206struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector) 225struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
@@ -244,30 +263,36 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
244 return NULL; 263 return NULL;
245} 264}
246 265
266static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
267{
268 struct bio *bio;
269 bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
270
271 req->private_bio = bio;
272
273 bio->bi_private = req;
274 bio->bi_end_io = drbd_endio_pri;
275 bio->bi_next = NULL;
276}
277
247static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, 278static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
248 struct bio *bio_src) 279 struct bio *bio_src)
249{ 280{
250 struct bio *bio;
251 struct drbd_request *req = 281 struct drbd_request *req =
252 mempool_alloc(drbd_request_mempool, GFP_NOIO); 282 mempool_alloc(drbd_request_mempool, GFP_NOIO);
253 if (likely(req)) { 283 if (likely(req)) {
254 bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ 284 drbd_req_make_private_bio(req, bio_src);
255 285
256 req->rq_state = 0; 286 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
257 req->mdev = mdev; 287 req->mdev = mdev;
258 req->master_bio = bio_src; 288 req->master_bio = bio_src;
259 req->private_bio = bio;
260 req->epoch = 0; 289 req->epoch = 0;
261 req->sector = bio->bi_sector; 290 req->sector = bio_src->bi_sector;
262 req->size = bio->bi_size; 291 req->size = bio_src->bi_size;
263 req->start_time = jiffies; 292 req->start_time = jiffies;
264 INIT_HLIST_NODE(&req->colision); 293 INIT_HLIST_NODE(&req->colision);
265 INIT_LIST_HEAD(&req->tl_requests); 294 INIT_LIST_HEAD(&req->tl_requests);
266 INIT_LIST_HEAD(&req->w.list); 295 INIT_LIST_HEAD(&req->w.list);
267
268 bio->bi_private = req;
269 bio->bi_end_io = drbd_endio_pri;
270 bio->bi_next = NULL;
271 } 296 }
272 return req; 297 return req;
273} 298}
@@ -292,36 +317,43 @@ struct bio_and_error {
292 317
293extern void _req_may_be_done(struct drbd_request *req, 318extern void _req_may_be_done(struct drbd_request *req,
294 struct bio_and_error *m); 319 struct bio_and_error *m);
295extern void __req_mod(struct drbd_request *req, enum drbd_req_event what, 320extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
296 struct bio_and_error *m); 321 struct bio_and_error *m);
297extern void complete_master_bio(struct drbd_conf *mdev, 322extern void complete_master_bio(struct drbd_conf *mdev,
298 struct bio_and_error *m); 323 struct bio_and_error *m);
299 324
300/* use this if you don't want to deal with calling complete_master_bio() 325/* use this if you don't want to deal with calling complete_master_bio()
301 * outside the spinlock, e.g. when walking some list on cleanup. */ 326 * outside the spinlock, e.g. when walking some list on cleanup. */
302static inline void _req_mod(struct drbd_request *req, enum drbd_req_event what) 327static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
303{ 328{
304 struct drbd_conf *mdev = req->mdev; 329 struct drbd_conf *mdev = req->mdev;
305 struct bio_and_error m; 330 struct bio_and_error m;
331 int rv;
306 332
307 /* __req_mod possibly frees req, do not touch req after that! */ 333 /* __req_mod possibly frees req, do not touch req after that! */
308 __req_mod(req, what, &m); 334 rv = __req_mod(req, what, &m);
309 if (m.bio) 335 if (m.bio)
310 complete_master_bio(mdev, &m); 336 complete_master_bio(mdev, &m);
337
338 return rv;
311} 339}
312 340
313/* completion of master bio is outside of spinlock. 341/* completion of master bio is outside of spinlock.
314 * If you need it irqsave, do it your self! */ 342 * If you need it irqsave, do it your self! */
315static inline void req_mod(struct drbd_request *req, 343static inline int req_mod(struct drbd_request *req,
316 enum drbd_req_event what) 344 enum drbd_req_event what)
317{ 345{
318 struct drbd_conf *mdev = req->mdev; 346 struct drbd_conf *mdev = req->mdev;
319 struct bio_and_error m; 347 struct bio_and_error m;
348 int rv;
349
320 spin_lock_irq(&mdev->req_lock); 350 spin_lock_irq(&mdev->req_lock);
321 __req_mod(req, what, &m); 351 rv = __req_mod(req, what, &m);
322 spin_unlock_irq(&mdev->req_lock); 352 spin_unlock_irq(&mdev->req_lock);
323 353
324 if (m.bio) 354 if (m.bio)
325 complete_master_bio(mdev, &m); 355 complete_master_bio(mdev, &m);
356
357 return rv;
326} 358}
327#endif 359#endif
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ca4a16cea2d8..108d58015cd1 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -39,8 +39,6 @@
39#include "drbd_int.h" 39#include "drbd_int.h"
40#include "drbd_req.h" 40#include "drbd_req.h"
41 41
42#define SLEEP_TIME (HZ/10)
43
44static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); 42static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
45 43
46 44
@@ -217,10 +215,8 @@ void drbd_endio_sec(struct bio *bio, int error)
217 */ 215 */
218void drbd_endio_pri(struct bio *bio, int error) 216void drbd_endio_pri(struct bio *bio, int error)
219{ 217{
220 unsigned long flags;
221 struct drbd_request *req = bio->bi_private; 218 struct drbd_request *req = bio->bi_private;
222 struct drbd_conf *mdev = req->mdev; 219 struct drbd_conf *mdev = req->mdev;
223 struct bio_and_error m;
224 enum drbd_req_event what; 220 enum drbd_req_event what;
225 int uptodate = bio_flagged(bio, BIO_UPTODATE); 221 int uptodate = bio_flagged(bio, BIO_UPTODATE);
226 222
@@ -246,12 +242,7 @@ void drbd_endio_pri(struct bio *bio, int error)
246 bio_put(req->private_bio); 242 bio_put(req->private_bio);
247 req->private_bio = ERR_PTR(error); 243 req->private_bio = ERR_PTR(error);
248 244
249 spin_lock_irqsave(&mdev->req_lock, flags); 245 req_mod(req, what);
250 __req_mod(req, what, &m);
251 spin_unlock_irqrestore(&mdev->req_lock, flags);
252
253 if (m.bio)
254 complete_master_bio(mdev, &m);
255} 246}
256 247
257int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 248int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
@@ -376,54 +367,145 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
376 struct drbd_epoch_entry *e; 367 struct drbd_epoch_entry *e;
377 368
378 if (!get_ldev(mdev)) 369 if (!get_ldev(mdev))
379 return 0; 370 return -EIO;
371
372 if (drbd_rs_should_slow_down(mdev))
373 goto defer;
380 374
381 /* GFP_TRY, because if there is no memory available right now, this may 375 /* GFP_TRY, because if there is no memory available right now, this may
382 * be rescheduled for later. It is "only" background resync, after all. */ 376 * be rescheduled for later. It is "only" background resync, after all. */
383 e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY); 377 e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
384 if (!e) 378 if (!e)
385 goto fail; 379 goto defer;
386 380
381 e->w.cb = w_e_send_csum;
387 spin_lock_irq(&mdev->req_lock); 382 spin_lock_irq(&mdev->req_lock);
388 list_add(&e->w.list, &mdev->read_ee); 383 list_add(&e->w.list, &mdev->read_ee);
389 spin_unlock_irq(&mdev->req_lock); 384 spin_unlock_irq(&mdev->req_lock);
390 385
391 e->w.cb = w_e_send_csum; 386 atomic_add(size >> 9, &mdev->rs_sect_ev);
392 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) 387 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
393 return 1; 388 return 0;
389
390 /* drbd_submit_ee currently fails for one reason only:
391 * not being able to allocate enough bios.
392 * Is dropping the connection going to help? */
393 spin_lock_irq(&mdev->req_lock);
394 list_del(&e->w.list);
395 spin_unlock_irq(&mdev->req_lock);
394 396
395 drbd_free_ee(mdev, e); 397 drbd_free_ee(mdev, e);
396fail: 398defer:
397 put_ldev(mdev); 399 put_ldev(mdev);
398 return 2; 400 return -EAGAIN;
399} 401}
400 402
401void resync_timer_fn(unsigned long data) 403void resync_timer_fn(unsigned long data)
402{ 404{
403 unsigned long flags;
404 struct drbd_conf *mdev = (struct drbd_conf *) data; 405 struct drbd_conf *mdev = (struct drbd_conf *) data;
405 int queue; 406 int queue;
406 407
407 spin_lock_irqsave(&mdev->req_lock, flags); 408 queue = 1;
408 409 switch (mdev->state.conn) {
409 if (likely(!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))) { 410 case C_VERIFY_S:
410 queue = 1; 411 mdev->resync_work.cb = w_make_ov_request;
411 if (mdev->state.conn == C_VERIFY_S) 412 break;
412 mdev->resync_work.cb = w_make_ov_request; 413 case C_SYNC_TARGET:
413 else 414 mdev->resync_work.cb = w_make_resync_request;
414 mdev->resync_work.cb = w_make_resync_request; 415 break;
415 } else { 416 default:
416 queue = 0; 417 queue = 0;
417 mdev->resync_work.cb = w_resync_inactive; 418 mdev->resync_work.cb = w_resync_inactive;
418 } 419 }
419 420
420 spin_unlock_irqrestore(&mdev->req_lock, flags);
421
422 /* harmless race: list_empty outside data.work.q_lock */ 421 /* harmless race: list_empty outside data.work.q_lock */
423 if (list_empty(&mdev->resync_work.list) && queue) 422 if (list_empty(&mdev->resync_work.list) && queue)
424 drbd_queue_work(&mdev->data.work, &mdev->resync_work); 423 drbd_queue_work(&mdev->data.work, &mdev->resync_work);
425} 424}
426 425
426static void fifo_set(struct fifo_buffer *fb, int value)
427{
428 int i;
429
430 for (i = 0; i < fb->size; i++)
431 fb->values[i] = value;
432}
433
434static int fifo_push(struct fifo_buffer *fb, int value)
435{
436 int ov;
437
438 ov = fb->values[fb->head_index];
439 fb->values[fb->head_index++] = value;
440
441 if (fb->head_index >= fb->size)
442 fb->head_index = 0;
443
444 return ov;
445}
446
447static void fifo_add_val(struct fifo_buffer *fb, int value)
448{
449 int i;
450
451 for (i = 0; i < fb->size; i++)
452 fb->values[i] += value;
453}
454
455int drbd_rs_controller(struct drbd_conf *mdev)
456{
457 unsigned int sect_in; /* Number of sectors that came in since the last turn */
458 unsigned int want; /* The number of sectors we want in the proxy */
459 int req_sect; /* Number of sectors to request in this turn */
460 int correction; /* Number of sectors more we need in the proxy*/
461 int cps; /* correction per invocation of drbd_rs_controller() */
462 int steps; /* Number of time steps to plan ahead */
463 int curr_corr;
464 int max_sect;
465
466 sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
467 mdev->rs_in_flight -= sect_in;
468
469 spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
470
471 steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
472
473 if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
474 want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
475 } else { /* normal path */
476 want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
477 sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
478 }
479
480 correction = want - mdev->rs_in_flight - mdev->rs_planed;
481
482 /* Plan ahead */
483 cps = correction / steps;
484 fifo_add_val(&mdev->rs_plan_s, cps);
485 mdev->rs_planed += cps * steps;
486
487 /* What we do in this step */
488 curr_corr = fifo_push(&mdev->rs_plan_s, 0);
489 spin_unlock(&mdev->peer_seq_lock);
490 mdev->rs_planed -= curr_corr;
491
492 req_sect = sect_in + curr_corr;
493 if (req_sect < 0)
494 req_sect = 0;
495
496 max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
497 if (req_sect > max_sect)
498 req_sect = max_sect;
499
500 /*
501 dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
502 sect_in, mdev->rs_in_flight, want, correction,
503 steps, cps, mdev->rs_planed, curr_corr, req_sect);
504 */
505
506 return req_sect;
507}
508
427int w_make_resync_request(struct drbd_conf *mdev, 509int w_make_resync_request(struct drbd_conf *mdev,
428 struct drbd_work *w, int cancel) 510 struct drbd_work *w, int cancel)
429{ 511{
@@ -431,8 +513,9 @@ int w_make_resync_request(struct drbd_conf *mdev,
431 sector_t sector; 513 sector_t sector;
432 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 514 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
433 int max_segment_size; 515 int max_segment_size;
434 int number, i, size, pe, mx; 516 int number, rollback_i, size, pe, mx;
435 int align, queued, sndbuf; 517 int align, queued, sndbuf;
518 int i = 0;
436 519
437 if (unlikely(cancel)) 520 if (unlikely(cancel))
438 return 1; 521 return 1;
@@ -446,6 +529,12 @@ int w_make_resync_request(struct drbd_conf *mdev,
446 dev_err(DEV, "%s in w_make_resync_request\n", 529 dev_err(DEV, "%s in w_make_resync_request\n",
447 drbd_conn_str(mdev->state.conn)); 530 drbd_conn_str(mdev->state.conn));
448 531
532 if (mdev->rs_total == 0) {
533 /* empty resync? */
534 drbd_resync_finished(mdev);
535 return 1;
536 }
537
449 if (!get_ldev(mdev)) { 538 if (!get_ldev(mdev)) {
450 /* Since we only need to access mdev->rsync a 539 /* Since we only need to access mdev->rsync a
451 get_ldev_if_state(mdev,D_FAILED) would be sufficient, but 540 get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
@@ -458,11 +547,25 @@ int w_make_resync_request(struct drbd_conf *mdev,
458 547
459 /* starting with drbd 8.3.8, we can handle multi-bio EEs, 548 /* starting with drbd 8.3.8, we can handle multi-bio EEs,
460 * if it should be necessary */ 549 * if it should be necessary */
461 max_segment_size = mdev->agreed_pro_version < 94 ? 550 max_segment_size =
462 queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE; 551 mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
552 mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
463 553
464 number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE / 1024) * HZ); 554 if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
465 pe = atomic_read(&mdev->rs_pending_cnt); 555 number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
556 mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
557 } else {
558 mdev->c_sync_rate = mdev->sync_conf.rate;
559 number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
560 }
561
562 /* Throttle resync on lower level disk activity, which may also be
563 * caused by application IO on Primary/SyncTarget.
564 * Keep this after the call to drbd_rs_controller, as that assumes
565 * to be called as precisely as possible every SLEEP_TIME,
566 * and would be confused otherwise. */
567 if (drbd_rs_should_slow_down(mdev))
568 goto requeue;
466 569
467 mutex_lock(&mdev->data.mutex); 570 mutex_lock(&mdev->data.mutex);
468 if (mdev->data.socket) 571 if (mdev->data.socket)
@@ -476,6 +579,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
476 mx = number; 579 mx = number;
477 580
478 /* Limit the number of pending RS requests to no more than the peer's receive buffer */ 581 /* Limit the number of pending RS requests to no more than the peer's receive buffer */
582 pe = atomic_read(&mdev->rs_pending_cnt);
479 if ((pe + number) > mx) { 583 if ((pe + number) > mx) {
480 number = mx - pe; 584 number = mx - pe;
481 } 585 }
@@ -526,6 +630,7 @@ next_sector:
526 * be prepared for all stripe sizes of software RAIDs. 630 * be prepared for all stripe sizes of software RAIDs.
527 */ 631 */
528 align = 1; 632 align = 1;
633 rollback_i = i;
529 for (;;) { 634 for (;;) {
530 if (size + BM_BLOCK_SIZE > max_segment_size) 635 if (size + BM_BLOCK_SIZE > max_segment_size)
531 break; 636 break;
@@ -561,14 +666,19 @@ next_sector:
561 size = (capacity-sector)<<9; 666 size = (capacity-sector)<<9;
562 if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) { 667 if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
563 switch (read_for_csum(mdev, sector, size)) { 668 switch (read_for_csum(mdev, sector, size)) {
564 case 0: /* Disk failure*/ 669 case -EIO: /* Disk failure */
565 put_ldev(mdev); 670 put_ldev(mdev);
566 return 0; 671 return 0;
567 case 2: /* Allocation failed */ 672 case -EAGAIN: /* allocation failed, or ldev busy */
568 drbd_rs_complete_io(mdev, sector); 673 drbd_rs_complete_io(mdev, sector);
569 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); 674 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
675 i = rollback_i;
570 goto requeue; 676 goto requeue;
571 /* case 1: everything ok */ 677 case 0:
678 /* everything ok */
679 break;
680 default:
681 BUG();
572 } 682 }
573 } else { 683 } else {
574 inc_rs_pending(mdev); 684 inc_rs_pending(mdev);
@@ -595,6 +705,7 @@ next_sector:
595 } 705 }
596 706
597 requeue: 707 requeue:
708 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
598 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); 709 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
599 put_ldev(mdev); 710 put_ldev(mdev);
600 return 1; 711 return 1;
@@ -670,6 +781,14 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca
670 return 1; 781 return 1;
671} 782}
672 783
784static void ping_peer(struct drbd_conf *mdev)
785{
786 clear_bit(GOT_PING_ACK, &mdev->flags);
787 request_ping(mdev);
788 wait_event(mdev->misc_wait,
789 test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
790}
791
673int drbd_resync_finished(struct drbd_conf *mdev) 792int drbd_resync_finished(struct drbd_conf *mdev)
674{ 793{
675 unsigned long db, dt, dbdt; 794 unsigned long db, dt, dbdt;
@@ -709,6 +828,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
709 if (!get_ldev(mdev)) 828 if (!get_ldev(mdev))
710 goto out; 829 goto out;
711 830
831 ping_peer(mdev);
832
712 spin_lock_irq(&mdev->req_lock); 833 spin_lock_irq(&mdev->req_lock);
713 os = mdev->state; 834 os = mdev->state;
714 835
@@ -801,6 +922,8 @@ out:
801 mdev->rs_paused = 0; 922 mdev->rs_paused = 0;
802 mdev->ov_start_sector = 0; 923 mdev->ov_start_sector = 0;
803 924
925 drbd_md_sync(mdev);
926
804 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { 927 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
805 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); 928 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
806 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); 929 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
@@ -817,9 +940,13 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
817{ 940{
818 if (drbd_ee_has_active_page(e)) { 941 if (drbd_ee_has_active_page(e)) {
819 /* This might happen if sendpage() has not finished */ 942 /* This might happen if sendpage() has not finished */
943 int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
944 atomic_add(i, &mdev->pp_in_use_by_net);
945 atomic_sub(i, &mdev->pp_in_use);
820 spin_lock_irq(&mdev->req_lock); 946 spin_lock_irq(&mdev->req_lock);
821 list_add_tail(&e->w.list, &mdev->net_ee); 947 list_add_tail(&e->w.list, &mdev->net_ee);
822 spin_unlock_irq(&mdev->req_lock); 948 spin_unlock_irq(&mdev->req_lock);
949 wake_up(&drbd_pp_wait);
823 } else 950 } else
824 drbd_free_ee(mdev, e); 951 drbd_free_ee(mdev, e);
825} 952}
@@ -926,9 +1053,12 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
926 return 1; 1053 return 1;
927 } 1054 }
928 1055
929 drbd_rs_complete_io(mdev, e->sector); 1056 if (get_ldev(mdev)) {
1057 drbd_rs_complete_io(mdev, e->sector);
1058 put_ldev(mdev);
1059 }
930 1060
931 di = (struct digest_info *)(unsigned long)e->block_id; 1061 di = e->digest;
932 1062
933 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1063 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
934 /* quick hack to try to avoid a race against reconfiguration. 1064 /* quick hack to try to avoid a race against reconfiguration.
@@ -952,7 +1082,9 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
952 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); 1082 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
953 } else { 1083 } else {
954 inc_rs_pending(mdev); 1084 inc_rs_pending(mdev);
955 e->block_id = ID_SYNCER; 1085 e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1086 e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
1087 kfree(di);
956 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); 1088 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
957 } 1089 }
958 } else { 1090 } else {
@@ -962,9 +1094,6 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
962 } 1094 }
963 1095
964 dec_unacked(mdev); 1096 dec_unacked(mdev);
965
966 kfree(di);
967
968 move_to_net_ee_or_free(mdev, e); 1097 move_to_net_ee_or_free(mdev, e);
969 1098
970 if (unlikely(!ok)) 1099 if (unlikely(!ok))
@@ -1034,9 +1163,12 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1034 1163
1035 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all 1164 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1036 * the resync lru has been cleaned up already */ 1165 * the resync lru has been cleaned up already */
1037 drbd_rs_complete_io(mdev, e->sector); 1166 if (get_ldev(mdev)) {
1167 drbd_rs_complete_io(mdev, e->sector);
1168 put_ldev(mdev);
1169 }
1038 1170
1039 di = (struct digest_info *)(unsigned long)e->block_id; 1171 di = e->digest;
1040 1172
1041 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1173 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1042 digest_size = crypto_hash_digestsize(mdev->verify_tfm); 1174 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
@@ -1055,9 +1187,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1055 } 1187 }
1056 1188
1057 dec_unacked(mdev); 1189 dec_unacked(mdev);
1058
1059 kfree(di);
1060
1061 if (!eq) 1190 if (!eq)
1062 drbd_ov_oos_found(mdev, e->sector, e->size); 1191 drbd_ov_oos_found(mdev, e->sector, e->size);
1063 else 1192 else
@@ -1108,7 +1237,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1108 * dec_ap_pending will be done in got_BarrierAck 1237 * dec_ap_pending will be done in got_BarrierAck
1109 * or (on connection loss) in w_clear_epoch. */ 1238 * or (on connection loss) in w_clear_epoch. */
1110 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER, 1239 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
1111 (struct p_header *)p, sizeof(*p), 0); 1240 (struct p_header80 *)p, sizeof(*p), 0);
1112 drbd_put_data_sock(mdev); 1241 drbd_put_data_sock(mdev);
1113 1242
1114 return ok; 1243 return ok;
@@ -1173,6 +1302,24 @@ int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1173 return ok; 1302 return ok;
1174} 1303}
1175 1304
1305int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1306{
1307 struct drbd_request *req = container_of(w, struct drbd_request, w);
1308
1309 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1310 drbd_al_begin_io(mdev, req->sector);
1311 /* Calling drbd_al_begin_io() out of the worker might deadlocks
1312 theoretically. Practically it can not deadlock, since this is
1313 only used when unfreezing IOs. All the extents of the requests
1314 that made it into the TL are already active */
1315
1316 drbd_req_make_private_bio(req, req->master_bio);
1317 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1318 generic_make_request(req->private_bio);
1319
1320 return 1;
1321}
1322
1176static int _drbd_may_sync_now(struct drbd_conf *mdev) 1323static int _drbd_may_sync_now(struct drbd_conf *mdev)
1177{ 1324{
1178 struct drbd_conf *odev = mdev; 1325 struct drbd_conf *odev = mdev;
@@ -1298,14 +1445,6 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
1298 return retcode; 1445 return retcode;
1299} 1446}
1300 1447
1301static void ping_peer(struct drbd_conf *mdev)
1302{
1303 clear_bit(GOT_PING_ACK, &mdev->flags);
1304 request_ping(mdev);
1305 wait_event(mdev->misc_wait,
1306 test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
1307}
1308
1309/** 1448/**
1310 * drbd_start_resync() - Start the resync process 1449 * drbd_start_resync() - Start the resync process
1311 * @mdev: DRBD device. 1450 * @mdev: DRBD device.
@@ -1379,13 +1518,21 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1379 r = SS_UNKNOWN_ERROR; 1518 r = SS_UNKNOWN_ERROR;
1380 1519
1381 if (r == SS_SUCCESS) { 1520 if (r == SS_SUCCESS) {
1382 mdev->rs_total = 1521 unsigned long tw = drbd_bm_total_weight(mdev);
1383 mdev->rs_mark_left = drbd_bm_total_weight(mdev); 1522 unsigned long now = jiffies;
1523 int i;
1524
1384 mdev->rs_failed = 0; 1525 mdev->rs_failed = 0;
1385 mdev->rs_paused = 0; 1526 mdev->rs_paused = 0;
1386 mdev->rs_start =
1387 mdev->rs_mark_time = jiffies;
1388 mdev->rs_same_csum = 0; 1527 mdev->rs_same_csum = 0;
1528 mdev->rs_last_events = 0;
1529 mdev->rs_last_sect_ev = 0;
1530 mdev->rs_total = tw;
1531 mdev->rs_start = now;
1532 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1533 mdev->rs_mark_left[i] = tw;
1534 mdev->rs_mark_time[i] = now;
1535 }
1389 _drbd_pause_after(mdev); 1536 _drbd_pause_after(mdev);
1390 } 1537 }
1391 write_unlock_irq(&global_state_lock); 1538 write_unlock_irq(&global_state_lock);
@@ -1397,12 +1544,31 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1397 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), 1544 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1398 (unsigned long) mdev->rs_total); 1545 (unsigned long) mdev->rs_total);
1399 1546
1400 if (mdev->rs_total == 0) { 1547 if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
1401 /* Peer still reachable? Beware of failing before-resync-target handlers! */ 1548 /* This still has a race (about when exactly the peers
1402 ping_peer(mdev); 1549 * detect connection loss) that can lead to a full sync
1550 * on next handshake. In 8.3.9 we fixed this with explicit
1551 * resync-finished notifications, but the fix
1552 * introduces a protocol change. Sleeping for some
1553 * time longer than the ping interval + timeout on the
1554 * SyncSource, to give the SyncTarget the chance to
1555 * detect connection loss, then waiting for a ping
1556 * response (implicit in drbd_resync_finished) reduces
1557 * the race considerably, but does not solve it. */
1558 if (side == C_SYNC_SOURCE)
1559 schedule_timeout_interruptible(
1560 mdev->net_conf->ping_int * HZ +
1561 mdev->net_conf->ping_timeo*HZ/9);
1403 drbd_resync_finished(mdev); 1562 drbd_resync_finished(mdev);
1404 } 1563 }
1405 1564
1565 atomic_set(&mdev->rs_sect_in, 0);
1566 atomic_set(&mdev->rs_sect_ev, 0);
1567 mdev->rs_in_flight = 0;
1568 mdev->rs_planed = 0;
1569 spin_lock(&mdev->peer_seq_lock);
1570 fifo_set(&mdev->rs_plan_s, 0);
1571 spin_unlock(&mdev->peer_seq_lock);
1406 /* ns.conn may already be != mdev->state.conn, 1572 /* ns.conn may already be != mdev->state.conn,
1407 * we may have been paused in between, or become paused until 1573 * we may have been paused in between, or become paused until
1408 * the timer triggers. 1574 * the timer triggers.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3b57459bb745..767107cce982 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -258,8 +258,8 @@ static int irqdma_allocated;
258#include <linux/completion.h> 258#include <linux/completion.h>
259 259
260static struct request *current_req; 260static struct request *current_req;
261static struct request_queue *floppy_queue;
262static void do_fd_request(struct request_queue *q); 261static void do_fd_request(struct request_queue *q);
262static int set_next_request(void);
263 263
264#ifndef fd_get_dma_residue 264#ifndef fd_get_dma_residue
265#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) 265#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
@@ -413,6 +413,7 @@ static struct gendisk *disks[N_DRIVE];
413static struct block_device *opened_bdev[N_DRIVE]; 413static struct block_device *opened_bdev[N_DRIVE];
414static DEFINE_MUTEX(open_lock); 414static DEFINE_MUTEX(open_lock);
415static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; 415static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
416static int fdc_queue;
416 417
417/* 418/*
418 * This struct defines the different floppy types. 419 * This struct defines the different floppy types.
@@ -890,8 +891,8 @@ static void unlock_fdc(void)
890 del_timer(&fd_timeout); 891 del_timer(&fd_timeout);
891 cont = NULL; 892 cont = NULL;
892 clear_bit(0, &fdc_busy); 893 clear_bit(0, &fdc_busy);
893 if (current_req || blk_peek_request(floppy_queue)) 894 if (current_req || set_next_request())
894 do_fd_request(floppy_queue); 895 do_fd_request(current_req->q);
895 spin_unlock_irqrestore(&floppy_lock, flags); 896 spin_unlock_irqrestore(&floppy_lock, flags);
896 wake_up(&fdc_wait); 897 wake_up(&fdc_wait);
897} 898}
@@ -2243,8 +2244,8 @@ static void floppy_end_request(struct request *req, int error)
2243 * logical buffer */ 2244 * logical buffer */
2244static void request_done(int uptodate) 2245static void request_done(int uptodate)
2245{ 2246{
2246 struct request_queue *q = floppy_queue;
2247 struct request *req = current_req; 2247 struct request *req = current_req;
2248 struct request_queue *q;
2248 unsigned long flags; 2249 unsigned long flags;
2249 int block; 2250 int block;
2250 char msg[sizeof("request done ") + sizeof(int) * 3]; 2251 char msg[sizeof("request done ") + sizeof(int) * 3];
@@ -2258,6 +2259,8 @@ static void request_done(int uptodate)
2258 return; 2259 return;
2259 } 2260 }
2260 2261
2262 q = req->q;
2263
2261 if (uptodate) { 2264 if (uptodate) {
2262 /* maintain values for invalidation on geometry 2265 /* maintain values for invalidation on geometry
2263 * change */ 2266 * change */
@@ -2811,6 +2814,28 @@ static int make_raw_rw_request(void)
2811 return 2; 2814 return 2;
2812} 2815}
2813 2816
2817/*
2818 * Round-robin between our available drives, doing one request from each
2819 */
2820static int set_next_request(void)
2821{
2822 struct request_queue *q;
2823 int old_pos = fdc_queue;
2824
2825 do {
2826 q = disks[fdc_queue]->queue;
2827 if (++fdc_queue == N_DRIVE)
2828 fdc_queue = 0;
2829 if (q) {
2830 current_req = blk_fetch_request(q);
2831 if (current_req)
2832 break;
2833 }
2834 } while (fdc_queue != old_pos);
2835
2836 return current_req != NULL;
2837}
2838
2814static void redo_fd_request(void) 2839static void redo_fd_request(void)
2815{ 2840{
2816 int drive; 2841 int drive;
@@ -2822,17 +2847,17 @@ static void redo_fd_request(void)
2822 2847
2823do_request: 2848do_request:
2824 if (!current_req) { 2849 if (!current_req) {
2825 struct request *req; 2850 int pending;
2851
2852 spin_lock_irq(&floppy_lock);
2853 pending = set_next_request();
2854 spin_unlock_irq(&floppy_lock);
2826 2855
2827 spin_lock_irq(floppy_queue->queue_lock); 2856 if (!pending) {
2828 req = blk_fetch_request(floppy_queue);
2829 spin_unlock_irq(floppy_queue->queue_lock);
2830 if (!req) {
2831 do_floppy = NULL; 2857 do_floppy = NULL;
2832 unlock_fdc(); 2858 unlock_fdc();
2833 return; 2859 return;
2834 } 2860 }
2835 current_req = req;
2836 } 2861 }
2837 drive = (long)current_req->rq_disk->private_data; 2862 drive = (long)current_req->rq_disk->private_data;
2838 set_fdc(drive); 2863 set_fdc(drive);
@@ -4165,6 +4190,13 @@ static int __init floppy_init(void)
4165 goto out_put_disk; 4190 goto out_put_disk;
4166 } 4191 }
4167 4192
4193 disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
4194 if (!disks[dr]->queue) {
4195 err = -ENOMEM;
4196 goto out_put_disk;
4197 }
4198
4199 blk_queue_max_hw_sectors(disks[dr]->queue, 64);
4168 disks[dr]->major = FLOPPY_MAJOR; 4200 disks[dr]->major = FLOPPY_MAJOR;
4169 disks[dr]->first_minor = TOMINOR(dr); 4201 disks[dr]->first_minor = TOMINOR(dr);
4170 disks[dr]->fops = &floppy_fops; 4202 disks[dr]->fops = &floppy_fops;
@@ -4183,13 +4215,6 @@ static int __init floppy_init(void)
4183 if (err) 4215 if (err)
4184 goto out_unreg_blkdev; 4216 goto out_unreg_blkdev;
4185 4217
4186 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
4187 if (!floppy_queue) {
4188 err = -ENOMEM;
4189 goto out_unreg_driver;
4190 }
4191 blk_queue_max_hw_sectors(floppy_queue, 64);
4192
4193 blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, 4218 blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
4194 floppy_find, NULL, NULL); 4219 floppy_find, NULL, NULL);
4195 4220
@@ -4317,7 +4342,6 @@ static int __init floppy_init(void)
4317 4342
4318 /* to be cleaned up... */ 4343 /* to be cleaned up... */
4319 disks[drive]->private_data = (void *)(long)drive; 4344 disks[drive]->private_data = (void *)(long)drive;
4320 disks[drive]->queue = floppy_queue;
4321 disks[drive]->flags |= GENHD_FL_REMOVABLE; 4345 disks[drive]->flags |= GENHD_FL_REMOVABLE;
4322 disks[drive]->driverfs_dev = &floppy_device[drive].dev; 4346 disks[drive]->driverfs_dev = &floppy_device[drive].dev;
4323 add_disk(disks[drive]); 4347 add_disk(disks[drive]);
@@ -4333,8 +4357,6 @@ out_flush_work:
4333 floppy_release_irq_and_dma(); 4357 floppy_release_irq_and_dma();
4334out_unreg_region: 4358out_unreg_region:
4335 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4359 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4336 blk_cleanup_queue(floppy_queue);
4337out_unreg_driver:
4338 platform_driver_unregister(&floppy_driver); 4360 platform_driver_unregister(&floppy_driver);
4339out_unreg_blkdev: 4361out_unreg_blkdev:
4340 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4362 unregister_blkdev(FLOPPY_MAJOR, "fd");
@@ -4342,6 +4364,8 @@ out_put_disk:
4342 while (dr--) { 4364 while (dr--) {
4343 del_timer(&motor_off_timer[dr]); 4365 del_timer(&motor_off_timer[dr]);
4344 put_disk(disks[dr]); 4366 put_disk(disks[dr]);
4367 if (disks[dr]->queue)
4368 blk_cleanup_queue(disks[dr]->queue);
4345 } 4369 }
4346 return err; 4370 return err;
4347} 4371}
@@ -4550,11 +4574,11 @@ static void __exit floppy_module_exit(void)
4550 platform_device_unregister(&floppy_device[drive]); 4574 platform_device_unregister(&floppy_device[drive]);
4551 } 4575 }
4552 put_disk(disks[drive]); 4576 put_disk(disks[drive]);
4577 blk_cleanup_queue(disks[drive]->queue);
4553 } 4578 }
4554 4579
4555 del_timer_sync(&fd_timeout); 4580 del_timer_sync(&fd_timeout);
4556 del_timer_sync(&fd_timer); 4581 del_timer_sync(&fd_timer);
4557 blk_cleanup_queue(floppy_queue);
4558 4582
4559 if (atomic_read(&usage_count)) 4583 if (atomic_read(&usage_count))
4560 floppy_release_irq_and_dma(); 4584 floppy_release_irq_and_dma();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a10c8c9b6b78..6c48b3545f84 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -74,6 +74,7 @@
74#include <linux/highmem.h> 74#include <linux/highmem.h>
75#include <linux/kthread.h> 75#include <linux/kthread.h>
76#include <linux/splice.h> 76#include <linux/splice.h>
77#include <linux/sysfs.h>
77 78
78#include <asm/uaccess.h> 79#include <asm/uaccess.h>
79 80
@@ -478,17 +479,17 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
478 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 479 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
479 480
480 if (bio_rw(bio) == WRITE) { 481 if (bio_rw(bio) == WRITE) {
481 bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
482 struct file *file = lo->lo_backing_file; 482 struct file *file = lo->lo_backing_file;
483 483
484 if (barrier) { 484 /* REQ_HARDBARRIER is deprecated */
485 if (unlikely(!file->f_op->fsync)) { 485 if (bio->bi_rw & REQ_HARDBARRIER) {
486 ret = -EOPNOTSUPP; 486 ret = -EOPNOTSUPP;
487 goto out; 487 goto out;
488 } 488 }
489 489
490 if (bio->bi_rw & REQ_FLUSH) {
490 ret = vfs_fsync(file, 0); 491 ret = vfs_fsync(file, 0);
491 if (unlikely(ret)) { 492 if (unlikely(ret && ret != -EINVAL)) {
492 ret = -EIO; 493 ret = -EIO;
493 goto out; 494 goto out;
494 } 495 }
@@ -496,9 +497,9 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
496 497
497 ret = lo_send(lo, bio, pos); 498 ret = lo_send(lo, bio, pos);
498 499
499 if (barrier && !ret) { 500 if ((bio->bi_rw & REQ_FUA) && !ret) {
500 ret = vfs_fsync(file, 0); 501 ret = vfs_fsync(file, 0);
501 if (unlikely(ret)) 502 if (unlikely(ret && ret != -EINVAL))
502 ret = -EIO; 503 ret = -EIO;
503 } 504 }
504 } else 505 } else
@@ -738,6 +739,103 @@ static inline int is_loop_device(struct file *file)
738 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; 739 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
739} 740}
740 741
742/* loop sysfs attributes */
743
744static ssize_t loop_attr_show(struct device *dev, char *page,
745 ssize_t (*callback)(struct loop_device *, char *))
746{
747 struct loop_device *l, *lo = NULL;
748
749 mutex_lock(&loop_devices_mutex);
750 list_for_each_entry(l, &loop_devices, lo_list)
751 if (disk_to_dev(l->lo_disk) == dev) {
752 lo = l;
753 break;
754 }
755 mutex_unlock(&loop_devices_mutex);
756
757 return lo ? callback(lo, page) : -EIO;
758}
759
760#define LOOP_ATTR_RO(_name) \
761static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \
762static ssize_t loop_attr_do_show_##_name(struct device *d, \
763 struct device_attribute *attr, char *b) \
764{ \
765 return loop_attr_show(d, b, loop_attr_##_name##_show); \
766} \
767static struct device_attribute loop_attr_##_name = \
768 __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
769
770static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
771{
772 ssize_t ret;
773 char *p = NULL;
774
775 mutex_lock(&lo->lo_ctl_mutex);
776 if (lo->lo_backing_file)
777 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
778 mutex_unlock(&lo->lo_ctl_mutex);
779
780 if (IS_ERR_OR_NULL(p))
781 ret = PTR_ERR(p);
782 else {
783 ret = strlen(p);
784 memmove(buf, p, ret);
785 buf[ret++] = '\n';
786 buf[ret] = 0;
787 }
788
789 return ret;
790}
791
792static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
793{
794 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
795}
796
797static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
798{
799 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
800}
801
802static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
803{
804 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
805
806 return sprintf(buf, "%s\n", autoclear ? "1" : "0");
807}
808
809LOOP_ATTR_RO(backing_file);
810LOOP_ATTR_RO(offset);
811LOOP_ATTR_RO(sizelimit);
812LOOP_ATTR_RO(autoclear);
813
814static struct attribute *loop_attrs[] = {
815 &loop_attr_backing_file.attr,
816 &loop_attr_offset.attr,
817 &loop_attr_sizelimit.attr,
818 &loop_attr_autoclear.attr,
819 NULL,
820};
821
822static struct attribute_group loop_attribute_group = {
823 .name = "loop",
824 .attrs= loop_attrs,
825};
826
827static int loop_sysfs_init(struct loop_device *lo)
828{
829 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
830 &loop_attribute_group);
831}
832
833static void loop_sysfs_exit(struct loop_device *lo)
834{
835 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
836 &loop_attribute_group);
837}
838
741static int loop_set_fd(struct loop_device *lo, fmode_t mode, 839static int loop_set_fd(struct loop_device *lo, fmode_t mode,
742 struct block_device *bdev, unsigned int arg) 840 struct block_device *bdev, unsigned int arg)
743{ 841{
@@ -833,10 +931,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
833 lo->lo_queue->unplug_fn = loop_unplug; 931 lo->lo_queue->unplug_fn = loop_unplug;
834 932
835 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 933 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
836 blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN); 934 blk_queue_flush(lo->lo_queue, REQ_FLUSH);
837 935
838 set_capacity(lo->lo_disk, size); 936 set_capacity(lo->lo_disk, size);
839 bd_set_size(bdev, size << 9); 937 bd_set_size(bdev, size << 9);
938 loop_sysfs_init(lo);
840 /* let user-space know about the new size */ 939 /* let user-space know about the new size */
841 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 940 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
842 941
@@ -855,6 +954,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
855 return 0; 954 return 0;
856 955
857out_clr: 956out_clr:
957 loop_sysfs_exit(lo);
858 lo->lo_thread = NULL; 958 lo->lo_thread = NULL;
859 lo->lo_device = NULL; 959 lo->lo_device = NULL;
860 lo->lo_backing_file = NULL; 960 lo->lo_backing_file = NULL;
@@ -951,6 +1051,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
951 set_capacity(lo->lo_disk, 0); 1051 set_capacity(lo->lo_disk, 0);
952 if (bdev) { 1052 if (bdev) {
953 bd_set_size(bdev, 0); 1053 bd_set_size(bdev, 0);
1054 loop_sysfs_exit(lo);
954 /* let user-space know about this change */ 1055 /* let user-space know about this change */
955 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 1056 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
956 } 1057 }
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 2284b4f05c62..87311ebac0db 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -310,8 +310,7 @@ static void osdblk_rq_fn(struct request_queue *q)
310 break; 310 break;
311 311
312 /* filter out block requests we don't understand */ 312 /* filter out block requests we don't understand */
313 if (rq->cmd_type != REQ_TYPE_FS && 313 if (rq->cmd_type != REQ_TYPE_FS) {
314 !(rq->cmd_flags & REQ_HARDBARRIER)) {
315 blk_end_request_all(rq, 0); 314 blk_end_request_all(rq, 0);
316 continue; 315 continue;
317 } 316 }
@@ -439,7 +438,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
439 blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); 438 blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
440 439
441 blk_queue_prep_rq(q, blk_queue_start_tag); 440 blk_queue_prep_rq(q, blk_queue_start_tag);
442 blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); 441 blk_queue_flush(q, REQ_FLUSH);
443 442
444 disk->queue = q; 443 disk->queue = q;
445 444
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ef58fccadad3..19b3568e9326 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -753,7 +753,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
753 753
754 rq->timeout = 60*HZ; 754 rq->timeout = 60*HZ;
755 rq->cmd_type = REQ_TYPE_BLOCK_PC; 755 rq->cmd_type = REQ_TYPE_BLOCK_PC;
756 rq->cmd_flags |= REQ_HARDBARRIER;
757 if (cgc->quiet) 756 if (cgc->quiet)
758 rq->cmd_flags |= REQ_QUIET; 757 rq->cmd_flags |= REQ_QUIET;
759 758
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 03688c2da319..8e1ce2e2916a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
468 blk_queue_dma_alignment(queue, dev->blk_size-1); 468 blk_queue_dma_alignment(queue, dev->blk_size-1);
469 blk_queue_logical_block_size(queue, dev->blk_size); 469 blk_queue_logical_block_size(queue, dev->blk_size);
470 470
471 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH); 471 blk_queue_flush(queue, REQ_FLUSH);
472 472
473 blk_queue_max_segments(queue, -1); 473 blk_queue_max_segments(queue, -1);
474 blk_queue_max_segment_size(queue, dev->bounce_size); 474 blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index b5690a045a01..9ae3bb713286 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -397,7 +397,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum);
397#else 397#else
398 398
399static const struct usb_device_id ub_usb_ids[] = { 399static const struct usb_device_id ub_usb_ids[] = {
400 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, 400 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
401 { } 401 { }
402}; 402};
403 403
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 8320490226b7..6ecf89cdf006 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -127,9 +127,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
127 } 127 }
128 } 128 }
129 129
130 if (vbr->req->cmd_flags & REQ_HARDBARRIER)
131 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
132
133 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); 130 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
134 131
135 /* 132 /*
@@ -379,31 +376,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
379 vblk->disk->driverfs_dev = &vdev->dev; 376 vblk->disk->driverfs_dev = &vdev->dev;
380 index++; 377 index++;
381 378
382 if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) { 379 /* configure queue flush support */
383 /* 380 if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
384 * If the FLUSH feature is supported we do have support for 381 blk_queue_flush(q, REQ_FLUSH);
385 * flushing a volatile write cache on the host. Use that
386 * to implement write barrier support.
387 */
388 blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
389 } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
390 /*
391 * If the BARRIER feature is supported the host expects us
392 * to order request by tags. This implies there is not
393 * volatile write cache on the host, and that the host
394 * never re-orders outstanding I/O. This feature is not
395 * useful for real life scenarious and deprecated.
396 */
397 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
398 } else {
399 /*
400 * If the FLUSH feature is not supported we must assume that
401 * the host does not perform any kind of volatile write
402 * caching. We still need to drain the queue to provider
403 * proper barrier semantics.
404 */
405 blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
406 }
407 382
408 /* If disk is read-only in the host, the guest should obey */ 383 /* If disk is read-only in the host, the guest should obey */
409 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) 384 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -522,9 +497,9 @@ static const struct virtio_device_id id_table[] = {
522}; 497};
523 498
524static unsigned int features[] = { 499static unsigned int features[] = {
525 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 500 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
526 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 501 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
527 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY 502 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
528}; 503};
529 504
530/* 505/*
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3ff06f475eef..4b33a18c32e0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -96,7 +96,7 @@ struct blkfront_info
96 struct gnttab_free_callback callback; 96 struct gnttab_free_callback callback;
97 struct blk_shadow shadow[BLK_RING_SIZE]; 97 struct blk_shadow shadow[BLK_RING_SIZE];
98 unsigned long shadow_free; 98 unsigned long shadow_free;
99 int feature_barrier; 99 unsigned int feature_flush;
100 int is_ready; 100 int is_ready;
101}; 101};
102 102
@@ -419,26 +419,12 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
419} 419}
420 420
421 421
422static int xlvbd_barrier(struct blkfront_info *info) 422static void xlvbd_flush(struct blkfront_info *info)
423{ 423{
424 int err; 424 blk_queue_flush(info->rq, info->feature_flush);
425 const char *barrier;
426
427 switch (info->feature_barrier) {
428 case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break;
429 case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break;
430 case QUEUE_ORDERED_NONE: barrier = "disabled"; break;
431 default: return -EINVAL;
432 }
433
434 err = blk_queue_ordered(info->rq, info->feature_barrier);
435
436 if (err)
437 return err;
438
439 printk(KERN_INFO "blkfront: %s: barriers %s\n", 425 printk(KERN_INFO "blkfront: %s: barriers %s\n",
440 info->gd->disk_name, barrier); 426 info->gd->disk_name,
441 return 0; 427 info->feature_flush ? "enabled" : "disabled");
442} 428}
443 429
444 430
@@ -517,7 +503,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
517 info->rq = gd->queue; 503 info->rq = gd->queue;
518 info->gd = gd; 504 info->gd = gd;
519 505
520 xlvbd_barrier(info); 506 xlvbd_flush(info);
521 507
522 if (vdisk_info & VDISK_READONLY) 508 if (vdisk_info & VDISK_READONLY)
523 set_disk_ro(gd, 1); 509 set_disk_ro(gd, 1);
@@ -663,8 +649,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
663 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 649 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
664 info->gd->disk_name); 650 info->gd->disk_name);
665 error = -EOPNOTSUPP; 651 error = -EOPNOTSUPP;
666 info->feature_barrier = QUEUE_ORDERED_NONE; 652 info->feature_flush = 0;
667 xlvbd_barrier(info); 653 xlvbd_flush(info);
668 } 654 }
669 /* fall through */ 655 /* fall through */
670 case BLKIF_OP_READ: 656 case BLKIF_OP_READ:
@@ -1077,20 +1063,20 @@ static void blkfront_connect(struct blkfront_info *info)
1077 /* 1063 /*
1078 * If there's no "feature-barrier" defined, then it means 1064 * If there's no "feature-barrier" defined, then it means
1079 * we're dealing with a very old backend which writes 1065 * we're dealing with a very old backend which writes
1080 * synchronously; draining will do what needs to get done. 1066 * synchronously; nothing to do.
1081 * 1067 *
1082 * If there are barriers, then we can do full queued writes 1068 * If there are barriers, then we use flush.
1083 * with tagged barriers.
1084 *
1085 * If barriers are not supported, then there's no much we can
1086 * do, so just set ordering to NONE.
1087 */ 1069 */
1088 if (err) 1070 info->feature_flush = 0;
1089 info->feature_barrier = QUEUE_ORDERED_DRAIN; 1071
1090 else if (barrier) 1072 /*
1091 info->feature_barrier = QUEUE_ORDERED_TAG; 1073 * The driver doesn't properly handled empty flushes, so
1092 else 1074 * lets disable barrier support for now.
1093 info->feature_barrier = QUEUE_ORDERED_NONE; 1075 */
1076#if 0
1077 if (!err && barrier)
1078 info->feature_flush = REQ_FLUSH;
1079#endif
1094 1080
1095 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1081 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
1096 if (err) { 1082 if (err) {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3d44ec724c17..43d3395325c5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -493,6 +493,21 @@ config LEGACY_PTY_COUNT
493 When not in use, each legacy PTY occupies 12 bytes on 32-bit 493 When not in use, each legacy PTY occupies 12 bytes on 32-bit
494 architectures and 24 bytes on 64-bit architectures. 494 architectures and 24 bytes on 64-bit architectures.
495 495
496config TTY_PRINTK
497 bool "TTY driver to output user messages via printk"
498 depends on EMBEDDED
499 default n
500 ---help---
501 If you say Y here, the support for writing user messages (i.e.
502 console messages) via printk is available.
503
504 The feature is useful to inline user messages with kernel
505 messages.
506 In order to use this feature, you should output user messages
507 to /dev/ttyprintk or redirect console to this TTY.
508
509 If unsure, say N.
510
496config BRIQ_PANEL 511config BRIQ_PANEL
497 tristate 'Total Impact briQ front panel driver' 512 tristate 'Total Impact briQ front panel driver'
498 depends on PPC_CHRP 513 depends on PPC_CHRP
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index dc9641660605..3a9c01416839 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -12,6 +12,7 @@ obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o t
12obj-y += tty_mutex.o 12obj-y += tty_mutex.o
13obj-$(CONFIG_LEGACY_PTYS) += pty.o 13obj-$(CONFIG_LEGACY_PTYS) += pty.o
14obj-$(CONFIG_UNIX98_PTYS) += pty.o 14obj-$(CONFIG_UNIX98_PTYS) += pty.o
15obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
15obj-y += misc.o 16obj-y += misc.o
16obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o 17obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
17obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o 18obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index a11c8c9ca3d4..b0a70461a12c 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1263,6 +1263,36 @@ static int rs_break(struct tty_struct *tty, int break_state)
1263 return 0; 1263 return 0;
1264} 1264}
1265 1265
1266/*
1267 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1268 * Return: write counters to the user passed counter struct
1269 * NB: both 1->0 and 0->1 transitions are counted except for
1270 * RI where only 0->1 is counted.
1271 */
1272static int rs_get_icount(struct tty_struct *tty,
1273 struct serial_icounter_struct *icount)
1274{
1275 struct async_struct *info = tty->driver_data;
1276 struct async_icount cnow;
1277 unsigned long flags;
1278
1279 local_irq_save(flags);
1280 cnow = info->state->icount;
1281 local_irq_restore(flags);
1282 icount->cts = cnow.cts;
1283 icount->dsr = cnow.dsr;
1284 icount->rng = cnow.rng;
1285 icount->dcd = cnow.dcd;
1286 icount->rx = cnow.rx;
1287 icount->tx = cnow.tx;
1288 icount->frame = cnow.frame;
1289 icount->overrun = cnow.overrun;
1290 icount->parity = cnow.parity;
1291 icount->brk = cnow.brk;
1292 icount->buf_overrun = cnow.buf_overrun;
1293
1294 return 0;
1295}
1266 1296
1267static int rs_ioctl(struct tty_struct *tty, struct file * file, 1297static int rs_ioctl(struct tty_struct *tty, struct file * file,
1268 unsigned int cmd, unsigned long arg) 1298 unsigned int cmd, unsigned long arg)
@@ -1332,31 +1362,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1332 } 1362 }
1333 /* NOTREACHED */ 1363 /* NOTREACHED */
1334 1364
1335 /*
1336 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1337 * Return: write counters to the user passed counter struct
1338 * NB: both 1->0 and 0->1 transitions are counted except for
1339 * RI where only 0->1 is counted.
1340 */
1341 case TIOCGICOUNT:
1342 local_irq_save(flags);
1343 cnow = info->state->icount;
1344 local_irq_restore(flags);
1345 icount.cts = cnow.cts;
1346 icount.dsr = cnow.dsr;
1347 icount.rng = cnow.rng;
1348 icount.dcd = cnow.dcd;
1349 icount.rx = cnow.rx;
1350 icount.tx = cnow.tx;
1351 icount.frame = cnow.frame;
1352 icount.overrun = cnow.overrun;
1353 icount.parity = cnow.parity;
1354 icount.brk = cnow.brk;
1355 icount.buf_overrun = cnow.buf_overrun;
1356
1357 if (copy_to_user(argp, &icount, sizeof(icount)))
1358 return -EFAULT;
1359 return 0;
1360 case TIOCSERGWILD: 1365 case TIOCSERGWILD:
1361 case TIOCSERSWILD: 1366 case TIOCSERSWILD:
1362 /* "setserial -W" is called in Debian boot */ 1367 /* "setserial -W" is called in Debian boot */
@@ -1958,6 +1963,7 @@ static const struct tty_operations serial_ops = {
1958 .wait_until_sent = rs_wait_until_sent, 1963 .wait_until_sent = rs_wait_until_sent,
1959 .tiocmget = rs_tiocmget, 1964 .tiocmget = rs_tiocmget,
1960 .tiocmset = rs_tiocmset, 1965 .tiocmset = rs_tiocmset,
1966 .get_icount = rs_get_icount,
1961 .proc_fops = &rs_proc_fops, 1967 .proc_fops = &rs_proc_fops,
1962}; 1968};
1963 1969
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 27aad9422332..4f152c28f40e 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -2790,29 +2790,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
2790 * NB: both 1->0 and 0->1 transitions are counted except for 2790 * NB: both 1->0 and 0->1 transitions are counted except for
2791 * RI where only 0->1 is counted. 2791 * RI where only 0->1 is counted.
2792 */ 2792 */
2793 case TIOCGICOUNT: {
2794 struct serial_icounter_struct sic = { };
2795
2796 spin_lock_irqsave(&info->card->card_lock, flags);
2797 cnow = info->icount;
2798 spin_unlock_irqrestore(&info->card->card_lock, flags);
2799
2800 sic.cts = cnow.cts;
2801 sic.dsr = cnow.dsr;
2802 sic.rng = cnow.rng;
2803 sic.dcd = cnow.dcd;
2804 sic.rx = cnow.rx;
2805 sic.tx = cnow.tx;
2806 sic.frame = cnow.frame;
2807 sic.overrun = cnow.overrun;
2808 sic.parity = cnow.parity;
2809 sic.brk = cnow.brk;
2810 sic.buf_overrun = cnow.buf_overrun;
2811
2812 if (copy_to_user(argp, &sic, sizeof(sic)))
2813 ret_val = -EFAULT;
2814 break;
2815 }
2816 default: 2793 default:
2817 ret_val = -ENOIOCTLCMD; 2794 ret_val = -ENOIOCTLCMD;
2818 } 2795 }
@@ -2823,6 +2800,31 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
2823 return ret_val; 2800 return ret_val;
2824} /* cy_ioctl */ 2801} /* cy_ioctl */
2825 2802
2803static int cy_get_icount(struct tty_struct *tty,
2804 struct serial_icounter_struct *sic)
2805{
2806 struct cyclades_port *info = tty->driver_data;
2807 struct cyclades_icount cnow; /* Used to snapshot */
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&info->card->card_lock, flags);
2811 cnow = info->icount;
2812 spin_unlock_irqrestore(&info->card->card_lock, flags);
2813
2814 sic->cts = cnow.cts;
2815 sic->dsr = cnow.dsr;
2816 sic->rng = cnow.rng;
2817 sic->dcd = cnow.dcd;
2818 sic->rx = cnow.rx;
2819 sic->tx = cnow.tx;
2820 sic->frame = cnow.frame;
2821 sic->overrun = cnow.overrun;
2822 sic->parity = cnow.parity;
2823 sic->brk = cnow.brk;
2824 sic->buf_overrun = cnow.buf_overrun;
2825 return 0;
2826}
2827
2826/* 2828/*
2827 * This routine allows the tty driver to be notified when 2829 * This routine allows the tty driver to be notified when
2828 * device's termios settings have changed. Note that a 2830 * device's termios settings have changed. Note that a
@@ -4084,6 +4086,7 @@ static const struct tty_operations cy_ops = {
4084 .wait_until_sent = cy_wait_until_sent, 4086 .wait_until_sent = cy_wait_until_sent,
4085 .tiocmget = cy_tiocmget, 4087 .tiocmget = cy_tiocmget,
4086 .tiocmset = cy_tiocmset, 4088 .tiocmset = cy_tiocmset,
4089 .get_icount = cy_get_icount,
4087 .proc_fops = &cyclades_proc_fops, 4090 .proc_fops = &cyclades_proc_fops,
4088}; 4091};
4089 4092
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 64a439ce2f89..fcd02baa7d65 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -184,6 +184,8 @@ static void ip2_hangup(PTTY);
184static int ip2_tiocmget(struct tty_struct *tty, struct file *file); 184static int ip2_tiocmget(struct tty_struct *tty, struct file *file);
185static int ip2_tiocmset(struct tty_struct *tty, struct file *file, 185static int ip2_tiocmset(struct tty_struct *tty, struct file *file,
186 unsigned int set, unsigned int clear); 186 unsigned int set, unsigned int clear);
187static int ip2_get_icount(struct tty_struct *tty,
188 struct serial_icounter_struct *icount);
187 189
188static void set_irq(int, int); 190static void set_irq(int, int);
189static void ip2_interrupt_bh(struct work_struct *work); 191static void ip2_interrupt_bh(struct work_struct *work);
@@ -456,6 +458,7 @@ static const struct tty_operations ip2_ops = {
456 .hangup = ip2_hangup, 458 .hangup = ip2_hangup,
457 .tiocmget = ip2_tiocmget, 459 .tiocmget = ip2_tiocmget,
458 .tiocmset = ip2_tiocmset, 460 .tiocmset = ip2_tiocmset,
461 .get_icount = ip2_get_icount,
459 .proc_fops = &ip2_proc_fops, 462 .proc_fops = &ip2_proc_fops,
460}; 463};
461 464
@@ -2130,7 +2133,6 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2130 i2ChanStrPtr pCh = DevTable[tty->index]; 2133 i2ChanStrPtr pCh = DevTable[tty->index];
2131 i2eBordStrPtr pB; 2134 i2eBordStrPtr pB;
2132 struct async_icount cprev, cnow; /* kernel counter temps */ 2135 struct async_icount cprev, cnow; /* kernel counter temps */
2133 struct serial_icounter_struct __user *p_cuser;
2134 int rc = 0; 2136 int rc = 0;
2135 unsigned long flags; 2137 unsigned long flags;
2136 void __user *argp = (void __user *)arg; 2138 void __user *argp = (void __user *)arg;
@@ -2299,34 +2301,6 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2299 break; 2301 break;
2300 2302
2301 /* 2303 /*
2302 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2303 * Return: write counters to the user passed counter struct
2304 * NB: both 1->0 and 0->1 transitions are counted except for RI where
2305 * only 0->1 is counted. The controller is quite capable of counting
2306 * both, but this done to preserve compatibility with the standard
2307 * serial driver.
2308 */
2309 case TIOCGICOUNT:
2310 ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc );
2311
2312 write_lock_irqsave(&pB->read_fifo_spinlock, flags);
2313 cnow = pCh->icount;
2314 write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
2315 p_cuser = argp;
2316 rc = put_user(cnow.cts, &p_cuser->cts);
2317 rc = put_user(cnow.dsr, &p_cuser->dsr);
2318 rc = put_user(cnow.rng, &p_cuser->rng);
2319 rc = put_user(cnow.dcd, &p_cuser->dcd);
2320 rc = put_user(cnow.rx, &p_cuser->rx);
2321 rc = put_user(cnow.tx, &p_cuser->tx);
2322 rc = put_user(cnow.frame, &p_cuser->frame);
2323 rc = put_user(cnow.overrun, &p_cuser->overrun);
2324 rc = put_user(cnow.parity, &p_cuser->parity);
2325 rc = put_user(cnow.brk, &p_cuser->brk);
2326 rc = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
2327 break;
2328
2329 /*
2330 * The rest are not supported by this driver. By returning -ENOIOCTLCMD they 2304 * The rest are not supported by this driver. By returning -ENOIOCTLCMD they
2331 * will be passed to the line discipline for it to handle. 2305 * will be passed to the line discipline for it to handle.
2332 */ 2306 */
@@ -2350,6 +2324,46 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2350 return rc; 2324 return rc;
2351} 2325}
2352 2326
2327static int ip2_get_icount(struct tty_struct *tty,
2328 struct serial_icounter_struct *icount)
2329{
2330 i2ChanStrPtr pCh = DevTable[tty->index];
2331 i2eBordStrPtr pB;
2332 struct async_icount cnow; /* kernel counter temp */
2333 unsigned long flags;
2334
2335 if ( pCh == NULL )
2336 return -ENODEV;
2337
2338 pB = pCh->pMyBord;
2339
2340 /*
2341 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2342 * Return: write counters to the user passed counter struct
2343 * NB: both 1->0 and 0->1 transitions are counted except for RI where
2344 * only 0->1 is counted. The controller is quite capable of counting
2345 * both, but this done to preserve compatibility with the standard
2346 * serial driver.
2347 */
2348
2349 write_lock_irqsave(&pB->read_fifo_spinlock, flags);
2350 cnow = pCh->icount;
2351 write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
2352
2353 icount->cts = cnow.cts;
2354 icount->dsr = cnow.dsr;
2355 icount->rng = cnow.rng;
2356 icount->dcd = cnow.dcd;
2357 icount->rx = cnow.rx;
2358 icount->tx = cnow.tx;
2359 icount->frame = cnow.frame;
2360 icount->overrun = cnow.overrun;
2361 icount->parity = cnow.parity;
2362 icount->brk = cnow.brk;
2363 icount->buf_overrun = cnow.buf_overrun;
2364 return 0;
2365}
2366
2353/******************************************************************************/ 2367/******************************************************************************/
2354/* Function: GetSerialInfo() */ 2368/* Function: GetSerialInfo() */
2355/* Parameters: Pointer to channel structure */ 2369/* Parameters: Pointer to channel structure */
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3fc89da856ae..463df27494bd 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1700,7 +1700,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1700 return 0; 1700 return 0;
1701 } 1701 }
1702 1702
1703 if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT && 1703 if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT &&
1704 test_bit(TTY_IO_ERROR, &tty->flags)) 1704 test_bit(TTY_IO_ERROR, &tty->flags))
1705 return -EIO; 1705 return -EIO;
1706 1706
@@ -1730,32 +1730,6 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1730 1730
1731 return wait_event_interruptible(info->port.delta_msr_wait, 1731 return wait_event_interruptible(info->port.delta_msr_wait,
1732 mxser_cflags_changed(info, arg, &cnow)); 1732 mxser_cflags_changed(info, arg, &cnow));
1733 /*
1734 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1735 * Return: write counters to the user passed counter struct
1736 * NB: both 1->0 and 0->1 transitions are counted except for
1737 * RI where only 0->1 is counted.
1738 */
1739 case TIOCGICOUNT: {
1740 struct serial_icounter_struct icnt = { 0 };
1741 spin_lock_irqsave(&info->slock, flags);
1742 cnow = info->icount;
1743 spin_unlock_irqrestore(&info->slock, flags);
1744
1745 icnt.frame = cnow.frame;
1746 icnt.brk = cnow.brk;
1747 icnt.overrun = cnow.overrun;
1748 icnt.buf_overrun = cnow.buf_overrun;
1749 icnt.parity = cnow.parity;
1750 icnt.rx = cnow.rx;
1751 icnt.tx = cnow.tx;
1752 icnt.cts = cnow.cts;
1753 icnt.dsr = cnow.dsr;
1754 icnt.rng = cnow.rng;
1755 icnt.dcd = cnow.dcd;
1756
1757 return copy_to_user(argp, &icnt, sizeof(icnt)) ? -EFAULT : 0;
1758 }
1759 case MOXA_HighSpeedOn: 1733 case MOXA_HighSpeedOn:
1760 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); 1734 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
1761 case MOXA_SDS_RSTICOUNTER: 1735 case MOXA_SDS_RSTICOUNTER:
@@ -1828,6 +1802,39 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1828 return 0; 1802 return 0;
1829} 1803}
1830 1804
1805 /*
1806 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1807 * Return: write counters to the user passed counter struct
1808 * NB: both 1->0 and 0->1 transitions are counted except for
1809 * RI where only 0->1 is counted.
1810 */
1811
1812static int mxser_get_icount(struct tty_struct *tty,
1813 struct serial_icounter_struct *icount)
1814
1815{
1816 struct mxser_port *info = tty->driver_data;
1817 struct async_icount cnow;
1818 unsigned long flags;
1819
1820 spin_lock_irqsave(&info->slock, flags);
1821 cnow = info->icount;
1822 spin_unlock_irqrestore(&info->slock, flags);
1823
1824 icount->frame = cnow.frame;
1825 icount->brk = cnow.brk;
1826 icount->overrun = cnow.overrun;
1827 icount->buf_overrun = cnow.buf_overrun;
1828 icount->parity = cnow.parity;
1829 icount->rx = cnow.rx;
1830 icount->tx = cnow.tx;
1831 icount->cts = cnow.cts;
1832 icount->dsr = cnow.dsr;
1833 icount->rng = cnow.rng;
1834 icount->dcd = cnow.dcd;
1835 return 0;
1836}
1837
1831static void mxser_stoprx(struct tty_struct *tty) 1838static void mxser_stoprx(struct tty_struct *tty)
1832{ 1839{
1833 struct mxser_port *info = tty->driver_data; 1840 struct mxser_port *info = tty->driver_data;
@@ -2326,6 +2333,7 @@ static const struct tty_operations mxser_ops = {
2326 .wait_until_sent = mxser_wait_until_sent, 2333 .wait_until_sent = mxser_wait_until_sent,
2327 .tiocmget = mxser_tiocmget, 2334 .tiocmget = mxser_tiocmget,
2328 .tiocmset = mxser_tiocmset, 2335 .tiocmset = mxser_tiocmset,
2336 .get_icount = mxser_get_icount,
2329}; 2337};
2330 2338
2331struct tty_port_operations mxser_port_ops = { 2339struct tty_port_operations mxser_port_ops = {
@@ -2339,20 +2347,11 @@ struct tty_port_operations mxser_port_ops = {
2339 * The MOXA Smartio/Industio serial driver boot-time initialization code! 2347 * The MOXA Smartio/Industio serial driver boot-time initialization code!
2340 */ 2348 */
2341 2349
2342static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev, 2350static void mxser_release_ISA_res(struct mxser_board *brd)
2343 unsigned int irq)
2344{ 2351{
2345 if (irq) 2352 free_irq(brd->irq, brd);
2346 free_irq(brd->irq, brd); 2353 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
2347 if (pdev != NULL) { /* PCI */ 2354 release_region(brd->vector, 1);
2348#ifdef CONFIG_PCI
2349 pci_release_region(pdev, 2);
2350 pci_release_region(pdev, 3);
2351#endif
2352 } else {
2353 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
2354 release_region(brd->vector, 1);
2355 }
2356} 2355}
2357 2356
2358static int __devinit mxser_initbrd(struct mxser_board *brd, 2357static int __devinit mxser_initbrd(struct mxser_board *brd,
@@ -2397,13 +2396,11 @@ static int __devinit mxser_initbrd(struct mxser_board *brd,
2397 2396
2398 retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser", 2397 retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
2399 brd); 2398 brd);
2400 if (retval) { 2399 if (retval)
2401 printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may " 2400 printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
2402 "conflict with another device.\n", 2401 "conflict with another device.\n",
2403 brd->info->name, brd->irq); 2402 brd->info->name, brd->irq);
2404 /* We hold resources, we need to release them. */ 2403
2405 mxser_release_res(brd, pdev, 0);
2406 }
2407 return retval; 2404 return retval;
2408} 2405}
2409 2406
@@ -2555,7 +2552,7 @@ static int __devinit mxser_probe(struct pci_dev *pdev,
2555 ioaddress = pci_resource_start(pdev, 2); 2552 ioaddress = pci_resource_start(pdev, 2);
2556 retval = pci_request_region(pdev, 2, "mxser(IO)"); 2553 retval = pci_request_region(pdev, 2, "mxser(IO)");
2557 if (retval) 2554 if (retval)
2558 goto err; 2555 goto err_dis;
2559 2556
2560 brd->info = &mxser_cards[ent->driver_data]; 2557 brd->info = &mxser_cards[ent->driver_data];
2561 for (i = 0; i < brd->info->nports; i++) 2558 for (i = 0; i < brd->info->nports; i++)
@@ -2565,7 +2562,7 @@ static int __devinit mxser_probe(struct pci_dev *pdev,
2565 ioaddress = pci_resource_start(pdev, 3); 2562 ioaddress = pci_resource_start(pdev, 3);
2566 retval = pci_request_region(pdev, 3, "mxser(vector)"); 2563 retval = pci_request_region(pdev, 3, "mxser(vector)");
2567 if (retval) 2564 if (retval)
2568 goto err_relio; 2565 goto err_zero;
2569 brd->vector = ioaddress; 2566 brd->vector = ioaddress;
2570 2567
2571 /* irq */ 2568 /* irq */
@@ -2608,7 +2605,7 @@ static int __devinit mxser_probe(struct pci_dev *pdev,
2608 /* mxser_initbrd will hook ISR. */ 2605 /* mxser_initbrd will hook ISR. */
2609 retval = mxser_initbrd(brd, pdev); 2606 retval = mxser_initbrd(brd, pdev);
2610 if (retval) 2607 if (retval)
2611 goto err_null; 2608 goto err_rel3;
2612 2609
2613 for (i = 0; i < brd->info->nports; i++) 2610 for (i = 0; i < brd->info->nports; i++)
2614 tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev); 2611 tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
@@ -2616,10 +2613,13 @@ static int __devinit mxser_probe(struct pci_dev *pdev,
2616 pci_set_drvdata(pdev, brd); 2613 pci_set_drvdata(pdev, brd);
2617 2614
2618 return 0; 2615 return 0;
2619err_relio: 2616err_rel3:
2620 pci_release_region(pdev, 2); 2617 pci_release_region(pdev, 3);
2621err_null: 2618err_zero:
2622 brd->info = NULL; 2619 brd->info = NULL;
2620 pci_release_region(pdev, 2);
2621err_dis:
2622 pci_disable_device(pdev);
2623err: 2623err:
2624 return retval; 2624 return retval;
2625#else 2625#else
@@ -2629,14 +2629,19 @@ err:
2629 2629
2630static void __devexit mxser_remove(struct pci_dev *pdev) 2630static void __devexit mxser_remove(struct pci_dev *pdev)
2631{ 2631{
2632#ifdef CONFIG_PCI
2632 struct mxser_board *brd = pci_get_drvdata(pdev); 2633 struct mxser_board *brd = pci_get_drvdata(pdev);
2633 unsigned int i; 2634 unsigned int i;
2634 2635
2635 for (i = 0; i < brd->info->nports; i++) 2636 for (i = 0; i < brd->info->nports; i++)
2636 tty_unregister_device(mxvar_sdriver, brd->idx + i); 2637 tty_unregister_device(mxvar_sdriver, brd->idx + i);
2637 2638
2638 mxser_release_res(brd, pdev, 1); 2639 free_irq(pdev->irq, brd);
2640 pci_release_region(pdev, 2);
2641 pci_release_region(pdev, 3);
2642 pci_disable_device(pdev);
2639 brd->info = NULL; 2643 brd->info = NULL;
2644#endif
2640} 2645}
2641 2646
2642static struct pci_driver mxser_driver = { 2647static struct pci_driver mxser_driver = {
@@ -2741,7 +2746,7 @@ static void __exit mxser_module_exit(void)
2741 2746
2742 for (i = 0; i < MXSER_BOARDS; i++) 2747 for (i = 0; i < MXSER_BOARDS; i++)
2743 if (mxser_boards[i].info != NULL) 2748 if (mxser_boards[i].info != NULL)
2744 mxser_release_res(&mxser_boards[i], NULL, 1); 2749 mxser_release_ISA_res(&mxser_boards[i]);
2745} 2750}
2746 2751
2747module_init(mxser_module_init); 2752module_init(mxser_module_init);
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 817169cbb245..dd3f9b1f11b4 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1804,24 +1804,24 @@ static int ntty_cflags_changed(struct port *port, unsigned long flags,
1804 return ret; 1804 return ret;
1805} 1805}
1806 1806
1807static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp) 1807static int ntty_tiocgicount(struct tty_struct *tty,
1808 struct serial_icounter_struct *icount)
1808{ 1809{
1810 struct port *port = tty->driver_data;
1809 const struct async_icount cnow = port->tty_icount; 1811 const struct async_icount cnow = port->tty_icount;
1810 struct serial_icounter_struct icount; 1812
1811 1813 icount->cts = cnow.cts;
1812 icount.cts = cnow.cts; 1814 icount->dsr = cnow.dsr;
1813 icount.dsr = cnow.dsr; 1815 icount->rng = cnow.rng;
1814 icount.rng = cnow.rng; 1816 icount->dcd = cnow.dcd;
1815 icount.dcd = cnow.dcd; 1817 icount->rx = cnow.rx;
1816 icount.rx = cnow.rx; 1818 icount->tx = cnow.tx;
1817 icount.tx = cnow.tx; 1819 icount->frame = cnow.frame;
1818 icount.frame = cnow.frame; 1820 icount->overrun = cnow.overrun;
1819 icount.overrun = cnow.overrun; 1821 icount->parity = cnow.parity;
1820 icount.parity = cnow.parity; 1822 icount->brk = cnow.brk;
1821 icount.brk = cnow.brk; 1823 icount->buf_overrun = cnow.buf_overrun;
1822 icount.buf_overrun = cnow.buf_overrun; 1824 return 0;
1823
1824 return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
1825} 1825}
1826 1826
1827static int ntty_ioctl(struct tty_struct *tty, struct file *file, 1827static int ntty_ioctl(struct tty_struct *tty, struct file *file,
@@ -1840,9 +1840,7 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
1840 rval = wait_event_interruptible(port->tty_wait, 1840 rval = wait_event_interruptible(port->tty_wait,
1841 ntty_cflags_changed(port, arg, &cprev)); 1841 ntty_cflags_changed(port, arg, &cprev));
1842 break; 1842 break;
1843 } case TIOCGICOUNT: 1843 }
1844 rval = ntty_ioctl_tiocgicount(port, argp);
1845 break;
1846 default: 1844 default:
1847 DBG1("ERR: 0x%08X, %d", cmd, cmd); 1845 DBG1("ERR: 0x%08X, %d", cmd, cmd);
1848 break; 1846 break;
@@ -1922,6 +1920,7 @@ static const struct tty_operations tty_ops = {
1922 .chars_in_buffer = ntty_chars_in_buffer, 1920 .chars_in_buffer = ntty_chars_in_buffer,
1923 .tiocmget = ntty_tiocmget, 1921 .tiocmget = ntty_tiocmget,
1924 .tiocmset = ntty_tiocmset, 1922 .tiocmset = ntty_tiocmset,
1923 .get_icount = ntty_tiocgicount,
1925 .install = ntty_install, 1924 .install = ntty_install,
1926 .cleanup = ntty_cleanup, 1925 .cleanup = ntty_cleanup,
1927}; 1926};
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index be1810057607..bfc10f89d951 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2191,6 +2191,32 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
2191 return 0; 2191 return 0;
2192} 2192}
2193 2193
2194static int mgslpc_get_icount(struct tty_struct *tty,
2195 struct serial_icounter_struct *icount)
2196{
2197 MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
2198 struct mgsl_icount cnow; /* kernel counter temps */
2199 unsigned long flags;
2200
2201 spin_lock_irqsave(&info->lock,flags);
2202 cnow = info->icount;
2203 spin_unlock_irqrestore(&info->lock,flags);
2204
2205 icount->cts = cnow.cts;
2206 icount->dsr = cnow.dsr;
2207 icount->rng = cnow.rng;
2208 icount->dcd = cnow.dcd;
2209 icount->rx = cnow.rx;
2210 icount->tx = cnow.tx;
2211 icount->frame = cnow.frame;
2212 icount->overrun = cnow.overrun;
2213 icount->parity = cnow.parity;
2214 icount->brk = cnow.brk;
2215 icount->buf_overrun = cnow.buf_overrun;
2216
2217 return 0;
2218}
2219
2194/* Service an IOCTL request 2220/* Service an IOCTL request
2195 * 2221 *
2196 * Arguments: 2222 * Arguments:
@@ -2206,11 +2232,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
2206 unsigned int cmd, unsigned long arg) 2232 unsigned int cmd, unsigned long arg)
2207{ 2233{
2208 MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; 2234 MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
2209 int error;
2210 struct mgsl_icount cnow; /* kernel counter temps */
2211 struct serial_icounter_struct __user *p_cuser; /* user space */
2212 void __user *argp = (void __user *)arg; 2235 void __user *argp = (void __user *)arg;
2213 unsigned long flags;
2214 2236
2215 if (debug_level >= DEBUG_LEVEL_INFO) 2237 if (debug_level >= DEBUG_LEVEL_INFO)
2216 printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, 2238 printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
@@ -2220,7 +2242,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
2220 return -ENODEV; 2242 return -ENODEV;
2221 2243
2222 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 2244 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2223 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 2245 (cmd != TIOCMIWAIT)) {
2224 if (tty->flags & (1 << TTY_IO_ERROR)) 2246 if (tty->flags & (1 << TTY_IO_ERROR))
2225 return -EIO; 2247 return -EIO;
2226 } 2248 }
@@ -2250,34 +2272,6 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
2250 return wait_events(info, argp); 2272 return wait_events(info, argp);
2251 case TIOCMIWAIT: 2273 case TIOCMIWAIT:
2252 return modem_input_wait(info,(int)arg); 2274 return modem_input_wait(info,(int)arg);
2253 case TIOCGICOUNT:
2254 spin_lock_irqsave(&info->lock,flags);
2255 cnow = info->icount;
2256 spin_unlock_irqrestore(&info->lock,flags);
2257 p_cuser = argp;
2258 PUT_USER(error,cnow.cts, &p_cuser->cts);
2259 if (error) return error;
2260 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
2261 if (error) return error;
2262 PUT_USER(error,cnow.rng, &p_cuser->rng);
2263 if (error) return error;
2264 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
2265 if (error) return error;
2266 PUT_USER(error,cnow.rx, &p_cuser->rx);
2267 if (error) return error;
2268 PUT_USER(error,cnow.tx, &p_cuser->tx);
2269 if (error) return error;
2270 PUT_USER(error,cnow.frame, &p_cuser->frame);
2271 if (error) return error;
2272 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
2273 if (error) return error;
2274 PUT_USER(error,cnow.parity, &p_cuser->parity);
2275 if (error) return error;
2276 PUT_USER(error,cnow.brk, &p_cuser->brk);
2277 if (error) return error;
2278 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
2279 if (error) return error;
2280 return 0;
2281 default: 2275 default:
2282 return -ENOIOCTLCMD; 2276 return -ENOIOCTLCMD;
2283 } 2277 }
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index c350d01716bd..923a48585501 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -676,7 +676,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
676 676
677 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 677 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
678 678
679 tty_add_file(tty, filp); 679 retval = tty_add_file(tty, filp);
680 if (retval)
681 goto out;
680 682
681 retval = devpts_pty_new(inode, tty->link); 683 retval = devpts_pty_new(inode, tty->link);
682 if (retval) 684 if (retval)
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index a2a58004e188..3a6824f12be2 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2925,6 +2925,38 @@ static int mgsl_break(struct tty_struct *tty, int break_state)
2925 2925
2926} /* end of mgsl_break() */ 2926} /* end of mgsl_break() */
2927 2927
2928/*
2929 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2930 * Return: write counters to the user passed counter struct
2931 * NB: both 1->0 and 0->1 transitions are counted except for
2932 * RI where only 0->1 is counted.
2933 */
2934static int msgl_get_icount(struct tty_struct *tty,
2935 struct serial_icounter_struct *icount)
2936
2937{
2938 struct mgsl_struct * info = tty->driver_data;
2939 struct mgsl_icount cnow; /* kernel counter temps */
2940 unsigned long flags;
2941
2942 spin_lock_irqsave(&info->irq_spinlock,flags);
2943 cnow = info->icount;
2944 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2945
2946 icount->cts = cnow.cts;
2947 icount->dsr = cnow.dsr;
2948 icount->rng = cnow.rng;
2949 icount->dcd = cnow.dcd;
2950 icount->rx = cnow.rx;
2951 icount->tx = cnow.tx;
2952 icount->frame = cnow.frame;
2953 icount->overrun = cnow.overrun;
2954 icount->parity = cnow.parity;
2955 icount->brk = cnow.brk;
2956 icount->buf_overrun = cnow.buf_overrun;
2957 return 0;
2958}
2959
2928/* mgsl_ioctl() Service an IOCTL request 2960/* mgsl_ioctl() Service an IOCTL request
2929 * 2961 *
2930 * Arguments: 2962 * Arguments:
@@ -2949,7 +2981,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2949 return -ENODEV; 2981 return -ENODEV;
2950 2982
2951 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 2983 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2952 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 2984 (cmd != TIOCMIWAIT)) {
2953 if (tty->flags & (1 << TTY_IO_ERROR)) 2985 if (tty->flags & (1 << TTY_IO_ERROR))
2954 return -EIO; 2986 return -EIO;
2955 } 2987 }
@@ -2959,11 +2991,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2959 2991
2960static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) 2992static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2961{ 2993{
2962 int error;
2963 struct mgsl_icount cnow; /* kernel counter temps */
2964 void __user *argp = (void __user *)arg; 2994 void __user *argp = (void __user *)arg;
2965 struct serial_icounter_struct __user *p_cuser; /* user space */
2966 unsigned long flags;
2967 2995
2968 switch (cmd) { 2996 switch (cmd) {
2969 case MGSL_IOCGPARAMS: 2997 case MGSL_IOCGPARAMS:
@@ -2992,40 +3020,6 @@ static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigne
2992 case TIOCMIWAIT: 3020 case TIOCMIWAIT:
2993 return modem_input_wait(info,(int)arg); 3021 return modem_input_wait(info,(int)arg);
2994 3022
2995 /*
2996 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2997 * Return: write counters to the user passed counter struct
2998 * NB: both 1->0 and 0->1 transitions are counted except for
2999 * RI where only 0->1 is counted.
3000 */
3001 case TIOCGICOUNT:
3002 spin_lock_irqsave(&info->irq_spinlock,flags);
3003 cnow = info->icount;
3004 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3005 p_cuser = argp;
3006 PUT_USER(error,cnow.cts, &p_cuser->cts);
3007 if (error) return error;
3008 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3009 if (error) return error;
3010 PUT_USER(error,cnow.rng, &p_cuser->rng);
3011 if (error) return error;
3012 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3013 if (error) return error;
3014 PUT_USER(error,cnow.rx, &p_cuser->rx);
3015 if (error) return error;
3016 PUT_USER(error,cnow.tx, &p_cuser->tx);
3017 if (error) return error;
3018 PUT_USER(error,cnow.frame, &p_cuser->frame);
3019 if (error) return error;
3020 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3021 if (error) return error;
3022 PUT_USER(error,cnow.parity, &p_cuser->parity);
3023 if (error) return error;
3024 PUT_USER(error,cnow.brk, &p_cuser->brk);
3025 if (error) return error;
3026 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3027 if (error) return error;
3028 return 0;
3029 default: 3023 default:
3030 return -ENOIOCTLCMD; 3024 return -ENOIOCTLCMD;
3031 } 3025 }
@@ -4328,6 +4322,7 @@ static const struct tty_operations mgsl_ops = {
4328 .hangup = mgsl_hangup, 4322 .hangup = mgsl_hangup,
4329 .tiocmget = tiocmget, 4323 .tiocmget = tiocmget,
4330 .tiocmset = tiocmset, 4324 .tiocmset = tiocmset,
4325 .get_icount = msgl_get_icount,
4331 .proc_fops = &mgsl_proc_fops, 4326 .proc_fops = &mgsl_proc_fops,
4332}; 4327};
4333 4328
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index e63b830c86cc..1746d91205f7 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1032,9 +1032,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1032 unsigned int cmd, unsigned long arg) 1032 unsigned int cmd, unsigned long arg)
1033{ 1033{
1034 struct slgt_info *info = tty->driver_data; 1034 struct slgt_info *info = tty->driver_data;
1035 struct mgsl_icount cnow; /* kernel counter temps */
1036 struct serial_icounter_struct __user *p_cuser; /* user space */
1037 unsigned long flags;
1038 void __user *argp = (void __user *)arg; 1035 void __user *argp = (void __user *)arg;
1039 int ret; 1036 int ret;
1040 1037
@@ -1043,7 +1040,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1043 DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd)); 1040 DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
1044 1041
1045 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 1042 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
1046 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 1043 (cmd != TIOCMIWAIT)) {
1047 if (tty->flags & (1 << TTY_IO_ERROR)) 1044 if (tty->flags & (1 << TTY_IO_ERROR))
1048 return -EIO; 1045 return -EIO;
1049 } 1046 }
@@ -1053,24 +1050,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1053 return wait_mgsl_event(info, argp); 1050 return wait_mgsl_event(info, argp);
1054 case TIOCMIWAIT: 1051 case TIOCMIWAIT:
1055 return modem_input_wait(info,(int)arg); 1052 return modem_input_wait(info,(int)arg);
1056 case TIOCGICOUNT:
1057 spin_lock_irqsave(&info->lock,flags);
1058 cnow = info->icount;
1059 spin_unlock_irqrestore(&info->lock,flags);
1060 p_cuser = argp;
1061 if (put_user(cnow.cts, &p_cuser->cts) ||
1062 put_user(cnow.dsr, &p_cuser->dsr) ||
1063 put_user(cnow.rng, &p_cuser->rng) ||
1064 put_user(cnow.dcd, &p_cuser->dcd) ||
1065 put_user(cnow.rx, &p_cuser->rx) ||
1066 put_user(cnow.tx, &p_cuser->tx) ||
1067 put_user(cnow.frame, &p_cuser->frame) ||
1068 put_user(cnow.overrun, &p_cuser->overrun) ||
1069 put_user(cnow.parity, &p_cuser->parity) ||
1070 put_user(cnow.brk, &p_cuser->brk) ||
1071 put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
1072 return -EFAULT;
1073 return 0;
1074 case MGSL_IOCSGPIO: 1053 case MGSL_IOCSGPIO:
1075 return set_gpio(info, argp); 1054 return set_gpio(info, argp);
1076 case MGSL_IOCGGPIO: 1055 case MGSL_IOCGGPIO:
@@ -1117,6 +1096,33 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1117 return ret; 1096 return ret;
1118} 1097}
1119 1098
1099static int get_icount(struct tty_struct *tty,
1100 struct serial_icounter_struct *icount)
1101
1102{
1103 struct slgt_info *info = tty->driver_data;
1104 struct mgsl_icount cnow; /* kernel counter temps */
1105 unsigned long flags;
1106
1107 spin_lock_irqsave(&info->lock,flags);
1108 cnow = info->icount;
1109 spin_unlock_irqrestore(&info->lock,flags);
1110
1111 icount->cts = cnow.cts;
1112 icount->dsr = cnow.dsr;
1113 icount->rng = cnow.rng;
1114 icount->dcd = cnow.dcd;
1115 icount->rx = cnow.rx;
1116 icount->tx = cnow.tx;
1117 icount->frame = cnow.frame;
1118 icount->overrun = cnow.overrun;
1119 icount->parity = cnow.parity;
1120 icount->brk = cnow.brk;
1121 icount->buf_overrun = cnow.buf_overrun;
1122
1123 return 0;
1124}
1125
1120/* 1126/*
1121 * support for 32 bit ioctl calls on 64 bit systems 1127 * support for 32 bit ioctl calls on 64 bit systems
1122 */ 1128 */
@@ -1206,10 +1212,6 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
1206 case MGSL_IOCSGPIO: 1212 case MGSL_IOCSGPIO:
1207 case MGSL_IOCGGPIO: 1213 case MGSL_IOCGGPIO:
1208 case MGSL_IOCWAITGPIO: 1214 case MGSL_IOCWAITGPIO:
1209 case TIOCGICOUNT:
1210 rc = ioctl(tty, file, cmd, (unsigned long)(compat_ptr(arg)));
1211 break;
1212
1213 case MGSL_IOCSTXIDLE: 1215 case MGSL_IOCSTXIDLE:
1214 case MGSL_IOCTXENABLE: 1216 case MGSL_IOCTXENABLE:
1215 case MGSL_IOCRXENABLE: 1217 case MGSL_IOCRXENABLE:
@@ -3642,6 +3644,7 @@ static const struct tty_operations ops = {
3642 .hangup = hangup, 3644 .hangup = hangup,
3643 .tiocmget = tiocmget, 3645 .tiocmget = tiocmget,
3644 .tiocmset = tiocmset, 3646 .tiocmset = tiocmset,
3647 .get_icount = get_icount,
3645 .proc_fops = &synclink_gt_proc_fops, 3648 .proc_fops = &synclink_gt_proc_fops,
3646}; 3649};
3647 3650
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index e56caf7d82aa..2f9eb4b0dec1 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -1258,10 +1258,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1258 unsigned int cmd, unsigned long arg) 1258 unsigned int cmd, unsigned long arg)
1259{ 1259{
1260 SLMP_INFO *info = tty->driver_data; 1260 SLMP_INFO *info = tty->driver_data;
1261 int error;
1262 struct mgsl_icount cnow; /* kernel counter temps */
1263 struct serial_icounter_struct __user *p_cuser; /* user space */
1264 unsigned long flags;
1265 void __user *argp = (void __user *)arg; 1261 void __user *argp = (void __user *)arg;
1266 1262
1267 if (debug_level >= DEBUG_LEVEL_INFO) 1263 if (debug_level >= DEBUG_LEVEL_INFO)
@@ -1272,7 +1268,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1272 return -ENODEV; 1268 return -ENODEV;
1273 1269
1274 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 1270 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
1275 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 1271 (cmd != TIOCMIWAIT)) {
1276 if (tty->flags & (1 << TTY_IO_ERROR)) 1272 if (tty->flags & (1 << TTY_IO_ERROR))
1277 return -EIO; 1273 return -EIO;
1278 } 1274 }
@@ -1310,40 +1306,38 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1310 * NB: both 1->0 and 0->1 transitions are counted except for 1306 * NB: both 1->0 and 0->1 transitions are counted except for
1311 * RI where only 0->1 is counted. 1307 * RI where only 0->1 is counted.
1312 */ 1308 */
1313 case TIOCGICOUNT:
1314 spin_lock_irqsave(&info->lock,flags);
1315 cnow = info->icount;
1316 spin_unlock_irqrestore(&info->lock,flags);
1317 p_cuser = argp;
1318 PUT_USER(error,cnow.cts, &p_cuser->cts);
1319 if (error) return error;
1320 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
1321 if (error) return error;
1322 PUT_USER(error,cnow.rng, &p_cuser->rng);
1323 if (error) return error;
1324 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
1325 if (error) return error;
1326 PUT_USER(error,cnow.rx, &p_cuser->rx);
1327 if (error) return error;
1328 PUT_USER(error,cnow.tx, &p_cuser->tx);
1329 if (error) return error;
1330 PUT_USER(error,cnow.frame, &p_cuser->frame);
1331 if (error) return error;
1332 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
1333 if (error) return error;
1334 PUT_USER(error,cnow.parity, &p_cuser->parity);
1335 if (error) return error;
1336 PUT_USER(error,cnow.brk, &p_cuser->brk);
1337 if (error) return error;
1338 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
1339 if (error) return error;
1340 return 0;
1341 default: 1309 default:
1342 return -ENOIOCTLCMD; 1310 return -ENOIOCTLCMD;
1343 } 1311 }
1344 return 0; 1312 return 0;
1345} 1313}
1346 1314
1315static int get_icount(struct tty_struct *tty,
1316 struct serial_icounter_struct *icount)
1317{
1318 SLMP_INFO *info = tty->driver_data;
1319 struct mgsl_icount cnow; /* kernel counter temps */
1320 unsigned long flags;
1321
1322 spin_lock_irqsave(&info->lock,flags);
1323 cnow = info->icount;
1324 spin_unlock_irqrestore(&info->lock,flags);
1325
1326 icount->cts = cnow.cts;
1327 icount->dsr = cnow.dsr;
1328 icount->rng = cnow.rng;
1329 icount->dcd = cnow.dcd;
1330 icount->rx = cnow.rx;
1331 icount->tx = cnow.tx;
1332 icount->frame = cnow.frame;
1333 icount->overrun = cnow.overrun;
1334 icount->parity = cnow.parity;
1335 icount->brk = cnow.brk;
1336 icount->buf_overrun = cnow.buf_overrun;
1337
1338 return 0;
1339}
1340
1347/* 1341/*
1348 * /proc fs routines.... 1342 * /proc fs routines....
1349 */ 1343 */
@@ -3909,6 +3903,7 @@ static const struct tty_operations ops = {
3909 .hangup = hangup, 3903 .hangup = hangup,
3910 .tiocmget = tiocmget, 3904 .tiocmget = tiocmget,
3911 .tiocmset = tiocmset, 3905 .tiocmset = tiocmset,
3906 .get_icount = get_icount,
3912 .proc_fops = &synclinkmp_proc_fops, 3907 .proc_fops = &synclinkmp_proc_fops,
3913}; 3908};
3914 3909
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 613c852ee0fe..c05c5af5aa04 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -96,6 +96,7 @@
96#include <linux/bitops.h> 96#include <linux/bitops.h>
97#include <linux/delay.h> 97#include <linux/delay.h>
98#include <linux/seq_file.h> 98#include <linux/seq_file.h>
99#include <linux/serial.h>
99 100
100#include <linux/uaccess.h> 101#include <linux/uaccess.h>
101#include <asm/system.h> 102#include <asm/system.h>
@@ -183,6 +184,8 @@ struct tty_struct *alloc_tty_struct(void)
183 184
184void free_tty_struct(struct tty_struct *tty) 185void free_tty_struct(struct tty_struct *tty)
185{ 186{
187 if (tty->dev)
188 put_device(tty->dev);
186 kfree(tty->write_buf); 189 kfree(tty->write_buf);
187 tty_buffer_free_all(tty); 190 tty_buffer_free_all(tty);
188 kfree(tty); 191 kfree(tty);
@@ -194,12 +197,13 @@ static inline struct tty_struct *file_tty(struct file *file)
194} 197}
195 198
196/* Associate a new file with the tty structure */ 199/* Associate a new file with the tty structure */
197void tty_add_file(struct tty_struct *tty, struct file *file) 200int tty_add_file(struct tty_struct *tty, struct file *file)
198{ 201{
199 struct tty_file_private *priv; 202 struct tty_file_private *priv;
200 203
201 /* XXX: must implement proper error handling in callers */ 204 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
202 priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL); 205 if (!priv)
206 return -ENOMEM;
203 207
204 priv->tty = tty; 208 priv->tty = tty;
205 priv->file = file; 209 priv->file = file;
@@ -208,6 +212,8 @@ void tty_add_file(struct tty_struct *tty, struct file *file)
208 spin_lock(&tty_files_lock); 212 spin_lock(&tty_files_lock);
209 list_add(&priv->list, &tty->tty_files); 213 list_add(&priv->list, &tty->tty_files);
210 spin_unlock(&tty_files_lock); 214 spin_unlock(&tty_files_lock);
215
216 return 0;
211} 217}
212 218
213/* Delete file from its tty */ 219/* Delete file from its tty */
@@ -1875,7 +1881,11 @@ got_driver:
1875 return PTR_ERR(tty); 1881 return PTR_ERR(tty);
1876 } 1882 }
1877 1883
1878 tty_add_file(tty, filp); 1884 retval = tty_add_file(tty, filp);
1885 if (retval) {
1886 tty_unlock();
1887 return retval;
1888 }
1879 1889
1880 check_tty_count(tty, "tty_open"); 1890 check_tty_count(tty, "tty_open");
1881 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 1891 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -2502,6 +2512,20 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
2502 return tty->ops->tiocmset(tty, file, set, clear); 2512 return tty->ops->tiocmset(tty, file, set, clear);
2503} 2513}
2504 2514
2515static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
2516{
2517 int retval = -EINVAL;
2518 struct serial_icounter_struct icount;
2519 memset(&icount, 0, sizeof(icount));
2520 if (tty->ops->get_icount)
2521 retval = tty->ops->get_icount(tty, &icount);
2522 if (retval != 0)
2523 return retval;
2524 if (copy_to_user(arg, &icount, sizeof(icount)))
2525 return -EFAULT;
2526 return 0;
2527}
2528
2505struct tty_struct *tty_pair_get_tty(struct tty_struct *tty) 2529struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
2506{ 2530{
2507 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 2531 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -2622,6 +2646,12 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2622 case TIOCMBIC: 2646 case TIOCMBIC:
2623 case TIOCMBIS: 2647 case TIOCMBIS:
2624 return tty_tiocmset(tty, file, cmd, p); 2648 return tty_tiocmset(tty, file, cmd, p);
2649 case TIOCGICOUNT:
2650 retval = tty_tiocgicount(tty, p);
2651 /* For the moment allow fall through to the old method */
2652 if (retval != -EINVAL)
2653 return retval;
2654 break;
2625 case TCFLSH: 2655 case TCFLSH:
2626 switch (arg) { 2656 switch (arg) {
2627 case TCIFLUSH: 2657 case TCIFLUSH:
@@ -2783,6 +2813,20 @@ void do_SAK(struct tty_struct *tty)
2783 2813
2784EXPORT_SYMBOL(do_SAK); 2814EXPORT_SYMBOL(do_SAK);
2785 2815
2816static int dev_match_devt(struct device *dev, void *data)
2817{
2818 dev_t *devt = data;
2819 return dev->devt == *devt;
2820}
2821
2822/* Must put_device() after it's unused! */
2823static struct device *tty_get_device(struct tty_struct *tty)
2824{
2825 dev_t devt = tty_devnum(tty);
2826 return class_find_device(tty_class, NULL, &devt, dev_match_devt);
2827}
2828
2829
2786/** 2830/**
2787 * initialize_tty_struct 2831 * initialize_tty_struct
2788 * @tty: tty to initialize 2832 * @tty: tty to initialize
@@ -2823,6 +2867,7 @@ void initialize_tty_struct(struct tty_struct *tty,
2823 tty->ops = driver->ops; 2867 tty->ops = driver->ops;
2824 tty->index = idx; 2868 tty->index = idx;
2825 tty_line_name(driver, idx, tty->name); 2869 tty_line_name(driver, idx, tty->name);
2870 tty->dev = tty_get_device(tty);
2826} 2871}
2827 2872
2828/** 2873/**
@@ -2980,6 +3025,7 @@ int tty_register_driver(struct tty_driver *driver)
2980 int i; 3025 int i;
2981 dev_t dev; 3026 dev_t dev;
2982 void **p = NULL; 3027 void **p = NULL;
3028 struct device *d;
2983 3029
2984 if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) { 3030 if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) {
2985 p = kzalloc(driver->num * 2 * sizeof(void *), GFP_KERNEL); 3031 p = kzalloc(driver->num * 2 * sizeof(void *), GFP_KERNEL);
@@ -3027,12 +3073,31 @@ int tty_register_driver(struct tty_driver *driver)
3027 mutex_unlock(&tty_mutex); 3073 mutex_unlock(&tty_mutex);
3028 3074
3029 if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) { 3075 if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
3030 for (i = 0; i < driver->num; i++) 3076 for (i = 0; i < driver->num; i++) {
3031 tty_register_device(driver, i, NULL); 3077 d = tty_register_device(driver, i, NULL);
3078 if (IS_ERR(d)) {
3079 error = PTR_ERR(d);
3080 goto err;
3081 }
3082 }
3032 } 3083 }
3033 proc_tty_register_driver(driver); 3084 proc_tty_register_driver(driver);
3034 driver->flags |= TTY_DRIVER_INSTALLED; 3085 driver->flags |= TTY_DRIVER_INSTALLED;
3035 return 0; 3086 return 0;
3087
3088err:
3089 for (i--; i >= 0; i--)
3090 tty_unregister_device(driver, i);
3091
3092 mutex_lock(&tty_mutex);
3093 list_del(&driver->tty_drivers);
3094 mutex_unlock(&tty_mutex);
3095
3096 unregister_chrdev_region(dev, driver->num);
3097 driver->ttys = NULL;
3098 driver->termios = NULL;
3099 kfree(p);
3100 return error;
3036} 3101}
3037 3102
3038EXPORT_SYMBOL(tty_register_driver); 3103EXPORT_SYMBOL(tty_register_driver);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
new file mode 100644
index 000000000000..c40c1612c8a7
--- /dev/null
+++ b/drivers/char/ttyprintk.c
@@ -0,0 +1,225 @@
1/*
2 * linux/drivers/char/ttyprintk.c
3 *
4 * Copyright (C) 2010 Samo Pogacnik
5 *
6 * This program is free software; you can redistribute it and/or modify
 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11/*
 12 * This pseudo device allows a user to make printk messages. It is possible
 13 * to store "console" messages inline with kernel messages for better analysis
 14 * of the boot process, for example.
15 */
16
17#include <linux/device.h>
18#include <linux/serial.h>
19#include <linux/tty.h>
20
21struct ttyprintk_port {
22 struct tty_port port;
23 struct mutex port_write_mutex;
24};
25
26static struct ttyprintk_port tpk_port;
27
28/*
29 * Our simple preformatting supports transparent output of (time-stamped)
30 * printk messages (also suitable for logging service):
31 * - any cr is replaced by nl
32 * - adds a ttyprintk source tag in front of each line
 33 * - too long message is fragmented, with '\'nl between fragments
 34 * - TPK_STR_SIZE isn't really the write_room limiting factor, because
35 * it is emptied on the fly during preformatting.
36 */
 37#define TPK_STR_SIZE 508 /* should be bigger than max expected line length */
38#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
39static const char *tpk_tag = "[U] "; /* U for User */
40static int tpk_curr;
41
42static int tpk_printk(const unsigned char *buf, int count)
43{
44 static char tmp[TPK_STR_SIZE + 4];
45 int i = tpk_curr;
46
47 if (buf == NULL) {
48 /* flush tmp[] */
49 if (tpk_curr > 0) {
50 /* non nl or cr terminated message - add nl */
51 tmp[tpk_curr + 0] = '\n';
52 tmp[tpk_curr + 1] = '\0';
53 printk(KERN_INFO "%s%s", tpk_tag, tmp);
54 tpk_curr = 0;
55 }
56 return i;
57 }
58
59 for (i = 0; i < count; i++) {
60 tmp[tpk_curr] = buf[i];
61 if (tpk_curr < TPK_STR_SIZE) {
62 switch (buf[i]) {
63 case '\r':
64 /* replace cr with nl */
65 tmp[tpk_curr + 0] = '\n';
66 tmp[tpk_curr + 1] = '\0';
67 printk(KERN_INFO "%s%s", tpk_tag, tmp);
68 tpk_curr = 0;
69 if (buf[i + 1] == '\n')
70 i++;
71 break;
72 case '\n':
73 tmp[tpk_curr + 1] = '\0';
74 printk(KERN_INFO "%s%s", tpk_tag, tmp);
75 tpk_curr = 0;
76 break;
77 default:
78 tpk_curr++;
79 }
80 } else {
81 /* end of tmp buffer reached: cut the message in two */
82 tmp[tpk_curr + 1] = '\\';
83 tmp[tpk_curr + 2] = '\n';
84 tmp[tpk_curr + 3] = '\0';
85 printk(KERN_INFO "%s%s", tpk_tag, tmp);
86 tpk_curr = 0;
87 }
88 }
89
90 return count;
91}
92
93/*
94 * TTY operations open function.
95 */
96static int tpk_open(struct tty_struct *tty, struct file *filp)
97{
98 tty->driver_data = &tpk_port;
99
100 return tty_port_open(&tpk_port.port, tty, filp);
101}
102
103/*
104 * TTY operations close function.
105 */
106static void tpk_close(struct tty_struct *tty, struct file *filp)
107{
108 struct ttyprintk_port *tpkp = tty->driver_data;
109
110 mutex_lock(&tpkp->port_write_mutex);
111 /* flush tpk_printk buffer */
112 tpk_printk(NULL, 0);
113 mutex_unlock(&tpkp->port_write_mutex);
114
115 tty_port_close(&tpkp->port, tty, filp);
116}
117
118/*
119 * TTY operations write function.
120 */
121static int tpk_write(struct tty_struct *tty,
122 const unsigned char *buf, int count)
123{
124 struct ttyprintk_port *tpkp = tty->driver_data;
125 int ret;
126
127
128 /* exclusive use of tpk_printk within this tty */
129 mutex_lock(&tpkp->port_write_mutex);
130 ret = tpk_printk(buf, count);
131 mutex_unlock(&tpkp->port_write_mutex);
132
133 return ret;
134}
135
136/*
137 * TTY operations write_room function.
138 */
139static int tpk_write_room(struct tty_struct *tty)
140{
141 return TPK_MAX_ROOM;
142}
143
144/*
145 * TTY operations ioctl function.
146 */
147static int tpk_ioctl(struct tty_struct *tty, struct file *file,
148 unsigned int cmd, unsigned long arg)
149{
150 struct ttyprintk_port *tpkp = tty->driver_data;
151
152 if (!tpkp)
153 return -EINVAL;
154
155 switch (cmd) {
156 /* Stop TIOCCONS */
157 case TIOCCONS:
158 return -EOPNOTSUPP;
159 default:
160 return -ENOIOCTLCMD;
161 }
162 return 0;
163}
164
165static const struct tty_operations ttyprintk_ops = {
166 .open = tpk_open,
167 .close = tpk_close,
168 .write = tpk_write,
169 .write_room = tpk_write_room,
170 .ioctl = tpk_ioctl,
171};
172
173struct tty_port_operations null_ops = { };
174
175static struct tty_driver *ttyprintk_driver;
176
177static int __init ttyprintk_init(void)
178{
179 int ret = -ENOMEM;
180 void *rp;
181
182 ttyprintk_driver = alloc_tty_driver(1);
183 if (!ttyprintk_driver)
184 return ret;
185
186 ttyprintk_driver->owner = THIS_MODULE;
187 ttyprintk_driver->driver_name = "ttyprintk";
188 ttyprintk_driver->name = "ttyprintk";
189 ttyprintk_driver->major = TTYAUX_MAJOR;
190 ttyprintk_driver->minor_start = 3;
191 ttyprintk_driver->num = 1;
192 ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
193 ttyprintk_driver->init_termios = tty_std_termios;
194 ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
195 ttyprintk_driver->flags = TTY_DRIVER_RESET_TERMIOS |
196 TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
197 tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
198
199 ret = tty_register_driver(ttyprintk_driver);
200 if (ret < 0) {
201 printk(KERN_ERR "Couldn't register ttyprintk driver\n");
202 goto error;
203 }
204
205 /* create our unnumbered device */
206 rp = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 3), NULL,
207 ttyprintk_driver->name);
208 if (IS_ERR(rp)) {
209 printk(KERN_ERR "Couldn't create ttyprintk device\n");
210 ret = PTR_ERR(rp);
211 goto error;
212 }
213
214 tty_port_init(&tpk_port.port);
215 tpk_port.port.ops = &null_ops;
216 mutex_init(&tpk_port.port_write_mutex);
217
218 return 0;
219
220error:
221 put_tty_driver(ttyprintk_driver);
222 ttyprintk_driver = NULL;
223 return ret;
224}
225module_init(ttyprintk_init);
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index bcce46c96b88..273ab44cc91d 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -35,6 +35,12 @@
35#include <linux/console.h> 35#include <linux/console.h>
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
38#include <linux/sched.h>
39#include <linux/fs.h>
40#include <linux/poll.h>
41#include <linux/signal.h>
42#include <linux/slab.h>
43#include <linux/notifier.h>
38 44
39#include <asm/uaccess.h> 45#include <asm/uaccess.h>
40#include <asm/byteorder.h> 46#include <asm/byteorder.h>
@@ -45,6 +51,86 @@
45#undef addr 51#undef addr
46#define HEADER_SIZE 4 52#define HEADER_SIZE 4
47 53
54struct vcs_poll_data {
55 struct notifier_block notifier;
56 unsigned int cons_num;
57 bool seen_last_update;
58 wait_queue_head_t waitq;
59 struct fasync_struct *fasync;
60};
61
62static int
63vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
64{
65 struct vt_notifier_param *param = _param;
66 struct vc_data *vc = param->vc;
67 struct vcs_poll_data *poll =
68 container_of(nb, struct vcs_poll_data, notifier);
69 int currcons = poll->cons_num;
70
71 if (code != VT_UPDATE)
72 return NOTIFY_DONE;
73
74 if (currcons == 0)
75 currcons = fg_console;
76 else
77 currcons--;
78 if (currcons != vc->vc_num)
79 return NOTIFY_DONE;
80
81 poll->seen_last_update = false;
82 wake_up_interruptible(&poll->waitq);
83 kill_fasync(&poll->fasync, SIGIO, POLL_IN);
84 return NOTIFY_OK;
85}
86
87static void
88vcs_poll_data_free(struct vcs_poll_data *poll)
89{
90 unregister_vt_notifier(&poll->notifier);
91 kfree(poll);
92}
93
94static struct vcs_poll_data *
95vcs_poll_data_get(struct file *file)
96{
97 struct vcs_poll_data *poll = file->private_data;
98
99 if (poll)
100 return poll;
101
102 poll = kzalloc(sizeof(*poll), GFP_KERNEL);
103 if (!poll)
104 return NULL;
105 poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
106 init_waitqueue_head(&poll->waitq);
107 poll->notifier.notifier_call = vcs_notifier;
108 if (register_vt_notifier(&poll->notifier) != 0) {
109 kfree(poll);
110 return NULL;
111 }
112
113 /*
114 * This code may be called either through ->poll() or ->fasync().
115 * If we have two threads using the same file descriptor, they could
116 * both enter this function, both notice that the structure hasn't
117 * been allocated yet and go ahead allocating it in parallel, but
118 * only one of them must survive and be shared otherwise we'd leak
119 * memory with a dangling notifier callback.
120 */
121 spin_lock(&file->f_lock);
122 if (!file->private_data) {
123 file->private_data = poll;
124 } else {
125 /* someone else raced ahead of us */
126 vcs_poll_data_free(poll);
127 poll = file->private_data;
128 }
129 spin_unlock(&file->f_lock);
130
131 return poll;
132}
133
48static int 134static int
49vcs_size(struct inode *inode) 135vcs_size(struct inode *inode)
50{ 136{
@@ -102,6 +188,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
102 struct inode *inode = file->f_path.dentry->d_inode; 188 struct inode *inode = file->f_path.dentry->d_inode;
103 unsigned int currcons = iminor(inode); 189 unsigned int currcons = iminor(inode);
104 struct vc_data *vc; 190 struct vc_data *vc;
191 struct vcs_poll_data *poll;
105 long pos; 192 long pos;
106 long viewed, attr, read; 193 long viewed, attr, read;
107 int col, maxcol; 194 int col, maxcol;
@@ -134,6 +221,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
134 ret = -EINVAL; 221 ret = -EINVAL;
135 if (pos < 0) 222 if (pos < 0)
136 goto unlock_out; 223 goto unlock_out;
224 poll = file->private_data;
225 if (count && poll)
226 poll->seen_last_update = true;
137 read = 0; 227 read = 0;
138 ret = 0; 228 ret = 0;
139 while (count) { 229 while (count) {
@@ -448,6 +538,8 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
448 } 538 }
449 *ppos += written; 539 *ppos += written;
450 ret = written; 540 ret = written;
541 if (written)
542 vcs_scr_updated(vc);
451 543
452unlock_out: 544unlock_out:
453 release_console_sem(); 545 release_console_sem();
@@ -457,6 +549,37 @@ unlock_out:
457 return ret; 549 return ret;
458} 550}
459 551
552static unsigned int
553vcs_poll(struct file *file, poll_table *wait)
554{
555 struct vcs_poll_data *poll = vcs_poll_data_get(file);
556 int ret = 0;
557
558 if (poll) {
559 poll_wait(file, &poll->waitq, wait);
560 if (!poll->seen_last_update)
561 ret = POLLIN | POLLRDNORM;
562 }
563 return ret;
564}
565
566static int
567vcs_fasync(int fd, struct file *file, int on)
568{
569 struct vcs_poll_data *poll = file->private_data;
570
571 if (!poll) {
572 /* don't allocate anything if all we want is disable fasync */
573 if (!on)
574 return 0;
575 poll = vcs_poll_data_get(file);
576 if (!poll)
577 return -ENOMEM;
578 }
579
580 return fasync_helper(fd, file, on, &poll->fasync);
581}
582
460static int 583static int
461vcs_open(struct inode *inode, struct file *filp) 584vcs_open(struct inode *inode, struct file *filp)
462{ 585{
@@ -470,11 +593,23 @@ vcs_open(struct inode *inode, struct file *filp)
470 return ret; 593 return ret;
471} 594}
472 595
596static int vcs_release(struct inode *inode, struct file *file)
597{
598 struct vcs_poll_data *poll = file->private_data;
599
600 if (poll)
601 vcs_poll_data_free(poll);
602 return 0;
603}
604
473static const struct file_operations vcs_fops = { 605static const struct file_operations vcs_fops = {
474 .llseek = vcs_lseek, 606 .llseek = vcs_lseek,
475 .read = vcs_read, 607 .read = vcs_read,
476 .write = vcs_write, 608 .write = vcs_write,
609 .poll = vcs_poll,
610 .fasync = vcs_fasync,
477 .open = vcs_open, 611 .open = vcs_open,
612 .release = vcs_release,
478}; 613};
479 614
480static struct class *vc_class; 615static struct class *vc_class;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 281aada7b4a1..a8ec48ed14d9 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -4182,6 +4182,11 @@ void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
4182 } 4182 }
4183} 4183}
4184 4184
4185void vcs_scr_updated(struct vc_data *vc)
4186{
4187 notify_update(vc);
4188}
4189
4185/* 4190/*
4186 * Visible symbols for modules 4191 * Visible symbols for modules
4187 */ 4192 */
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index a0dea3d1296e..3cb6632d4518 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1662,6 +1662,7 @@ static const struct hid_device_id hid_ignore_list[] = {
1662 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) }, 1662 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
1663 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, 1663 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
1664 { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, 1664 { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
1665 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_YUREX) },
1665 { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, 1666 { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
1666 { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, 1667 { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
1667 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, 1668 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c5ae5f1545bd..855aa8e355f4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -304,6 +304,9 @@
304#define USB_VENDOR_ID_IMATION 0x0718 304#define USB_VENDOR_ID_IMATION 0x0718
305#define USB_DEVICE_ID_DISC_STAKKA 0xd000 305#define USB_DEVICE_ID_DISC_STAKKA 0xd000
306 306
307#define USB_VENDOR_ID_JESS 0x0c45
308#define USB_DEVICE_ID_JESS_YUREX 0x1010
309
307#define USB_VENDOR_ID_KBGEAR 0x084e 310#define USB_VENDOR_ID_KBGEAR 0x084e
308#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001 311#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
309 312
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7433e07de30e..7c5b01ce51d2 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -516,10 +516,10 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
516 return ide_no_data_taskfile(drive, &cmd); 516 return ide_no_data_taskfile(drive, &cmd);
517} 517}
518 518
519static void update_ordered(ide_drive_t *drive) 519static void update_flush(ide_drive_t *drive)
520{ 520{
521 u16 *id = drive->id; 521 u16 *id = drive->id;
522 unsigned ordered = QUEUE_ORDERED_NONE; 522 unsigned flush = 0;
523 523
524 if (drive->dev_flags & IDE_DFLAG_WCACHE) { 524 if (drive->dev_flags & IDE_DFLAG_WCACHE) {
525 unsigned long long capacity; 525 unsigned long long capacity;
@@ -543,13 +543,12 @@ static void update_ordered(ide_drive_t *drive)
543 drive->name, barrier ? "" : "not "); 543 drive->name, barrier ? "" : "not ");
544 544
545 if (barrier) { 545 if (barrier) {
546 ordered = QUEUE_ORDERED_DRAIN_FLUSH; 546 flush = REQ_FLUSH;
547 blk_queue_prep_rq(drive->queue, idedisk_prep_fn); 547 blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
548 } 548 }
549 } else 549 }
550 ordered = QUEUE_ORDERED_DRAIN;
551 550
552 blk_queue_ordered(drive->queue, ordered); 551 blk_queue_flush(drive->queue, flush);
553} 552}
554 553
555ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE); 554ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
@@ -572,7 +571,7 @@ static int set_wcache(ide_drive_t *drive, int arg)
572 } 571 }
573 } 572 }
574 573
575 update_ordered(drive); 574 update_flush(drive);
576 575
577 return err; 576 return err;
578} 577}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index a381be814070..999dac054bcc 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -441,19 +441,6 @@ void do_ide_request(struct request_queue *q)
441 struct request *rq = NULL; 441 struct request *rq = NULL;
442 ide_startstop_t startstop; 442 ide_startstop_t startstop;
443 443
444 /*
445 * drive is doing pre-flush, ordered write, post-flush sequence. even
446 * though that is 3 requests, it must be seen as a single transaction.
447 * we must not preempt this drive until that is complete
448 */
449 if (blk_queue_flushing(q))
450 /*
451 * small race where queue could get replugged during
452 * the 3-request flush cycle, just yank the plug since
453 * we want it to finish asap
454 */
455 blk_remove_plug(q);
456
457 spin_unlock_irq(q->queue_lock); 444 spin_unlock_irq(q->queue_lock);
458 445
459 /* HLD do_request() callback might sleep, make sure it's okay */ 446 /* HLD do_request() callback might sleep, make sure it's okay */
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 6d345112bcb7..6e362de3f412 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -165,6 +165,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
165 serio->open = serport_serio_open; 165 serio->open = serport_serio_open;
166 serio->close = serport_serio_close; 166 serio->close = serport_serio_close;
167 serio->port_data = serport; 167 serio->port_data = serport;
168 serio->dev.parent = tty->dev;
168 169
169 serio_register_port(serport->serio); 170 serio_register_port(serport->serio);
170 printk(KERN_INFO "serio: Serial port %s\n", tty_name(tty, name)); 171 printk(KERN_INFO "serio: Serial port %s\n", tty_name(tty, name));
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index ed9c55506797..f332b60eff6b 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -15,7 +15,6 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/io.h> 16#include <asm/io.h>
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/workqueue.h>
19#include <linux/pci.h> 18#include <linux/pci.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21#include <linux/list.h> 20#include <linux/list.h>
@@ -546,7 +545,6 @@ void diva_os_remove_soft_isr(diva_os_soft_isr_t * psoft_isr)
546 void *mem; 545 void *mem;
547 546
548 tasklet_kill(&pdpc->divas_task); 547 tasklet_kill(&pdpc->divas_task);
549 flush_scheduled_work();
550 mem = psoft_isr->object; 548 mem = psoft_isr->object;
551 psoft_isr->object = NULL; 549 psoft_isr->object = NULL;
552 diva_os_free(0, mem); 550 diva_os_free(0, mem);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 368e8e98f705..d5b0e4c0e702 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1278,7 +1278,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
1278 struct dm_crypt_io *io; 1278 struct dm_crypt_io *io;
1279 struct crypt_config *cc; 1279 struct crypt_config *cc;
1280 1280
1281 if (unlikely(bio_empty_barrier(bio))) { 1281 if (bio->bi_rw & REQ_FLUSH) {
1282 cc = ti->private; 1282 cc = ti->private;
1283 bio->bi_bdev = cc->dev->bdev; 1283 bio->bi_bdev = cc->dev->bdev;
1284 return DM_MAPIO_REMAPPED; 1284 return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0590c75b0ab6..136d4f71a116 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -31,7 +31,6 @@ struct dm_io_client {
31 */ 31 */
32struct io { 32struct io {
33 unsigned long error_bits; 33 unsigned long error_bits;
34 unsigned long eopnotsupp_bits;
35 atomic_t count; 34 atomic_t count;
36 struct task_struct *sleeper; 35 struct task_struct *sleeper;
37 struct dm_io_client *client; 36 struct dm_io_client *client;
@@ -130,11 +129,8 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
130 *---------------------------------------------------------------*/ 129 *---------------------------------------------------------------*/
131static void dec_count(struct io *io, unsigned int region, int error) 130static void dec_count(struct io *io, unsigned int region, int error)
132{ 131{
133 if (error) { 132 if (error)
134 set_bit(region, &io->error_bits); 133 set_bit(region, &io->error_bits);
135 if (error == -EOPNOTSUPP)
136 set_bit(region, &io->eopnotsupp_bits);
137 }
138 134
139 if (atomic_dec_and_test(&io->count)) { 135 if (atomic_dec_and_test(&io->count)) {
140 if (io->sleeper) 136 if (io->sleeper)
@@ -310,8 +306,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
310 sector_t remaining = where->count; 306 sector_t remaining = where->count;
311 307
312 /* 308 /*
313 * where->count may be zero if rw holds a write barrier and we 309 * where->count may be zero if rw holds a flush and we need to
314 * need to send a zero-sized barrier. 310 * send a zero-sized flush.
315 */ 311 */
316 do { 312 do {
317 /* 313 /*
@@ -364,7 +360,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
364 */ 360 */
365 for (i = 0; i < num_regions; i++) { 361 for (i = 0; i < num_regions; i++) {
366 *dp = old_pages; 362 *dp = old_pages;
367 if (where[i].count || (rw & REQ_HARDBARRIER)) 363 if (where[i].count || (rw & REQ_FLUSH))
368 do_region(rw, i, where + i, dp, io); 364 do_region(rw, i, where + i, dp, io);
369 } 365 }
370 366
@@ -393,9 +389,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
393 return -EIO; 389 return -EIO;
394 } 390 }
395 391
396retry:
397 io->error_bits = 0; 392 io->error_bits = 0;
398 io->eopnotsupp_bits = 0;
399 atomic_set(&io->count, 1); /* see dispatch_io() */ 393 atomic_set(&io->count, 1); /* see dispatch_io() */
400 io->sleeper = current; 394 io->sleeper = current;
401 io->client = client; 395 io->client = client;
@@ -412,11 +406,6 @@ retry:
412 } 406 }
413 set_current_state(TASK_RUNNING); 407 set_current_state(TASK_RUNNING);
414 408
415 if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
416 rw &= ~REQ_HARDBARRIER;
417 goto retry;
418 }
419
420 if (error_bits) 409 if (error_bits)
421 *error_bits = io->error_bits; 410 *error_bits = io->error_bits;
422 411
@@ -437,7 +426,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
437 426
438 io = mempool_alloc(client->pool, GFP_NOIO); 427 io = mempool_alloc(client->pool, GFP_NOIO);
439 io->error_bits = 0; 428 io->error_bits = 0;
440 io->eopnotsupp_bits = 0;
441 atomic_set(&io->count, 1); /* see dispatch_io() */ 429 atomic_set(&io->count, 1); /* see dispatch_io() */
442 io->sleeper = NULL; 430 io->sleeper = NULL;
443 io->client = client; 431 io->client = client;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 5a08be0222db..33420e68d153 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -300,7 +300,7 @@ static int flush_header(struct log_c *lc)
300 .count = 0, 300 .count = 0,
301 }; 301 };
302 302
303 lc->io_req.bi_rw = WRITE_BARRIER; 303 lc->io_req.bi_rw = WRITE_FLUSH;
304 304
305 return dm_io(&lc->io_req, 1, &null_location, NULL); 305 return dm_io(&lc->io_req, 1, &null_location, NULL);
306} 306}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 7c081bcbc3cf..19a59b041c27 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -259,7 +259,7 @@ static int mirror_flush(struct dm_target *ti)
259 struct dm_io_region io[ms->nr_mirrors]; 259 struct dm_io_region io[ms->nr_mirrors];
260 struct mirror *m; 260 struct mirror *m;
261 struct dm_io_request io_req = { 261 struct dm_io_request io_req = {
262 .bi_rw = WRITE_BARRIER, 262 .bi_rw = WRITE_FLUSH,
263 .mem.type = DM_IO_KMEM, 263 .mem.type = DM_IO_KMEM,
264 .mem.ptr.bvec = NULL, 264 .mem.ptr.bvec = NULL,
265 .client = ms->io_client, 265 .client = ms->io_client,
@@ -629,7 +629,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
629 struct dm_io_region io[ms->nr_mirrors], *dest = io; 629 struct dm_io_region io[ms->nr_mirrors], *dest = io;
630 struct mirror *m; 630 struct mirror *m;
631 struct dm_io_request io_req = { 631 struct dm_io_request io_req = {
632 .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER), 632 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
633 .mem.type = DM_IO_BVEC, 633 .mem.type = DM_IO_BVEC,
634 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 634 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
635 .notify.fn = write_callback, 635 .notify.fn = write_callback,
@@ -670,7 +670,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
670 bio_list_init(&requeue); 670 bio_list_init(&requeue);
671 671
672 while ((bio = bio_list_pop(writes))) { 672 while ((bio = bio_list_pop(writes))) {
673 if (unlikely(bio_empty_barrier(bio))) { 673 if (bio->bi_rw & REQ_FLUSH) {
674 bio_list_add(&sync, bio); 674 bio_list_add(&sync, bio);
675 continue; 675 continue;
676 } 676 }
@@ -1203,7 +1203,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1203 * We need to dec pending if this was a write. 1203 * We need to dec pending if this was a write.
1204 */ 1204 */
1205 if (rw == WRITE) { 1205 if (rw == WRITE) {
1206 if (likely(!bio_empty_barrier(bio))) 1206 if (!(bio->bi_rw & REQ_FLUSH))
1207 dm_rh_dec(ms->rh, map_context->ll); 1207 dm_rh_dec(ms->rh, map_context->ll);
1208 return error; 1208 return error;
1209 } 1209 }
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index bd5c58b28868..dad011aed0c9 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -81,9 +81,9 @@ struct dm_region_hash {
81 struct list_head failed_recovered_regions; 81 struct list_head failed_recovered_regions;
82 82
83 /* 83 /*
84 * If there was a barrier failure no regions can be marked clean. 84 * If there was a flush failure no regions can be marked clean.
85 */ 85 */
86 int barrier_failure; 86 int flush_failure;
87 87
88 void *context; 88 void *context;
89 sector_t target_begin; 89 sector_t target_begin;
@@ -217,7 +217,7 @@ struct dm_region_hash *dm_region_hash_create(
217 INIT_LIST_HEAD(&rh->quiesced_regions); 217 INIT_LIST_HEAD(&rh->quiesced_regions);
218 INIT_LIST_HEAD(&rh->recovered_regions); 218 INIT_LIST_HEAD(&rh->recovered_regions);
219 INIT_LIST_HEAD(&rh->failed_recovered_regions); 219 INIT_LIST_HEAD(&rh->failed_recovered_regions);
220 rh->barrier_failure = 0; 220 rh->flush_failure = 0;
221 221
222 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, 222 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
223 sizeof(struct dm_region)); 223 sizeof(struct dm_region));
@@ -399,8 +399,8 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
399 region_t region = dm_rh_bio_to_region(rh, bio); 399 region_t region = dm_rh_bio_to_region(rh, bio);
400 int recovering = 0; 400 int recovering = 0;
401 401
402 if (bio_empty_barrier(bio)) { 402 if (bio->bi_rw & REQ_FLUSH) {
403 rh->barrier_failure = 1; 403 rh->flush_failure = 1;
404 return; 404 return;
405 } 405 }
406 406
@@ -524,7 +524,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
524 struct bio *bio; 524 struct bio *bio;
525 525
526 for (bio = bios->head; bio; bio = bio->bi_next) { 526 for (bio = bios->head; bio; bio = bio->bi_next) {
527 if (bio_empty_barrier(bio)) 527 if (bio->bi_rw & REQ_FLUSH)
528 continue; 528 continue;
529 rh_inc(rh, dm_rh_bio_to_region(rh, bio)); 529 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
530 } 530 }
@@ -555,9 +555,9 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
555 */ 555 */
556 556
557 /* do nothing for DM_RH_NOSYNC */ 557 /* do nothing for DM_RH_NOSYNC */
558 if (unlikely(rh->barrier_failure)) { 558 if (unlikely(rh->flush_failure)) {
559 /* 559 /*
560 * If a write barrier failed some time ago, we 560 * If a write flush failed some time ago, we
561 * don't know whether or not this write made it 561 * don't know whether or not this write made it
562 * to the disk, so we must resync the device. 562 * to the disk, so we must resync the device.
563 */ 563 */
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index cc2bdb83f9ad..0b61792a2780 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -687,7 +687,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
687 /* 687 /*
688 * Commit exceptions to disk. 688 * Commit exceptions to disk.
689 */ 689 */
690 if (ps->valid && area_io(ps, WRITE_BARRIER)) 690 if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
691 ps->valid = 0; 691 ps->valid = 0;
692 692
693 /* 693 /*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5974d3094d97..53cf79d8bcbc 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -706,8 +706,6 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
706 return 0; 706 return 0;
707} 707}
708 708
709#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
710
711/* 709/*
712 * Return a minimum chunk size of all snapshots that have the specified origin. 710 * Return a minimum chunk size of all snapshots that have the specified origin.
713 * Return zero if the origin has no snapshots. 711 * Return zero if the origin has no snapshots.
@@ -1587,7 +1585,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1587 chunk_t chunk; 1585 chunk_t chunk;
1588 struct dm_snap_pending_exception *pe = NULL; 1586 struct dm_snap_pending_exception *pe = NULL;
1589 1587
1590 if (unlikely(bio_empty_barrier(bio))) { 1588 if (bio->bi_rw & REQ_FLUSH) {
1591 bio->bi_bdev = s->cow->bdev; 1589 bio->bi_bdev = s->cow->bdev;
1592 return DM_MAPIO_REMAPPED; 1590 return DM_MAPIO_REMAPPED;
1593 } 1591 }
@@ -1691,7 +1689,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1691 int r = DM_MAPIO_REMAPPED; 1689 int r = DM_MAPIO_REMAPPED;
1692 chunk_t chunk; 1690 chunk_t chunk;
1693 1691
1694 if (unlikely(bio_empty_barrier(bio))) { 1692 if (bio->bi_rw & REQ_FLUSH) {
1695 if (!map_context->target_request_nr) 1693 if (!map_context->target_request_nr)
1696 bio->bi_bdev = s->origin->bdev; 1694 bio->bi_bdev = s->origin->bdev;
1697 else 1695 else
@@ -2135,7 +2133,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
2135 struct dm_dev *dev = ti->private; 2133 struct dm_dev *dev = ti->private;
2136 bio->bi_bdev = dev->bdev; 2134 bio->bi_bdev = dev->bdev;
2137 2135
2138 if (unlikely(bio_empty_barrier(bio))) 2136 if (bio->bi_rw & REQ_FLUSH)
2139 return DM_MAPIO_REMAPPED; 2137 return DM_MAPIO_REMAPPED;
2140 2138
2141 /* Only tell snapshots if this is a write */ 2139 /* Only tell snapshots if this is a write */
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c297f6da91ea..f0371b4c4fbf 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -271,7 +271,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
271 uint32_t stripe; 271 uint32_t stripe;
272 unsigned target_request_nr; 272 unsigned target_request_nr;
273 273
274 if (unlikely(bio_empty_barrier(bio))) { 274 if (bio->bi_rw & REQ_FLUSH) {
275 target_request_nr = map_context->target_request_nr; 275 target_request_nr = map_context->target_request_nr;
276 BUG_ON(target_request_nr >= sc->stripes); 276 BUG_ON(target_request_nr >= sc->stripes);
277 bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; 277 bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9fc07d7a4b9..90267f8d64ee 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -486,11 +486,6 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
486 return 0; 486 return 0;
487} 487}
488 488
489/*
490 * Returns the minimum that is _not_ zero, unless both are zero.
491 */
492#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
493
494int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 489int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
495 sector_t start, sector_t len, void *data) 490 sector_t start, sector_t len, void *data)
496{ 491{
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7967eca5a2d5..7cb1352f7e7a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -110,7 +110,6 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
110#define DMF_FREEING 3 110#define DMF_FREEING 3
111#define DMF_DELETING 4 111#define DMF_DELETING 4
112#define DMF_NOFLUSH_SUSPENDING 5 112#define DMF_NOFLUSH_SUSPENDING 5
113#define DMF_QUEUE_IO_TO_THREAD 6
114 113
115/* 114/*
116 * Work processed by per-device workqueue. 115 * Work processed by per-device workqueue.
@@ -144,24 +143,9 @@ struct mapped_device {
144 spinlock_t deferred_lock; 143 spinlock_t deferred_lock;
145 144
146 /* 145 /*
147 * An error from the barrier request currently being processed. 146 * Processing queue (flush)
148 */
149 int barrier_error;
150
151 /*
152 * Protect barrier_error from concurrent endio processing
153 * in request-based dm.
154 */
155 spinlock_t barrier_error_lock;
156
157 /*
158 * Processing queue (flush/barriers)
159 */ 147 */
160 struct workqueue_struct *wq; 148 struct workqueue_struct *wq;
161 struct work_struct barrier_work;
162
163 /* A pointer to the currently processing pre/post flush request */
164 struct request *flush_request;
165 149
166 /* 150 /*
167 * The current mapping. 151 * The current mapping.
@@ -200,8 +184,8 @@ struct mapped_device {
200 /* sysfs handle */ 184 /* sysfs handle */
201 struct kobject kobj; 185 struct kobject kobj;
202 186
203 /* zero-length barrier that will be cloned and submitted to targets */ 187 /* zero-length flush that will be cloned and submitted to targets */
204 struct bio barrier_bio; 188 struct bio flush_bio;
205}; 189};
206 190
207/* 191/*
@@ -512,7 +496,7 @@ static void end_io_acct(struct dm_io *io)
512 496
513 /* 497 /*
514 * After this is decremented the bio must not be touched if it is 498 * After this is decremented the bio must not be touched if it is
515 * a barrier. 499 * a flush.
516 */ 500 */
517 dm_disk(md)->part0.in_flight[rw] = pending = 501 dm_disk(md)->part0.in_flight[rw] = pending =
518 atomic_dec_return(&md->pending[rw]); 502 atomic_dec_return(&md->pending[rw]);
@@ -528,16 +512,12 @@ static void end_io_acct(struct dm_io *io)
528 */ 512 */
529static void queue_io(struct mapped_device *md, struct bio *bio) 513static void queue_io(struct mapped_device *md, struct bio *bio)
530{ 514{
531 down_write(&md->io_lock); 515 unsigned long flags;
532 516
533 spin_lock_irq(&md->deferred_lock); 517 spin_lock_irqsave(&md->deferred_lock, flags);
534 bio_list_add(&md->deferred, bio); 518 bio_list_add(&md->deferred, bio);
535 spin_unlock_irq(&md->deferred_lock); 519 spin_unlock_irqrestore(&md->deferred_lock, flags);
536 520 queue_work(md->wq, &md->work);
537 if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
538 queue_work(md->wq, &md->work);
539
540 up_write(&md->io_lock);
541} 521}
542 522
543/* 523/*
@@ -625,11 +605,9 @@ static void dec_pending(struct dm_io *io, int error)
625 * Target requested pushing back the I/O. 605 * Target requested pushing back the I/O.
626 */ 606 */
627 spin_lock_irqsave(&md->deferred_lock, flags); 607 spin_lock_irqsave(&md->deferred_lock, flags);
628 if (__noflush_suspending(md)) { 608 if (__noflush_suspending(md))
629 if (!(io->bio->bi_rw & REQ_HARDBARRIER)) 609 bio_list_add_head(&md->deferred, io->bio);
630 bio_list_add_head(&md->deferred, 610 else
631 io->bio);
632 } else
633 /* noflush suspend was interrupted. */ 611 /* noflush suspend was interrupted. */
634 io->error = -EIO; 612 io->error = -EIO;
635 spin_unlock_irqrestore(&md->deferred_lock, flags); 613 spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -637,32 +615,23 @@ static void dec_pending(struct dm_io *io, int error)
637 615
638 io_error = io->error; 616 io_error = io->error;
639 bio = io->bio; 617 bio = io->bio;
618 end_io_acct(io);
619 free_io(md, io);
620
621 if (io_error == DM_ENDIO_REQUEUE)
622 return;
640 623
641 if (bio->bi_rw & REQ_HARDBARRIER) { 624 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
642 /* 625 /*
643 * There can be just one barrier request so we use 626 * Preflush done for flush with data, reissue
644 * a per-device variable for error reporting. 627 * without REQ_FLUSH.
645 * Note that you can't touch the bio after end_io_acct
646 *
647 * We ignore -EOPNOTSUPP for empty flush reported by
648 * underlying devices. We assume that if the device
649 * doesn't support empty barriers, it doesn't need
650 * cache flushing commands.
651 */ 628 */
652 if (!md->barrier_error && 629 bio->bi_rw &= ~REQ_FLUSH;
653 !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP)) 630 queue_io(md, bio);
654 md->barrier_error = io_error;
655 end_io_acct(io);
656 free_io(md, io);
657 } else { 631 } else {
658 end_io_acct(io); 632 /* done with normal IO or empty flush */
659 free_io(md, io); 633 trace_block_bio_complete(md->queue, bio);
660 634 bio_endio(bio, io_error);
661 if (io_error != DM_ENDIO_REQUEUE) {
662 trace_block_bio_complete(md->queue, bio);
663
664 bio_endio(bio, io_error);
665 }
666 } 635 }
667 } 636 }
668} 637}
@@ -755,23 +724,6 @@ static void end_clone_bio(struct bio *clone, int error)
755 blk_update_request(tio->orig, 0, nr_bytes); 724 blk_update_request(tio->orig, 0, nr_bytes);
756} 725}
757 726
758static void store_barrier_error(struct mapped_device *md, int error)
759{
760 unsigned long flags;
761
762 spin_lock_irqsave(&md->barrier_error_lock, flags);
763 /*
764 * Basically, the first error is taken, but:
765 * -EOPNOTSUPP supersedes any I/O error.
766 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
767 */
768 if (!md->barrier_error || error == -EOPNOTSUPP ||
769 (md->barrier_error != -EOPNOTSUPP &&
770 error == DM_ENDIO_REQUEUE))
771 md->barrier_error = error;
772 spin_unlock_irqrestore(&md->barrier_error_lock, flags);
773}
774
775/* 727/*
776 * Don't touch any member of the md after calling this function because 728 * Don't touch any member of the md after calling this function because
777 * the md may be freed in dm_put() at the end of this function. 729 * the md may be freed in dm_put() at the end of this function.
@@ -809,13 +761,11 @@ static void free_rq_clone(struct request *clone)
809static void dm_end_request(struct request *clone, int error) 761static void dm_end_request(struct request *clone, int error)
810{ 762{
811 int rw = rq_data_dir(clone); 763 int rw = rq_data_dir(clone);
812 int run_queue = 1;
813 bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
814 struct dm_rq_target_io *tio = clone->end_io_data; 764 struct dm_rq_target_io *tio = clone->end_io_data;
815 struct mapped_device *md = tio->md; 765 struct mapped_device *md = tio->md;
816 struct request *rq = tio->orig; 766 struct request *rq = tio->orig;
817 767
818 if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) { 768 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
819 rq->errors = clone->errors; 769 rq->errors = clone->errors;
820 rq->resid_len = clone->resid_len; 770 rq->resid_len = clone->resid_len;
821 771
@@ -829,15 +779,8 @@ static void dm_end_request(struct request *clone, int error)
829 } 779 }
830 780
831 free_rq_clone(clone); 781 free_rq_clone(clone);
832 782 blk_end_request_all(rq, error);
833 if (unlikely(is_barrier)) { 783 rq_completed(md, rw, true);
834 if (unlikely(error))
835 store_barrier_error(md, error);
836 run_queue = 0;
837 } else
838 blk_end_request_all(rq, error);
839
840 rq_completed(md, rw, run_queue);
841} 784}
842 785
843static void dm_unprep_request(struct request *rq) 786static void dm_unprep_request(struct request *rq)
@@ -862,16 +805,6 @@ void dm_requeue_unmapped_request(struct request *clone)
862 struct request_queue *q = rq->q; 805 struct request_queue *q = rq->q;
863 unsigned long flags; 806 unsigned long flags;
864 807
865 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
866 /*
867 * Barrier clones share an original request.
868 * Leave it to dm_end_request(), which handles this special
869 * case.
870 */
871 dm_end_request(clone, DM_ENDIO_REQUEUE);
872 return;
873 }
874
875 dm_unprep_request(rq); 808 dm_unprep_request(rq);
876 809
877 spin_lock_irqsave(q->queue_lock, flags); 810 spin_lock_irqsave(q->queue_lock, flags);
@@ -961,19 +894,6 @@ static void dm_complete_request(struct request *clone, int error)
961 struct dm_rq_target_io *tio = clone->end_io_data; 894 struct dm_rq_target_io *tio = clone->end_io_data;
962 struct request *rq = tio->orig; 895 struct request *rq = tio->orig;
963 896
964 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
965 /*
966 * Barrier clones share an original request. So can't use
967 * softirq_done with the original.
968 * Pass the clone to dm_done() directly in this special case.
969 * It is safe (even if clone->q->queue_lock is held here)
970 * because there is no I/O dispatching during the completion
971 * of barrier clone.
972 */
973 dm_done(clone, error, true);
974 return;
975 }
976
977 tio->error = error; 897 tio->error = error;
978 rq->completion_data = clone; 898 rq->completion_data = clone;
979 blk_complete_request(rq); 899 blk_complete_request(rq);
@@ -990,17 +910,6 @@ void dm_kill_unmapped_request(struct request *clone, int error)
990 struct dm_rq_target_io *tio = clone->end_io_data; 910 struct dm_rq_target_io *tio = clone->end_io_data;
991 struct request *rq = tio->orig; 911 struct request *rq = tio->orig;
992 912
993 if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
994 /*
995 * Barrier clones share an original request.
996 * Leave it to dm_end_request(), which handles this special
997 * case.
998 */
999 BUG_ON(error > 0);
1000 dm_end_request(clone, error);
1001 return;
1002 }
1003
1004 rq->cmd_flags |= REQ_FAILED; 913 rq->cmd_flags |= REQ_FAILED;
1005 dm_complete_request(clone, error); 914 dm_complete_request(clone, error);
1006} 915}
@@ -1119,7 +1028,7 @@ static void dm_bio_destructor(struct bio *bio)
1119} 1028}
1120 1029
1121/* 1030/*
1122 * Creates a little bio that is just does part of a bvec. 1031 * Creates a little bio that just does part of a bvec.
1123 */ 1032 */
1124static struct bio *split_bvec(struct bio *bio, sector_t sector, 1033static struct bio *split_bvec(struct bio *bio, sector_t sector,
1125 unsigned short idx, unsigned int offset, 1034 unsigned short idx, unsigned int offset,
@@ -1134,7 +1043,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
1134 1043
1135 clone->bi_sector = sector; 1044 clone->bi_sector = sector;
1136 clone->bi_bdev = bio->bi_bdev; 1045 clone->bi_bdev = bio->bi_bdev;
1137 clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER; 1046 clone->bi_rw = bio->bi_rw;
1138 clone->bi_vcnt = 1; 1047 clone->bi_vcnt = 1;
1139 clone->bi_size = to_bytes(len); 1048 clone->bi_size = to_bytes(len);
1140 clone->bi_io_vec->bv_offset = offset; 1049 clone->bi_io_vec->bv_offset = offset;
@@ -1161,7 +1070,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
1161 1070
1162 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); 1071 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1163 __bio_clone(clone, bio); 1072 __bio_clone(clone, bio);
1164 clone->bi_rw &= ~REQ_HARDBARRIER;
1165 clone->bi_destructor = dm_bio_destructor; 1073 clone->bi_destructor = dm_bio_destructor;
1166 clone->bi_sector = sector; 1074 clone->bi_sector = sector;
1167 clone->bi_idx = idx; 1075 clone->bi_idx = idx;
@@ -1225,16 +1133,15 @@ static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1225 __issue_target_request(ci, ti, request_nr, len); 1133 __issue_target_request(ci, ti, request_nr, len);
1226} 1134}
1227 1135
1228static int __clone_and_map_empty_barrier(struct clone_info *ci) 1136static int __clone_and_map_empty_flush(struct clone_info *ci)
1229{ 1137{
1230 unsigned target_nr = 0; 1138 unsigned target_nr = 0;
1231 struct dm_target *ti; 1139 struct dm_target *ti;
1232 1140
1141 BUG_ON(bio_has_data(ci->bio));
1233 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1142 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1234 __issue_target_requests(ci, ti, ti->num_flush_requests, 0); 1143 __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
1235 1144
1236 ci->sector_count = 0;
1237
1238 return 0; 1145 return 0;
1239} 1146}
1240 1147
@@ -1289,9 +1196,6 @@ static int __clone_and_map(struct clone_info *ci)
1289 sector_t len = 0, max; 1196 sector_t len = 0, max;
1290 struct dm_target_io *tio; 1197 struct dm_target_io *tio;
1291 1198
1292 if (unlikely(bio_empty_barrier(bio)))
1293 return __clone_and_map_empty_barrier(ci);
1294
1295 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1199 if (unlikely(bio->bi_rw & REQ_DISCARD))
1296 return __clone_and_map_discard(ci); 1200 return __clone_and_map_discard(ci);
1297 1201
@@ -1383,16 +1287,11 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1383 1287
1384 ci.map = dm_get_live_table(md); 1288 ci.map = dm_get_live_table(md);
1385 if (unlikely(!ci.map)) { 1289 if (unlikely(!ci.map)) {
1386 if (!(bio->bi_rw & REQ_HARDBARRIER)) 1290 bio_io_error(bio);
1387 bio_io_error(bio);
1388 else
1389 if (!md->barrier_error)
1390 md->barrier_error = -EIO;
1391 return; 1291 return;
1392 } 1292 }
1393 1293
1394 ci.md = md; 1294 ci.md = md;
1395 ci.bio = bio;
1396 ci.io = alloc_io(md); 1295 ci.io = alloc_io(md);
1397 ci.io->error = 0; 1296 ci.io->error = 0;
1398 atomic_set(&ci.io->io_count, 1); 1297 atomic_set(&ci.io->io_count, 1);
@@ -1400,14 +1299,20 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1400 ci.io->md = md; 1299 ci.io->md = md;
1401 spin_lock_init(&ci.io->endio_lock); 1300 spin_lock_init(&ci.io->endio_lock);
1402 ci.sector = bio->bi_sector; 1301 ci.sector = bio->bi_sector;
1403 ci.sector_count = bio_sectors(bio);
1404 if (unlikely(bio_empty_barrier(bio)))
1405 ci.sector_count = 1;
1406 ci.idx = bio->bi_idx; 1302 ci.idx = bio->bi_idx;
1407 1303
1408 start_io_acct(ci.io); 1304 start_io_acct(ci.io);
1409 while (ci.sector_count && !error) 1305 if (bio->bi_rw & REQ_FLUSH) {
1410 error = __clone_and_map(&ci); 1306 ci.bio = &ci.md->flush_bio;
1307 ci.sector_count = 0;
1308 error = __clone_and_map_empty_flush(&ci);
1309 /* dec_pending submits any data associated with flush */
1310 } else {
1311 ci.bio = bio;
1312 ci.sector_count = bio_sectors(bio);
1313 while (ci.sector_count && !error)
1314 error = __clone_and_map(&ci);
1315 }
1411 1316
1412 /* drop the extra reference count */ 1317 /* drop the extra reference count */
1413 dec_pending(ci.io, error); 1318 dec_pending(ci.io, error);
@@ -1491,22 +1396,14 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
1491 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1396 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1492 part_stat_unlock(); 1397 part_stat_unlock();
1493 1398
1494 /* 1399 /* if we're suspended, we have to queue this io for later */
1495 * If we're suspended or the thread is processing barriers 1400 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1496 * we have to queue this io for later.
1497 */
1498 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
1499 unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
1500 up_read(&md->io_lock); 1401 up_read(&md->io_lock);
1501 1402
1502 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && 1403 if (bio_rw(bio) != READA)
1503 bio_rw(bio) == READA) { 1404 queue_io(md, bio);
1405 else
1504 bio_io_error(bio); 1406 bio_io_error(bio);
1505 return 0;
1506 }
1507
1508 queue_io(md, bio);
1509
1510 return 0; 1407 return 0;
1511 } 1408 }
1512 1409
@@ -1537,14 +1434,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
1537 return _dm_request(q, bio); 1434 return _dm_request(q, bio);
1538} 1435}
1539 1436
1540static bool dm_rq_is_flush_request(struct request *rq)
1541{
1542 if (rq->cmd_flags & REQ_FLUSH)
1543 return true;
1544 else
1545 return false;
1546}
1547
1548void dm_dispatch_request(struct request *rq) 1437void dm_dispatch_request(struct request *rq)
1549{ 1438{
1550 int r; 1439 int r;
@@ -1592,22 +1481,15 @@ static int setup_clone(struct request *clone, struct request *rq,
1592{ 1481{
1593 int r; 1482 int r;
1594 1483
1595 if (dm_rq_is_flush_request(rq)) { 1484 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1596 blk_rq_init(NULL, clone); 1485 dm_rq_bio_constructor, tio);
1597 clone->cmd_type = REQ_TYPE_FS; 1486 if (r)
1598 clone->cmd_flags |= (REQ_HARDBARRIER | WRITE); 1487 return r;
1599 } else {
1600 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1601 dm_rq_bio_constructor, tio);
1602 if (r)
1603 return r;
1604
1605 clone->cmd = rq->cmd;
1606 clone->cmd_len = rq->cmd_len;
1607 clone->sense = rq->sense;
1608 clone->buffer = rq->buffer;
1609 }
1610 1488
1489 clone->cmd = rq->cmd;
1490 clone->cmd_len = rq->cmd_len;
1491 clone->sense = rq->sense;
1492 clone->buffer = rq->buffer;
1611 clone->end_io = end_clone_request; 1493 clone->end_io = end_clone_request;
1612 clone->end_io_data = tio; 1494 clone->end_io_data = tio;
1613 1495
@@ -1648,9 +1530,6 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1648 struct mapped_device *md = q->queuedata; 1530 struct mapped_device *md = q->queuedata;
1649 struct request *clone; 1531 struct request *clone;
1650 1532
1651 if (unlikely(dm_rq_is_flush_request(rq)))
1652 return BLKPREP_OK;
1653
1654 if (unlikely(rq->special)) { 1533 if (unlikely(rq->special)) {
1655 DMWARN("Already has something in rq->special."); 1534 DMWARN("Already has something in rq->special.");
1656 return BLKPREP_KILL; 1535 return BLKPREP_KILL;
@@ -1727,6 +1606,7 @@ static void dm_request_fn(struct request_queue *q)
1727 struct dm_table *map = dm_get_live_table(md); 1606 struct dm_table *map = dm_get_live_table(md);
1728 struct dm_target *ti; 1607 struct dm_target *ti;
1729 struct request *rq, *clone; 1608 struct request *rq, *clone;
1609 sector_t pos;
1730 1610
1731 /* 1611 /*
1732 * For suspend, check blk_queue_stopped() and increment 1612 * For suspend, check blk_queue_stopped() and increment
@@ -1739,15 +1619,14 @@ static void dm_request_fn(struct request_queue *q)
1739 if (!rq) 1619 if (!rq)
1740 goto plug_and_out; 1620 goto plug_and_out;
1741 1621
1742 if (unlikely(dm_rq_is_flush_request(rq))) { 1622 /* always use block 0 to find the target for flushes for now */
1743 BUG_ON(md->flush_request); 1623 pos = 0;
1744 md->flush_request = rq; 1624 if (!(rq->cmd_flags & REQ_FLUSH))
1745 blk_start_request(rq); 1625 pos = blk_rq_pos(rq);
1746 queue_work(md->wq, &md->barrier_work); 1626
1747 goto out; 1627 ti = dm_table_find_target(map, pos);
1748 } 1628 BUG_ON(!dm_target_is_valid(ti));
1749 1629
1750 ti = dm_table_find_target(map, blk_rq_pos(rq));
1751 if (ti->type->busy && ti->type->busy(ti)) 1630 if (ti->type->busy && ti->type->busy(ti))
1752 goto plug_and_out; 1631 goto plug_and_out;
1753 1632
@@ -1918,7 +1797,6 @@ out:
1918static const struct block_device_operations dm_blk_dops; 1797static const struct block_device_operations dm_blk_dops;
1919 1798
1920static void dm_wq_work(struct work_struct *work); 1799static void dm_wq_work(struct work_struct *work);
1921static void dm_rq_barrier_work(struct work_struct *work);
1922 1800
1923static void dm_init_md_queue(struct mapped_device *md) 1801static void dm_init_md_queue(struct mapped_device *md)
1924{ 1802{
@@ -1940,6 +1818,7 @@ static void dm_init_md_queue(struct mapped_device *md)
1940 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1818 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1941 md->queue->unplug_fn = dm_unplug_all; 1819 md->queue->unplug_fn = dm_unplug_all;
1942 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1820 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1821 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
1943} 1822}
1944 1823
1945/* 1824/*
@@ -1972,7 +1851,6 @@ static struct mapped_device *alloc_dev(int minor)
1972 mutex_init(&md->suspend_lock); 1851 mutex_init(&md->suspend_lock);
1973 mutex_init(&md->type_lock); 1852 mutex_init(&md->type_lock);
1974 spin_lock_init(&md->deferred_lock); 1853 spin_lock_init(&md->deferred_lock);
1975 spin_lock_init(&md->barrier_error_lock);
1976 rwlock_init(&md->map_lock); 1854 rwlock_init(&md->map_lock);
1977 atomic_set(&md->holders, 1); 1855 atomic_set(&md->holders, 1);
1978 atomic_set(&md->open_count, 0); 1856 atomic_set(&md->open_count, 0);
@@ -1995,7 +1873,6 @@ static struct mapped_device *alloc_dev(int minor)
1995 atomic_set(&md->pending[1], 0); 1873 atomic_set(&md->pending[1], 0);
1996 init_waitqueue_head(&md->wait); 1874 init_waitqueue_head(&md->wait);
1997 INIT_WORK(&md->work, dm_wq_work); 1875 INIT_WORK(&md->work, dm_wq_work);
1998 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
1999 init_waitqueue_head(&md->eventq); 1876 init_waitqueue_head(&md->eventq);
2000 1877
2001 md->disk->major = _major; 1878 md->disk->major = _major;
@@ -2015,6 +1892,10 @@ static struct mapped_device *alloc_dev(int minor)
2015 if (!md->bdev) 1892 if (!md->bdev)
2016 goto bad_bdev; 1893 goto bad_bdev;
2017 1894
1895 bio_init(&md->flush_bio);
1896 md->flush_bio.bi_bdev = md->bdev;
1897 md->flush_bio.bi_rw = WRITE_FLUSH;
1898
2018 /* Populate the mapping, nobody knows we exist yet */ 1899 /* Populate the mapping, nobody knows we exist yet */
2019 spin_lock(&_minor_lock); 1900 spin_lock(&_minor_lock);
2020 old_md = idr_replace(&_minor_idr, md, minor); 1901 old_md = idr_replace(&_minor_idr, md, minor);
@@ -2245,7 +2126,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
2245 blk_queue_softirq_done(md->queue, dm_softirq_done); 2126 blk_queue_softirq_done(md->queue, dm_softirq_done);
2246 blk_queue_prep_rq(md->queue, dm_prep_fn); 2127 blk_queue_prep_rq(md->queue, dm_prep_fn);
2247 blk_queue_lld_busy(md->queue, dm_lld_busy); 2128 blk_queue_lld_busy(md->queue, dm_lld_busy);
2248 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
2249 2129
2250 elv_register_queue(md->queue); 2130 elv_register_queue(md->queue);
2251 2131
@@ -2406,43 +2286,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2406 return r; 2286 return r;
2407} 2287}
2408 2288
2409static void dm_flush(struct mapped_device *md)
2410{
2411 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2412
2413 bio_init(&md->barrier_bio);
2414 md->barrier_bio.bi_bdev = md->bdev;
2415 md->barrier_bio.bi_rw = WRITE_BARRIER;
2416 __split_and_process_bio(md, &md->barrier_bio);
2417
2418 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2419}
2420
2421static void process_barrier(struct mapped_device *md, struct bio *bio)
2422{
2423 md->barrier_error = 0;
2424
2425 dm_flush(md);
2426
2427 if (!bio_empty_barrier(bio)) {
2428 __split_and_process_bio(md, bio);
2429 /*
2430 * If the request isn't supported, don't waste time with
2431 * the second flush.
2432 */
2433 if (md->barrier_error != -EOPNOTSUPP)
2434 dm_flush(md);
2435 }
2436
2437 if (md->barrier_error != DM_ENDIO_REQUEUE)
2438 bio_endio(bio, md->barrier_error);
2439 else {
2440 spin_lock_irq(&md->deferred_lock);
2441 bio_list_add_head(&md->deferred, bio);
2442 spin_unlock_irq(&md->deferred_lock);
2443 }
2444}
2445
2446/* 2289/*
2447 * Process the deferred bios 2290 * Process the deferred bios
2448 */ 2291 */
@@ -2452,33 +2295,27 @@ static void dm_wq_work(struct work_struct *work)
2452 work); 2295 work);
2453 struct bio *c; 2296 struct bio *c;
2454 2297
2455 down_write(&md->io_lock); 2298 down_read(&md->io_lock);
2456 2299
2457 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2300 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2458 spin_lock_irq(&md->deferred_lock); 2301 spin_lock_irq(&md->deferred_lock);
2459 c = bio_list_pop(&md->deferred); 2302 c = bio_list_pop(&md->deferred);
2460 spin_unlock_irq(&md->deferred_lock); 2303 spin_unlock_irq(&md->deferred_lock);
2461 2304
2462 if (!c) { 2305 if (!c)
2463 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2464 break; 2306 break;
2465 }
2466 2307
2467 up_write(&md->io_lock); 2308 up_read(&md->io_lock);
2468 2309
2469 if (dm_request_based(md)) 2310 if (dm_request_based(md))
2470 generic_make_request(c); 2311 generic_make_request(c);
2471 else { 2312 else
2472 if (c->bi_rw & REQ_HARDBARRIER) 2313 __split_and_process_bio(md, c);
2473 process_barrier(md, c);
2474 else
2475 __split_and_process_bio(md, c);
2476 }
2477 2314
2478 down_write(&md->io_lock); 2315 down_read(&md->io_lock);
2479 } 2316 }
2480 2317
2481 up_write(&md->io_lock); 2318 up_read(&md->io_lock);
2482} 2319}
2483 2320
2484static void dm_queue_flush(struct mapped_device *md) 2321static void dm_queue_flush(struct mapped_device *md)
@@ -2488,73 +2325,6 @@ static void dm_queue_flush(struct mapped_device *md)
2488 queue_work(md->wq, &md->work); 2325 queue_work(md->wq, &md->work);
2489} 2326}
2490 2327
2491static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
2492{
2493 struct dm_rq_target_io *tio = clone->end_io_data;
2494
2495 tio->info.target_request_nr = request_nr;
2496}
2497
2498/* Issue barrier requests to targets and wait for their completion. */
2499static int dm_rq_barrier(struct mapped_device *md)
2500{
2501 int i, j;
2502 struct dm_table *map = dm_get_live_table(md);
2503 unsigned num_targets = dm_table_get_num_targets(map);
2504 struct dm_target *ti;
2505 struct request *clone;
2506
2507 md->barrier_error = 0;
2508
2509 for (i = 0; i < num_targets; i++) {
2510 ti = dm_table_get_target(map, i);
2511 for (j = 0; j < ti->num_flush_requests; j++) {
2512 clone = clone_rq(md->flush_request, md, GFP_NOIO);
2513 dm_rq_set_target_request_nr(clone, j);
2514 atomic_inc(&md->pending[rq_data_dir(clone)]);
2515 map_request(ti, clone, md);
2516 }
2517 }
2518
2519 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2520 dm_table_put(map);
2521
2522 return md->barrier_error;
2523}
2524
2525static void dm_rq_barrier_work(struct work_struct *work)
2526{
2527 int error;
2528 struct mapped_device *md = container_of(work, struct mapped_device,
2529 barrier_work);
2530 struct request_queue *q = md->queue;
2531 struct request *rq;
2532 unsigned long flags;
2533
2534 /*
2535 * Hold the md reference here and leave it at the last part so that
2536 * the md can't be deleted by device opener when the barrier request
2537 * completes.
2538 */
2539 dm_get(md);
2540
2541 error = dm_rq_barrier(md);
2542
2543 rq = md->flush_request;
2544 md->flush_request = NULL;
2545
2546 if (error == DM_ENDIO_REQUEUE) {
2547 spin_lock_irqsave(q->queue_lock, flags);
2548 blk_requeue_request(q, rq);
2549 spin_unlock_irqrestore(q->queue_lock, flags);
2550 } else
2551 blk_end_request_all(rq, error);
2552
2553 blk_run_queue(q);
2554
2555 dm_put(md);
2556}
2557
2558/* 2328/*
2559 * Swap in a new table, returning the old one for the caller to destroy. 2329 * Swap in a new table, returning the old one for the caller to destroy.
2560 */ 2330 */
@@ -2677,23 +2447,17 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2677 * 2447 *
2678 * To get all processes out of __split_and_process_bio in dm_request, 2448 * To get all processes out of __split_and_process_bio in dm_request,
2679 * we take the write lock. To prevent any process from reentering 2449 * we take the write lock. To prevent any process from reentering
2680 * __split_and_process_bio from dm_request, we set 2450 * __split_and_process_bio from dm_request and quiesce the thread
2681 * DMF_QUEUE_IO_TO_THREAD. 2451 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
2682 * 2452 * flush_workqueue(md->wq).
2683 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2684 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2685 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2686 * further calls to __split_and_process_bio from dm_wq_work.
2687 */ 2453 */
2688 down_write(&md->io_lock); 2454 down_write(&md->io_lock);
2689 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2455 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2690 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2691 up_write(&md->io_lock); 2456 up_write(&md->io_lock);
2692 2457
2693 /* 2458 /*
2694 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which 2459 * Stop md->queue before flushing md->wq in case request-based
2695 * can be kicked until md->queue is stopped. So stop md->queue before 2460 * dm defers requests to md->wq from md->queue.
2696 * flushing md->wq.
2697 */ 2461 */
2698 if (dm_request_based(md)) 2462 if (dm_request_based(md))
2699 stop_queue(md->queue); 2463 stop_queue(md->queue);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index ba19060bcf3f..8a2f767f26d8 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,8 +294,8 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
294 dev_info_t *tmp_dev; 294 dev_info_t *tmp_dev;
295 sector_t start_sector; 295 sector_t start_sector;
296 296
297 if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 297 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
298 md_barrier_request(mddev, bio); 298 md_flush_request(mddev, bio);
299 return 0; 299 return 0;
300 } 300 }
301 301
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dbf822df942a..225815197a3d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -227,12 +227,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
227 return 0; 227 return 0;
228 } 228 }
229 rcu_read_lock(); 229 rcu_read_lock();
230 if (mddev->suspended || mddev->barrier) { 230 if (mddev->suspended) {
231 DEFINE_WAIT(__wait); 231 DEFINE_WAIT(__wait);
232 for (;;) { 232 for (;;) {
233 prepare_to_wait(&mddev->sb_wait, &__wait, 233 prepare_to_wait(&mddev->sb_wait, &__wait,
234 TASK_UNINTERRUPTIBLE); 234 TASK_UNINTERRUPTIBLE);
235 if (!mddev->suspended && !mddev->barrier) 235 if (!mddev->suspended)
236 break; 236 break;
237 rcu_read_unlock(); 237 rcu_read_unlock();
238 schedule(); 238 schedule();
@@ -283,40 +283,29 @@ EXPORT_SYMBOL_GPL(mddev_resume);
283 283
284int mddev_congested(mddev_t *mddev, int bits) 284int mddev_congested(mddev_t *mddev, int bits)
285{ 285{
286 if (mddev->barrier)
287 return 1;
288 return mddev->suspended; 286 return mddev->suspended;
289} 287}
290EXPORT_SYMBOL(mddev_congested); 288EXPORT_SYMBOL(mddev_congested);
291 289
292/* 290/*
293 * Generic barrier handling for md 291 * Generic flush handling for md
294 */ 292 */
295 293
296#define POST_REQUEST_BARRIER ((void*)1) 294static void md_end_flush(struct bio *bio, int err)
297
298static void md_end_barrier(struct bio *bio, int err)
299{ 295{
300 mdk_rdev_t *rdev = bio->bi_private; 296 mdk_rdev_t *rdev = bio->bi_private;
301 mddev_t *mddev = rdev->mddev; 297 mddev_t *mddev = rdev->mddev;
302 if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
303 set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
304 298
305 rdev_dec_pending(rdev, mddev); 299 rdev_dec_pending(rdev, mddev);
306 300
307 if (atomic_dec_and_test(&mddev->flush_pending)) { 301 if (atomic_dec_and_test(&mddev->flush_pending)) {
308 if (mddev->barrier == POST_REQUEST_BARRIER) { 302 /* The pre-request flush has finished */
309 /* This was a post-request barrier */ 303 schedule_work(&mddev->flush_work);
310 mddev->barrier = NULL;
311 wake_up(&mddev->sb_wait);
312 } else
313 /* The pre-request barrier has finished */
314 schedule_work(&mddev->barrier_work);
315 } 304 }
316 bio_put(bio); 305 bio_put(bio);
317} 306}
318 307
319static void submit_barriers(mddev_t *mddev) 308static void submit_flushes(mddev_t *mddev)
320{ 309{
321 mdk_rdev_t *rdev; 310 mdk_rdev_t *rdev;
322 311
@@ -333,60 +322,56 @@ static void submit_barriers(mddev_t *mddev)
333 atomic_inc(&rdev->nr_pending); 322 atomic_inc(&rdev->nr_pending);
334 rcu_read_unlock(); 323 rcu_read_unlock();
335 bi = bio_alloc(GFP_KERNEL, 0); 324 bi = bio_alloc(GFP_KERNEL, 0);
336 bi->bi_end_io = md_end_barrier; 325 bi->bi_end_io = md_end_flush;
337 bi->bi_private = rdev; 326 bi->bi_private = rdev;
338 bi->bi_bdev = rdev->bdev; 327 bi->bi_bdev = rdev->bdev;
339 atomic_inc(&mddev->flush_pending); 328 atomic_inc(&mddev->flush_pending);
340 submit_bio(WRITE_BARRIER, bi); 329 submit_bio(WRITE_FLUSH, bi);
341 rcu_read_lock(); 330 rcu_read_lock();
342 rdev_dec_pending(rdev, mddev); 331 rdev_dec_pending(rdev, mddev);
343 } 332 }
344 rcu_read_unlock(); 333 rcu_read_unlock();
345} 334}
346 335
347static void md_submit_barrier(struct work_struct *ws) 336static void md_submit_flush_data(struct work_struct *ws)
348{ 337{
349 mddev_t *mddev = container_of(ws, mddev_t, barrier_work); 338 mddev_t *mddev = container_of(ws, mddev_t, flush_work);
350 struct bio *bio = mddev->barrier; 339 struct bio *bio = mddev->flush_bio;
351 340
352 atomic_set(&mddev->flush_pending, 1); 341 atomic_set(&mddev->flush_pending, 1);
353 342
354 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) 343 if (bio->bi_size == 0)
355 bio_endio(bio, -EOPNOTSUPP);
356 else if (bio->bi_size == 0)
357 /* an empty barrier - all done */ 344 /* an empty barrier - all done */
358 bio_endio(bio, 0); 345 bio_endio(bio, 0);
359 else { 346 else {
360 bio->bi_rw &= ~REQ_HARDBARRIER; 347 bio->bi_rw &= ~REQ_FLUSH;
361 if (mddev->pers->make_request(mddev, bio)) 348 if (mddev->pers->make_request(mddev, bio))
362 generic_make_request(bio); 349 generic_make_request(bio);
363 mddev->barrier = POST_REQUEST_BARRIER;
364 submit_barriers(mddev);
365 } 350 }
366 if (atomic_dec_and_test(&mddev->flush_pending)) { 351 if (atomic_dec_and_test(&mddev->flush_pending)) {
367 mddev->barrier = NULL; 352 mddev->flush_bio = NULL;
368 wake_up(&mddev->sb_wait); 353 wake_up(&mddev->sb_wait);
369 } 354 }
370} 355}
371 356
372void md_barrier_request(mddev_t *mddev, struct bio *bio) 357void md_flush_request(mddev_t *mddev, struct bio *bio)
373{ 358{
374 spin_lock_irq(&mddev->write_lock); 359 spin_lock_irq(&mddev->write_lock);
375 wait_event_lock_irq(mddev->sb_wait, 360 wait_event_lock_irq(mddev->sb_wait,
376 !mddev->barrier, 361 !mddev->flush_bio,
377 mddev->write_lock, /*nothing*/); 362 mddev->write_lock, /*nothing*/);
378 mddev->barrier = bio; 363 mddev->flush_bio = bio;
379 spin_unlock_irq(&mddev->write_lock); 364 spin_unlock_irq(&mddev->write_lock);
380 365
381 atomic_set(&mddev->flush_pending, 1); 366 atomic_set(&mddev->flush_pending, 1);
382 INIT_WORK(&mddev->barrier_work, md_submit_barrier); 367 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
383 368
384 submit_barriers(mddev); 369 submit_flushes(mddev);
385 370
386 if (atomic_dec_and_test(&mddev->flush_pending)) 371 if (atomic_dec_and_test(&mddev->flush_pending))
387 schedule_work(&mddev->barrier_work); 372 schedule_work(&mddev->flush_work);
388} 373}
389EXPORT_SYMBOL(md_barrier_request); 374EXPORT_SYMBOL(md_flush_request);
390 375
391/* Support for plugging. 376/* Support for plugging.
392 * This mirrors the plugging support in request_queue, but does not 377 * This mirrors the plugging support in request_queue, but does not
@@ -697,31 +682,6 @@ static void super_written(struct bio *bio, int error)
697 bio_put(bio); 682 bio_put(bio);
698} 683}
699 684
700static void super_written_barrier(struct bio *bio, int error)
701{
702 struct bio *bio2 = bio->bi_private;
703 mdk_rdev_t *rdev = bio2->bi_private;
704 mddev_t *mddev = rdev->mddev;
705
706 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
707 error == -EOPNOTSUPP) {
708 unsigned long flags;
709 /* barriers don't appear to be supported :-( */
710 set_bit(BarriersNotsupp, &rdev->flags);
711 mddev->barriers_work = 0;
712 spin_lock_irqsave(&mddev->write_lock, flags);
713 bio2->bi_next = mddev->biolist;
714 mddev->biolist = bio2;
715 spin_unlock_irqrestore(&mddev->write_lock, flags);
716 wake_up(&mddev->sb_wait);
717 bio_put(bio);
718 } else {
719 bio_put(bio2);
720 bio->bi_private = rdev;
721 super_written(bio, error);
722 }
723}
724
725void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 685void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
726 sector_t sector, int size, struct page *page) 686 sector_t sector, int size, struct page *page)
727{ 687{
@@ -730,51 +690,28 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
730 * and decrement it on completion, waking up sb_wait 690 * and decrement it on completion, waking up sb_wait
731 * if zero is reached. 691 * if zero is reached.
732 * If an error occurred, call md_error 692 * If an error occurred, call md_error
733 *
734 * As we might need to resubmit the request if REQ_HARDBARRIER
735 * causes ENOTSUPP, we allocate a spare bio...
736 */ 693 */
737 struct bio *bio = bio_alloc(GFP_NOIO, 1); 694 struct bio *bio = bio_alloc(GFP_NOIO, 1);
738 int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
739 695
740 bio->bi_bdev = rdev->bdev; 696 bio->bi_bdev = rdev->bdev;
741 bio->bi_sector = sector; 697 bio->bi_sector = sector;
742 bio_add_page(bio, page, size, 0); 698 bio_add_page(bio, page, size, 0);
743 bio->bi_private = rdev; 699 bio->bi_private = rdev;
744 bio->bi_end_io = super_written; 700 bio->bi_end_io = super_written;
745 bio->bi_rw = rw;
746 701
747 atomic_inc(&mddev->pending_writes); 702 atomic_inc(&mddev->pending_writes);
748 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 703 submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
749 struct bio *rbio; 704 bio);
750 rw |= REQ_HARDBARRIER;
751 rbio = bio_clone(bio, GFP_NOIO);
752 rbio->bi_private = bio;
753 rbio->bi_end_io = super_written_barrier;
754 submit_bio(rw, rbio);
755 } else
756 submit_bio(rw, bio);
757} 705}
758 706
759void md_super_wait(mddev_t *mddev) 707void md_super_wait(mddev_t *mddev)
760{ 708{
761 /* wait for all superblock writes that were scheduled to complete. 709 /* wait for all superblock writes that were scheduled to complete */
762 * if any had to be retried (due to BARRIER problems), retry them
763 */
764 DEFINE_WAIT(wq); 710 DEFINE_WAIT(wq);
765 for(;;) { 711 for(;;) {
766 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 712 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
767 if (atomic_read(&mddev->pending_writes)==0) 713 if (atomic_read(&mddev->pending_writes)==0)
768 break; 714 break;
769 while (mddev->biolist) {
770 struct bio *bio;
771 spin_lock_irq(&mddev->write_lock);
772 bio = mddev->biolist;
773 mddev->biolist = bio->bi_next ;
774 bio->bi_next = NULL;
775 spin_unlock_irq(&mddev->write_lock);
776 submit_bio(bio->bi_rw, bio);
777 }
778 schedule(); 715 schedule();
779 } 716 }
780 finish_wait(&mddev->sb_wait, &wq); 717 finish_wait(&mddev->sb_wait, &wq);
@@ -1071,7 +1008,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1071 clear_bit(Faulty, &rdev->flags); 1008 clear_bit(Faulty, &rdev->flags);
1072 clear_bit(In_sync, &rdev->flags); 1009 clear_bit(In_sync, &rdev->flags);
1073 clear_bit(WriteMostly, &rdev->flags); 1010 clear_bit(WriteMostly, &rdev->flags);
1074 clear_bit(BarriersNotsupp, &rdev->flags);
1075 1011
1076 if (mddev->raid_disks == 0) { 1012 if (mddev->raid_disks == 0) {
1077 mddev->major_version = 0; 1013 mddev->major_version = 0;
@@ -1486,7 +1422,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1486 clear_bit(Faulty, &rdev->flags); 1422 clear_bit(Faulty, &rdev->flags);
1487 clear_bit(In_sync, &rdev->flags); 1423 clear_bit(In_sync, &rdev->flags);
1488 clear_bit(WriteMostly, &rdev->flags); 1424 clear_bit(WriteMostly, &rdev->flags);
1489 clear_bit(BarriersNotsupp, &rdev->flags);
1490 1425
1491 if (mddev->raid_disks == 0) { 1426 if (mddev->raid_disks == 0) {
1492 mddev->major_version = 1; 1427 mddev->major_version = 1;
@@ -4505,7 +4440,6 @@ int md_run(mddev_t *mddev)
4505 /* may be over-ridden by personality */ 4440 /* may be over-ridden by personality */
4506 mddev->resync_max_sectors = mddev->dev_sectors; 4441 mddev->resync_max_sectors = mddev->dev_sectors;
4507 4442
4508 mddev->barriers_work = 1;
4509 mddev->ok_start_degraded = start_dirty_degraded; 4443 mddev->ok_start_degraded = start_dirty_degraded;
4510 4444
4511 if (start_readonly && mddev->ro == 0) 4445 if (start_readonly && mddev->ro == 0)
@@ -4684,7 +4618,6 @@ static void md_clean(mddev_t *mddev)
4684 mddev->recovery = 0; 4618 mddev->recovery = 0;
4685 mddev->in_sync = 0; 4619 mddev->in_sync = 0;
4686 mddev->degraded = 0; 4620 mddev->degraded = 0;
4687 mddev->barriers_work = 0;
4688 mddev->safemode = 0; 4621 mddev->safemode = 0;
4689 mddev->bitmap_info.offset = 0; 4622 mddev->bitmap_info.offset = 0;
4690 mddev->bitmap_info.default_offset = 0; 4623 mddev->bitmap_info.default_offset = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 3931299788dc..112a2c32db0c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -87,7 +87,6 @@ struct mdk_rdev_s
87#define Faulty 1 /* device is known to have a fault */ 87#define Faulty 1 /* device is known to have a fault */
88#define In_sync 2 /* device is in_sync with rest of array */ 88#define In_sync 2 /* device is in_sync with rest of array */
89#define WriteMostly 4 /* Avoid reading if at all possible */ 89#define WriteMostly 4 /* Avoid reading if at all possible */
90#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */
91#define AllReserved 6 /* If whole device is reserved for 90#define AllReserved 6 /* If whole device is reserved for
92 * one array */ 91 * one array */
93#define AutoDetected 7 /* added by auto-detect */ 92#define AutoDetected 7 /* added by auto-detect */
@@ -273,13 +272,6 @@ struct mddev_s
273 int degraded; /* whether md should consider 272 int degraded; /* whether md should consider
274 * adding a spare 273 * adding a spare
275 */ 274 */
276 int barriers_work; /* initialised to true, cleared as soon
277 * as a barrier request to slave
278 * fails. Only supported
279 */
280 struct bio *biolist; /* bios that need to be retried
281 * because REQ_HARDBARRIER is not supported
282 */
283 275
284 atomic_t recovery_active; /* blocks scheduled, but not written */ 276 atomic_t recovery_active; /* blocks scheduled, but not written */
285 wait_queue_head_t recovery_wait; 277 wait_queue_head_t recovery_wait;
@@ -339,16 +331,13 @@ struct mddev_s
339 struct attribute_group *to_remove; 331 struct attribute_group *to_remove;
340 struct plug_handle *plug; /* if used by personality */ 332 struct plug_handle *plug; /* if used by personality */
341 333
342 /* Generic barrier handling. 334 /* Generic flush handling.
343 * If there is a pending barrier request, all other 335 * The last to finish preflush schedules a worker to submit
344 * writes are blocked while the devices are flushed. 336 * the rest of the request (without the REQ_FLUSH flag).
345 * The last to finish a flush schedules a worker to
346 * submit the barrier request (without the barrier flag),
347 * then submit more flush requests.
348 */ 337 */
349 struct bio *barrier; 338 struct bio *flush_bio;
350 atomic_t flush_pending; 339 atomic_t flush_pending;
351 struct work_struct barrier_work; 340 struct work_struct flush_work;
352 struct work_struct event_work; /* used by dm to report failure event */ 341 struct work_struct event_work; /* used by dm to report failure event */
353}; 342};
354 343
@@ -502,7 +491,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
502extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); 491extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
503 492
504extern int mddev_congested(mddev_t *mddev, int bits); 493extern int mddev_congested(mddev_t *mddev, int bits);
505extern void md_barrier_request(mddev_t *mddev, struct bio *bio); 494extern void md_flush_request(mddev_t *mddev, struct bio *bio);
506extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 495extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
507 sector_t sector, int size, struct page *page); 496 sector_t sector, int size, struct page *page);
508extern void md_super_wait(mddev_t *mddev); 497extern void md_super_wait(mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0307d217e7a4..6d7ddf32ef2e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -142,8 +142,8 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
142 struct multipath_bh * mp_bh; 142 struct multipath_bh * mp_bh;
143 struct multipath_info *multipath; 143 struct multipath_info *multipath;
144 144
145 if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 145 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
146 md_barrier_request(mddev, bio); 146 md_flush_request(mddev, bio);
147 return 0; 147 return 0;
148 } 148 }
149 149
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6f7af46d623c..a39f4c355e55 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,8 +483,8 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
483 struct strip_zone *zone; 483 struct strip_zone *zone;
484 mdk_rdev_t *tmp_dev; 484 mdk_rdev_t *tmp_dev;
485 485
486 if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 486 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
487 md_barrier_request(mddev, bio); 487 md_flush_request(mddev, bio);
488 return 0; 488 return 0;
489 } 489 }
490 490
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0b830bbe1d8b..378a25894c57 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -319,83 +319,74 @@ static void raid1_end_write_request(struct bio *bio, int error)
319 if (r1_bio->bios[mirror] == bio) 319 if (r1_bio->bios[mirror] == bio)
320 break; 320 break;
321 321
322 if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { 322 /*
323 set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); 323 * 'one mirror IO has finished' event handler:
324 set_bit(R1BIO_BarrierRetry, &r1_bio->state); 324 */
325 r1_bio->mddev->barriers_work = 0; 325 r1_bio->bios[mirror] = NULL;
326 /* Don't rdev_dec_pending in this branch - keep it for the retry */ 326 to_put = bio;
327 } else { 327 if (!uptodate) {
328 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
329 /* an I/O failed, we can't clear the bitmap */
330 set_bit(R1BIO_Degraded, &r1_bio->state);
331 } else
328 /* 332 /*
329 * this branch is our 'one mirror IO has finished' event handler: 333 * Set R1BIO_Uptodate in our master bio, so that we
334 * will return a good error code for to the higher
335 * levels even if IO on some other mirrored buffer
336 * fails.
337 *
338 * The 'master' represents the composite IO operation
339 * to user-side. So if something waits for IO, then it
340 * will wait for the 'master' bio.
330 */ 341 */
331 r1_bio->bios[mirror] = NULL; 342 set_bit(R1BIO_Uptodate, &r1_bio->state);
332 to_put = bio; 343
333 if (!uptodate) { 344 update_head_pos(mirror, r1_bio);
334 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 345
335 /* an I/O failed, we can't clear the bitmap */ 346 if (behind) {
336 set_bit(R1BIO_Degraded, &r1_bio->state); 347 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
337 } else 348 atomic_dec(&r1_bio->behind_remaining);
338 /* 349
339 * Set R1BIO_Uptodate in our master bio, so that 350 /*
340 * we will return a good error code for to the higher 351 * In behind mode, we ACK the master bio once the I/O
341 * levels even if IO on some other mirrored buffer fails. 352 * has safely reached all non-writemostly
342 * 353 * disks. Setting the Returned bit ensures that this
343 * The 'master' represents the composite IO operation to 354 * gets done only once -- we don't ever want to return
344 * user-side. So if something waits for IO, then it will 355 * -EIO here, instead we'll wait
345 * wait for the 'master' bio. 356 */
346 */ 357 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
347 set_bit(R1BIO_Uptodate, &r1_bio->state); 358 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
348 359 /* Maybe we can return now */
349 update_head_pos(mirror, r1_bio); 360 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
350 361 struct bio *mbio = r1_bio->master_bio;
351 if (behind) { 362 PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
352 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) 363 (unsigned long long) mbio->bi_sector,
353 atomic_dec(&r1_bio->behind_remaining); 364 (unsigned long long) mbio->bi_sector +
354 365 (mbio->bi_size >> 9) - 1);
355 /* In behind mode, we ACK the master bio once the I/O has safely 366 bio_endio(mbio, 0);
356 * reached all non-writemostly disks. Setting the Returned bit
357 * ensures that this gets done only once -- we don't ever want to
358 * return -EIO here, instead we'll wait */
359
360 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
361 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
362 /* Maybe we can return now */
363 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
364 struct bio *mbio = r1_bio->master_bio;
365 PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
366 (unsigned long long) mbio->bi_sector,
367 (unsigned long long) mbio->bi_sector +
368 (mbio->bi_size >> 9) - 1);
369 bio_endio(mbio, 0);
370 }
371 } 367 }
372 } 368 }
373 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
374 } 369 }
370 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
371
375 /* 372 /*
376 *
377 * Let's see if all mirrored write operations have finished 373 * Let's see if all mirrored write operations have finished
378 * already. 374 * already.
379 */ 375 */
380 if (atomic_dec_and_test(&r1_bio->remaining)) { 376 if (atomic_dec_and_test(&r1_bio->remaining)) {
381 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) 377 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
382 reschedule_retry(r1_bio); 378 /* free extra copy of the data pages */
383 else { 379 int i = bio->bi_vcnt;
384 /* it really is the end of this request */ 380 while (i--)
385 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 381 safe_put_page(bio->bi_io_vec[i].bv_page);
386 /* free extra copy of the data pages */
387 int i = bio->bi_vcnt;
388 while (i--)
389 safe_put_page(bio->bi_io_vec[i].bv_page);
390 }
391 /* clear the bitmap if all writes complete successfully */
392 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
393 r1_bio->sectors,
394 !test_bit(R1BIO_Degraded, &r1_bio->state),
395 behind);
396 md_write_end(r1_bio->mddev);
397 raid_end_bio_io(r1_bio);
398 } 382 }
383 /* clear the bitmap if all writes complete successfully */
384 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
385 r1_bio->sectors,
386 !test_bit(R1BIO_Degraded, &r1_bio->state),
387 behind);
388 md_write_end(r1_bio->mddev);
389 raid_end_bio_io(r1_bio);
399 } 390 }
400 391
401 if (to_put) 392 if (to_put)
@@ -788,16 +779,13 @@ static int make_request(mddev_t *mddev, struct bio * bio)
788 struct page **behind_pages = NULL; 779 struct page **behind_pages = NULL;
789 const int rw = bio_data_dir(bio); 780 const int rw = bio_data_dir(bio);
790 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 781 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
791 unsigned long do_barriers; 782 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
792 mdk_rdev_t *blocked_rdev; 783 mdk_rdev_t *blocked_rdev;
793 784
794 /* 785 /*
795 * Register the new request and wait if the reconstruction 786 * Register the new request and wait if the reconstruction
796 * thread has put up a bar for new requests. 787 * thread has put up a bar for new requests.
797 * Continue immediately if no resync is active currently. 788 * Continue immediately if no resync is active currently.
798 * We test barriers_work *after* md_write_start as md_write_start
799 * may cause the first superblock write, and that will check out
800 * if barriers work.
801 */ 789 */
802 790
803 md_write_start(mddev, bio); /* wait on superblock update early */ 791 md_write_start(mddev, bio); /* wait on superblock update early */
@@ -821,13 +809,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
821 } 809 }
822 finish_wait(&conf->wait_barrier, &w); 810 finish_wait(&conf->wait_barrier, &w);
823 } 811 }
824 if (unlikely(!mddev->barriers_work &&
825 (bio->bi_rw & REQ_HARDBARRIER))) {
826 if (rw == WRITE)
827 md_write_end(mddev);
828 bio_endio(bio, -EOPNOTSUPP);
829 return 0;
830 }
831 812
832 wait_barrier(conf); 813 wait_barrier(conf);
833 814
@@ -959,10 +940,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
959 atomic_set(&r1_bio->remaining, 0); 940 atomic_set(&r1_bio->remaining, 0);
960 atomic_set(&r1_bio->behind_remaining, 0); 941 atomic_set(&r1_bio->behind_remaining, 0);
961 942
962 do_barriers = bio->bi_rw & REQ_HARDBARRIER;
963 if (do_barriers)
964 set_bit(R1BIO_Barrier, &r1_bio->state);
965
966 bio_list_init(&bl); 943 bio_list_init(&bl);
967 for (i = 0; i < disks; i++) { 944 for (i = 0; i < disks; i++) {
968 struct bio *mbio; 945 struct bio *mbio;
@@ -975,7 +952,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
975 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 952 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
976 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 953 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
977 mbio->bi_end_io = raid1_end_write_request; 954 mbio->bi_end_io = raid1_end_write_request;
978 mbio->bi_rw = WRITE | do_barriers | do_sync; 955 mbio->bi_rw = WRITE | do_flush_fua | do_sync;
979 mbio->bi_private = r1_bio; 956 mbio->bi_private = r1_bio;
980 957
981 if (behind_pages) { 958 if (behind_pages) {
@@ -1634,41 +1611,6 @@ static void raid1d(mddev_t *mddev)
1634 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1611 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1635 sync_request_write(mddev, r1_bio); 1612 sync_request_write(mddev, r1_bio);
1636 unplug = 1; 1613 unplug = 1;
1637 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
1638 /* some requests in the r1bio were REQ_HARDBARRIER
1639 * requests which failed with -EOPNOTSUPP. Hohumm..
1640 * Better resubmit without the barrier.
1641 * We know which devices to resubmit for, because
1642 * all others have had their bios[] entry cleared.
1643 * We already have a nr_pending reference on these rdevs.
1644 */
1645 int i;
1646 const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
1647 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1648 clear_bit(R1BIO_Barrier, &r1_bio->state);
1649 for (i=0; i < conf->raid_disks; i++)
1650 if (r1_bio->bios[i])
1651 atomic_inc(&r1_bio->remaining);
1652 for (i=0; i < conf->raid_disks; i++)
1653 if (r1_bio->bios[i]) {
1654 struct bio_vec *bvec;
1655 int j;
1656
1657 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1658 /* copy pages from the failed bio, as
1659 * this might be a write-behind device */
1660 __bio_for_each_segment(bvec, bio, j, 0)
1661 bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
1662 bio_put(r1_bio->bios[i]);
1663 bio->bi_sector = r1_bio->sector +
1664 conf->mirrors[i].rdev->data_offset;
1665 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1666 bio->bi_end_io = raid1_end_write_request;
1667 bio->bi_rw = WRITE | do_sync;
1668 bio->bi_private = r1_bio;
1669 r1_bio->bios[i] = bio;
1670 generic_make_request(bio);
1671 }
1672 } else { 1614 } else {
1673 int disk; 1615 int disk;
1674 1616
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5f2d443ae28a..adf8cfd73313 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -117,8 +117,6 @@ struct r1bio_s {
117#define R1BIO_IsSync 1 117#define R1BIO_IsSync 1
118#define R1BIO_Degraded 2 118#define R1BIO_Degraded 2
119#define R1BIO_BehindIO 3 119#define R1BIO_BehindIO 3
120#define R1BIO_Barrier 4
121#define R1BIO_BarrierRetry 5
122/* For write-behind requests, we call bi_end_io when 120/* For write-behind requests, we call bi_end_io when
123 * the last non-write-behind device completes, providing 121 * the last non-write-behind device completes, providing
124 * any write was successful. Otherwise we call when 122 * any write was successful. Otherwise we call when
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 84718383124d..f0d082f749be 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -800,12 +800,13 @@ static int make_request(mddev_t *mddev, struct bio * bio)
800 int chunk_sects = conf->chunk_mask + 1; 800 int chunk_sects = conf->chunk_mask + 1;
801 const int rw = bio_data_dir(bio); 801 const int rw = bio_data_dir(bio);
802 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 802 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
803 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
803 struct bio_list bl; 804 struct bio_list bl;
804 unsigned long flags; 805 unsigned long flags;
805 mdk_rdev_t *blocked_rdev; 806 mdk_rdev_t *blocked_rdev;
806 807
807 if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 808 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
808 md_barrier_request(mddev, bio); 809 md_flush_request(mddev, bio);
809 return 0; 810 return 0;
810 } 811 }
811 812
@@ -965,7 +966,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
965 conf->mirrors[d].rdev->data_offset; 966 conf->mirrors[d].rdev->data_offset;
966 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 967 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
967 mbio->bi_end_io = raid10_end_write_request; 968 mbio->bi_end_io = raid10_end_write_request;
968 mbio->bi_rw = WRITE | do_sync; 969 mbio->bi_rw = WRITE | do_sync | do_fua;
969 mbio->bi_private = r10_bio; 970 mbio->bi_private = r10_bio;
970 971
971 atomic_inc(&r10_bio->remaining); 972 atomic_inc(&r10_bio->remaining);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 69b0a169e43d..31140d1259dc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -506,9 +506,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
506 int rw; 506 int rw;
507 struct bio *bi; 507 struct bio *bi;
508 mdk_rdev_t *rdev; 508 mdk_rdev_t *rdev;
509 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 509 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
510 rw = WRITE; 510 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
511 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 511 rw = WRITE_FUA;
512 else
513 rw = WRITE;
514 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
512 rw = READ; 515 rw = READ;
513 else 516 else
514 continue; 517 continue;
@@ -1031,6 +1034,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1031 1034
1032 while (wbi && wbi->bi_sector < 1035 while (wbi && wbi->bi_sector <
1033 dev->sector + STRIPE_SECTORS) { 1036 dev->sector + STRIPE_SECTORS) {
1037 if (wbi->bi_rw & REQ_FUA)
1038 set_bit(R5_WantFUA, &dev->flags);
1034 tx = async_copy_data(1, wbi, dev->page, 1039 tx = async_copy_data(1, wbi, dev->page,
1035 dev->sector, tx); 1040 dev->sector, tx);
1036 wbi = r5_next_bio(wbi, dev->sector); 1041 wbi = r5_next_bio(wbi, dev->sector);
@@ -1048,15 +1053,22 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
1048 int pd_idx = sh->pd_idx; 1053 int pd_idx = sh->pd_idx;
1049 int qd_idx = sh->qd_idx; 1054 int qd_idx = sh->qd_idx;
1050 int i; 1055 int i;
1056 bool fua = false;
1051 1057
1052 pr_debug("%s: stripe %llu\n", __func__, 1058 pr_debug("%s: stripe %llu\n", __func__,
1053 (unsigned long long)sh->sector); 1059 (unsigned long long)sh->sector);
1054 1060
1061 for (i = disks; i--; )
1062 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1063
1055 for (i = disks; i--; ) { 1064 for (i = disks; i--; ) {
1056 struct r5dev *dev = &sh->dev[i]; 1065 struct r5dev *dev = &sh->dev[i];
1057 1066
1058 if (dev->written || i == pd_idx || i == qd_idx) 1067 if (dev->written || i == pd_idx || i == qd_idx) {
1059 set_bit(R5_UPTODATE, &dev->flags); 1068 set_bit(R5_UPTODATE, &dev->flags);
1069 if (fua)
1070 set_bit(R5_WantFUA, &dev->flags);
1071 }
1060 } 1072 }
1061 1073
1062 if (sh->reconstruct_state == reconstruct_state_drain_run) 1074 if (sh->reconstruct_state == reconstruct_state_drain_run)
@@ -3281,7 +3293,7 @@ static void handle_stripe5(struct stripe_head *sh)
3281 3293
3282 if (dec_preread_active) { 3294 if (dec_preread_active) {
3283 /* We delay this until after ops_run_io so that if make_request 3295 /* We delay this until after ops_run_io so that if make_request
3284 * is waiting on a barrier, it won't continue until the writes 3296 * is waiting on a flush, it won't continue until the writes
3285 * have actually been submitted. 3297 * have actually been submitted.
3286 */ 3298 */
3287 atomic_dec(&conf->preread_active_stripes); 3299 atomic_dec(&conf->preread_active_stripes);
@@ -3583,7 +3595,7 @@ static void handle_stripe6(struct stripe_head *sh)
3583 3595
3584 if (dec_preread_active) { 3596 if (dec_preread_active) {
3585 /* We delay this until after ops_run_io so that if make_request 3597 /* We delay this until after ops_run_io so that if make_request
3586 * is waiting on a barrier, it won't continue until the writes 3598 * is waiting on a flush, it won't continue until the writes
3587 * have actually been submitted. 3599 * have actually been submitted.
3588 */ 3600 */
3589 atomic_dec(&conf->preread_active_stripes); 3601 atomic_dec(&conf->preread_active_stripes);
@@ -3978,14 +3990,8 @@ static int make_request(mddev_t *mddev, struct bio * bi)
3978 const int rw = bio_data_dir(bi); 3990 const int rw = bio_data_dir(bi);
3979 int remaining; 3991 int remaining;
3980 3992
3981 if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) { 3993 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3982 /* Drain all pending writes. We only really need 3994 md_flush_request(mddev, bi);
3983 * to ensure they have been submitted, but this is
3984 * easier.
3985 */
3986 mddev->pers->quiesce(mddev, 1);
3987 mddev->pers->quiesce(mddev, 0);
3988 md_barrier_request(mddev, bi);
3989 return 0; 3995 return 0;
3990 } 3996 }
3991 3997
@@ -4103,7 +4109,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4103 finish_wait(&conf->wait_for_overlap, &w); 4109 finish_wait(&conf->wait_for_overlap, &w);
4104 set_bit(STRIPE_HANDLE, &sh->state); 4110 set_bit(STRIPE_HANDLE, &sh->state);
4105 clear_bit(STRIPE_DELAYED, &sh->state); 4111 clear_bit(STRIPE_DELAYED, &sh->state);
4106 if (mddev->barrier && 4112 if ((bi->bi_rw & REQ_SYNC) &&
4107 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4113 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4108 atomic_inc(&conf->preread_active_stripes); 4114 atomic_inc(&conf->preread_active_stripes);
4109 release_stripe(sh); 4115 release_stripe(sh);
@@ -4126,13 +4132,6 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4126 bio_endio(bi, 0); 4132 bio_endio(bi, 0);
4127 } 4133 }
4128 4134
4129 if (mddev->barrier) {
4130 /* We need to wait for the stripes to all be handled.
4131 * So: wait for preread_active_stripes to drop to 0.
4132 */
4133 wait_event(mddev->thread->wqueue,
4134 atomic_read(&conf->preread_active_stripes) == 0);
4135 }
4136 return 0; 4135 return 0;
4137} 4136}
4138 4137
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 36eaed5dfd6e..2ace0582b409 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -275,6 +275,7 @@ struct r6_state {
275 * filling 275 * filling
276 */ 276 */
277#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ 277#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
278#define R5_WantFUA 14 /* Write should be FUA */
278/* 279/*
279 * Write method 280 * Write method
280 */ 281 */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 6837a8ef9371..3e57b61ca446 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5945,8 +5945,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
5945 goto out; 5945 goto out;
5946 5946
5947 mem = kmalloc(iocpage2sz, GFP_KERNEL); 5947 mem = kmalloc(iocpage2sz, GFP_KERNEL);
5948 if (!mem) 5948 if (!mem) {
5949 rc = -ENOMEM;
5949 goto out; 5950 goto out;
5951 }
5950 5952
5951 memcpy(mem, (u8 *)pIoc2, iocpage2sz); 5953 memcpy(mem, (u8 *)pIoc2, iocpage2sz);
5952 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem; 5954 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b74331260744..db2fbe2d4146 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -248,15 +248,15 @@ config CS5535_CLOCK_EVENT_SRC
248 generic PIT, and are suitable for use as high-res timers. 248 generic PIT, and are suitable for use as high-res timers.
249 249
250config HP_ILO 250config HP_ILO
251 tristate "Channel interface driver for HP iLO/iLO2 processor" 251 tristate "Channel interface driver for the HP iLO processor"
252 depends on PCI 252 depends on PCI
253 default n 253 default n
254 help 254 help
255 The channel interface driver allows applications to communicate 255 The channel interface driver allows applications to communicate
256 with iLO/iLO2 management processors present on HP ProLiant 256 with iLO management processors present on HP ProLiant servers.
257 servers. Upon loading, the driver creates /dev/hpilo/dXccbN files, 257 Upon loading, the driver creates /dev/hpilo/dXccbN files, which
258 which can be used to gather data from the management processor, 258 can be used to gather data from the management processor, via
259 via read and write system calls. 259 read and write system calls.
260 260
261 To compile this driver as a module, choose M here: the 261 To compile this driver as a module, choose M here: the
262 module will be called hpilo. 262 module will be called hpilo.
@@ -390,6 +390,18 @@ config BMP085
390 To compile this driver as a module, choose M here: the 390 To compile this driver as a module, choose M here: the
391 module will be called bmp085. 391 module will be called bmp085.
392 392
393config PCH_PHUB
394 tristate "PCH Packet Hub of Intel Topcliff"
395 depends on PCI
396 help
397 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
398 Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
399 processor. The Topcliff has MAC address and Option ROM data in SROM.
400 This driver can access MAC address and Option ROM data in SROM.
401
402 To compile this driver as a module, choose M here: the module will
403 be called pch_phub.
404
393source "drivers/misc/c2port/Kconfig" 405source "drivers/misc/c2port/Kconfig"
394source "drivers/misc/eeprom/Kconfig" 406source "drivers/misc/eeprom/Kconfig"
395source "drivers/misc/cb710/Kconfig" 407source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 42eab95cde2a..9f2986b4da2f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -35,3 +35,4 @@ obj-y += eeprom/
35obj-y += cb710/ 35obj-y += cb710/
36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o 36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
38obj-$(CONFIG_PCH_PHUB) += pch_phub.o
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 69c1f2fca141..fffc227181b0 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for HP iLO/iLO2 management processor. 2 * Driver for the HP iLO management processor.
3 * 3 *
4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
5 * David Altobelli <david.altobelli@hp.com> 5 * David Altobelli <david.altobelli@hp.com>
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
new file mode 100644
index 000000000000..744b804aca15
--- /dev/null
+++ b/drivers/misc/pch_phub.c
@@ -0,0 +1,717 @@
1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/types.h>
21#include <linux/fs.h>
22#include <linux/uaccess.h>
23#include <linux/string.h>
24#include <linux/pci.h>
25#include <linux/io.h>
26#include <linux/delay.h>
27#include <linux/mutex.h>
28#include <linux/if_ether.h>
29#include <linux/ctype.h>
30
31#define PHUB_STATUS 0x00 /* Status Register offset */
32#define PHUB_CONTROL 0x04 /* Control Register offset */
33#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */
34#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enabling for writing ROM */
35#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disabling for writing ROM */
36#define PCH_PHUB_ROM_START_ADDR 0x14 /* ROM data area start address offset */
37
38/* MAX number of INT_REDUCE_CONTROL registers */
39#define MAX_NUM_INT_REDUCE_CONTROL_REG 128
40#define PCI_DEVICE_ID_PCH1_PHUB 0x8801
41#define PCH_MINOR_NOS 1
42#define CLKCFG_CAN_50MHZ 0x12000000
43#define CLKCFG_CANCLK_MASK 0xFF000000
44
45/* SROM ACCESS Macro */
46#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
47
48/* Registers address offset */
49#define PCH_PHUB_ID_REG 0x0000
50#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004
51#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008
52#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C
53#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010
54#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014
55#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018
56#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020
57#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024
58#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028
59#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C
60#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040
61#define CLKCFG_REG_OFFSET 0x500
62
63#define PCH_PHUB_OROM_SIZE 15360
64
65/**
66 * struct pch_phub_reg - PHUB register structure
67 * @phub_id_reg: PHUB_ID register val
68 * @q_pri_val_reg: QUEUE_PRI_VAL register val
69 * @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val
70 * @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val
71 * @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val
72 * @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val
73 * @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val
74 * @intpin_reg_wpermit_reg0: INTPIN_REG_WPERMIT register 0 val
75 * @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val
76 * @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val
77 * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val
78 * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val
79 * @clkcfg_reg: CLK CFG register val
80 * @pch_phub_base_address: Register base address
81 * @pch_phub_extrom_base_address: external rom base address
82 */
83struct pch_phub_reg {
84 u32 phub_id_reg;
85 u32 q_pri_val_reg;
86 u32 rc_q_maxsize_reg;
87 u32 bri_q_maxsize_reg;
88 u32 comp_resp_timeout_reg;
89 u32 bus_slave_control_reg;
90 u32 deadlock_avoid_type_reg;
91 u32 intpin_reg_wpermit_reg0;
92 u32 intpin_reg_wpermit_reg1;
93 u32 intpin_reg_wpermit_reg2;
94 u32 intpin_reg_wpermit_reg3;
95 u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
96 u32 clkcfg_reg;
97 void __iomem *pch_phub_base_address;
98 void __iomem *pch_phub_extrom_base_address;
99};
100
101/* SROM SPEC for MAC address assignment offset */
102static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa};
103
104static DEFINE_MUTEX(pch_phub_mutex);
105
106/**
107 * pch_phub_read_modify_write_reg() - Reading modifying and writing register
108 * @reg_addr_offset: Register offset address value.
109 * @data: Writing value.
110 * @mask: Mask value.
111 */
112static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
113 unsigned int reg_addr_offset,
114 unsigned int data, unsigned int mask)
115{
116 void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset;
117 iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
118}
119
120/* pch_phub_save_reg_conf - saves register configuration */
121static void pch_phub_save_reg_conf(struct pci_dev *pdev)
122{
123 unsigned int i;
124 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
125
126 void __iomem *p = chip->pch_phub_base_address;
127
128 chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG);
129 chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG);
130 chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
131 chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
132 chip->comp_resp_timeout_reg =
133 ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
134 chip->bus_slave_control_reg =
135 ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
136 chip->deadlock_avoid_type_reg =
137 ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
138 chip->intpin_reg_wpermit_reg0 =
139 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
140 chip->intpin_reg_wpermit_reg1 =
141 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
142 chip->intpin_reg_wpermit_reg2 =
143 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
144 chip->intpin_reg_wpermit_reg3 =
145 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
146 dev_dbg(&pdev->dev, "%s : "
147 "chip->phub_id_reg=%x, "
148 "chip->q_pri_val_reg=%x, "
149 "chip->rc_q_maxsize_reg=%x, "
150 "chip->bri_q_maxsize_reg=%x, "
151 "chip->comp_resp_timeout_reg=%x, "
152 "chip->bus_slave_control_reg=%x, "
153 "chip->deadlock_avoid_type_reg=%x, "
154 "chip->intpin_reg_wpermit_reg0=%x, "
155 "chip->intpin_reg_wpermit_reg1=%x, "
156 "chip->intpin_reg_wpermit_reg2=%x, "
157 "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
158 chip->phub_id_reg,
159 chip->q_pri_val_reg,
160 chip->rc_q_maxsize_reg,
161 chip->bri_q_maxsize_reg,
162 chip->comp_resp_timeout_reg,
163 chip->bus_slave_control_reg,
164 chip->deadlock_avoid_type_reg,
165 chip->intpin_reg_wpermit_reg0,
166 chip->intpin_reg_wpermit_reg1,
167 chip->intpin_reg_wpermit_reg2,
168 chip->intpin_reg_wpermit_reg3);
169 for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
170 chip->int_reduce_control_reg[i] =
171 ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
172 dev_dbg(&pdev->dev, "%s : "
173 "chip->int_reduce_control_reg[%d]=%x\n",
174 __func__, i, chip->int_reduce_control_reg[i]);
175 }
176 chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
177}
178
179/* pch_phub_restore_reg_conf - restore register configuration */
180static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
181{
182 unsigned int i;
183 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
184 void __iomem *p;
185 p = chip->pch_phub_base_address;
186
187 iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG);
188 iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG);
189 iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
190 iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
191 iowrite32(chip->comp_resp_timeout_reg,
192 p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
193 iowrite32(chip->bus_slave_control_reg,
194 p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
195 iowrite32(chip->deadlock_avoid_type_reg,
196 p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
197 iowrite32(chip->intpin_reg_wpermit_reg0,
198 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
199 iowrite32(chip->intpin_reg_wpermit_reg1,
200 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
201 iowrite32(chip->intpin_reg_wpermit_reg2,
202 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
203 iowrite32(chip->intpin_reg_wpermit_reg3,
204 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
205 dev_dbg(&pdev->dev, "%s : "
206 "chip->phub_id_reg=%x, "
207 "chip->q_pri_val_reg=%x, "
208 "chip->rc_q_maxsize_reg=%x, "
209 "chip->bri_q_maxsize_reg=%x, "
210 "chip->comp_resp_timeout_reg=%x, "
211 "chip->bus_slave_control_reg=%x, "
212 "chip->deadlock_avoid_type_reg=%x, "
213 "chip->intpin_reg_wpermit_reg0=%x, "
214 "chip->intpin_reg_wpermit_reg1=%x, "
215 "chip->intpin_reg_wpermit_reg2=%x, "
216 "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
217 chip->phub_id_reg,
218 chip->q_pri_val_reg,
219 chip->rc_q_maxsize_reg,
220 chip->bri_q_maxsize_reg,
221 chip->comp_resp_timeout_reg,
222 chip->bus_slave_control_reg,
223 chip->deadlock_avoid_type_reg,
224 chip->intpin_reg_wpermit_reg0,
225 chip->intpin_reg_wpermit_reg1,
226 chip->intpin_reg_wpermit_reg2,
227 chip->intpin_reg_wpermit_reg3);
228 for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
229 iowrite32(chip->int_reduce_control_reg[i],
230 p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
231 dev_dbg(&pdev->dev, "%s : "
232 "chip->int_reduce_control_reg[%d]=%x\n",
233 __func__, i, chip->int_reduce_control_reg[i]);
234 }
235
236 iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
237}
238
239/**
240 * pch_phub_read_serial_rom() - Reading Serial ROM
241 * @offset_address: Serial ROM offset address to read.
242 * @data: Read buffer for specified Serial ROM value.
243 */
244static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
245 unsigned int offset_address, u8 *data)
246{
247 void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
248 offset_address;
249
250 *data = ioread8(mem_addr);
251}
252
253/**
254 * pch_phub_write_serial_rom() - Writing Serial ROM
255 * @offset_address: Serial ROM offset address.
256 * @data: Serial ROM value to write.
257 */
258static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
259 unsigned int offset_address, u8 data)
260{
261 void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
262 (offset_address & PCH_WORD_ADDR_MASK);
263 int i;
264 unsigned int word_data;
265 unsigned int pos;
266 unsigned int mask;
267 pos = (offset_address % 4) * 8;
268 mask = ~(0xFF << pos);
269
270 iowrite32(PCH_PHUB_ROM_WRITE_ENABLE,
271 chip->pch_phub_extrom_base_address + PHUB_CONTROL);
272
273 word_data = ioread32(mem_addr);
274 iowrite32((word_data & mask) | (u32)data << pos, mem_addr);
275
276 i = 0;
277 while (ioread8(chip->pch_phub_extrom_base_address +
278 PHUB_STATUS) != 0x00) {
279 msleep(1);
280 if (i == PHUB_TIMEOUT)
281 return -ETIMEDOUT;
282 i++;
283 }
284
285 iowrite32(PCH_PHUB_ROM_WRITE_DISABLE,
286 chip->pch_phub_extrom_base_address + PHUB_CONTROL);
287
288 return 0;
289}
290
291/**
292 * pch_phub_read_serial_rom_val() - Read Serial ROM value
293 * @offset_address: Serial ROM address offset value.
294 * @data: Serial ROM value to read.
295 */
296static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
297 unsigned int offset_address, u8 *data)
298{
299 unsigned int mem_addr;
300
301 mem_addr = PCH_PHUB_ROM_START_ADDR +
302 pch_phub_mac_offset[offset_address];
303
304 pch_phub_read_serial_rom(chip, mem_addr, data);
305}
306
307/**
308 * pch_phub_write_serial_rom_val() - writing Serial ROM value
309 * @offset_address: Serial ROM address offset value.
310 * @data: Serial ROM value.
311 */
312static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip,
313 unsigned int offset_address, u8 data)
314{
315 int retval;
316 unsigned int mem_addr;
317
318 mem_addr = PCH_PHUB_ROM_START_ADDR +
319 pch_phub_mac_offset[offset_address];
320
321 retval = pch_phub_write_serial_rom(chip, mem_addr, data);
322
323 return retval;
324}
325
326/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration
327 * for Gigabit Ethernet MAC address
328 */
329static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip)
330{
331 int retval;
332
333 retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc);
334 retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10);
335 retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01);
336 retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02);
337
338 retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00);
339 retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00);
340 retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00);
341 retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80);
342
343 retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc);
344 retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10);
345 retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01);
346 retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18);
347
348 retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc);
349 retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10);
350 retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01);
351 retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19);
352
353 retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc);
354 retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10);
355 retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01);
356 retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a);
357
358 retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01);
359 retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00);
360 retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00);
361 retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00);
362
363 return retval;
364}
365
366/**
367 * pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
368 * @offset_address: Gigabit Ethernet MAC address offset value.
369 * @data: Buffer of the Gigabit Ethernet MAC address value.
370 */
371static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
372{
373 int i;
374 for (i = 0; i < ETH_ALEN; i++)
375 pch_phub_read_serial_rom_val(chip, i, &data[i]);
376}
377
378/**
379 * pch_phub_write_gbe_mac_addr() - Write MAC address
380 * @offset_address: Gigabit Ethernet MAC address offset value.
381 * @data: Gigabit Ethernet MAC address value.
382 */
383static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
384{
385 int retval;
386 int i;
387
388 retval = pch_phub_gbe_serial_rom_conf(chip);
389 if (retval)
390 return retval;
391
392 for (i = 0; i < ETH_ALEN; i++) {
393 retval = pch_phub_write_serial_rom_val(chip, i, data[i]);
394 if (retval)
395 return retval;
396 }
397
398 return retval;
399}
400
401static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
402 struct bin_attribute *attr, char *buf,
403 loff_t off, size_t count)
404{
405 unsigned int rom_signature;
406 unsigned char rom_length;
407 unsigned int tmp;
408 unsigned int addr_offset;
409 unsigned int orom_size;
410 int ret;
411 int err;
412
413 struct pch_phub_reg *chip =
414 dev_get_drvdata(container_of(kobj, struct device, kobj));
415
416 ret = mutex_lock_interruptible(&pch_phub_mutex);
417 if (ret) {
418 err = -ERESTARTSYS;
419 goto return_err_nomutex;
420 }
421
422 /* Get Rom signature */
423 pch_phub_read_serial_rom(chip, 0x80, (unsigned char *)&rom_signature);
424 rom_signature &= 0xff;
425 pch_phub_read_serial_rom(chip, 0x81, (unsigned char *)&tmp);
426 rom_signature |= (tmp & 0xff) << 8;
427 if (rom_signature == 0xAA55) {
428 pch_phub_read_serial_rom(chip, 0x82, &rom_length);
429 orom_size = rom_length * 512;
430 if (orom_size < off) {
431 addr_offset = 0;
432 goto return_ok;
433 }
434 if (orom_size < count) {
435 addr_offset = 0;
436 goto return_ok;
437 }
438
439 for (addr_offset = 0; addr_offset < count; addr_offset++) {
440 pch_phub_read_serial_rom(chip, 0x80 + addr_offset + off,
441 &buf[addr_offset]);
442 }
443 } else {
444 err = -ENODATA;
445 goto return_err;
446 }
447return_ok:
448 mutex_unlock(&pch_phub_mutex);
449 return addr_offset;
450
451return_err:
452 mutex_unlock(&pch_phub_mutex);
453return_err_nomutex:
454 return err;
455}
456
457static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
458 struct bin_attribute *attr,
459 char *buf, loff_t off, size_t count)
460{
461 int err;
462 unsigned int addr_offset;
463 int ret;
464 struct pch_phub_reg *chip =
465 dev_get_drvdata(container_of(kobj, struct device, kobj));
466
467 ret = mutex_lock_interruptible(&pch_phub_mutex);
468 if (ret)
469 return -ERESTARTSYS;
470
471 if (off > PCH_PHUB_OROM_SIZE) {
472 addr_offset = 0;
473 goto return_ok;
474 }
475 if (count > PCH_PHUB_OROM_SIZE) {
476 addr_offset = 0;
477 goto return_ok;
478 }
479
480 for (addr_offset = 0; addr_offset < count; addr_offset++) {
481 if (PCH_PHUB_OROM_SIZE < off + addr_offset)
482 goto return_ok;
483
484 ret = pch_phub_write_serial_rom(chip, 0x80 + addr_offset + off,
485 buf[addr_offset]);
486 if (ret) {
487 err = ret;
488 goto return_err;
489 }
490 }
491
492return_ok:
493 mutex_unlock(&pch_phub_mutex);
494 return addr_offset;
495
496return_err:
497 mutex_unlock(&pch_phub_mutex);
498 return err;
499}
500
501static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
502 char *buf)
503{
504 u8 mac[8];
505 struct pch_phub_reg *chip = dev_get_drvdata(dev);
506
507 pch_phub_read_gbe_mac_addr(chip, mac);
508
509 return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
510 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
511}
512
513static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
514 const char *buf, size_t count)
515{
516 u8 mac[6];
517 struct pch_phub_reg *chip = dev_get_drvdata(dev);
518
519 if (count != 18)
520 return -EINVAL;
521
522 sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
523 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
524 (u32 *)&mac[4], (u32 *)&mac[5]);
525
526 pch_phub_write_gbe_mac_addr(chip, mac);
527
528 return count;
529}
530
531static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
532
533static struct bin_attribute pch_bin_attr = {
534 .attr = {
535 .name = "pch_firmware",
536 .mode = S_IRUGO | S_IWUSR,
537 },
538 .size = PCH_PHUB_OROM_SIZE + 1,
539 .read = pch_phub_bin_read,
540 .write = pch_phub_bin_write,
541};
542
543static int __devinit pch_phub_probe(struct pci_dev *pdev,
544 const struct pci_device_id *id)
545{
546 int retval;
547
548 int ret;
549 ssize_t rom_size;
550 struct pch_phub_reg *chip;
551
552 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
553 if (chip == NULL)
554 return -ENOMEM;
555
556 ret = pci_enable_device(pdev);
557 if (ret) {
558 dev_err(&pdev->dev,
559 "%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
560 goto err_pci_enable_dev;
561 }
562 dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
563 ret);
564
565 ret = pci_request_regions(pdev, KBUILD_MODNAME);
566 if (ret) {
567 dev_err(&pdev->dev,
568 "%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
569 goto err_req_regions;
570 }
571 dev_dbg(&pdev->dev, "%s : "
572 "pci_request_regions returns %d\n", __func__, ret);
573
574 chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);
575
576
577 if (chip->pch_phub_base_address == 0) {
578 dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
579 ret = -ENOMEM;
580 goto err_pci_iomap;
581 }
582 dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
583 "in pch_phub_base_address variable is %p\n", __func__,
584 chip->pch_phub_base_address);
585 chip->pch_phub_extrom_base_address = pci_map_rom(pdev, &rom_size);
586
587 if (chip->pch_phub_extrom_base_address == 0) {
588 dev_err(&pdev->dev, "%s : pci_map_rom FAILED", __func__);
589 ret = -ENOMEM;
590 goto err_pci_map;
591 }
592 dev_dbg(&pdev->dev, "%s : "
593 "pci_map_rom SUCCESS and value in "
594 "pch_phub_extrom_base_address variable is %p\n", __func__,
595 chip->pch_phub_extrom_base_address);
596
597 pci_set_drvdata(pdev, chip);
598
599 retval = sysfs_create_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
600 if (retval)
601 goto err_sysfs_create;
602
603 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
604 if (retval)
605 goto exit_bin_attr;
606
607 pch_phub_read_modify_write_reg(chip, (unsigned int)CLKCFG_REG_OFFSET,
608 CLKCFG_CAN_50MHZ, CLKCFG_CANCLK_MASK);
609
610 /* set the prefech value */
611 iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
612 /* set the interrupt delay value */
613 iowrite32(0x25, chip->pch_phub_base_address + 0x44);
614
615 return 0;
616exit_bin_attr:
617 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
618
619err_sysfs_create:
620 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
621err_pci_map:
622 pci_iounmap(pdev, chip->pch_phub_base_address);
623err_pci_iomap:
624 pci_release_regions(pdev);
625err_req_regions:
626 pci_disable_device(pdev);
627err_pci_enable_dev:
628 kfree(chip);
629 dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
630 return ret;
631}
632
633static void __devexit pch_phub_remove(struct pci_dev *pdev)
634{
635 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
636
637 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
638 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
639 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
640 pci_iounmap(pdev, chip->pch_phub_base_address);
641 pci_release_regions(pdev);
642 pci_disable_device(pdev);
643 kfree(chip);
644}
645
646#ifdef CONFIG_PM
647
648static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
649{
650 int ret;
651
652 pch_phub_save_reg_conf(pdev);
653 ret = pci_save_state(pdev);
654 if (ret) {
655 dev_err(&pdev->dev,
656 " %s -pci_save_state returns %d\n", __func__, ret);
657 return ret;
658 }
659 pci_enable_wake(pdev, PCI_D3hot, 0);
660 pci_disable_device(pdev);
661 pci_set_power_state(pdev, pci_choose_state(pdev, state));
662
663 return 0;
664}
665
666static int pch_phub_resume(struct pci_dev *pdev)
667{
668 int ret;
669
670 pci_set_power_state(pdev, PCI_D0);
671 pci_restore_state(pdev);
672 ret = pci_enable_device(pdev);
673 if (ret) {
674 dev_err(&pdev->dev,
675 "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
676 return ret;
677 }
678
679 pci_enable_wake(pdev, PCI_D3hot, 0);
680 pch_phub_restore_reg_conf(pdev);
681
682 return 0;
683}
684#else
685#define pch_phub_suspend NULL
686#define pch_phub_resume NULL
687#endif /* CONFIG_PM */
688
689static struct pci_device_id pch_phub_pcidev_id[] = {
690 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH1_PHUB)},
691 {0,}
692};
693
694static struct pci_driver pch_phub_driver = {
695 .name = "pch_phub",
696 .id_table = pch_phub_pcidev_id,
697 .probe = pch_phub_probe,
698 .remove = __devexit_p(pch_phub_remove),
699 .suspend = pch_phub_suspend,
700 .resume = pch_phub_resume
701};
702
703static int __init pch_phub_pci_init(void)
704{
705 return pci_register_driver(&pch_phub_driver);
706}
707
708static void __exit pch_phub_pci_exit(void)
709{
710 pci_unregister_driver(&pch_phub_driver);
711}
712
713module_init(pch_phub_pci_init);
714module_exit(pch_phub_pci_exit);
715
716MODULE_DESCRIPTION("PCH Packet Hub PCI Driver");
717MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e876678176be..9c0b42bfe089 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -128,7 +128,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
128 mq->req = NULL; 128 mq->req = NULL;
129 129
130 blk_queue_prep_rq(mq->queue, mmc_prep_request); 130 blk_queue_prep_rq(mq->queue, mmc_prep_request);
131 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 131 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 132 if (mmc_can_erase(card)) {
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 133 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index f702a163d8df..3cf193fb5e00 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -1,9 +1,5 @@
1menu "UBI - Unsorted block images" 1menuconfig MTD_UBI
2 depends on MTD 2 tristate "Enable UBI - Unsorted block images"
3
4config MTD_UBI
5 tristate "Enable UBI"
6 depends on MTD
7 select CRC32 3 select CRC32
8 help 4 help
9 UBI is a software layer above MTD layer which admits of LVM-like 5 UBI is a software layer above MTD layer which admits of LVM-like
@@ -12,11 +8,12 @@ config MTD_UBI
12 capabilities. Please, consult the MTD web site for more details 8 capabilities. Please, consult the MTD web site for more details
13 (www.linux-mtd.infradead.org). 9 (www.linux-mtd.infradead.org).
14 10
11if MTD_UBI
12
15config MTD_UBI_WL_THRESHOLD 13config MTD_UBI_WL_THRESHOLD
16 int "UBI wear-leveling threshold" 14 int "UBI wear-leveling threshold"
17 default 4096 15 default 4096
18 range 2 65536 16 range 2 65536
19 depends on MTD_UBI
20 help 17 help
21 This parameter defines the maximum difference between the highest 18 This parameter defines the maximum difference between the highest
22 erase counter value and the lowest erase counter value of eraseblocks 19 erase counter value and the lowest erase counter value of eraseblocks
@@ -34,7 +31,6 @@ config MTD_UBI_BEB_RESERVE
34 int "Percentage of reserved eraseblocks for bad eraseblocks handling" 31 int "Percentage of reserved eraseblocks for bad eraseblocks handling"
35 default 1 32 default 1
36 range 0 25 33 range 0 25
37 depends on MTD_UBI
38 help 34 help
39 If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI 35 If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
40 reserves some amount of physical eraseblocks to handle new bad 36 reserves some amount of physical eraseblocks to handle new bad
@@ -48,8 +44,6 @@ config MTD_UBI_BEB_RESERVE
48 44
49config MTD_UBI_GLUEBI 45config MTD_UBI_GLUEBI
50 tristate "MTD devices emulation driver (gluebi)" 46 tristate "MTD devices emulation driver (gluebi)"
51 default n
52 depends on MTD_UBI
53 help 47 help
54 This option enables gluebi - an additional driver which emulates MTD 48 This option enables gluebi - an additional driver which emulates MTD
55 devices on top of UBI volumes: for each UBI volumes an MTD device is 49 devices on top of UBI volumes: for each UBI volumes an MTD device is
@@ -59,4 +53,5 @@ config MTD_UBI_GLUEBI
59 software. 53 software.
60 54
61source "drivers/mtd/ubi/Kconfig.debug" 55source "drivers/mtd/ubi/Kconfig.debug"
62endmenu 56
57endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 61f6e5e40458..fad4adc0fe2c 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -1,94 +1,73 @@
1comment "UBI debugging options" 1comment "UBI debugging options"
2 depends on MTD_UBI
3 2
4config MTD_UBI_DEBUG 3config MTD_UBI_DEBUG
5 bool "UBI debugging" 4 bool "UBI debugging"
6 depends on SYSFS 5 depends on SYSFS
7 depends on MTD_UBI
8 select DEBUG_FS 6 select DEBUG_FS
9 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL 7 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
10 help 8 help
11 This option enables UBI debugging. 9 This option enables UBI debugging.
12 10
11if MTD_UBI_DEBUG
12
13config MTD_UBI_DEBUG_MSG 13config MTD_UBI_DEBUG_MSG
14 bool "UBI debugging messages" 14 bool "UBI debugging messages"
15 depends on MTD_UBI_DEBUG
16 default n
17 help 15 help
18 This option enables UBI debugging messages. 16 This option enables UBI debugging messages.
19 17
20config MTD_UBI_DEBUG_PARANOID 18config MTD_UBI_DEBUG_PARANOID
21 bool "Extra self-checks" 19 bool "Extra self-checks"
22 default n
23 depends on MTD_UBI_DEBUG
24 help 20 help
25 This option enables extra checks in UBI code. Note this slows UBI down 21 This option enables extra checks in UBI code. Note this slows UBI down
26 significantly. 22 significantly.
27 23
28config MTD_UBI_DEBUG_DISABLE_BGT 24config MTD_UBI_DEBUG_DISABLE_BGT
29 bool "Do not enable the UBI background thread" 25 bool "Do not enable the UBI background thread"
30 depends on MTD_UBI_DEBUG
31 default n
32 help 26 help
33 This option switches the background thread off by default. The thread 27 This option switches the background thread off by default. The thread
34 may be also be enabled/disabled via UBI sysfs. 28 may be also be enabled/disabled via UBI sysfs.
35 29
36config MTD_UBI_DEBUG_EMULATE_BITFLIPS 30config MTD_UBI_DEBUG_EMULATE_BITFLIPS
37 bool "Emulate flash bit-flips" 31 bool "Emulate flash bit-flips"
38 depends on MTD_UBI_DEBUG
39 default n
40 help 32 help
41 This option emulates bit-flips with probability 1/50, which in turn 33 This option emulates bit-flips with probability 1/50, which in turn
42 causes scrubbing. Useful for debugging and stressing UBI. 34 causes scrubbing. Useful for debugging and stressing UBI.
43 35
44config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES 36config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
45 bool "Emulate flash write failures" 37 bool "Emulate flash write failures"
46 depends on MTD_UBI_DEBUG
47 default n
48 help 38 help
49 This option emulates write failures with probability 1/100. Useful for 39 This option emulates write failures with probability 1/100. Useful for
50 debugging and testing how UBI handles errors. 40 debugging and testing how UBI handles errors.
51 41
52config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES 42config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
53 bool "Emulate flash erase failures" 43 bool "Emulate flash erase failures"
54 depends on MTD_UBI_DEBUG
55 default n
56 help 44 help
57 This option emulates erase failures with probability 1/100. Useful for 45 This option emulates erase failures with probability 1/100. Useful for
58 debugging and testing how UBI handles errors. 46 debugging and testing how UBI handles errors.
59 47
60menu "Additional UBI debugging messages" 48comment "Additional UBI debugging messages"
61 depends on MTD_UBI_DEBUG
62 49
63config MTD_UBI_DEBUG_MSG_BLD 50config MTD_UBI_DEBUG_MSG_BLD
64 bool "Additional UBI initialization and build messages" 51 bool "Additional UBI initialization and build messages"
65 default n
66 depends on MTD_UBI_DEBUG
67 help 52 help
68 This option enables detailed UBI initialization and device build 53 This option enables detailed UBI initialization and device build
69 debugging messages. 54 debugging messages.
70 55
71config MTD_UBI_DEBUG_MSG_EBA 56config MTD_UBI_DEBUG_MSG_EBA
72 bool "Eraseblock association unit messages" 57 bool "Eraseblock association unit messages"
73 default n
74 depends on MTD_UBI_DEBUG
75 help 58 help
76 This option enables debugging messages from the UBI eraseblock 59 This option enables debugging messages from the UBI eraseblock
77 association unit. 60 association unit.
78 61
79config MTD_UBI_DEBUG_MSG_WL 62config MTD_UBI_DEBUG_MSG_WL
80 bool "Wear-leveling unit messages" 63 bool "Wear-leveling unit messages"
81 default n
82 depends on MTD_UBI_DEBUG
83 help 64 help
84 This option enables debugging messages from the UBI wear-leveling 65 This option enables debugging messages from the UBI wear-leveling
85 unit. 66 unit.
86 67
87config MTD_UBI_DEBUG_MSG_IO 68config MTD_UBI_DEBUG_MSG_IO
88 bool "Input/output unit messages" 69 bool "Input/output unit messages"
89 default n
90 depends on MTD_UBI_DEBUG
91 help 70 help
92 This option enables debugging messages from the UBI input/output unit. 71 This option enables debugging messages from the UBI input/output unit.
93 72
94endmenu # UBI debugging messages 73endif # MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 78ae89488a4f..5ebe280225d6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -95,8 +95,8 @@ DEFINE_MUTEX(ubi_devices_mutex);
95static DEFINE_SPINLOCK(ubi_devices_lock); 95static DEFINE_SPINLOCK(ubi_devices_lock);
96 96
97/* "Show" method for files in '/<sysfs>/class/ubi/' */ 97/* "Show" method for files in '/<sysfs>/class/ubi/' */
98static ssize_t ubi_version_show(struct class *class, struct class_attribute *attr, 98static ssize_t ubi_version_show(struct class *class,
99 char *buf) 99 struct class_attribute *attr, char *buf)
100{ 100{
101 return sprintf(buf, "%d\n", UBI_VERSION); 101 return sprintf(buf, "%d\n", UBI_VERSION);
102} 102}
@@ -591,6 +591,7 @@ static int attach_by_scanning(struct ubi_device *ubi)
591 591
592 ubi->bad_peb_count = si->bad_peb_count; 592 ubi->bad_peb_count = si->bad_peb_count;
593 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; 593 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
594 ubi->corr_peb_count = si->corr_peb_count;
594 ubi->max_ec = si->max_ec; 595 ubi->max_ec = si->max_ec;
595 ubi->mean_ec = si->mean_ec; 596 ubi->mean_ec = si->mean_ec;
596 ubi_msg("max. sequence number: %llu", si->max_sqnum); 597 ubi_msg("max. sequence number: %llu", si->max_sqnum);
@@ -972,6 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
972 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 973 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
973 ubi_msg("number of good PEBs: %d", ubi->good_peb_count); 974 ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
974 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); 975 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
976 ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
975 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); 977 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
976 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); 978 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
977 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); 979 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 17a107129726..9eca95074bc2 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -57,6 +57,9 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
57void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); 57void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
58void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len); 58void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
59 59
60#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
61 print_hex_dump(l, ps, pt, r, g, b, len, a)
62
60#ifdef CONFIG_MTD_UBI_DEBUG_MSG 63#ifdef CONFIG_MTD_UBI_DEBUG_MSG
61/* General debugging messages */ 64/* General debugging messages */
62#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 65#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
@@ -172,6 +175,7 @@ static inline int ubi_dbg_is_erase_failure(void)
172#define ubi_dbg_dump_seb(seb, type) ({}) 175#define ubi_dbg_dump_seb(seb, type) ({})
173#define ubi_dbg_dump_mkvol_req(req) ({}) 176#define ubi_dbg_dump_mkvol_req(req) ({})
174#define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({}) 177#define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({})
178#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({})
175 179
176#define UBI_IO_DEBUG 0 180#define UBI_IO_DEBUG 0
177#define DBG_DISABLE_BGT 0 181#define DBG_DISABLE_BGT 0
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fe74749e0dae..4be671815014 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -418,7 +418,7 @@ retry:
418 * may try to recover data. FIXME: but this is 418 * may try to recover data. FIXME: but this is
419 * not implemented. 419 * not implemented.
420 */ 420 */
421 if (err == UBI_IO_BAD_HDR_READ || 421 if (err == UBI_IO_BAD_HDR_EBADMSG ||
422 err == UBI_IO_BAD_HDR) { 422 err == UBI_IO_BAD_HDR) {
423 ubi_warn("corrupted VID header at PEB " 423 ubi_warn("corrupted VID header at PEB "
424 "%d, LEB %d:%d", pnum, vol_id, 424 "%d, LEB %d:%d", pnum, vol_id,
@@ -963,7 +963,7 @@ write_error:
963static int is_error_sane(int err) 963static int is_error_sane(int err)
964{ 964{
965 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR || 965 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
966 err == UBI_IO_BAD_HDR_READ || err == -ETIMEDOUT) 966 err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
967 return 0; 967 return 0;
968 return 1; 968 return 1;
969} 969}
@@ -1201,6 +1201,9 @@ static void print_rsvd_warning(struct ubi_device *ubi,
1201 1201
1202 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d," 1202 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
1203 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); 1203 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1204 if (ubi->corr_peb_count)
1205 ubi_warn("%d PEBs are corrupted and not used",
1206 ubi->corr_peb_count);
1204} 1207}
1205 1208
1206/** 1209/**
@@ -1263,6 +1266,9 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1263 if (ubi->avail_pebs < EBA_RESERVED_PEBS) { 1266 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1264 ubi_err("no enough physical eraseblocks (%d, need %d)", 1267 ubi_err("no enough physical eraseblocks (%d, need %d)",
1265 ubi->avail_pebs, EBA_RESERVED_PEBS); 1268 ubi->avail_pebs, EBA_RESERVED_PEBS);
1269 if (ubi->corr_peb_count)
1270 ubi_err("%d PEBs are corrupted and not used",
1271 ubi->corr_peb_count);
1266 err = -ENOSPC; 1272 err = -ENOSPC;
1267 goto out_free; 1273 goto out_free;
1268 } 1274 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 332f992f13d9..c2960ac9f39c 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -376,25 +376,6 @@ retry:
376 return 0; 376 return 0;
377} 377}
378 378
379/**
380 * check_pattern - check if buffer contains only a certain byte pattern.
381 * @buf: buffer to check
382 * @patt: the pattern to check
383 * @size: buffer size in bytes
384 *
385 * This function returns %1 in there are only @patt bytes in @buf, and %0 if
386 * something else was also found.
387 */
388static int check_pattern(const void *buf, uint8_t patt, int size)
389{
390 int i;
391
392 for (i = 0; i < size; i++)
393 if (((const uint8_t *)buf)[i] != patt)
394 return 0;
395 return 1;
396}
397
398/* Patterns to write to a physical eraseblock when torturing it */ 379/* Patterns to write to a physical eraseblock when torturing it */
399static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; 380static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
400 381
@@ -426,7 +407,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
426 if (err) 407 if (err)
427 goto out; 408 goto out;
428 409
429 err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size); 410 err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
430 if (err == 0) { 411 if (err == 0) {
431 ubi_err("erased PEB %d, but a non-0xFF byte found", 412 ubi_err("erased PEB %d, but a non-0xFF byte found",
432 pnum); 413 pnum);
@@ -445,7 +426,8 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
445 if (err) 426 if (err)
446 goto out; 427 goto out;
447 428
448 err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size); 429 err = ubi_check_pattern(ubi->peb_buf1, patterns[i],
430 ubi->peb_size);
449 if (err == 0) { 431 if (err == 0) {
450 ubi_err("pattern %x checking failed for PEB %d", 432 ubi_err("pattern %x checking failed for PEB %d",
451 patterns[i], pnum); 433 patterns[i], pnum);
@@ -517,7 +499,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
517 * In this case we probably anyway have garbage in this PEB. 499 * In this case we probably anyway have garbage in this PEB.
518 */ 500 */
519 err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); 501 err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
520 if (err1 == UBI_IO_BAD_HDR_READ || err1 == UBI_IO_BAD_HDR) 502 if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
521 /* 503 /*
522 * The VID header is corrupted, so we can safely erase this 504 * The VID header is corrupted, so we can safely erase this
523 * PEB and not afraid that it will be treated as a valid PEB in 505 * PEB and not afraid that it will be treated as a valid PEB in
@@ -712,47 +694,47 @@ bad:
712 * and corrected by the flash driver; this is harmless but may indicate that 694 * and corrected by the flash driver; this is harmless but may indicate that
713 * this eraseblock may become bad soon (but may be not); 695 * this eraseblock may become bad soon (but may be not);
714 * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error); 696 * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
715 * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; 697 * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
698 * a data integrity error (uncorrectable ECC error in case of NAND);
699 * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty)
716 * o a negative error code in case of failure. 700 * o a negative error code in case of failure.
717 */ 701 */
718int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, 702int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
719 struct ubi_ec_hdr *ec_hdr, int verbose) 703 struct ubi_ec_hdr *ec_hdr, int verbose)
720{ 704{
721 int err, read_err = 0; 705 int err, read_err;
722 uint32_t crc, magic, hdr_crc; 706 uint32_t crc, magic, hdr_crc;
723 707
724 dbg_io("read EC header from PEB %d", pnum); 708 dbg_io("read EC header from PEB %d", pnum);
725 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 709 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
726 710
727 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 711 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
728 if (err) { 712 if (read_err) {
729 if (err != UBI_IO_BITFLIPS && err != -EBADMSG) 713 if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
730 return err; 714 return read_err;
731 715
732 /* 716 /*
733 * We read all the data, but either a correctable bit-flip 717 * We read all the data, but either a correctable bit-flip
734 * occurred, or MTD reported about some data integrity error, 718 * occurred, or MTD reported a data integrity error
735 * like an ECC error in case of NAND. The former is harmless, 719 * (uncorrectable ECC error in case of NAND). The former is
736 * the later may mean that the read data is corrupted. But we 720 * harmless, the later may mean that the read data is
737 * have a CRC check-sum and we will detect this. If the EC 721 * corrupted. But we have a CRC check-sum and we will detect
738 * header is still OK, we just report this as there was a 722 * this. If the EC header is still OK, we just report this as
739 * bit-flip. 723 * there was a bit-flip, to force scrubbing.
740 */ 724 */
741 if (err == -EBADMSG)
742 read_err = UBI_IO_BAD_HDR_READ;
743 } 725 }
744 726
745 magic = be32_to_cpu(ec_hdr->magic); 727 magic = be32_to_cpu(ec_hdr->magic);
746 if (magic != UBI_EC_HDR_MAGIC) { 728 if (magic != UBI_EC_HDR_MAGIC) {
747 if (read_err) 729 if (read_err == -EBADMSG)
748 return read_err; 730 return UBI_IO_BAD_HDR_EBADMSG;
749 731
750 /* 732 /*
751 * The magic field is wrong. Let's check if we have read all 733 * The magic field is wrong. Let's check if we have read all
752 * 0xFF. If yes, this physical eraseblock is assumed to be 734 * 0xFF. If yes, this physical eraseblock is assumed to be
753 * empty. 735 * empty.
754 */ 736 */
755 if (check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 737 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
756 /* The physical eraseblock is supposedly empty */ 738 /* The physical eraseblock is supposedly empty */
757 if (verbose) 739 if (verbose)
758 ubi_warn("no EC header found at PEB %d, " 740 ubi_warn("no EC header found at PEB %d, "
@@ -760,7 +742,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
760 else if (UBI_IO_DEBUG) 742 else if (UBI_IO_DEBUG)
761 dbg_msg("no EC header found at PEB %d, " 743 dbg_msg("no EC header found at PEB %d, "
762 "only 0xFF bytes", pnum); 744 "only 0xFF bytes", pnum);
763 return UBI_IO_PEB_EMPTY; 745 if (!read_err)
746 return UBI_IO_FF;
747 else
748 return UBI_IO_FF_BITFLIPS;
764 } 749 }
765 750
766 /* 751 /*
@@ -788,7 +773,11 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
788 } else if (UBI_IO_DEBUG) 773 } else if (UBI_IO_DEBUG)
789 dbg_msg("bad EC header CRC at PEB %d, calculated " 774 dbg_msg("bad EC header CRC at PEB %d, calculated "
790 "%#08x, read %#08x", pnum, crc, hdr_crc); 775 "%#08x, read %#08x", pnum, crc, hdr_crc);
791 return read_err ?: UBI_IO_BAD_HDR; 776
777 if (!read_err)
778 return UBI_IO_BAD_HDR;
779 else
780 return UBI_IO_BAD_HDR_EBADMSG;
792 } 781 }
793 782
794 /* And of course validate what has just been read from the media */ 783 /* And of course validate what has just been read from the media */
@@ -975,22 +964,16 @@ bad:
975 * 964 *
976 * This function reads the volume identifier header from physical eraseblock 965 * This function reads the volume identifier header from physical eraseblock
977 * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read 966 * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
978 * volume identifier header. The following codes may be returned: 967 * volume identifier header. The error codes are the same as in
968 * 'ubi_io_read_ec_hdr()'.
979 * 969 *
980 * o %0 if the CRC checksum is correct and the header was successfully read; 970 * Note, the implementation of this function is also very similar to
981 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected 971 * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'.
982 * and corrected by the flash driver; this is harmless but may indicate that
983 * this eraseblock may become bad soon;
984 * o %UBI_IO_BAD_HDR if the volume identifier header is corrupted (a CRC
985 * error detected);
986 * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
987 * header there);
988 * o a negative error code in case of failure.
989 */ 972 */
990int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, 973int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
991 struct ubi_vid_hdr *vid_hdr, int verbose) 974 struct ubi_vid_hdr *vid_hdr, int verbose)
992{ 975{
993 int err, read_err = 0; 976 int err, read_err;
994 uint32_t crc, magic, hdr_crc; 977 uint32_t crc, magic, hdr_crc;
995 void *p; 978 void *p;
996 979
@@ -998,48 +981,29 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
998 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 981 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
999 982
1000 p = (char *)vid_hdr - ubi->vid_hdr_shift; 983 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1001 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 984 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1002 ubi->vid_hdr_alsize); 985 ubi->vid_hdr_alsize);
1003 if (err) { 986 if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
1004 if (err != UBI_IO_BITFLIPS && err != -EBADMSG) 987 return read_err;
1005 return err;
1006
1007 /*
1008 * We read all the data, but either a correctable bit-flip
1009 * occurred, or MTD reported about some data integrity error,
1010 * like an ECC error in case of NAND. The former is harmless,
1011 * the later may mean the read data is corrupted. But we have a
1012 * CRC check-sum and we will identify this. If the VID header is
1013 * still OK, we just report this as there was a bit-flip.
1014 */
1015 if (err == -EBADMSG)
1016 read_err = UBI_IO_BAD_HDR_READ;
1017 }
1018 988
1019 magic = be32_to_cpu(vid_hdr->magic); 989 magic = be32_to_cpu(vid_hdr->magic);
1020 if (magic != UBI_VID_HDR_MAGIC) { 990 if (magic != UBI_VID_HDR_MAGIC) {
1021 if (read_err) 991 if (read_err == -EBADMSG)
1022 return read_err; 992 return UBI_IO_BAD_HDR_EBADMSG;
1023 993
1024 /* 994 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1025 * If we have read all 0xFF bytes, the VID header probably does
1026 * not exist and the physical eraseblock is assumed to be free.
1027 */
1028 if (check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1029 /* The physical eraseblock is supposedly free */
1030 if (verbose) 995 if (verbose)
1031 ubi_warn("no VID header found at PEB %d, " 996 ubi_warn("no VID header found at PEB %d, "
1032 "only 0xFF bytes", pnum); 997 "only 0xFF bytes", pnum);
1033 else if (UBI_IO_DEBUG) 998 else if (UBI_IO_DEBUG)
1034 dbg_msg("no VID header found at PEB %d, " 999 dbg_msg("no VID header found at PEB %d, "
1035 "only 0xFF bytes", pnum); 1000 "only 0xFF bytes", pnum);
1036 return UBI_IO_PEB_FREE; 1001 if (!read_err)
1002 return UBI_IO_FF;
1003 else
1004 return UBI_IO_FF_BITFLIPS;
1037 } 1005 }
1038 1006
1039 /*
1040 * This is not a valid VID header, and these are not 0xFF
1041 * bytes. Report that the header is corrupted.
1042 */
1043 if (verbose) { 1007 if (verbose) {
1044 ubi_warn("bad magic number at PEB %d: %08x instead of " 1008 ubi_warn("bad magic number at PEB %d: %08x instead of "
1045 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 1009 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
@@ -1061,20 +1025,18 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1061 } else if (UBI_IO_DEBUG) 1025 } else if (UBI_IO_DEBUG)
1062 dbg_msg("bad CRC at PEB %d, calculated %#08x, " 1026 dbg_msg("bad CRC at PEB %d, calculated %#08x, "
1063 "read %#08x", pnum, crc, hdr_crc); 1027 "read %#08x", pnum, crc, hdr_crc);
1064 return read_err ?: UBI_IO_BAD_HDR; 1028 if (!read_err)
1029 return UBI_IO_BAD_HDR;
1030 else
1031 return UBI_IO_BAD_HDR_EBADMSG;
1065 } 1032 }
1066 1033
1067 /* Validate the VID header that we have just read */
1068 err = validate_vid_hdr(ubi, vid_hdr); 1034 err = validate_vid_hdr(ubi, vid_hdr);
1069 if (err) { 1035 if (err) {
1070 ubi_err("validation failed for PEB %d", pnum); 1036 ubi_err("validation failed for PEB %d", pnum);
1071 return -EINVAL; 1037 return -EINVAL;
1072 } 1038 }
1073 1039
1074 /*
1075 * If there was a read error (%-EBADMSG), but the header CRC is still
1076 * OK, report about a bit-flip to force scrubbing on this PEB.
1077 */
1078 return read_err ? UBI_IO_BITFLIPS : 0; 1040 return read_err ? UBI_IO_BITFLIPS : 0;
1079} 1041}
1080 1042
@@ -1383,7 +1345,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1383 goto error; 1345 goto error;
1384 } 1346 }
1385 1347
1386 err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); 1348 err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len);
1387 if (err == 0) { 1349 if (err == 0) {
1388 ubi_err("flash region at PEB %d:%d, length %d does not " 1350 ubi_err("flash region at PEB %d:%d, length %d does not "
1389 "contain all 0xFF bytes", pnum, offset, len); 1351 "contain all 0xFF bytes", pnum, offset, len);
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 22ad31402945..ff2a65c37f69 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -103,3 +103,22 @@ void ubi_calculate_reserved(struct ubi_device *ubi)
103 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) 103 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
104 ubi->beb_rsvd_level = MIN_RESEVED_PEBS; 104 ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
105} 105}
106
107/**
108 * ubi_check_pattern - check if buffer contains only a certain byte pattern.
109 * @buf: buffer to check
110 * @patt: the pattern to check
111 * @size: buffer size in bytes
112 *
113 * This function returns %1 if there are only @patt bytes in @buf, and %0 if
114 * something else was also found.
115 */
116int ubi_check_pattern(const void *buf, uint8_t patt, int size)
117{
118 int i;
119
120 for (i = 0; i < size; i++)
121 if (((const uint8_t *)buf)[i] != patt)
122 return 0;
123 return 1;
124}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 69b52e9c9489..3c631863bf40 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -29,7 +29,7 @@
29 * objects which are kept in volume RB-tree with root at the @volumes field. 29 * objects which are kept in volume RB-tree with root at the @volumes field.
30 * The RB-tree is indexed by the volume ID. 30 * The RB-tree is indexed by the volume ID.
31 * 31 *
32 * Found logical eraseblocks are represented by &struct ubi_scan_leb objects. 32 * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
33 * These objects are kept in per-volume RB-trees with the root at the 33 * These objects are kept in per-volume RB-trees with the root at the
34 * corresponding &struct ubi_scan_volume object. To put it differently, we keep 34 * corresponding &struct ubi_scan_volume object. To put it differently, we keep
35 * an RB-tree of per-volume objects and each of these objects is the root of 35 * an RB-tree of per-volume objects and each of these objects is the root of
@@ -38,6 +38,33 @@
38 * Corrupted physical eraseblocks are put to the @corr list, free physical 38 * Corrupted physical eraseblocks are put to the @corr list, free physical
39 * eraseblocks are put to the @free list and the physical eraseblock to be 39 * eraseblocks are put to the @free list and the physical eraseblock to be
40 * erased are put to the @erase list. 40 * erased are put to the @erase list.
41 *
42 * UBI tries to distinguish between 2 types of corruptions.
43 * 1. Corruptions caused by power cuts. These are harmless and expected
44 * corruptions and UBI tries to handle them gracefully, without printing too
45 * many warnings and error messages. The idea is that we do not lose
46 * important data in these case - we may lose only the data which was being
47 * written to the media just before the power cut happened, and the upper
48 * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts
49 * these PEBs to the head of the @erase list and they are scheduled for
50 * erasure.
51 *
52 * 2. Unexpected corruptions which are not caused by power cuts. During
53 * scanning, such PEBs are put to the @corr list and UBI preserves them.
54 * Obviously, this lessens the amount of available PEBs, and if at some
55 * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly
56 * informs about such PEBs every time the MTD device is attached.
57 *
58 * However, it is difficult to reliably distinguish between these types of
59 * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID
60 * header is corrupted and the data area does not contain all 0xFFs, and there
61 * were not bit-flips or integrity errors while reading the data area. Otherwise
62 * UBI assumes (1.). The assumptions are:
63 * o if the data area contains only 0xFFs, there is no data, and it is safe
64 * to just erase this PEB.
65 * o if the data area has bit-flips and data integrity errors (ECC errors on
66 * NAND), it is probably a PEB which was being erased when power cut
67 * happened.
41 */ 68 */
42 69
43#include <linux/err.h> 70#include <linux/err.h>
@@ -62,26 +89,26 @@ static struct ubi_vid_hdr *vidh;
62 * @si: scanning information 89 * @si: scanning information
63 * @pnum: physical eraseblock number to add 90 * @pnum: physical eraseblock number to add
64 * @ec: erase counter of the physical eraseblock 91 * @ec: erase counter of the physical eraseblock
92 * @to_head: if not zero, add to the head of the list
65 * @list: the list to add to 93 * @list: the list to add to
66 * 94 *
67 * This function adds physical eraseblock @pnum to free, erase, corrupted or 95 * This function adds physical eraseblock @pnum to free, erase, or alien lists.
68 * alien lists. Returns zero in case of success and a negative error code in 96 * If @to_head is not zero, PEB will be added to the head of the list, which
69 * case of failure. 97 * basically means it will be processed first later. E.g., we add corrupted
98 * PEBs (corrupted due to power cuts) to the head of the erase list to make
99 * sure we erase them first and get rid of corruptions ASAP. This function
100 * returns zero in case of success and a negative error code in case of
101 * failure.
70 */ 102 */
71static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, 103static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
72 struct list_head *list) 104 struct list_head *list)
73{ 105{
74 struct ubi_scan_leb *seb; 106 struct ubi_scan_leb *seb;
75 107
76 if (list == &si->free) { 108 if (list == &si->free) {
77 dbg_bld("add to free: PEB %d, EC %d", pnum, ec); 109 dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
78 si->free_peb_count += 1;
79 } else if (list == &si->erase) { 110 } else if (list == &si->erase) {
80 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); 111 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
81 si->erase_peb_count += 1;
82 } else if (list == &si->corr) {
83 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
84 si->corr_peb_count += 1;
85 } else if (list == &si->alien) { 112 } else if (list == &si->alien) {
86 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); 113 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
87 si->alien_peb_count += 1; 114 si->alien_peb_count += 1;
@@ -94,7 +121,37 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
94 121
95 seb->pnum = pnum; 122 seb->pnum = pnum;
96 seb->ec = ec; 123 seb->ec = ec;
97 list_add_tail(&seb->u.list, list); 124 if (to_head)
125 list_add(&seb->u.list, list);
126 else
127 list_add_tail(&seb->u.list, list);
128 return 0;
129}
130
131/**
132 * add_corrupted - add a corrupted physical eraseblock.
133 * @si: scanning information
134 * @pnum: physical eraseblock number to add
135 * @ec: erase counter of the physical eraseblock
136 *
137 * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
138 * The corruption was presumably not caused by a power cut. Returns zero in
139 * case of success and a negative error code in case of failure.
140 */
141static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
142{
143 struct ubi_scan_leb *seb;
144
145 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
146
147 seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
148 if (!seb)
149 return -ENOMEM;
150
151 si->corr_peb_count += 1;
152 seb->pnum = pnum;
153 seb->ec = ec;
154 list_add(&seb->u.list, &si->corr);
98 return 0; 155 return 0;
99} 156}
100 157
@@ -258,8 +315,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
258 * created before sequence numbers support has been added. At 315 * created before sequence numbers support has been added. At
259 * that times we used 32-bit LEB versions stored in logical 316 * that times we used 32-bit LEB versions stored in logical
260 * eraseblocks. That was before UBI got into mainline. We do not 317 * eraseblocks. That was before UBI got into mainline. We do not
261 * support these images anymore. Well, those images will work 318 * support these images anymore. Well, those images still work,
262 * still work, but only if no unclean reboots happened. 319 * but only if no unclean reboots happened.
263 */ 320 */
264 ubi_err("unsupported on-flash UBI format\n"); 321 ubi_err("unsupported on-flash UBI format\n");
265 return -EINVAL; 322 return -EINVAL;
@@ -285,19 +342,25 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
285 return 1; 342 return 1;
286 } 343 }
287 } else { 344 } else {
288 pnum = seb->pnum; 345 if (!seb->copy_flag) {
346 /* It is not a copy, so it is newer */
347 dbg_bld("first PEB %d is newer, copy_flag is unset",
348 pnum);
349 return bitflips << 1;
350 }
289 351
290 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 352 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
291 if (!vh) 353 if (!vh)
292 return -ENOMEM; 354 return -ENOMEM;
293 355
356 pnum = seb->pnum;
294 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 357 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
295 if (err) { 358 if (err) {
296 if (err == UBI_IO_BITFLIPS) 359 if (err == UBI_IO_BITFLIPS)
297 bitflips = 1; 360 bitflips = 1;
298 else { 361 else {
299 dbg_err("VID of PEB %d header is bad, but it " 362 dbg_err("VID of PEB %d header is bad, but it "
300 "was OK earlier", pnum); 363 "was OK earlier, err %d", pnum, err);
301 if (err > 0) 364 if (err > 0)
302 err = -EIO; 365 err = -EIO;
303 366
@@ -305,14 +368,6 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
305 } 368 }
306 } 369 }
307 370
308 if (!vh->copy_flag) {
309 /* It is not a copy, so it is newer */
310 dbg_bld("first PEB %d is newer, copy_flag is unset",
311 pnum);
312 err = bitflips << 1;
313 goto out_free_vidh;
314 }
315
316 vid_hdr = vh; 371 vid_hdr = vh;
317 } 372 }
318 373
@@ -463,18 +518,15 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
463 if (err) 518 if (err)
464 return err; 519 return err;
465 520
466 if (cmp_res & 4) 521 err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
467 err = add_to_list(si, seb->pnum, seb->ec, 522 &si->erase);
468 &si->corr);
469 else
470 err = add_to_list(si, seb->pnum, seb->ec,
471 &si->erase);
472 if (err) 523 if (err)
473 return err; 524 return err;
474 525
475 seb->ec = ec; 526 seb->ec = ec;
476 seb->pnum = pnum; 527 seb->pnum = pnum;
477 seb->scrub = ((cmp_res & 2) || bitflips); 528 seb->scrub = ((cmp_res & 2) || bitflips);
529 seb->copy_flag = vid_hdr->copy_flag;
478 seb->sqnum = sqnum; 530 seb->sqnum = sqnum;
479 531
480 if (sv->highest_lnum == lnum) 532 if (sv->highest_lnum == lnum)
@@ -487,10 +539,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
487 * This logical eraseblock is older than the one found 539 * This logical eraseblock is older than the one found
488 * previously. 540 * previously.
489 */ 541 */
490 if (cmp_res & 4) 542 return add_to_list(si, pnum, ec, cmp_res & 4,
491 return add_to_list(si, pnum, ec, &si->corr); 543 &si->erase);
492 else
493 return add_to_list(si, pnum, ec, &si->erase);
494 } 544 }
495 } 545 }
496 546
@@ -510,8 +560,9 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
510 seb->ec = ec; 560 seb->ec = ec;
511 seb->pnum = pnum; 561 seb->pnum = pnum;
512 seb->lnum = lnum; 562 seb->lnum = lnum;
513 seb->sqnum = sqnum;
514 seb->scrub = bitflips; 563 seb->scrub = bitflips;
564 seb->copy_flag = vid_hdr->copy_flag;
565 seb->sqnum = sqnum;
515 566
516 if (sv->highest_lnum <= lnum) { 567 if (sv->highest_lnum <= lnum) {
517 sv->highest_lnum = lnum; 568 sv->highest_lnum = lnum;
@@ -521,7 +572,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
521 sv->leb_count += 1; 572 sv->leb_count += 1;
522 rb_link_node(&seb->u.rb, parent, p); 573 rb_link_node(&seb->u.rb, parent, p);
523 rb_insert_color(&seb->u.rb, &sv->root); 574 rb_insert_color(&seb->u.rb, &sv->root);
524 si->used_peb_count += 1;
525 return 0; 575 return 0;
526} 576}
527 577
@@ -668,8 +718,8 @@ out_free:
668struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, 718struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
669 struct ubi_scan_info *si) 719 struct ubi_scan_info *si)
670{ 720{
671 int err = 0, i; 721 int err = 0;
672 struct ubi_scan_leb *seb; 722 struct ubi_scan_leb *seb, *tmp_seb;
673 723
674 if (!list_empty(&si->free)) { 724 if (!list_empty(&si->free)) {
675 seb = list_entry(si->free.next, struct ubi_scan_leb, u.list); 725 seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
@@ -678,38 +728,86 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
678 return seb; 728 return seb;
679 } 729 }
680 730
681 for (i = 0; i < 2; i++) { 731 /*
682 struct list_head *head; 732 * We try to erase the first physical eraseblock from the erase list
683 struct ubi_scan_leb *tmp_seb; 733 * and pick it if we succeed, or try to erase the next one if not. And
734 * so forth. We don't want to take care about bad eraseblocks here -
735 * they'll be handled later.
736 */
737 list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
738 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
739 seb->ec = si->mean_ec;
684 740
685 if (i == 0) 741 err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
686 head = &si->erase; 742 if (err)
687 else 743 continue;
688 head = &si->corr;
689 744
745 seb->ec += 1;
746 list_del(&seb->u.list);
747 dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
748 return seb;
749 }
750
751 ubi_err("no free eraseblocks");
752 return ERR_PTR(-ENOSPC);
753}
754
755/**
756 * check_corruption - check the data area of PEB.
757 * @ubi: UBI device description object
758 * @vid_hrd: the (corrupted) VID header of this PEB
759 * @pnum: the physical eraseblock number to check
760 *
761 * This is a helper function which is used to distinguish between VID header
762 * corruptions caused by power cuts and other reasons. If the PEB contains only
763 * 0xFF bytes in the data area, the VID header is most probably corrupted
764 * because of a power cut (%0 is returned in this case). Otherwise, it was
765 * probably corrupted for some other reasons (%1 is returned in this case). A
766 * negative error code is returned if a read error occurred.
767 *
768 * If the corruption reason was a power cut, UBI can safely erase this PEB.
769 * Otherwise, it should preserve it to avoid possibly destroying important
770 * information.
771 */
772static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
773 int pnum)
774{
775 int err;
776
777 mutex_lock(&ubi->buf_mutex);
778 memset(ubi->peb_buf1, 0x00, ubi->leb_size);
779
780 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
781 ubi->leb_size);
782 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
690 /* 783 /*
691 * We try to erase the first physical eraseblock from the @head 784 * Bit-flips or integrity errors while reading the data area.
692 * list and pick it if we succeed, or try to erase the 785 * It is difficult to say for sure what type of corruption is
693 * next one if not. And so forth. We don't want to take care 786 * this, but presumably a power cut happened while this PEB was
694 * about bad eraseblocks here - they'll be handled later. 787 * erased, so it became unstable and corrupted, and should be
788 * erased.
695 */ 789 */
696 list_for_each_entry_safe(seb, tmp_seb, head, u.list) { 790 return 0;
697 if (seb->ec == UBI_SCAN_UNKNOWN_EC) 791 }
698 seb->ec = si->mean_ec;
699 792
700 err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); 793 if (err)
701 if (err) 794 return err;
702 continue;
703 795
704 seb->ec += 1; 796 if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) {
705 list_del(&seb->u.list); 797 mutex_unlock(&ubi->buf_mutex);
706 dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); 798 return 0;
707 return seb;
708 }
709 } 799 }
710 800
711 ubi_err("no eraseblocks found"); 801 ubi_err("PEB %d contains corrupted VID header, and the data does not "
712 return ERR_PTR(-ENOSPC); 802 "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
803 "header corruption which requires manual inspection", pnum);
804 ubi_dbg_dump_vid_hdr(vid_hdr);
805 dbg_msg("hexdump of PEB %d offset %d, length %d",
806 pnum, ubi->leb_start, ubi->leb_size);
807 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
808 ubi->peb_buf1, ubi->leb_size, 1);
809 mutex_unlock(&ubi->buf_mutex);
810 return 1;
713} 811}
714 812
715/** 813/**
@@ -725,7 +823,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
725 int pnum) 823 int pnum)
726{ 824{
727 long long uninitialized_var(ec); 825 long long uninitialized_var(ec);
728 int err, bitflips = 0, vol_id, ec_corr = 0; 826 int err, bitflips = 0, vol_id, ec_err = 0;
729 827
730 dbg_bld("scan PEB %d", pnum); 828 dbg_bld("scan PEB %d", pnum);
731 829
@@ -746,22 +844,37 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
746 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 844 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
747 if (err < 0) 845 if (err < 0)
748 return err; 846 return err;
749 else if (err == UBI_IO_BITFLIPS) 847 switch (err) {
848 case 0:
849 break;
850 case UBI_IO_BITFLIPS:
750 bitflips = 1; 851 bitflips = 1;
751 else if (err == UBI_IO_PEB_EMPTY) 852 break;
752 return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase); 853 case UBI_IO_FF:
753 else if (err == UBI_IO_BAD_HDR_READ || err == UBI_IO_BAD_HDR) { 854 si->empty_peb_count += 1;
855 return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
856 &si->erase);
857 case UBI_IO_FF_BITFLIPS:
858 si->empty_peb_count += 1;
859 return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
860 &si->erase);
861 case UBI_IO_BAD_HDR_EBADMSG:
862 case UBI_IO_BAD_HDR:
754 /* 863 /*
755 * We have to also look at the VID header, possibly it is not 864 * We have to also look at the VID header, possibly it is not
756 * corrupted. Set %bitflips flag in order to make this PEB be 865 * corrupted. Set %bitflips flag in order to make this PEB be
757 * moved and EC be re-created. 866 * moved and EC be re-created.
758 */ 867 */
759 ec_corr = err; 868 ec_err = err;
760 ec = UBI_SCAN_UNKNOWN_EC; 869 ec = UBI_SCAN_UNKNOWN_EC;
761 bitflips = 1; 870 bitflips = 1;
871 break;
872 default:
873 ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
874 return -EINVAL;
762 } 875 }
763 876
764 if (!ec_corr) { 877 if (!ec_err) {
765 int image_seq; 878 int image_seq;
766 879
767 /* Make sure UBI version is OK */ 880 /* Make sure UBI version is OK */
@@ -814,24 +927,67 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
814 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); 927 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
815 if (err < 0) 928 if (err < 0)
816 return err; 929 return err;
817 else if (err == UBI_IO_BITFLIPS) 930 switch (err) {
931 case 0:
932 break;
933 case UBI_IO_BITFLIPS:
818 bitflips = 1; 934 bitflips = 1;
819 else if (err == UBI_IO_BAD_HDR_READ || err == UBI_IO_BAD_HDR || 935 break;
820 (err == UBI_IO_PEB_FREE && ec_corr)) { 936 case UBI_IO_BAD_HDR_EBADMSG:
821 /* VID header is corrupted */ 937 if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
822 if (err == UBI_IO_BAD_HDR_READ || 938 /*
823 ec_corr == UBI_IO_BAD_HDR_READ) 939 * Both EC and VID headers are corrupted and were read
824 si->read_err_count += 1; 940 * with data integrity error, probably this is a bad
825 err = add_to_list(si, pnum, ec, &si->corr); 941 * PEB, bit it is not marked as bad yet. This may also
942 * be a result of power cut during erasure.
943 */
944 si->maybe_bad_peb_count += 1;
945 case UBI_IO_BAD_HDR:
946 if (ec_err)
947 /*
948 * Both headers are corrupted. There is a possibility
949 * that this a valid UBI PEB which has corresponding
950 * LEB, but the headers are corrupted. However, it is
951 * impossible to distinguish it from a PEB which just
952 * contains garbage because of a power cut during erase
953 * operation. So we just schedule this PEB for erasure.
954 */
955 err = 0;
956 else
957 /*
958 * The EC was OK, but the VID header is corrupted. We
959 * have to check what is in the data area.
960 */
961 err = check_corruption(ubi, vidh, pnum);
962
963 if (err < 0)
964 return err;
965 else if (!err)
966 /* This corruption is caused by a power cut */
967 err = add_to_list(si, pnum, ec, 1, &si->erase);
968 else
969 /* This is an unexpected corruption */
970 err = add_corrupted(si, pnum, ec);
826 if (err) 971 if (err)
827 return err; 972 return err;
828 goto adjust_mean_ec; 973 goto adjust_mean_ec;
829 } else if (err == UBI_IO_PEB_FREE) { 974 case UBI_IO_FF_BITFLIPS:
830 /* No VID header - the physical eraseblock is free */ 975 err = add_to_list(si, pnum, ec, 1, &si->erase);
831 err = add_to_list(si, pnum, ec, &si->free);
832 if (err) 976 if (err)
833 return err; 977 return err;
834 goto adjust_mean_ec; 978 goto adjust_mean_ec;
979 case UBI_IO_FF:
980 if (ec_err)
981 err = add_to_list(si, pnum, ec, 1, &si->erase);
982 else
983 err = add_to_list(si, pnum, ec, 0, &si->free);
984 if (err)
985 return err;
986 goto adjust_mean_ec;
987 default:
988 ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
989 err);
990 return -EINVAL;
835 } 991 }
836 992
837 vol_id = be32_to_cpu(vidh->vol_id); 993 vol_id = be32_to_cpu(vidh->vol_id);
@@ -843,7 +999,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
843 case UBI_COMPAT_DELETE: 999 case UBI_COMPAT_DELETE:
844 ubi_msg("\"delete\" compatible internal volume %d:%d" 1000 ubi_msg("\"delete\" compatible internal volume %d:%d"
845 " found, will remove it", vol_id, lnum); 1001 " found, will remove it", vol_id, lnum);
846 err = add_to_list(si, pnum, ec, &si->erase); 1002 err = add_to_list(si, pnum, ec, 1, &si->erase);
847 if (err) 1003 if (err)
848 return err; 1004 return err;
849 return 0; 1005 return 0;
@@ -858,7 +1014,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
858 case UBI_COMPAT_PRESERVE: 1014 case UBI_COMPAT_PRESERVE:
859 ubi_msg("\"preserve\" compatible internal volume %d:%d" 1015 ubi_msg("\"preserve\" compatible internal volume %d:%d"
860 " found", vol_id, lnum); 1016 " found", vol_id, lnum);
861 err = add_to_list(si, pnum, ec, &si->alien); 1017 err = add_to_list(si, pnum, ec, 0, &si->alien);
862 if (err) 1018 if (err)
863 return err; 1019 return err;
864 return 0; 1020 return 0;
@@ -870,7 +1026,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
870 } 1026 }
871 } 1027 }
872 1028
873 if (ec_corr) 1029 if (ec_err)
874 ubi_warn("valid VID header but corrupted EC header at PEB %d", 1030 ubi_warn("valid VID header but corrupted EC header at PEB %d",
875 pnum); 1031 pnum);
876 err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips); 1032 err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
@@ -878,7 +1034,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
878 return err; 1034 return err;
879 1035
880adjust_mean_ec: 1036adjust_mean_ec:
881 if (!ec_corr) { 1037 if (!ec_err) {
882 si->ec_sum += ec; 1038 si->ec_sum += ec;
883 si->ec_count += 1; 1039 si->ec_count += 1;
884 if (ec > si->max_ec) 1040 if (ec > si->max_ec)
@@ -904,19 +1060,20 @@ adjust_mean_ec:
904static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si) 1060static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
905{ 1061{
906 struct ubi_scan_leb *seb; 1062 struct ubi_scan_leb *seb;
907 int max_corr; 1063 int max_corr, peb_count;
908 1064
909 max_corr = ubi->peb_count - si->bad_peb_count - si->alien_peb_count; 1065 peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
910 max_corr = max_corr / 20 ?: 8; 1066 max_corr = peb_count / 20 ?: 8;
911 1067
912 /* 1068 /*
913 * Few corrupted PEBs are not a problem and may be just a result of 1069 * Few corrupted PEBs is not a problem and may be just a result of
914 * unclean reboots. However, many of them may indicate some problems 1070 * unclean reboots. However, many of them may indicate some problems
915 * with the flash HW or driver. 1071 * with the flash HW or driver.
916 */ 1072 */
917 if (si->corr_peb_count >= 8) { 1073 if (si->corr_peb_count) {
918 ubi_warn("%d PEBs are corrupted", si->corr_peb_count); 1074 ubi_err("%d PEBs are corrupted and preserved",
919 printk(KERN_WARNING "corrupted PEBs are:"); 1075 si->corr_peb_count);
1076 printk(KERN_ERR "Corrupted PEBs are:");
920 list_for_each_entry(seb, &si->corr, u.list) 1077 list_for_each_entry(seb, &si->corr, u.list)
921 printk(KERN_CONT " %d", seb->pnum); 1078 printk(KERN_CONT " %d", seb->pnum);
922 printk(KERN_CONT "\n"); 1079 printk(KERN_CONT "\n");
@@ -931,41 +1088,35 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
931 } 1088 }
932 } 1089 }
933 1090
934 if (si->free_peb_count + si->used_peb_count + 1091 if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
935 si->alien_peb_count == 0) { 1092 /*
936 /* No UBI-formatted eraseblocks were found */ 1093 * All PEBs are empty, or almost all - a couple PEBs look like
937 if (si->corr_peb_count == si->read_err_count && 1094 * they may be bad PEBs which were not marked as bad yet.
938 si->corr_peb_count < 8) { 1095 *
939 /* No or just few corrupted PEBs, and all of them had a 1096 * This piece of code basically tries to distinguish between
940 * read error. We assume that those are bad PEBs, which 1097 * the following situations:
941 * were just not marked as bad so far. 1098 *
942 * 1099 * 1. Flash is empty, but there are few bad PEBs, which are not
943 * This piece of code basically tries to distinguish 1100 * marked as bad so far, and which were read with error. We
944 * between the following 2 situations: 1101 * want to go ahead and format this flash. While formatting,
945 * 1102 * the faulty PEBs will probably be marked as bad.
946 * 1. Flash is empty, but there are few bad PEBs, which 1103 *
947 * are not marked as bad so far, and which were read 1104 * 2. Flash contains non-UBI data and we do not want to format
948 * with error. We want to go ahead and format this 1105 * it and destroy possibly important information.
949 * flash. While formating, the faulty PEBs will 1106 */
950 * probably be marked as bad. 1107 if (si->maybe_bad_peb_count <= 2) {
951 *
952 * 2. Flash probably contains non-UBI data and we do
953 * not want to format it and destroy possibly needed
954 * data (e.g., consider the case when the bootloader
955 * MTD partition was accidentally fed to UBI).
956 */
957 si->is_empty = 1; 1108 si->is_empty = 1;
958 ubi_msg("empty MTD device detected"); 1109 ubi_msg("empty MTD device detected");
959 get_random_bytes(&ubi->image_seq, sizeof(ubi->image_seq)); 1110 get_random_bytes(&ubi->image_seq,
1111 sizeof(ubi->image_seq));
960 } else { 1112 } else {
961 ubi_err("MTD device possibly contains non-UBI data, " 1113 ubi_err("MTD device is not UBI-formatted and possibly "
962 "refusing it"); 1114 "contains non-UBI data - refusing it");
963 return -EINVAL; 1115 return -EINVAL;
964 } 1116 }
1117
965 } 1118 }
966 1119
967 if (si->corr_peb_count > 0)
968 ubi_msg("corrupted PEBs will be formatted");
969 return 0; 1120 return 0;
970} 1121}
971 1122
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 2576a8d1532b..a3264f0bef2b 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -30,6 +30,7 @@
30 * @pnum: physical eraseblock number 30 * @pnum: physical eraseblock number
31 * @lnum: logical eraseblock number 31 * @lnum: logical eraseblock number
32 * @scrub: if this physical eraseblock needs scrubbing 32 * @scrub: if this physical eraseblock needs scrubbing
33 * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
33 * @sqnum: sequence number 34 * @sqnum: sequence number
34 * @u: unions RB-tree or @list links 35 * @u: unions RB-tree or @list links
35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects 36 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
@@ -42,7 +43,8 @@ struct ubi_scan_leb {
42 int ec; 43 int ec;
43 int pnum; 44 int pnum;
44 int lnum; 45 int lnum;
45 int scrub; 46 unsigned int scrub:1;
47 unsigned int copy_flag:1;
46 unsigned long long sqnum; 48 unsigned long long sqnum;
47 union { 49 union {
48 struct rb_node rb; 50 struct rb_node rb;
@@ -91,14 +93,13 @@ struct ubi_scan_volume {
91 * @erase: list of physical eraseblocks which have to be erased 93 * @erase: list of physical eraseblocks which have to be erased
92 * @alien: list of physical eraseblocks which should not be used by UBI (e.g., 94 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
93 * those belonging to "preserve"-compatible internal volumes) 95 * those belonging to "preserve"-compatible internal volumes)
94 * @used_peb_count: count of used PEBs
95 * @corr_peb_count: count of PEBs in the @corr list 96 * @corr_peb_count: count of PEBs in the @corr list
96 * @read_err_count: count of PEBs read with error (%UBI_IO_BAD_HDR_READ was 97 * @empty_peb_count: count of PEBs which are presumably empty (contain only
97 * returned) 98 * 0xFF bytes)
98 * @free_peb_count: count of PEBs in the @free list
99 * @erase_peb_count: count of PEBs in the @erase list
100 * @alien_peb_count: count of PEBs in the @alien list 99 * @alien_peb_count: count of PEBs in the @alien list
101 * @bad_peb_count: count of bad physical eraseblocks 100 * @bad_peb_count: count of bad physical eraseblocks
101 * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
102 * as bad yet, but which look like bad
102 * @vols_found: number of volumes found during scanning 103 * @vols_found: number of volumes found during scanning
103 * @highest_vol_id: highest volume ID 104 * @highest_vol_id: highest volume ID
104 * @is_empty: flag indicating whether the MTD device is empty or not 105 * @is_empty: flag indicating whether the MTD device is empty or not
@@ -119,13 +120,11 @@ struct ubi_scan_info {
119 struct list_head free; 120 struct list_head free;
120 struct list_head erase; 121 struct list_head erase;
121 struct list_head alien; 122 struct list_head alien;
122 int used_peb_count;
123 int corr_peb_count; 123 int corr_peb_count;
124 int read_err_count; 124 int empty_peb_count;
125 int free_peb_count;
126 int erase_peb_count;
127 int alien_peb_count; 125 int alien_peb_count;
128 int bad_peb_count; 126 int bad_peb_count;
127 int maybe_bad_peb_count;
129 int vols_found; 128 int vols_found;
130 int highest_vol_id; 129 int highest_vol_id;
131 int is_empty; 130 int is_empty;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0359e0cce482..0b0149c41fe3 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -85,21 +85,26 @@
85/* 85/*
86 * Error codes returned by the I/O sub-system. 86 * Error codes returned by the I/O sub-system.
87 * 87 *
88 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 88 * UBI_IO_FF: the read region of flash contains only 0xFFs
89 * %0xFF bytes 89 * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also also there was a data
90 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a 90 * integrity error reported by the MTD driver
91 * valid erase counter header, and the rest are %0xFF bytes 91 * (uncorrectable ECC error in case of NAND)
92 * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) 92 * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
93 * UBI_IO_BAD_HDR_READ: the same as %UBI_IO_BAD_HDR, but also there was a read 93 * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
94 * error reported by the flash driver 94 * data integrity error reported by the MTD driver
95 * (uncorrectable ECC error in case of NAND)
95 * UBI_IO_BITFLIPS: bit-flips were detected and corrected 96 * UBI_IO_BITFLIPS: bit-flips were detected and corrected
97 *
98 * Note, it is probably better to have bit-flip and ebadmsg as flags which can
99 * be or'ed with other error code. But this is a big change because there are
100 * may callers, so it does not worth the risk of introducing a bug
96 */ 101 */
97enum { 102enum {
98 UBI_IO_PEB_EMPTY = 1, 103 UBI_IO_FF = 1,
99 UBI_IO_PEB_FREE, 104 UBI_IO_FF_BITFLIPS,
100 UBI_IO_BAD_HDR, 105 UBI_IO_BAD_HDR,
101 UBI_IO_BAD_HDR_READ, 106 UBI_IO_BAD_HDR_EBADMSG,
102 UBI_IO_BITFLIPS 107 UBI_IO_BITFLIPS,
103}; 108};
104 109
105/* 110/*
@@ -356,6 +361,8 @@ struct ubi_wl_entry;
356 * @peb_size: physical eraseblock size 361 * @peb_size: physical eraseblock size
357 * @bad_peb_count: count of bad physical eraseblocks 362 * @bad_peb_count: count of bad physical eraseblocks
358 * @good_peb_count: count of good physical eraseblocks 363 * @good_peb_count: count of good physical eraseblocks
364 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
365 * used by UBI)
359 * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous 366 * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
360 * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks 367 * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
361 * @min_io_size: minimal input/output unit size of the underlying MTD device 368 * @min_io_size: minimal input/output unit size of the underlying MTD device
@@ -442,6 +449,7 @@ struct ubi_device {
442 int peb_size; 449 int peb_size;
443 int bad_peb_count; 450 int bad_peb_count;
444 int good_peb_count; 451 int good_peb_count;
452 int corr_peb_count;
445 int erroneous_peb_count; 453 int erroneous_peb_count;
446 int max_erroneous; 454 int max_erroneous;
447 int min_io_size; 455 int min_io_size;
@@ -506,6 +514,7 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
506 int length); 514 int length);
507int ubi_check_volume(struct ubi_device *ubi, int vol_id); 515int ubi_check_volume(struct ubi_device *ubi, int vol_id);
508void ubi_calculate_reserved(struct ubi_device *ubi); 516void ubi_calculate_reserved(struct ubi_device *ubi);
517int ubi_check_pattern(const void *buf, uint8_t patt, int size);
509 518
510/* eba.c */ 519/* eba.c */
511int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 520int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index e42afab9a9fe..c47620dfc722 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -261,6 +261,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
261 /* Reserve physical eraseblocks */ 261 /* Reserve physical eraseblocks */
262 if (vol->reserved_pebs > ubi->avail_pebs) { 262 if (vol->reserved_pebs > ubi->avail_pebs) {
263 dbg_err("not enough PEBs, only %d available", ubi->avail_pebs); 263 dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
264 if (ubi->corr_peb_count)
265 dbg_err("%d PEBs are corrupted and not used",
266 ubi->corr_peb_count);
264 err = -ENOSPC; 267 err = -ENOSPC;
265 goto out_unlock; 268 goto out_unlock;
266 } 269 }
@@ -527,6 +530,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
527 if (pebs > ubi->avail_pebs) { 530 if (pebs > ubi->avail_pebs) {
528 dbg_err("not enough PEBs: requested %d, available %d", 531 dbg_err("not enough PEBs: requested %d, available %d",
529 pebs, ubi->avail_pebs); 532 pebs, ubi->avail_pebs);
533 if (ubi->corr_peb_count)
534 dbg_err("%d PEBs are corrupted and not used",
535 ubi->corr_peb_count);
530 spin_unlock(&ubi->volumes_lock); 536 spin_unlock(&ubi->volumes_lock);
531 err = -ENOSPC; 537 err = -ENOSPC;
532 goto out_free; 538 goto out_free;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 14c10bed94ee..fcdb7f65fe0b 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -366,7 +366,7 @@ write_error:
366 * Probably this physical eraseblock went bad, try to pick 366 * Probably this physical eraseblock went bad, try to pick
367 * another one. 367 * another one.
368 */ 368 */
369 list_add_tail(&new_seb->u.list, &si->corr); 369 list_add(&new_seb->u.list, &si->erase);
370 goto retry; 370 goto retry;
371 } 371 }
372 kfree(new_seb); 372 kfree(new_seb);
@@ -662,9 +662,13 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
662 ubi->vol_count += 1; 662 ubi->vol_count += 1;
663 vol->ubi = ubi; 663 vol->ubi = ubi;
664 664
665 if (reserved_pebs > ubi->avail_pebs) 665 if (reserved_pebs > ubi->avail_pebs) {
666 ubi_err("not enough PEBs, required %d, available %d", 666 ubi_err("not enough PEBs, required %d, available %d",
667 reserved_pebs, ubi->avail_pebs); 667 reserved_pebs, ubi->avail_pebs);
668 if (ubi->corr_peb_count)
669 ubi_err("%d PEBs are corrupted and not used",
670 ubi->corr_peb_count);
671 }
668 ubi->rsvd_pebs += reserved_pebs; 672 ubi->rsvd_pebs += reserved_pebs;
669 ubi->avail_pebs -= reserved_pebs; 673 ubi->avail_pebs -= reserved_pebs;
670 674
@@ -837,7 +841,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
837 return PTR_ERR(ubi->vtbl); 841 return PTR_ERR(ubi->vtbl);
838 } 842 }
839 843
840 ubi->avail_pebs = ubi->good_peb_count; 844 ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
841 845
842 /* 846 /*
843 * The layout volume is OK, initialize the corresponding in-RAM data 847 * The layout volume is OK, initialize the corresponding in-RAM data
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 97a435672eaf..655bbbe415d9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -745,7 +745,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
745 745
746 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); 746 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
747 if (err && err != UBI_IO_BITFLIPS) { 747 if (err && err != UBI_IO_BITFLIPS) {
748 if (err == UBI_IO_PEB_FREE) { 748 if (err == UBI_IO_FF) {
749 /* 749 /*
750 * We are trying to move PEB without a VID header. UBI 750 * We are trying to move PEB without a VID header. UBI
751 * always write VID headers shortly after the PEB was 751 * always write VID headers shortly after the PEB was
@@ -759,6 +759,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
759 dbg_wl("PEB %d has no VID header", e1->pnum); 759 dbg_wl("PEB %d has no VID header", e1->pnum);
760 protect = 1; 760 protect = 1;
761 goto out_not_moved; 761 goto out_not_moved;
762 } else if (err == UBI_IO_FF_BITFLIPS) {
763 /*
764 * The same situation as %UBI_IO_FF, but bit-flips were
765 * detected. It is better to schedule this PEB for
766 * scrubbing.
767 */
768 dbg_wl("PEB %d has no VID header but has bit-flips",
769 e1->pnum);
770 scrubbing = 1;
771 goto out_not_moved;
762 } 772 }
763 773
764 ubi_err("error %d while reading VID header from PEB %d", 774 ubi_err("error %d while reading VID header from PEB %d",
@@ -1468,22 +1478,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1468 ubi->lookuptbl[e->pnum] = e; 1478 ubi->lookuptbl[e->pnum] = e;
1469 } 1479 }
1470 1480
1471 list_for_each_entry(seb, &si->corr, u.list) {
1472 cond_resched();
1473
1474 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1475 if (!e)
1476 goto out_free;
1477
1478 e->pnum = seb->pnum;
1479 e->ec = seb->ec;
1480 ubi->lookuptbl[e->pnum] = e;
1481 if (schedule_erase(ubi, e, 0)) {
1482 kmem_cache_free(ubi_wl_entry_slab, e);
1483 goto out_free;
1484 }
1485 }
1486
1487 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { 1481 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1488 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1482 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1489 cond_resched(); 1483 cond_resched();
@@ -1510,6 +1504,9 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1510 if (ubi->avail_pebs < WL_RESERVED_PEBS) { 1504 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1511 ubi_err("no enough physical eraseblocks (%d, need %d)", 1505 ubi_err("no enough physical eraseblocks (%d, need %d)",
1512 ubi->avail_pebs, WL_RESERVED_PEBS); 1506 ubi->avail_pebs, WL_RESERVED_PEBS);
1507 if (ubi->corr_peb_count)
1508 ubi_err("%d PEBs are corrupted and not used",
1509 ubi->corr_peb_count);
1513 goto out_free; 1510 goto out_free;
1514 } 1511 }
1515 ubi->avail_pebs -= WL_RESERVED_PEBS; 1512 ubi->avail_pebs -= WL_RESERVED_PEBS;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1cd752f9a6e1..b8e957249132 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1645,11 +1645,11 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
1645 * NB: both 1->0 and 0->1 transitions are counted except for 1645 * NB: both 1->0 and 0->1 transitions are counted except for
1646 * RI where only 0->1 is counted. 1646 * RI where only 0->1 is counted.
1647 */ 1647 */
1648static int hso_get_count(struct hso_serial *serial, 1648static int hso_get_count(struct tty_struct *tty,
1649 struct serial_icounter_struct __user *icnt) 1649 struct serial_icounter_struct *icount)
1650{ 1650{
1651 struct serial_icounter_struct icount;
1652 struct uart_icount cnow; 1651 struct uart_icount cnow;
1652 struct hso_serial *serial = get_serial_by_tty(tty);
1653 struct hso_tiocmget *tiocmget = serial->tiocmget; 1653 struct hso_tiocmget *tiocmget = serial->tiocmget;
1654 1654
1655 memset(&icount, 0, sizeof(struct serial_icounter_struct)); 1655 memset(&icount, 0, sizeof(struct serial_icounter_struct));
@@ -1660,19 +1660,19 @@ static int hso_get_count(struct hso_serial *serial,
1660 memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); 1660 memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount));
1661 spin_unlock_irq(&serial->serial_lock); 1661 spin_unlock_irq(&serial->serial_lock);
1662 1662
1663 icount.cts = cnow.cts; 1663 icount->cts = cnow.cts;
1664 icount.dsr = cnow.dsr; 1664 icount->dsr = cnow.dsr;
1665 icount.rng = cnow.rng; 1665 icount->rng = cnow.rng;
1666 icount.dcd = cnow.dcd; 1666 icount->dcd = cnow.dcd;
1667 icount.rx = cnow.rx; 1667 icount->rx = cnow.rx;
1668 icount.tx = cnow.tx; 1668 icount->tx = cnow.tx;
1669 icount.frame = cnow.frame; 1669 icount->frame = cnow.frame;
1670 icount.overrun = cnow.overrun; 1670 icount->overrun = cnow.overrun;
1671 icount.parity = cnow.parity; 1671 icount->parity = cnow.parity;
1672 icount.brk = cnow.brk; 1672 icount->brk = cnow.brk;
1673 icount.buf_overrun = cnow.buf_overrun; 1673 icount->buf_overrun = cnow.buf_overrun;
1674 1674
1675 return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; 1675 return 0;
1676} 1676}
1677 1677
1678 1678
@@ -1764,10 +1764,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
1764 case TIOCMIWAIT: 1764 case TIOCMIWAIT:
1765 ret = hso_wait_modem_status(serial, arg); 1765 ret = hso_wait_modem_status(serial, arg);
1766 break; 1766 break;
1767
1768 case TIOCGICOUNT:
1769 ret = hso_get_count(serial, uarg);
1770 break;
1771 default: 1767 default:
1772 ret = -ENOIOCTLCMD; 1768 ret = -ENOIOCTLCMD;
1773 break; 1769 break;
@@ -3300,6 +3296,7 @@ static const struct tty_operations hso_serial_ops = {
3300 .chars_in_buffer = hso_serial_chars_in_buffer, 3296 .chars_in_buffer = hso_serial_chars_in_buffer,
3301 .tiocmget = hso_serial_tiocmget, 3297 .tiocmget = hso_serial_tiocmget,
3302 .tiocmset = hso_serial_tiocmset, 3298 .tiocmset = hso_serial_tiocmset,
3299 .get_icount = hso_get_count,
3303 .unthrottle = hso_unthrottle 3300 .unthrottle = hso_unthrottle
3304}; 3301};
3305 3302
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 73d513989263..838f571027b7 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -36,6 +36,7 @@
36#include <linux/sched.h> /* signal_pending() */ 36#include <linux/sched.h> /* signal_pending() */
37#include <linux/pcieport_if.h> 37#include <linux/pcieport_if.h>
38#include <linux/mutex.h> 38#include <linux/mutex.h>
39#include <linux/workqueue.h>
39 40
40#define MY_NAME "pciehp" 41#define MY_NAME "pciehp"
41 42
@@ -44,6 +45,7 @@ extern int pciehp_poll_time;
44extern int pciehp_debug; 45extern int pciehp_debug;
45extern int pciehp_force; 46extern int pciehp_force;
46extern struct workqueue_struct *pciehp_wq; 47extern struct workqueue_struct *pciehp_wq;
48extern struct workqueue_struct *pciehp_ordered_wq;
47 49
48#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
49do { \ 51do { \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index aa5f3ff629ff..7ac8358df8fd 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -43,6 +43,7 @@ int pciehp_poll_mode;
43int pciehp_poll_time; 43int pciehp_poll_time;
44int pciehp_force; 44int pciehp_force;
45struct workqueue_struct *pciehp_wq; 45struct workqueue_struct *pciehp_wq;
46struct workqueue_struct *pciehp_ordered_wq;
46 47
47#define DRIVER_VERSION "0.4" 48#define DRIVER_VERSION "0.4"
48#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" 49#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -340,18 +341,33 @@ static int __init pcied_init(void)
340{ 341{
341 int retval = 0; 342 int retval = 0;
342 343
344 pciehp_wq = alloc_workqueue("pciehp", 0, 0);
345 if (!pciehp_wq)
346 return -ENOMEM;
347
348 pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
349 if (!pciehp_ordered_wq) {
350 destroy_workqueue(pciehp_wq);
351 return -ENOMEM;
352 }
353
343 pciehp_firmware_init(); 354 pciehp_firmware_init();
344 retval = pcie_port_service_register(&hpdriver_portdrv); 355 retval = pcie_port_service_register(&hpdriver_portdrv);
345 dbg("pcie_port_service_register = %d\n", retval); 356 dbg("pcie_port_service_register = %d\n", retval);
346 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 357 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
347 if (retval) 358 if (retval) {
359 destroy_workqueue(pciehp_ordered_wq);
360 destroy_workqueue(pciehp_wq);
348 dbg("Failure to register service\n"); 361 dbg("Failure to register service\n");
362 }
349 return retval; 363 return retval;
350} 364}
351 365
352static void __exit pcied_cleanup(void) 366static void __exit pcied_cleanup(void)
353{ 367{
354 dbg("unload_pciehpd()\n"); 368 dbg("unload_pciehpd()\n");
369 destroy_workqueue(pciehp_ordered_wq);
370 destroy_workqueue(pciehp_wq);
355 pcie_port_service_unregister(&hpdriver_portdrv); 371 pcie_port_service_unregister(&hpdriver_portdrv);
356 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 372 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
357} 373}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 8f58148be044..085dbb5fc168 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -32,7 +32,6 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/workqueue.h>
36#include "../pci.h" 35#include "../pci.h"
37#include "pciehp.h" 36#include "pciehp.h"
38 37
@@ -50,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
50 info->p_slot = p_slot; 49 info->p_slot = p_slot;
51 INIT_WORK(&info->work, interrupt_event_handler); 50 INIT_WORK(&info->work, interrupt_event_handler);
52 51
53 schedule_work(&info->work); 52 queue_work(pciehp_wq, &info->work);
54 53
55 return 0; 54 return 0;
56} 55}
@@ -345,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
345 kfree(info); 344 kfree(info);
346 goto out; 345 goto out;
347 } 346 }
348 queue_work(pciehp_wq, &info->work); 347 queue_work(pciehp_ordered_wq, &info->work);
349 out: 348 out:
350 mutex_unlock(&p_slot->lock); 349 mutex_unlock(&p_slot->lock);
351} 350}
@@ -378,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
378 if (ATTN_LED(ctrl)) 377 if (ATTN_LED(ctrl))
379 pciehp_set_attention_status(p_slot, 0); 378 pciehp_set_attention_status(p_slot, 0);
380 379
381 schedule_delayed_work(&p_slot->work, 5*HZ); 380 queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
382 break; 381 break;
383 case BLINKINGOFF_STATE: 382 case BLINKINGOFF_STATE:
384 case BLINKINGON_STATE: 383 case BLINKINGON_STATE:
@@ -440,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
440 else 439 else
441 p_slot->state = POWERON_STATE; 440 p_slot->state = POWERON_STATE;
442 441
443 queue_work(pciehp_wq, &info->work); 442 queue_work(pciehp_ordered_wq, &info->work);
444} 443}
445 444
446static void interrupt_event_handler(struct work_struct *work) 445static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 0cd42047d89b..50a23da5d24d 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -41,8 +41,6 @@
41#include "../pci.h" 41#include "../pci.h"
42#include "pciehp.h" 42#include "pciehp.h"
43 43
44static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
45
46static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 44static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
47{ 45{
48 struct pci_dev *dev = ctrl->pcie->port; 46 struct pci_dev *dev = ctrl->pcie->port;
@@ -805,8 +803,8 @@ static void pcie_cleanup_slot(struct controller *ctrl)
805{ 803{
806 struct slot *slot = ctrl->slot; 804 struct slot *slot = ctrl->slot;
807 cancel_delayed_work(&slot->work); 805 cancel_delayed_work(&slot->work);
808 flush_scheduled_work();
809 flush_workqueue(pciehp_wq); 806 flush_workqueue(pciehp_wq);
807 flush_workqueue(pciehp_ordered_wq);
810 kfree(slot); 808 kfree(slot);
811} 809}
812 810
@@ -912,16 +910,6 @@ struct controller *pcie_init(struct pcie_device *dev)
912 /* Disable sotfware notification */ 910 /* Disable sotfware notification */
913 pcie_disable_notification(ctrl); 911 pcie_disable_notification(ctrl);
914 912
915 /*
916 * If this is the first controller to be initialized,
917 * initialize the pciehp work queue
918 */
919 if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
920 pciehp_wq = create_singlethread_workqueue("pciehpd");
921 if (!pciehp_wq)
922 goto abort_ctrl;
923 }
924
925 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 913 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
926 pdev->vendor, pdev->device, pdev->subsystem_vendor, 914 pdev->vendor, pdev->device, pdev->subsystem_vendor,
927 pdev->subsystem_device); 915 pdev->subsystem_device);
@@ -941,11 +929,5 @@ void pciehp_release_ctrl(struct controller *ctrl)
941{ 929{
942 pcie_shutdown_notification(ctrl); 930 pcie_shutdown_notification(ctrl);
943 pcie_cleanup_slot(ctrl); 931 pcie_cleanup_slot(ctrl);
944 /*
945 * If this is the last controller to be released, destroy the
946 * pciehp work queue
947 */
948 if (atomic_dec_and_test(&pciehp_num_controllers))
949 destroy_workqueue(pciehp_wq);
950 kfree(ctrl); 932 kfree(ctrl);
951} 933}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index d2627e1c3ac1..e0c90e643b5f 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h> /* signal_pending(), struct timer_list */ 36#include <linux/sched.h> /* signal_pending(), struct timer_list */
37#include <linux/mutex.h> 37#include <linux/mutex.h>
38#include <linux/workqueue.h>
38 39
39#if !defined(MODULE) 40#if !defined(MODULE)
40 #define MY_NAME "shpchp" 41 #define MY_NAME "shpchp"
@@ -46,6 +47,7 @@ extern int shpchp_poll_mode;
46extern int shpchp_poll_time; 47extern int shpchp_poll_time;
47extern int shpchp_debug; 48extern int shpchp_debug;
48extern struct workqueue_struct *shpchp_wq; 49extern struct workqueue_struct *shpchp_wq;
50extern struct workqueue_struct *shpchp_ordered_wq;
49 51
50#define dbg(format, arg...) \ 52#define dbg(format, arg...) \
51do { \ 53do { \
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a7bd5048396e..aca972bbfb4c 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -33,7 +33,6 @@
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/workqueue.h>
37#include "shpchp.h" 36#include "shpchp.h"
38 37
39/* Global variables */ 38/* Global variables */
@@ -41,6 +40,7 @@ int shpchp_debug;
41int shpchp_poll_mode; 40int shpchp_poll_mode;
42int shpchp_poll_time; 41int shpchp_poll_time;
43struct workqueue_struct *shpchp_wq; 42struct workqueue_struct *shpchp_wq;
43struct workqueue_struct *shpchp_ordered_wq;
44 44
45#define DRIVER_VERSION "0.4" 45#define DRIVER_VERSION "0.4"
46#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" 46#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -174,8 +174,8 @@ void cleanup_slots(struct controller *ctrl)
174 slot = list_entry(tmp, struct slot, slot_list); 174 slot = list_entry(tmp, struct slot, slot_list);
175 list_del(&slot->slot_list); 175 list_del(&slot->slot_list);
176 cancel_delayed_work(&slot->work); 176 cancel_delayed_work(&slot->work);
177 flush_scheduled_work();
178 flush_workqueue(shpchp_wq); 177 flush_workqueue(shpchp_wq);
178 flush_workqueue(shpchp_ordered_wq);
179 pci_hp_deregister(slot->hotplug_slot); 179 pci_hp_deregister(slot->hotplug_slot);
180 } 180 }
181} 181}
@@ -360,9 +360,23 @@ static int __init shpcd_init(void)
360{ 360{
361 int retval = 0; 361 int retval = 0;
362 362
363 shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
364 if (!shpchp_wq)
365 return -ENOMEM;
366
367 shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
368 if (!shpchp_ordered_wq) {
369 destroy_workqueue(shpchp_wq);
370 return -ENOMEM;
371 }
372
363 retval = pci_register_driver(&shpc_driver); 373 retval = pci_register_driver(&shpc_driver);
364 dbg("%s: pci_register_driver = %d\n", __func__, retval); 374 dbg("%s: pci_register_driver = %d\n", __func__, retval);
365 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 375 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
376 if (retval) {
377 destroy_workqueue(shpchp_ordered_wq);
378 destroy_workqueue(shpchp_wq);
379 }
366 return retval; 380 return retval;
367} 381}
368 382
@@ -370,6 +384,8 @@ static void __exit shpcd_cleanup(void)
370{ 384{
371 dbg("unload_shpchpd()\n"); 385 dbg("unload_shpchpd()\n");
372 pci_unregister_driver(&shpc_driver); 386 pci_unregister_driver(&shpc_driver);
387 destroy_workqueue(shpchp_ordered_wq);
388 destroy_workqueue(shpchp_wq);
373 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 389 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
374} 390}
375 391
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 3387fbfb0c54..b00b09bdd38a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -32,7 +32,6 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/workqueue.h>
36#include "../pci.h" 35#include "../pci.h"
37#include "shpchp.h" 36#include "shpchp.h"
38 37
@@ -52,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
52 info->p_slot = p_slot; 51 info->p_slot = p_slot;
53 INIT_WORK(&info->work, interrupt_event_handler); 52 INIT_WORK(&info->work, interrupt_event_handler);
54 53
55 schedule_work(&info->work); 54 queue_work(shpchp_wq, &info->work);
56 55
57 return 0; 56 return 0;
58} 57}
@@ -457,7 +456,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
457 kfree(info); 456 kfree(info);
458 goto out; 457 goto out;
459 } 458 }
460 queue_work(shpchp_wq, &info->work); 459 queue_work(shpchp_ordered_wq, &info->work);
461 out: 460 out:
462 mutex_unlock(&p_slot->lock); 461 mutex_unlock(&p_slot->lock);
463} 462}
@@ -505,7 +504,7 @@ static void handle_button_press_event(struct slot *p_slot)
505 p_slot->hpc_ops->green_led_blink(p_slot); 504 p_slot->hpc_ops->green_led_blink(p_slot);
506 p_slot->hpc_ops->set_attention_status(p_slot, 0); 505 p_slot->hpc_ops->set_attention_status(p_slot, 0);
507 506
508 schedule_delayed_work(&p_slot->work, 5*HZ); 507 queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
509 break; 508 break;
510 case BLINKINGOFF_STATE: 509 case BLINKINGOFF_STATE:
511 case BLINKINGON_STATE: 510 case BLINKINGON_STATE:
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index d3985e7deab7..36547f0ce305 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -179,8 +179,6 @@
179#define SLOT_EVENT_LATCH 0x2 179#define SLOT_EVENT_LATCH 0x2
180#define SLOT_SERR_INT_MASK 0x3 180#define SLOT_SERR_INT_MASK 0x3
181 181
182static atomic_t shpchp_num_controllers = ATOMIC_INIT(0);
183
184static irqreturn_t shpc_isr(int irq, void *dev_id); 182static irqreturn_t shpc_isr(int irq, void *dev_id);
185static void start_int_poll_timer(struct controller *ctrl, int sec); 183static void start_int_poll_timer(struct controller *ctrl, int sec);
186static int hpc_check_cmd_status(struct controller *ctrl); 184static int hpc_check_cmd_status(struct controller *ctrl);
@@ -614,13 +612,6 @@ static void hpc_release_ctlr(struct controller *ctrl)
614 612
615 iounmap(ctrl->creg); 613 iounmap(ctrl->creg);
616 release_mem_region(ctrl->mmio_base, ctrl->mmio_size); 614 release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
617
618 /*
619 * If this is the last controller to be released, destroy the
620 * shpchpd work queue
621 */
622 if (atomic_dec_and_test(&shpchp_num_controllers))
623 destroy_workqueue(shpchp_wq);
624} 615}
625 616
626static int hpc_power_on_slot(struct slot * slot) 617static int hpc_power_on_slot(struct slot * slot)
@@ -1077,9 +1068,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1077 1068
1078 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, 1069 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1079 MY_NAME, (void *)ctrl); 1070 MY_NAME, (void *)ctrl);
1080 ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n", 1071 ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
1081 ctrl->pci_dev->irq, 1072 ctrl->pci_dev->irq, rc);
1082 atomic_read(&shpchp_num_controllers), rc);
1083 if (rc) { 1073 if (rc) {
1084 ctrl_err(ctrl, "Can't get irq %d for the hotplug " 1074 ctrl_err(ctrl, "Can't get irq %d for the hotplug "
1085 "controller\n", ctrl->pci_dev->irq); 1075 "controller\n", ctrl->pci_dev->irq);
@@ -1092,18 +1082,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1092 shpc_get_cur_bus_speed(ctrl); 1082 shpc_get_cur_bus_speed(ctrl);
1093 1083
1094 /* 1084 /*
1095 * If this is the first controller to be initialized,
1096 * initialize the shpchpd work queue
1097 */
1098 if (atomic_add_return(1, &shpchp_num_controllers) == 1) {
1099 shpchp_wq = create_singlethread_workqueue("shpchpd");
1100 if (!shpchp_wq) {
1101 rc = -ENOMEM;
1102 goto abort_iounmap;
1103 }
1104 }
1105
1106 /*
1107 * Unmask all event interrupts of all slots 1085 * Unmask all event interrupts of all slots
1108 */ 1086 */
1109 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 1087 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 38e6fa9a2012..aa95f1001761 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2196,7 +2196,6 @@ static void dasd_setup_queue(struct dasd_block *block)
2196 */ 2196 */
2197 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 2197 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2198 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 2198 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2199 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
2200} 2199}
2201 2200
2202/* 2201/*
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index cb301cc6178c..c454ffebb63e 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -2,7 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o 6 zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
7 zfcp_unit.o
7 8
8obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 96fa1f536394..044fb22718d2 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
56 struct ccw_device *cdev; 56 struct ccw_device *cdev;
57 struct zfcp_adapter *adapter; 57 struct zfcp_adapter *adapter;
58 struct zfcp_port *port; 58 struct zfcp_port *port;
59 struct zfcp_unit *unit;
60 59
61 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 60 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
62 if (!cdev) 61 if (!cdev)
@@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
72 port = zfcp_get_port_by_wwpn(adapter, wwpn); 71 port = zfcp_get_port_by_wwpn(adapter, wwpn);
73 if (!port) 72 if (!port)
74 goto out_port; 73 goto out_port;
74 flush_work(&port->rport_work);
75 75
76 unit = zfcp_unit_enqueue(port, lun); 76 zfcp_unit_add(port, lun);
77 if (IS_ERR(unit))
78 goto out_unit;
79
80 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
81 zfcp_erp_wait(adapter);
82 flush_work(&unit->scsi_work);
83
84out_unit:
85 put_device(&port->dev); 77 put_device(&port->dev);
78
86out_port: 79out_port:
87 zfcp_ccw_adapter_put(adapter); 80 zfcp_ccw_adapter_put(adapter);
88out_ccw_device: 81out_ccw_device:
@@ -158,6 +151,9 @@ static int __init zfcp_module_init(void)
158 fc_attach_transport(&zfcp_transport_functions); 151 fc_attach_transport(&zfcp_transport_functions);
159 if (!zfcp_data.scsi_transport_template) 152 if (!zfcp_data.scsi_transport_template)
160 goto out_transport; 153 goto out_transport;
154 scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
155 sizeof(struct zfcp_scsi_dev));
156
161 157
162 retval = misc_register(&zfcp_cfdc_misc); 158 retval = misc_register(&zfcp_cfdc_misc);
163 if (retval) { 159 if (retval) {
@@ -211,30 +207,6 @@ static void __exit zfcp_module_exit(void)
211module_exit(zfcp_module_exit); 207module_exit(zfcp_module_exit);
212 208
213/** 209/**
214 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
215 * @port: pointer to port to search for unit
216 * @fcp_lun: FCP LUN to search for
217 *
218 * Returns: pointer to zfcp_unit or NULL
219 */
220struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
221{
222 unsigned long flags;
223 struct zfcp_unit *unit;
224
225 read_lock_irqsave(&port->unit_list_lock, flags);
226 list_for_each_entry(unit, &port->unit_list, list)
227 if (unit->fcp_lun == fcp_lun) {
228 if (!get_device(&unit->dev))
229 unit = NULL;
230 read_unlock_irqrestore(&port->unit_list_lock, flags);
231 return unit;
232 }
233 read_unlock_irqrestore(&port->unit_list_lock, flags);
234 return NULL;
235}
236
237/**
238 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn 210 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
239 * @adapter: pointer to adapter to search for port 211 * @adapter: pointer to adapter to search for port
240 * @wwpn: wwpn to search for 212 * @wwpn: wwpn to search for
@@ -259,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
259 return NULL; 231 return NULL;
260} 232}
261 233
262/**
263 * zfcp_unit_release - dequeue unit
264 * @dev: pointer to device
265 *
266 * waits until all work is done on unit and removes it then from the unit->list
267 * of the associated port.
268 */
269static void zfcp_unit_release(struct device *dev)
270{
271 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
272
273 put_device(&unit->port->dev);
274 kfree(unit);
275}
276
277/**
278 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
279 * @port: pointer to port where unit is added
280 * @fcp_lun: FCP LUN of unit to be enqueued
281 * Returns: pointer to enqueued unit on success, ERR_PTR on error
282 *
283 * Sets up some unit internal structures and creates sysfs entry.
284 */
285struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
286{
287 struct zfcp_unit *unit;
288 int retval = -ENOMEM;
289
290 get_device(&port->dev);
291
292 unit = zfcp_get_unit_by_lun(port, fcp_lun);
293 if (unit) {
294 put_device(&unit->dev);
295 retval = -EEXIST;
296 goto err_out;
297 }
298
299 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
300 if (!unit)
301 goto err_out;
302
303 unit->port = port;
304 unit->fcp_lun = fcp_lun;
305 unit->dev.parent = &port->dev;
306 unit->dev.release = zfcp_unit_release;
307
308 if (dev_set_name(&unit->dev, "0x%016llx",
309 (unsigned long long) fcp_lun)) {
310 kfree(unit);
311 goto err_out;
312 }
313 retval = -EINVAL;
314
315 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
316
317 spin_lock_init(&unit->latencies.lock);
318 unit->latencies.write.channel.min = 0xFFFFFFFF;
319 unit->latencies.write.fabric.min = 0xFFFFFFFF;
320 unit->latencies.read.channel.min = 0xFFFFFFFF;
321 unit->latencies.read.fabric.min = 0xFFFFFFFF;
322 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
323 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
324
325 if (device_register(&unit->dev)) {
326 put_device(&unit->dev);
327 goto err_out;
328 }
329
330 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
331 goto err_out_put;
332
333 write_lock_irq(&port->unit_list_lock);
334 list_add_tail(&unit->list, &port->unit_list);
335 write_unlock_irq(&port->unit_list_lock);
336
337 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
338
339 return unit;
340
341err_out_put:
342 device_unregister(&unit->dev);
343err_out:
344 put_device(&port->dev);
345 return ERR_PTR(retval);
346}
347
348static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 234static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
349{ 235{
350 adapter->pool.erp_req = 236 adapter->pool.erp_req =
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index ce1cc7a11fb4..0833c2b51e39 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 if (!adapter) 46 if (!adapter)
47 return 0; 47 return 0;
48 48
49 zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, 49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
50 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
51 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
52 "ccresu2", NULL); 51 "ccresu2", NULL);
53 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
@@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
164 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); 163 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
165 adapter->req_no = 0; 164 adapter->req_no = 0;
166 165
167 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, 166 zfcp_ccw_activate(cdev);
168 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
170 "ccsonl2", NULL);
171 zfcp_erp_wait(adapter);
172
173 flush_work(&adapter->scan_work);
174
175 zfcp_ccw_adapter_put(adapter); 167 zfcp_ccw_adapter_put(adapter);
176 return 0; 168 return 0;
177} 169}
@@ -224,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
224 break; 216 break;
225 case CIO_OPER: 217 case CIO_OPER:
226 dev_info(&cdev->dev, "The FCP device is operational again\n"); 218 dev_info(&cdev->dev, "The FCP device is operational again\n");
227 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 219 zfcp_erp_set_adapter_status(adapter,
228 ZFCP_STATUS_COMMON_RUNNING, 220 ZFCP_STATUS_COMMON_RUNNING);
229 ZFCP_SET);
230 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 221 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
231 "ccnoti4", NULL); 222 "ccnoti4", NULL);
232 break; 223 break;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 1838cda68ba8..d692e229ecba 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -2,9 +2,10 @@
2 * zfcp device driver 2 * zfcp device driver
3 * 3 *
4 * Userspace interface for accessing the 4 * Userspace interface for accessing the
5 * Access Control Lists / Control File Data Channel 5 * Access Control Lists / Control File Data Channel;
6 * handling of response code and states for ports and LUNs.
6 * 7 *
7 * Copyright IBM Corporation 2008, 2009 8 * Copyright IBM Corporation 2008, 2010
8 */ 9 */
9 10
10#define KMSG_COMPONENT "zfcp" 11#define KMSG_COMPONENT "zfcp"
@@ -261,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = {
261 .name = "zfcp_cfdc", 262 .name = "zfcp_cfdc",
262 .fops = &zfcp_cfdc_fops, 263 .fops = &zfcp_cfdc_fops,
263}; 264};
265
266/**
267 * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
268 * @adapter: Adapter where the Access Control Table (ACT) changed
269 *
270 * After a change in the adapter ACT, check if access to any
271 * previously denied resources is now possible.
272 */
273void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
274{
275 unsigned long flags;
276 struct zfcp_port *port;
277 struct scsi_device *sdev;
278 struct zfcp_scsi_dev *zfcp_sdev;
279 int status;
280
281 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
282 return;
283
284 read_lock_irqsave(&adapter->port_list_lock, flags);
285 list_for_each_entry(port, &adapter->port_list, list) {
286 status = atomic_read(&port->status);
287 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
288 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
289 zfcp_erp_port_reopen(port,
290 ZFCP_STATUS_COMMON_ERP_FAILED,
291 "cfaac_1", NULL);
292 }
293 read_unlock_irqrestore(&adapter->port_list_lock, flags);
294
295 shost_for_each_device(sdev, port->adapter->scsi_host) {
296 zfcp_sdev = sdev_to_zfcp(sdev);
297 status = atomic_read(&zfcp_sdev->status);
298 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
299 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
300 zfcp_erp_lun_reopen(sdev,
301 ZFCP_STATUS_COMMON_ERP_FAILED,
302 "cfaac_2", NULL);
303 }
304}
305
306static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
307{
308 u16 subtable = table >> 16;
309 u16 rule = table & 0xffff;
310 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
311
312 if (subtable && subtable < ARRAY_SIZE(act_type))
313 dev_warn(&adapter->ccw_device->dev,
314 "Access denied according to ACT rule type %s, "
315 "rule %d\n", act_type[subtable], rule);
316}
317
318/**
319 * zfcp_cfdc_port_denied - Process "access denied" for port
320 * @port: The port where the acces has been denied
321 * @qual: The FSF status qualifier for the access denied FSF status
322 */
323void zfcp_cfdc_port_denied(struct zfcp_port *port,
324 union fsf_status_qual *qual)
325{
326 dev_warn(&port->adapter->ccw_device->dev,
327 "Access denied to port 0x%016Lx\n",
328 (unsigned long long)port->wwpn);
329
330 zfcp_act_eval_err(port->adapter, qual->halfword[0]);
331 zfcp_act_eval_err(port->adapter, qual->halfword[1]);
332 zfcp_erp_set_port_status(port,
333 ZFCP_STATUS_COMMON_ERP_FAILED |
334 ZFCP_STATUS_COMMON_ACCESS_DENIED);
335}
336
337/**
338 * zfcp_cfdc_lun_denied - Process "access denied" for LUN
339 * @sdev: The SCSI device / LUN where the access has been denied
340 * @qual: The FSF status qualifier for the access denied FSF status
341 */
342void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
343 union fsf_status_qual *qual)
344{
345 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
346
347 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
348 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
349 zfcp_scsi_dev_lun(sdev),
350 (unsigned long long)zfcp_sdev->port->wwpn);
351 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
352 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
353 zfcp_erp_set_lun_status(sdev,
354 ZFCP_STATUS_COMMON_ERP_FAILED |
355 ZFCP_STATUS_COMMON_ACCESS_DENIED);
356
357 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
358 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
359}
360
361/**
362 * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
363 * @sdev: The LUN / SCSI device where sharing violation occurred
364 * @qual: The FSF status qualifier from the LUN sharing violation
365 */
366void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
367 union fsf_status_qual *qual)
368{
369 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
370
371 if (qual->word[0])
372 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
373 "LUN 0x%Lx on port 0x%Lx is already in "
374 "use by CSS%d, MIF Image ID %x\n",
375 zfcp_scsi_dev_lun(sdev),
376 (unsigned long long)zfcp_sdev->port->wwpn,
377 qual->fsf_queue_designator.cssid,
378 qual->fsf_queue_designator.hla);
379 else
380 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
381
382 zfcp_erp_set_lun_status(sdev,
383 ZFCP_STATUS_COMMON_ERP_FAILED |
384 ZFCP_STATUS_COMMON_ACCESS_DENIED);
385 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
386 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
387}
388
389/**
390 * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
391 * @sdev: The SCSI device / LUN where to evaluate the status
392 * @bottom: The qtcb bottom with the status from the "open lun"
393 *
394 * Returns: 0 if LUN is usable, -EACCES if the access control table
395 * reports an unsupported configuration.
396 */
397int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
398 struct fsf_qtcb_bottom_support *bottom)
399{
400 int shared, rw;
401 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
402 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
403
404 if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
405 !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
406 zfcp_ccw_priv_sch(adapter))
407 return 0;
408
409 shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
410 rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
411
412 if (shared)
413 atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
414
415 if (!rw) {
416 atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
417 dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
418 "0x%016Lx on port 0x%016Lx opened read-only\n",
419 zfcp_scsi_dev_lun(sdev),
420 (unsigned long long)zfcp_sdev->port->wwpn);
421 }
422
423 if (!shared && !rw) {
424 dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
425 "not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
426 zfcp_scsi_dev_lun(sdev),
427 (unsigned long long)zfcp_sdev->port->wwpn);
428 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
429 zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
430 return -EACCES;
431 }
432
433 if (shared && rw) {
434 dev_err(&adapter->ccw_device->dev,
435 "Shared read-write access not supported "
436 "(LUN 0x%016Lx, port 0x%016Lx)\n",
437 zfcp_scsi_dev_lun(sdev),
438 (unsigned long long)zfcp_sdev->port->wwpn);
439 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
440 zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
441 return -EACCES;
442 }
443
444 return 0;
445}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a86117b0d6e1..2cdd6b28ff7f 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; 154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
155 if (scsi_cmnd) { 155 if (scsi_cmnd) {
156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd; 156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
157 response->u.fcp.serial = scsi_cmnd->serial_number;
158 response->u.fcp.data_dir = 157 response->u.fcp.data_dir =
159 qtcb->bottom.io.data_direction; 158 qtcb->bottom.io.data_direction;
160 } 159 }
@@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p,
330 break; 329 break;
331 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); 330 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
332 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 331 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
333 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
334 *p += sprintf(*p, "\n"); 332 *p += sprintf(*p, "\n");
335 break; 333 break;
336 334
@@ -482,7 +480,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
482 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); 480 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
483 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); 481 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
484 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); 482 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
485 zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); 483 zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
486 break; 484 break;
487 case ZFCP_REC_DBF_ID_ACTION: 485 case ZFCP_REC_DBF_ID_ACTION:
488 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); 486 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
@@ -600,19 +598,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
600} 598}
601 599
602/** 600/**
603 * zfcp_dbf_rec_unit - trace event for unit state change 601 * zfcp_dbf_rec_lun - trace event for LUN state change
604 * @id: identifier for trigger of state change 602 * @id: identifier for trigger of state change
605 * @ref: additional reference (e.g. request) 603 * @ref: additional reference (e.g. request)
606 * @unit: unit 604 * @sdev: SCSI device
607 */ 605 */
608void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) 606void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
609{ 607{
610 struct zfcp_port *port = unit->port; 608 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
609 struct zfcp_port *port = zfcp_sdev->port;
611 struct zfcp_dbf *dbf = port->adapter->dbf; 610 struct zfcp_dbf *dbf = port->adapter->dbf;
612 611
613 zfcp_dbf_rec_target(id, ref, dbf, &unit->status, 612 zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status,
614 &unit->erp_counter, port->wwpn, port->d_id, 613 &zfcp_sdev->erp_counter, port->wwpn, port->d_id,
615 unit->fcp_lun); 614 zfcp_scsi_dev_lun(sdev));
616} 615}
617 616
618/** 617/**
@@ -624,11 +623,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
624 * @action: address of error recovery action struct 623 * @action: address of error recovery action struct
625 * @adapter: adapter 624 * @adapter: adapter
626 * @port: port 625 * @port: port
627 * @unit: unit 626 * @sdev: SCSI device
628 */ 627 */
629void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, 628void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
630 struct zfcp_adapter *adapter, struct zfcp_port *port, 629 struct zfcp_adapter *adapter, struct zfcp_port *port,
631 struct zfcp_unit *unit) 630 struct scsi_device *sdev)
632{ 631{
633 struct zfcp_dbf *dbf = adapter->dbf; 632 struct zfcp_dbf *dbf = adapter->dbf;
634 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 633 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
@@ -647,9 +646,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
647 r->u.trigger.ps = atomic_read(&port->status); 646 r->u.trigger.ps = atomic_read(&port->status);
648 r->u.trigger.wwpn = port->wwpn; 647 r->u.trigger.wwpn = port->wwpn;
649 } 648 }
650 if (unit) 649 if (sdev)
651 r->u.trigger.us = atomic_read(&unit->status); 650 r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
652 r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; 651 r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
652 ZFCP_DBF_INVALID_LUN;
653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); 653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
654 spin_unlock_irqrestore(&dbf->rec_lock, flags); 654 spin_unlock_irqrestore(&dbf->rec_lock, flags);
655} 655}
@@ -879,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
879 } 879 }
880 rec->scsi_result = scsi_cmnd->result; 880 rec->scsi_result = scsi_cmnd->result;
881 rec->scsi_cmnd = (unsigned long)scsi_cmnd; 881 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
882 rec->scsi_serial = scsi_cmnd->serial_number;
883 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, 882 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
884 min((int)scsi_cmnd->cmd_len, 883 min((int)scsi_cmnd->cmd_len,
885 ZFCP_DBF_SCSI_OPCODE)); 884 ZFCP_DBF_SCSI_OPCODE));
@@ -948,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
948 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); 947 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
949 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); 948 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
950 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); 949 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
951 zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
952 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, 950 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
953 0, ZFCP_DBF_SCSI_OPCODE); 951 0, ZFCP_DBF_SCSI_OPCODE);
954 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); 952 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 2bcc3403126a..04081b1b62b4 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger {
60 u8 need; 60 u8 need;
61 u32 as; 61 u32 as;
62 u32 ps; 62 u32 ps;
63 u32 us; 63 u32 ls;
64 u64 ref; 64 u64 ref;
65 u64 action; 65 u64 action;
66 u64 wwpn; 66 u64 wwpn;
@@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response {
110 union { 110 union {
111 struct { 111 struct {
112 u64 cmnd; 112 u64 cmnd;
113 u64 serial;
114 u32 data_dir; 113 u32 data_dir;
115 } fcp; 114 } fcp;
116 struct { 115 struct {
@@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record {
206 u32 scsi_lun; 205 u32 scsi_lun;
207 u32 scsi_result; 206 u32 scsi_result;
208 u64 scsi_cmnd; 207 u64 scsi_cmnd;
209 u64 scsi_serial;
210#define ZFCP_DBF_SCSI_OPCODE 16 208#define ZFCP_DBF_SCSI_OPCODE 16
211 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; 209 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
212 u8 scsi_retries; 210 u8 scsi_retries;
@@ -350,16 +348,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
350/** 348/**
351 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset 349 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
352 * @tag: tag indicating success or failure of reset operation 350 * @tag: tag indicating success or failure of reset operation
351 * @scmnd: SCSI command which caused this error recovery
353 * @flag: indicates type of reset (Target Reset, Logical Unit Reset) 352 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
354 * @unit: unit that needs reset
355 * @scsi_cmnd: SCSI command which caused this error recovery
356 */ 353 */
357static inline 354static inline
358void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 355void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
359 struct scsi_cmnd *scsi_cmnd)
360{ 356{
357 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
358
361 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, 359 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
362 unit->port->adapter->dbf, scsi_cmnd, NULL, 0); 360 zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
363} 361}
364 362
365#endif /* ZFCP_DBF_H */ 363#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1c6b6e05a75..9ae1d0a6f627 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -85,8 +85,8 @@ struct zfcp_reqlist;
85#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 85#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
86 86
87/* logical unit status */ 87/* logical unit status */
88#define ZFCP_STATUS_UNIT_SHARED 0x00000004 88#define ZFCP_STATUS_LUN_SHARED 0x00000004
89#define ZFCP_STATUS_UNIT_READONLY 0x00000008 89#define ZFCP_STATUS_LUN_READONLY 0x00000008
90 90
91/* FSF request status (this does not have a common part) */ 91/* FSF request status (this does not have a common part) */
92#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 92#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
@@ -118,7 +118,7 @@ struct zfcp_erp_action {
118 int action; /* requested action code */ 118 int action; /* requested action code */
119 struct zfcp_adapter *adapter; /* device which should be recovered */ 119 struct zfcp_adapter *adapter; /* device which should be recovered */
120 struct zfcp_port *port; 120 struct zfcp_port *port;
121 struct zfcp_unit *unit; 121 struct scsi_device *sdev;
122 u32 status; /* recovery status */ 122 u32 status; /* recovery status */
123 u32 step; /* active step of this erp action */ 123 u32 step; /* active step of this erp action */
124 unsigned long fsf_req_id; 124 unsigned long fsf_req_id;
@@ -219,21 +219,66 @@ struct zfcp_port {
219 unsigned int starget_id; 219 unsigned int starget_id;
220}; 220};
221 221
222/**
223 * struct zfcp_unit - LUN configured via zfcp sysfs
224 * @dev: struct device for sysfs representation and reference counting
225 * @list: entry in LUN/unit list per zfcp_port
226 * @port: reference to zfcp_port where this LUN is configured
227 * @fcp_lun: 64 bit LUN value
228 * @scsi_work: for running scsi_scan_target
229 *
230 * This is the representation of a LUN that has been configured for
231 * usage. The main data here is the 64 bit LUN value, data for
232 * running I/O and recovery is in struct zfcp_scsi_dev.
233 */
222struct zfcp_unit { 234struct zfcp_unit {
223 struct device dev; 235 struct device dev;
224 struct list_head list; /* list of logical units */ 236 struct list_head list;
225 struct zfcp_port *port; /* remote port of unit */ 237 struct zfcp_port *port;
226 atomic_t status; /* status of this logical unit */ 238 u64 fcp_lun;
227 u64 fcp_lun; /* own FCP_LUN */
228 u32 handle; /* handle assigned by FSF */
229 struct scsi_device *device; /* scsi device struct pointer */
230 struct zfcp_erp_action erp_action; /* pending error recovery */
231 atomic_t erp_counter;
232 struct zfcp_latencies latencies;
233 struct work_struct scsi_work; 239 struct work_struct scsi_work;
234}; 240};
235 241
236/** 242/**
243 * struct zfcp_scsi_dev - zfcp data per SCSI device
244 * @status: zfcp internal status flags
245 * @lun_handle: handle from "open lun" for issuing FSF requests
246 * @erp_action: zfcp erp data for opening and recovering this LUN
247 * @erp_counter: zfcp erp counter for this LUN
248 * @latencies: FSF channel and fabric latencies
249 * @port: zfcp_port where this LUN belongs to
250 */
251struct zfcp_scsi_dev {
252 atomic_t status;
253 u32 lun_handle;
254 struct zfcp_erp_action erp_action;
255 atomic_t erp_counter;
256 struct zfcp_latencies latencies;
257 struct zfcp_port *port;
258};
259
260/**
261 * sdev_to_zfcp - Access zfcp LUN data for SCSI device
262 * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
263 */
264static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
265{
266 return scsi_transport_device_data(sdev);
267}
268
269/**
270 * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
271 * @sdev: SCSI device where to get the LUN from
272 */
273static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
274{
275 u64 fcp_lun;
276
277 int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
278 return fcp_lun;
279}
280
281/**
237 * struct zfcp_fsf_req - basic FSF request structure 282 * struct zfcp_fsf_req - basic FSF request structure
238 * @list: list of FSF requests 283 * @list: list of FSF requests
239 * @req_id: unique request ID 284 * @req_id: unique request ID
@@ -249,7 +294,6 @@ struct zfcp_unit {
249 * @erp_action: reference to erp action if request issued on behalf of ERP 294 * @erp_action: reference to erp action if request issued on behalf of ERP
250 * @pool: reference to memory pool if used for this request 295 * @pool: reference to memory pool if used for this request
251 * @issued: time when request was send (STCK) 296 * @issued: time when request was send (STCK)
252 * @unit: reference to unit if this request is a SCSI request
253 * @handler: handler which should be called to process response 297 * @handler: handler which should be called to process response
254 */ 298 */
255struct zfcp_fsf_req { 299struct zfcp_fsf_req {
@@ -267,7 +311,6 @@ struct zfcp_fsf_req {
267 struct zfcp_erp_action *erp_action; 311 struct zfcp_erp_action *erp_action;
268 mempool_t *pool; 312 mempool_t *pool;
269 unsigned long long issued; 313 unsigned long long issued;
270 struct zfcp_unit *unit;
271 void (*handler)(struct zfcp_fsf_req *); 314 void (*handler)(struct zfcp_fsf_req *);
272}; 315};
273 316
@@ -282,9 +325,4 @@ struct zfcp_data {
282 struct kmem_cache *adisc_cache; 325 struct kmem_cache *adisc_cache;
283}; 326};
284 327
285/********************** ZFCP SPECIFIC DEFINES ********************************/
286
287#define ZFCP_SET 0x00000100
288#define ZFCP_CLEAR 0x00000200
289
290#endif /* ZFCP_DEF_H */ 328#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 160b432c907f..d37c7331f244 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -21,6 +21,7 @@ enum zfcp_erp_act_flags {
21 ZFCP_STATUS_ERP_DISMISSING = 0x00100000, 21 ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
22 ZFCP_STATUS_ERP_DISMISSED = 0x00200000, 22 ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
23 ZFCP_STATUS_ERP_LOWMEM = 0x00400000, 23 ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
24 ZFCP_STATUS_ERP_NO_REF = 0x00800000,
24}; 25};
25 26
26enum zfcp_erp_steps { 27enum zfcp_erp_steps {
@@ -29,12 +30,12 @@ enum zfcp_erp_steps {
29 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 30 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
30 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 31 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
31 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 32 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
32 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 33 ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
33 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, 34 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
34}; 35};
35 36
36enum zfcp_erp_act_type { 37enum zfcp_erp_act_type {
37 ZFCP_ERP_ACTION_REOPEN_UNIT = 1, 38 ZFCP_ERP_ACTION_REOPEN_LUN = 1,
38 ZFCP_ERP_ACTION_REOPEN_PORT = 2, 39 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
39 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, 40 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
40 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, 41 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
@@ -56,9 +57,8 @@ enum zfcp_erp_act_result {
56 57
57static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) 58static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
58{ 59{
59 zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL, 60 zfcp_erp_clear_adapter_status(adapter,
60 ZFCP_STATUS_COMMON_UNBLOCKED | mask, 61 ZFCP_STATUS_COMMON_UNBLOCKED | mask);
61 ZFCP_CLEAR);
62} 62}
63 63
64static int zfcp_erp_action_exists(struct zfcp_erp_action *act) 64static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
@@ -88,24 +88,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
88 zfcp_erp_action_ready(act); 88 zfcp_erp_action_ready(act);
89} 89}
90 90
91static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) 91static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
92{ 92{
93 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 93 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
94 zfcp_erp_action_dismiss(&unit->erp_action); 94
95 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
96 zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
95} 97}
96 98
97static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) 99static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
98{ 100{
99 struct zfcp_unit *unit; 101 struct scsi_device *sdev;
100 102
101 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
102 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
103 else { 105 else
104 read_lock(&port->unit_list_lock); 106 shost_for_each_device(sdev, port->adapter->scsi_host)
105 list_for_each_entry(unit, &port->unit_list, list) 107 if (sdev_to_zfcp(sdev)->port == port)
106 zfcp_erp_action_dismiss_unit(unit); 108 zfcp_erp_action_dismiss_lun(sdev);
107 read_unlock(&port->unit_list_lock);
108 }
109} 109}
110 110
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -124,15 +124,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
124 124
125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
126 struct zfcp_port *port, 126 struct zfcp_port *port,
127 struct zfcp_unit *unit) 127 struct scsi_device *sdev)
128{ 128{
129 int need = want; 129 int need = want;
130 int u_status, p_status, a_status; 130 int l_status, p_status, a_status;
131 struct zfcp_scsi_dev *zfcp_sdev;
131 132
132 switch (want) { 133 switch (want) {
133 case ZFCP_ERP_ACTION_REOPEN_UNIT: 134 case ZFCP_ERP_ACTION_REOPEN_LUN:
134 u_status = atomic_read(&unit->status); 135 zfcp_sdev = sdev_to_zfcp(sdev);
135 if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) 136 l_status = atomic_read(&zfcp_sdev->status);
137 if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
136 return 0; 138 return 0;
137 p_status = atomic_read(&port->status); 139 p_status = atomic_read(&port->status);
138 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || 140 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
@@ -169,22 +171,26 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
169 return need; 171 return need;
170} 172}
171 173
172static struct zfcp_erp_action *zfcp_erp_setup_act(int need, 174static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
173 struct zfcp_adapter *adapter, 175 struct zfcp_adapter *adapter,
174 struct zfcp_port *port, 176 struct zfcp_port *port,
175 struct zfcp_unit *unit) 177 struct scsi_device *sdev)
176{ 178{
177 struct zfcp_erp_action *erp_action; 179 struct zfcp_erp_action *erp_action;
178 u32 status = 0; 180 struct zfcp_scsi_dev *zfcp_sdev;
179 181
180 switch (need) { 182 switch (need) {
181 case ZFCP_ERP_ACTION_REOPEN_UNIT: 183 case ZFCP_ERP_ACTION_REOPEN_LUN:
182 if (!get_device(&unit->dev)) 184 zfcp_sdev = sdev_to_zfcp(sdev);
183 return NULL; 185 if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
184 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 186 if (scsi_device_get(sdev))
185 erp_action = &unit->erp_action; 187 return NULL;
186 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) 188 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
187 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 189 &zfcp_sdev->status);
190 erp_action = &zfcp_sdev->erp_action;
191 if (!(atomic_read(&zfcp_sdev->status) &
192 ZFCP_STATUS_COMMON_RUNNING))
193 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
188 break; 194 break;
189 195
190 case ZFCP_ERP_ACTION_REOPEN_PORT: 196 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -195,7 +201,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
195 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 201 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
196 erp_action = &port->erp_action; 202 erp_action = &port->erp_action;
197 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 203 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
198 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 204 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
199 break; 205 break;
200 206
201 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 207 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
@@ -205,7 +211,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
205 erp_action = &adapter->erp_action; 211 erp_action = &adapter->erp_action;
206 if (!(atomic_read(&adapter->status) & 212 if (!(atomic_read(&adapter->status) &
207 ZFCP_STATUS_COMMON_RUNNING)) 213 ZFCP_STATUS_COMMON_RUNNING))
208 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 214 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
209 break; 215 break;
210 216
211 default: 217 default:
@@ -215,16 +221,17 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
215 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 221 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
216 erp_action->adapter = adapter; 222 erp_action->adapter = adapter;
217 erp_action->port = port; 223 erp_action->port = port;
218 erp_action->unit = unit; 224 erp_action->sdev = sdev;
219 erp_action->action = need; 225 erp_action->action = need;
220 erp_action->status = status; 226 erp_action->status = act_status;
221 227
222 return erp_action; 228 return erp_action;
223} 229}
224 230
225static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 231static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
226 struct zfcp_port *port, 232 struct zfcp_port *port,
227 struct zfcp_unit *unit, char *id, void *ref) 233 struct scsi_device *sdev,
234 char *id, void *ref, u32 act_status)
228{ 235{
229 int retval = 1, need; 236 int retval = 1, need;
230 struct zfcp_erp_action *act = NULL; 237 struct zfcp_erp_action *act = NULL;
@@ -232,21 +239,21 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
232 if (!adapter->erp_thread) 239 if (!adapter->erp_thread)
233 return -EIO; 240 return -EIO;
234 241
235 need = zfcp_erp_required_act(want, adapter, port, unit); 242 need = zfcp_erp_required_act(want, adapter, port, sdev);
236 if (!need) 243 if (!need)
237 goto out; 244 goto out;
238 245
239 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 246 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
240 act = zfcp_erp_setup_act(need, adapter, port, unit);
241 if (!act) 247 if (!act)
242 goto out; 248 goto out;
249 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
243 ++adapter->erp_total_count; 250 ++adapter->erp_total_count;
244 list_add_tail(&act->list, &adapter->erp_ready_head); 251 list_add_tail(&act->list, &adapter->erp_ready_head);
245 wake_up(&adapter->erp_ready_wq); 252 wake_up(&adapter->erp_ready_wq);
246 zfcp_dbf_rec_thread("eracte1", adapter->dbf); 253 zfcp_dbf_rec_thread("eracte1", adapter->dbf);
247 retval = 0; 254 retval = 0;
248 out: 255 out:
249 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); 256 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
250 return retval; 257 return retval;
251} 258}
252 259
@@ -258,11 +265,12 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
258 265
259 /* ensure propagation of failed status to new devices */ 266 /* ensure propagation of failed status to new devices */
260 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 267 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
261 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 268 zfcp_erp_set_adapter_status(adapter,
269 ZFCP_STATUS_COMMON_ERP_FAILED);
262 return -EIO; 270 return -EIO;
263 } 271 }
264 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 272 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
265 adapter, NULL, NULL, id, ref); 273 adapter, NULL, NULL, id, ref, 0);
266} 274}
267 275
268/** 276/**
@@ -282,10 +290,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
282 290
283 write_lock_irqsave(&adapter->erp_lock, flags); 291 write_lock_irqsave(&adapter->erp_lock, flags);
284 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 292 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
285 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 293 zfcp_erp_set_adapter_status(adapter,
294 ZFCP_STATUS_COMMON_ERP_FAILED);
286 else 295 else
287 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, 296 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
288 NULL, NULL, id, ref); 297 NULL, NULL, id, ref, 0);
289 write_unlock_irqrestore(&adapter->erp_lock, flags); 298 write_unlock_irqrestore(&adapter->erp_lock, flags);
290} 299}
291 300
@@ -317,25 +326,10 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
317 zfcp_erp_port_reopen(port, clear | flags, id, ref); 326 zfcp_erp_port_reopen(port, clear | flags, id, ref);
318} 327}
319 328
320/**
321 * zfcp_erp_unit_shutdown - Shutdown unit
322 * @unit: Unit to shut down.
323 * @clear: Status flags to clear.
324 * @id: Id for debug trace event.
325 * @ref: Reference for debug trace event.
326 */
327void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
328 void *ref)
329{
330 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
331 zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
332}
333
334static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 329static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
335{ 330{
336 zfcp_erp_modify_port_status(port, "erpblk1", NULL, 331 zfcp_erp_clear_port_status(port,
337 ZFCP_STATUS_COMMON_UNBLOCKED | clear, 332 ZFCP_STATUS_COMMON_UNBLOCKED | clear);
338 ZFCP_CLEAR);
339} 333}
340 334
341static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, 335static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
@@ -348,7 +342,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
348 return; 342 return;
349 343
350 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 344 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
351 port->adapter, port, NULL, id, ref); 345 port->adapter, port, NULL, id, ref, 0);
352} 346}
353 347
354/** 348/**
@@ -376,12 +370,12 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
376 370
377 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 371 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
378 /* ensure propagation of failed status to new devices */ 372 /* ensure propagation of failed status to new devices */
379 zfcp_erp_port_failed(port, "erpreo1", NULL); 373 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
380 return -EIO; 374 return -EIO;
381 } 375 }
382 376
383 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 377 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
384 port->adapter, port, NULL, id, ref); 378 port->adapter, port, NULL, id, ref, 0);
385} 379}
386 380
387/** 381/**
@@ -404,53 +398,88 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
404 return retval; 398 return retval;
405} 399}
406 400
407static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) 401static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
408{ 402{
409 zfcp_erp_modify_unit_status(unit, "erublk1", NULL, 403 zfcp_erp_clear_lun_status(sdev,
410 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, 404 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
411 ZFCP_CLEAR);
412} 405}
413 406
414static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 407static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
415 void *ref) 408 void *ref, u32 act_status)
416{ 409{
417 struct zfcp_adapter *adapter = unit->port->adapter; 410 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
411 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
418 412
419 zfcp_erp_unit_block(unit, clear); 413 zfcp_erp_lun_block(sdev, clear);
420 414
421 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 415 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
422 return; 416 return;
423 417
424 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, 418 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
425 adapter, unit->port, unit, id, ref); 419 zfcp_sdev->port, sdev, id, ref, act_status);
426} 420}
427 421
428/** 422/**
429 * zfcp_erp_unit_reopen - initiate reopen of a unit 423 * zfcp_erp_lun_reopen - initiate reopen of a LUN
430 * @unit: unit to be reopened 424 * @sdev: SCSI device / LUN to be reopened
431 * @clear_mask: specifies flags in unit status to be cleared 425 * @clear_mask: specifies flags in LUN status to be cleared
432 * Return: 0 on success, < 0 on error 426 * Return: 0 on success, < 0 on error
433 */ 427 */
434void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 428void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
435 void *ref) 429 void *ref)
436{ 430{
437 unsigned long flags; 431 unsigned long flags;
438 struct zfcp_port *port = unit->port; 432 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
433 struct zfcp_port *port = zfcp_sdev->port;
439 struct zfcp_adapter *adapter = port->adapter; 434 struct zfcp_adapter *adapter = port->adapter;
440 435
441 write_lock_irqsave(&adapter->erp_lock, flags); 436 write_lock_irqsave(&adapter->erp_lock, flags);
442 _zfcp_erp_unit_reopen(unit, clear, id, ref); 437 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
443 write_unlock_irqrestore(&adapter->erp_lock, flags); 438 write_unlock_irqrestore(&adapter->erp_lock, flags);
444} 439}
445 440
446static int status_change_set(unsigned long mask, atomic_t *status) 441/**
442 * zfcp_erp_lun_shutdown - Shutdown LUN
443 * @sdev: SCSI device / LUN to shut down.
444 * @clear: Status flags to clear.
445 * @id: Id for debug trace event.
446 * @ref: Reference for debug trace event.
447 */
448void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
449 void *ref)
447{ 450{
448 return (atomic_read(status) ^ mask) & mask; 451 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
452 zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
449} 453}
450 454
451static int status_change_clear(unsigned long mask, atomic_t *status) 455/**
456 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
457 * @sdev: SCSI device / LUN to shut down.
458 * @id: Id for debug trace event.
459 *
460 * Do not acquire a reference for the LUN when creating the ERP
461 * action. It is safe, because this function waits for the ERP to
462 * complete first. This allows to shutdown the LUN, even when the SCSI
463 * device is in the state SDEV_DEL when scsi_device_get will fail.
464 */
465void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
452{ 466{
453 return atomic_read(status) & mask; 467 unsigned long flags;
468 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
469 struct zfcp_port *port = zfcp_sdev->port;
470 struct zfcp_adapter *adapter = port->adapter;
471 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
472
473 write_lock_irqsave(&adapter->erp_lock, flags);
474 _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
475 write_unlock_irqrestore(&adapter->erp_lock, flags);
476
477 zfcp_erp_wait(adapter);
478}
479
480static int status_change_set(unsigned long mask, atomic_t *status)
481{
482 return (atomic_read(status) ^ mask) & mask;
454} 483}
455 484
456static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 485static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
@@ -467,11 +496,13 @@ static void zfcp_erp_port_unblock(struct zfcp_port *port)
467 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 496 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
468} 497}
469 498
470static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) 499static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
471{ 500{
472 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) 501 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
473 zfcp_dbf_rec_unit("eruubl1", NULL, unit); 502
474 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); 503 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
504 zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
505 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
475} 506}
476 507
477static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 508static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -559,15 +590,14 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
559 read_unlock(&adapter->port_list_lock); 590 read_unlock(&adapter->port_list_lock);
560} 591}
561 592
562static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, 593static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
563 char *id, void *ref) 594 char *id, void *ref)
564{ 595{
565 struct zfcp_unit *unit; 596 struct scsi_device *sdev;
566 597
567 read_lock(&port->unit_list_lock); 598 shost_for_each_device(sdev, port->adapter->scsi_host)
568 list_for_each_entry(unit, &port->unit_list, list) 599 if (sdev_to_zfcp(sdev)->port == port)
569 _zfcp_erp_unit_reopen(unit, clear, id, ref); 600 _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
570 read_unlock(&port->unit_list_lock);
571} 601}
572 602
573static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 603static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -582,8 +612,8 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
582 case ZFCP_ERP_ACTION_REOPEN_PORT: 612 case ZFCP_ERP_ACTION_REOPEN_PORT:
583 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); 613 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
584 break; 614 break;
585 case ZFCP_ERP_ACTION_REOPEN_UNIT: 615 case ZFCP_ERP_ACTION_REOPEN_LUN:
586 _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); 616 _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
587 break; 617 break;
588 } 618 }
589} 619}
@@ -598,7 +628,7 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
598 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); 628 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
599 break; 629 break;
600 case ZFCP_ERP_ACTION_REOPEN_PORT: 630 case ZFCP_ERP_ACTION_REOPEN_PORT:
601 _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); 631 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
602 break; 632 break;
603 } 633 }
604} 634}
@@ -742,9 +772,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
742 zfcp_fsf_req_dismiss_all(adapter); 772 zfcp_fsf_req_dismiss_all(adapter);
743 adapter->fsf_req_seq_no = 0; 773 adapter->fsf_req_seq_no = 0;
744 zfcp_fc_wka_ports_force_offline(adapter->gs); 774 zfcp_fc_wka_ports_force_offline(adapter->gs);
745 /* all ports and units are closed */ 775 /* all ports and LUNs are closed */
746 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, 776 zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
747 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
748 777
749 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 778 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
750 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 779 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
@@ -861,7 +890,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
861 struct zfcp_port *port = act->port; 890 struct zfcp_port *port = act->port;
862 891
863 if (port->wwpn != adapter->peer_wwpn) { 892 if (port->wwpn != adapter->peer_wwpn) {
864 zfcp_erp_port_failed(port, "eroptp1", NULL); 893 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
865 return ZFCP_ERP_FAILED; 894 return ZFCP_ERP_FAILED;
866 } 895 }
867 port->d_id = adapter->peer_d_id; 896 port->d_id = adapter->peer_d_id;
@@ -933,82 +962,87 @@ close_init_done:
933 return zfcp_erp_port_strategy_open_common(erp_action); 962 return zfcp_erp_port_strategy_open_common(erp_action);
934} 963}
935 964
936static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 965static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
937{ 966{
967 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
968
938 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 969 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
939 ZFCP_STATUS_UNIT_SHARED | 970 ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
940 ZFCP_STATUS_UNIT_READONLY, 971 &zfcp_sdev->status);
941 &unit->status);
942} 972}
943 973
944static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) 974static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
945{ 975{
946 int retval = zfcp_fsf_close_unit(erp_action); 976 int retval = zfcp_fsf_close_lun(erp_action);
947 if (retval == -ENOMEM) 977 if (retval == -ENOMEM)
948 return ZFCP_ERP_NOMEM; 978 return ZFCP_ERP_NOMEM;
949 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; 979 erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
950 if (retval) 980 if (retval)
951 return ZFCP_ERP_FAILED; 981 return ZFCP_ERP_FAILED;
952 return ZFCP_ERP_CONTINUES; 982 return ZFCP_ERP_CONTINUES;
953} 983}
954 984
955static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) 985static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
956{ 986{
957 int retval = zfcp_fsf_open_unit(erp_action); 987 int retval = zfcp_fsf_open_lun(erp_action);
958 if (retval == -ENOMEM) 988 if (retval == -ENOMEM)
959 return ZFCP_ERP_NOMEM; 989 return ZFCP_ERP_NOMEM;
960 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; 990 erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
961 if (retval) 991 if (retval)
962 return ZFCP_ERP_FAILED; 992 return ZFCP_ERP_FAILED;
963 return ZFCP_ERP_CONTINUES; 993 return ZFCP_ERP_CONTINUES;
964} 994}
965 995
966static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) 996static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
967{ 997{
968 struct zfcp_unit *unit = erp_action->unit; 998 struct scsi_device *sdev = erp_action->sdev;
999 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
969 1000
970 switch (erp_action->step) { 1001 switch (erp_action->step) {
971 case ZFCP_ERP_STEP_UNINITIALIZED: 1002 case ZFCP_ERP_STEP_UNINITIALIZED:
972 zfcp_erp_unit_strategy_clearstati(unit); 1003 zfcp_erp_lun_strategy_clearstati(sdev);
973 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1004 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
974 return zfcp_erp_unit_strategy_close(erp_action); 1005 return zfcp_erp_lun_strategy_close(erp_action);
975 /* already closed, fall through */ 1006 /* already closed, fall through */
976 case ZFCP_ERP_STEP_UNIT_CLOSING: 1007 case ZFCP_ERP_STEP_LUN_CLOSING:
977 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1008 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
978 return ZFCP_ERP_FAILED; 1009 return ZFCP_ERP_FAILED;
979 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 1010 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
980 return ZFCP_ERP_EXIT; 1011 return ZFCP_ERP_EXIT;
981 return zfcp_erp_unit_strategy_open(erp_action); 1012 return zfcp_erp_lun_strategy_open(erp_action);
982 1013
983 case ZFCP_ERP_STEP_UNIT_OPENING: 1014 case ZFCP_ERP_STEP_LUN_OPENING:
984 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 1015 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
985 return ZFCP_ERP_SUCCEEDED; 1016 return ZFCP_ERP_SUCCEEDED;
986 } 1017 }
987 return ZFCP_ERP_FAILED; 1018 return ZFCP_ERP_FAILED;
988} 1019}
989 1020
990static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) 1021static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
991{ 1022{
1023 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1024
992 switch (result) { 1025 switch (result) {
993 case ZFCP_ERP_SUCCEEDED : 1026 case ZFCP_ERP_SUCCEEDED :
994 atomic_set(&unit->erp_counter, 0); 1027 atomic_set(&zfcp_sdev->erp_counter, 0);
995 zfcp_erp_unit_unblock(unit); 1028 zfcp_erp_lun_unblock(sdev);
996 break; 1029 break;
997 case ZFCP_ERP_FAILED : 1030 case ZFCP_ERP_FAILED :
998 atomic_inc(&unit->erp_counter); 1031 atomic_inc(&zfcp_sdev->erp_counter);
999 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { 1032 if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
1000 dev_err(&unit->port->adapter->ccw_device->dev, 1033 dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
1001 "ERP failed for unit 0x%016Lx on " 1034 "ERP failed for LUN 0x%016Lx on "
1002 "port 0x%016Lx\n", 1035 "port 0x%016Lx\n",
1003 (unsigned long long)unit->fcp_lun, 1036 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1004 (unsigned long long)unit->port->wwpn); 1037 (unsigned long long)zfcp_sdev->port->wwpn);
1005 zfcp_erp_unit_failed(unit, "erusck1", NULL); 1038 zfcp_erp_set_lun_status(sdev,
1039 ZFCP_STATUS_COMMON_ERP_FAILED);
1006 } 1040 }
1007 break; 1041 break;
1008 } 1042 }
1009 1043
1010 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1044 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
1011 zfcp_erp_unit_block(unit, 0); 1045 zfcp_erp_lun_block(sdev, 0);
1012 result = ZFCP_ERP_EXIT; 1046 result = ZFCP_ERP_EXIT;
1013 } 1047 }
1014 return result; 1048 return result;
@@ -1032,7 +1066,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1032 dev_err(&port->adapter->ccw_device->dev, 1066 dev_err(&port->adapter->ccw_device->dev,
1033 "ERP failed for remote port 0x%016Lx\n", 1067 "ERP failed for remote port 0x%016Lx\n",
1034 (unsigned long long)port->wwpn); 1068 (unsigned long long)port->wwpn);
1035 zfcp_erp_port_failed(port, "erpsck1", NULL); 1069 zfcp_erp_set_port_status(port,
1070 ZFCP_STATUS_COMMON_ERP_FAILED);
1036 } 1071 }
1037 break; 1072 break;
1038 } 1073 }
@@ -1059,7 +1094,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1059 dev_err(&adapter->ccw_device->dev, 1094 dev_err(&adapter->ccw_device->dev,
1060 "ERP cannot recover an error " 1095 "ERP cannot recover an error "
1061 "on the FCP device\n"); 1096 "on the FCP device\n");
1062 zfcp_erp_adapter_failed(adapter, "erasck1", NULL); 1097 zfcp_erp_set_adapter_status(adapter,
1098 ZFCP_STATUS_COMMON_ERP_FAILED);
1063 } 1099 }
1064 break; 1100 break;
1065 } 1101 }
@@ -1076,12 +1112,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
1076{ 1112{
1077 struct zfcp_adapter *adapter = erp_action->adapter; 1113 struct zfcp_adapter *adapter = erp_action->adapter;
1078 struct zfcp_port *port = erp_action->port; 1114 struct zfcp_port *port = erp_action->port;
1079 struct zfcp_unit *unit = erp_action->unit; 1115 struct scsi_device *sdev = erp_action->sdev;
1080 1116
1081 switch (erp_action->action) { 1117 switch (erp_action->action) {
1082 1118
1083 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1119 case ZFCP_ERP_ACTION_REOPEN_LUN:
1084 result = zfcp_erp_strategy_check_unit(unit, result); 1120 result = zfcp_erp_strategy_check_lun(sdev, result);
1085 break; 1121 break;
1086 1122
1087 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1123 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1116,7 +1152,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1116 int action = act->action; 1152 int action = act->action;
1117 struct zfcp_adapter *adapter = act->adapter; 1153 struct zfcp_adapter *adapter = act->adapter;
1118 struct zfcp_port *port = act->port; 1154 struct zfcp_port *port = act->port;
1119 struct zfcp_unit *unit = act->unit; 1155 struct scsi_device *sdev = act->sdev;
1156 struct zfcp_scsi_dev *zfcp_sdev;
1120 u32 erp_status = act->status; 1157 u32 erp_status = act->status;
1121 1158
1122 switch (action) { 1159 switch (action) {
@@ -1139,11 +1176,12 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1139 } 1176 }
1140 break; 1177 break;
1141 1178
1142 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1179 case ZFCP_ERP_ACTION_REOPEN_LUN:
1143 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { 1180 zfcp_sdev = sdev_to_zfcp(sdev);
1144 _zfcp_erp_unit_reopen(unit, 1181 if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
1145 ZFCP_STATUS_COMMON_ERP_FAILED, 1182 _zfcp_erp_lun_reopen(sdev,
1146 "ersscg3", NULL); 1183 ZFCP_STATUS_COMMON_ERP_FAILED,
1184 "ersscg3", NULL, 0);
1147 return ZFCP_ERP_EXIT; 1185 return ZFCP_ERP_EXIT;
1148 } 1186 }
1149 break; 1187 break;
@@ -1154,6 +1192,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1154static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) 1192static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1155{ 1193{
1156 struct zfcp_adapter *adapter = erp_action->adapter; 1194 struct zfcp_adapter *adapter = erp_action->adapter;
1195 struct zfcp_scsi_dev *zfcp_sdev;
1157 1196
1158 adapter->erp_total_count--; 1197 adapter->erp_total_count--;
1159 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { 1198 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
@@ -1165,9 +1204,10 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1165 zfcp_dbf_rec_action("eractd1", erp_action); 1204 zfcp_dbf_rec_action("eractd1", erp_action);
1166 1205
1167 switch (erp_action->action) { 1206 switch (erp_action->action) {
1168 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1207 case ZFCP_ERP_ACTION_REOPEN_LUN:
1208 zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1169 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1209 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
1170 &erp_action->unit->status); 1210 &zfcp_sdev->status);
1171 break; 1211 break;
1172 1212
1173 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1213 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1187,11 +1227,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1187{ 1227{
1188 struct zfcp_adapter *adapter = act->adapter; 1228 struct zfcp_adapter *adapter = act->adapter;
1189 struct zfcp_port *port = act->port; 1229 struct zfcp_port *port = act->port;
1190 struct zfcp_unit *unit = act->unit; 1230 struct scsi_device *sdev = act->sdev;
1191 1231
1192 switch (act->action) { 1232 switch (act->action) {
1193 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1233 case ZFCP_ERP_ACTION_REOPEN_LUN:
1194 put_device(&unit->dev); 1234 if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
1235 scsi_device_put(sdev);
1195 break; 1236 break;
1196 1237
1197 case ZFCP_ERP_ACTION_REOPEN_PORT: 1238 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1222,8 +1263,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1222 return zfcp_erp_port_forced_strategy(erp_action); 1263 return zfcp_erp_port_forced_strategy(erp_action);
1223 case ZFCP_ERP_ACTION_REOPEN_PORT: 1264 case ZFCP_ERP_ACTION_REOPEN_PORT:
1224 return zfcp_erp_port_strategy(erp_action); 1265 return zfcp_erp_port_strategy(erp_action);
1225 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1266 case ZFCP_ERP_ACTION_REOPEN_LUN:
1226 return zfcp_erp_unit_strategy(erp_action); 1267 return zfcp_erp_lun_strategy(erp_action);
1227 } 1268 }
1228 return ZFCP_ERP_FAILED; 1269 return ZFCP_ERP_FAILED;
1229} 1270}
@@ -1376,42 +1417,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1376} 1417}
1377 1418
1378/** 1419/**
1379 * zfcp_erp_adapter_failed - Set adapter status to failed.
1380 * @adapter: Failed adapter.
1381 * @id: Event id for debug trace.
1382 * @ref: Reference for debug trace.
1383 */
1384void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
1385{
1386 zfcp_erp_modify_adapter_status(adapter, id, ref,
1387 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1388}
1389
1390/**
1391 * zfcp_erp_port_failed - Set port status to failed.
1392 * @port: Failed port.
1393 * @id: Event id for debug trace.
1394 * @ref: Reference for debug trace.
1395 */
1396void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
1397{
1398 zfcp_erp_modify_port_status(port, id, ref,
1399 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1400}
1401
1402/**
1403 * zfcp_erp_unit_failed - Set unit status to failed.
1404 * @unit: Failed unit.
1405 * @id: Event id for debug trace.
1406 * @ref: Reference for debug trace.
1407 */
1408void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
1409{
1410 zfcp_erp_modify_unit_status(unit, id, ref,
1411 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1412}
1413
1414/**
1415 * zfcp_erp_wait - wait for completion of error recovery on an adapter 1420 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1416 * @adapter: adapter for which to wait for completion of its error recovery 1421 * @adapter: adapter for which to wait for completion of its error recovery
1417 */ 1422 */
@@ -1423,210 +1428,148 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
1423} 1428}
1424 1429
1425/** 1430/**
1426 * zfcp_erp_modify_adapter_status - change adapter status bits 1431 * zfcp_erp_set_adapter_status - set adapter status bits
1427 * @adapter: adapter to change the status 1432 * @adapter: adapter to change the status
1428 * @id: id for the debug trace
1429 * @ref: reference for the debug trace
1430 * @mask: status bits to change 1433 * @mask: status bits to change
1431 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1432 * 1434 *
1433 * Changes in common status bits are propagated to attached ports and units. 1435 * Changes in common status bits are propagated to attached ports and LUNs.
1434 */ 1436 */
1435void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, 1437void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1436 void *ref, u32 mask, int set_or_clear)
1437{ 1438{
1438 struct zfcp_port *port; 1439 struct zfcp_port *port;
1440 struct scsi_device *sdev;
1439 unsigned long flags; 1441 unsigned long flags;
1440 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1442 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1441 1443
1442 if (set_or_clear == ZFCP_SET) { 1444 atomic_set_mask(mask, &adapter->status);
1443 if (status_change_set(mask, &adapter->status))
1444 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1445 atomic_set_mask(mask, &adapter->status);
1446 } else {
1447 if (status_change_clear(mask, &adapter->status))
1448 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1449 atomic_clear_mask(mask, &adapter->status);
1450 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1451 atomic_set(&adapter->erp_counter, 0);
1452 }
1453 1445
1454 if (common_mask) { 1446 if (!common_mask)
1455 read_lock_irqsave(&adapter->port_list_lock, flags); 1447 return;
1456 list_for_each_entry(port, &adapter->port_list, list) 1448
1457 zfcp_erp_modify_port_status(port, id, ref, common_mask, 1449 read_lock_irqsave(&adapter->port_list_lock, flags);
1458 set_or_clear); 1450 list_for_each_entry(port, &adapter->port_list, list)
1459 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1451 atomic_set_mask(common_mask, &port->status);
1460 } 1452 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1453
1454 shost_for_each_device(sdev, adapter->scsi_host)
1455 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1461} 1456}
1462 1457
1463/** 1458/**
1464 * zfcp_erp_modify_port_status - change port status bits 1459 * zfcp_erp_clear_adapter_status - clear adapter status bits
1465 * @port: port to change the status bits 1460 * @adapter: adapter to change the status
1466 * @id: id for the debug trace
1467 * @ref: reference for the debug trace
1468 * @mask: status bits to change 1461 * @mask: status bits to change
1469 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1470 * 1462 *
1471 * Changes in common status bits are propagated to attached units. 1463 * Changes in common status bits are propagated to attached ports and LUNs.
1472 */ 1464 */
1473void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, 1465void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1474 u32 mask, int set_or_clear)
1475{ 1466{
1476 struct zfcp_unit *unit; 1467 struct zfcp_port *port;
1468 struct scsi_device *sdev;
1477 unsigned long flags; 1469 unsigned long flags;
1478 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1470 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1471 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1472
1473 atomic_clear_mask(mask, &adapter->status);
1474
1475 if (!common_mask)
1476 return;
1477
1478 if (clear_counter)
1479 atomic_set(&adapter->erp_counter, 0);
1479 1480
1480 if (set_or_clear == ZFCP_SET) { 1481 read_lock_irqsave(&adapter->port_list_lock, flags);
1481 if (status_change_set(mask, &port->status)) 1482 list_for_each_entry(port, &adapter->port_list, list) {
1482 zfcp_dbf_rec_port(id, ref, port); 1483 atomic_clear_mask(common_mask, &port->status);
1483 atomic_set_mask(mask, &port->status); 1484 if (clear_counter)
1484 } else {
1485 if (status_change_clear(mask, &port->status))
1486 zfcp_dbf_rec_port(id, ref, port);
1487 atomic_clear_mask(mask, &port->status);
1488 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1489 atomic_set(&port->erp_counter, 0); 1485 atomic_set(&port->erp_counter, 0);
1490 } 1486 }
1487 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1491 1488
1492 if (common_mask) { 1489 shost_for_each_device(sdev, adapter->scsi_host) {
1493 read_lock_irqsave(&port->unit_list_lock, flags); 1490 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1494 list_for_each_entry(unit, &port->unit_list, list) 1491 if (clear_counter)
1495 zfcp_erp_modify_unit_status(unit, id, ref, common_mask, 1492 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1496 set_or_clear);
1497 read_unlock_irqrestore(&port->unit_list_lock, flags);
1498 } 1493 }
1499} 1494}
1500 1495
1501/** 1496/**
1502 * zfcp_erp_modify_unit_status - change unit status bits 1497 * zfcp_erp_set_port_status - set port status bits
1503 * @unit: unit to change the status bits 1498 * @port: port to change the status
1504 * @id: id for the debug trace
1505 * @ref: reference for the debug trace
1506 * @mask: status bits to change 1499 * @mask: status bits to change
1507 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR 1500 *
1508 */ 1501 * Changes in common status bits are propagated to attached LUNs.
1509void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1510 u32 mask, int set_or_clear)
1511{
1512 if (set_or_clear == ZFCP_SET) {
1513 if (status_change_set(mask, &unit->status))
1514 zfcp_dbf_rec_unit(id, ref, unit);
1515 atomic_set_mask(mask, &unit->status);
1516 } else {
1517 if (status_change_clear(mask, &unit->status))
1518 zfcp_dbf_rec_unit(id, ref, unit);
1519 atomic_clear_mask(mask, &unit->status);
1520 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1521 atomic_set(&unit->erp_counter, 0);
1522 }
1523 }
1524}
1525
1526/**
1527 * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
1528 * @port: The "boxed" port.
1529 * @id: The debug trace id.
1530 * @id: Reference for the debug trace.
1531 */ 1502 */
1532void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) 1503void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1533{ 1504{
1534 zfcp_erp_modify_port_status(port, id, ref, 1505 struct scsi_device *sdev;
1535 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1506 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1536 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1537}
1538 1507
1539/** 1508 atomic_set_mask(mask, &port->status);
1540 * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
1541 * @port: The "boxed" unit.
1542 * @id: The debug trace id.
1543 * @id: Reference for the debug trace.
1544 */
1545void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1546{
1547 zfcp_erp_modify_unit_status(unit, id, ref,
1548 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
1549 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1550}
1551 1509
1552/** 1510 if (!common_mask)
1553 * zfcp_erp_port_access_denied - Adapter denied access to port. 1511 return;
1554 * @port: port where access has been denied 1512
1555 * @id: id for debug trace 1513 shost_for_each_device(sdev, port->adapter->scsi_host)
1556 * @ref: reference for debug trace 1514 if (sdev_to_zfcp(sdev)->port == port)
1557 * 1515 atomic_set_mask(common_mask,
1558 * Since the adapter has denied access, stop using the port and the 1516 &sdev_to_zfcp(sdev)->status);
1559 * attached units.
1560 */
1561void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1562{
1563 zfcp_erp_modify_port_status(port, id, ref,
1564 ZFCP_STATUS_COMMON_ERP_FAILED |
1565 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1566} 1517}
1567 1518
1568/** 1519/**
1569 * zfcp_erp_unit_access_denied - Adapter denied access to unit. 1520 * zfcp_erp_clear_port_status - clear port status bits
1570 * @unit: unit where access has been denied 1521 * @port: adapter to change the status
1571 * @id: id for debug trace 1522 * @mask: status bits to change
1572 * @ref: reference for debug trace
1573 * 1523 *
1574 * Since the adapter has denied access, stop using the unit. 1524 * Changes in common status bits are propagated to attached LUNs.
1575 */ 1525 */
1576void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) 1526void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1577{ 1527{
1578 zfcp_erp_modify_unit_status(unit, id, ref, 1528 struct scsi_device *sdev;
1579 ZFCP_STATUS_COMMON_ERP_FAILED | 1529 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1580 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1530 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1581}
1582 1531
1583static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, 1532 atomic_clear_mask(mask, &port->status);
1584 void *ref) 1533
1585{ 1534 if (!common_mask)
1586 int status = atomic_read(&unit->status);
1587 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1588 ZFCP_STATUS_COMMON_ACCESS_BOXED)))
1589 return; 1535 return;
1590 1536
1591 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1537 if (clear_counter)
1538 atomic_set(&port->erp_counter, 0);
1539
1540 shost_for_each_device(sdev, port->adapter->scsi_host)
1541 if (sdev_to_zfcp(sdev)->port == port) {
1542 atomic_clear_mask(common_mask,
1543 &sdev_to_zfcp(sdev)->status);
1544 if (clear_counter)
1545 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1546 }
1592} 1547}
1593 1548
1594static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, 1549/**
1595 void *ref) 1550 * zfcp_erp_set_lun_status - set lun status bits
1551 * @sdev: SCSI device / lun to set the status bits
1552 * @mask: status bits to change
1553 */
1554void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
1596{ 1555{
1597 struct zfcp_unit *unit; 1556 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1598 unsigned long flags;
1599 int status = atomic_read(&port->status);
1600 1557
1601 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1558 atomic_set_mask(mask, &zfcp_sdev->status);
1602 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1603 read_lock_irqsave(&port->unit_list_lock, flags);
1604 list_for_each_entry(unit, &port->unit_list, list)
1605 zfcp_erp_unit_access_changed(unit, id, ref);
1606 read_unlock_irqrestore(&port->unit_list_lock, flags);
1607 return;
1608 }
1609
1610 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1611} 1559}
1612 1560
1613/** 1561/**
1614 * zfcp_erp_adapter_access_changed - Process change in adapter ACT 1562 * zfcp_erp_clear_lun_status - clear lun status bits
1615 * @adapter: Adapter where the Access Control Table (ACT) changed 1563 * @sdev: SCSi device / lun to clear the status bits
1616 * @id: Id for debug trace 1564 * @mask: status bits to change
1617 * @ref: Reference for debug trace
1618 */ 1565 */
1619void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1566void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
1620 void *ref)
1621{ 1567{
1622 unsigned long flags; 1568 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1623 struct zfcp_port *port;
1624 1569
1625 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1570 atomic_clear_mask(mask, &zfcp_sdev->status);
1626 return;
1627 1571
1628 read_lock_irqsave(&adapter->port_list_lock, flags); 1572 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1629 list_for_each_entry(port, &adapter->port_list, list) 1573 atomic_set(&zfcp_sdev->erp_counter, 0);
1630 zfcp_erp_port_access_changed(port, id, ref);
1631 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1632} 1574}
1575
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3b93239c6f69..bf8f3e514839 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -15,12 +15,10 @@
15#include "zfcp_fc.h" 15#include "zfcp_fc.h"
16 16
17/* zfcp_aux.c */ 17/* zfcp_aux.c */
18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 18extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); 19extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 20extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 21 u32);
23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
24extern void zfcp_sg_free_table(struct scatterlist *, int); 22extern void zfcp_sg_free_table(struct scatterlist *, int);
25extern int zfcp_sg_setup_table(struct scatterlist *, int); 23extern int zfcp_sg_setup_table(struct scatterlist *, int);
26extern void zfcp_device_unregister(struct device *, 24extern void zfcp_device_unregister(struct device *,
@@ -36,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
36 34
37/* zfcp_cfdc.c */ 35/* zfcp_cfdc.c */
38extern struct miscdevice zfcp_cfdc_misc; 36extern struct miscdevice zfcp_cfdc_misc;
37extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
38extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
39extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
40 union fsf_status_qual *);
41extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
42 struct fsf_qtcb_bottom_support *);
43extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
44
39 45
40/* zfcp_dbf.c */ 46/* zfcp_dbf.c */
41extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); 47extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
@@ -44,10 +50,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
44extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); 50extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
45extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); 51extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
46extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); 52extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
47extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); 53extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
48extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, 54extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
49 struct zfcp_adapter *, struct zfcp_port *, 55 struct zfcp_adapter *, struct zfcp_port *,
50 struct zfcp_unit *); 56 struct scsi_device *);
51extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); 57extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
52extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, 58extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
53 struct zfcp_dbf *); 59 struct zfcp_dbf *);
@@ -65,34 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
65 unsigned long); 71 unsigned long);
66 72
67/* zfcp_erp.c */ 73/* zfcp_erp.c */
68extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, 74extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
69 void *, u32, int); 75extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
70extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); 76extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
71extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, 77extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
72 void *); 78 void *);
73extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); 79extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
74extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, 80extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
75 int);
76extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); 81extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
77extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); 82extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
78extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, 83extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
79 void *); 84 void *);
80extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); 85extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
81extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, 86extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
82 int); 87extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
83extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); 88extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
84extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); 89extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
85extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
86extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 90extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
87extern void zfcp_erp_thread_kill(struct zfcp_adapter *); 91extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
88extern void zfcp_erp_wait(struct zfcp_adapter *); 92extern void zfcp_erp_wait(struct zfcp_adapter *);
89extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); 93extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
90extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
91extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
92extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
93extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
94extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
95 void *);
96extern void zfcp_erp_timeout_handler(unsigned long); 94extern void zfcp_erp_timeout_handler(unsigned long);
97 95
98/* zfcp_fc.c */ 96/* zfcp_fc.c */
@@ -118,8 +116,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
118extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); 116extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
119extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
120extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 118extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
121extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 119extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
122extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 120extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
123extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 121extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
124extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, 122extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
125 struct fsf_qtcb_bottom_config *); 123 struct fsf_qtcb_bottom_config *);
@@ -135,12 +133,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
135 mempool_t *, unsigned int); 133 mempool_t *, unsigned int);
136extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 134extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
137 struct zfcp_fsf_ct_els *, unsigned int); 135 struct zfcp_fsf_ct_els *, unsigned int);
138extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 136extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
139 struct scsi_cmnd *);
140extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
141extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); 138extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
142extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, 139extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
143 struct zfcp_unit *);
144extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); 140extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
145 141
146/* zfcp_qdio.c */ 142/* zfcp_qdio.c */
@@ -163,8 +159,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
163extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
164extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); 160extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
165extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); 161extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
166extern void zfcp_scsi_scan(struct zfcp_unit *);
167extern void zfcp_scsi_scan_work(struct work_struct *);
168extern void zfcp_scsi_set_prot(struct zfcp_adapter *); 162extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
169extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); 163extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
170 164
@@ -175,4 +169,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs;
175extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 169extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
176extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 170extern struct device_attribute *zfcp_sysfs_shost_attrs[];
177 171
172/* zfcp_unit.c */
173extern int zfcp_unit_add(struct zfcp_port *, u64);
174extern int zfcp_unit_remove(struct zfcp_port *, u64);
175extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
176extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
177extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
178extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
179extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
180
178#endif /* ZFCP_EXT_H */ 181#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f3ed2b9a349..86fd905df48b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
365 } 365 }
366 366
367 if (!port->d_id) { 367 if (!port->d_id) {
368 zfcp_erp_port_failed(port, "fcgpn_2", NULL); 368 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
369 goto out; 369 goto out;
370 } 370 }
371 371
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9d1d7d1842ce..beaf0916ceab 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -61,45 +61,6 @@ static u32 fsf_qtcb_type[] = {
61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
62}; 62};
63 63
64static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
65{
66 u16 subtable = table >> 16;
67 u16 rule = table & 0xffff;
68 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
69
70 if (subtable && subtable < ARRAY_SIZE(act_type))
71 dev_warn(&adapter->ccw_device->dev,
72 "Access denied according to ACT rule type %s, "
73 "rule %d\n", act_type[subtable], rule);
74}
75
76static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
77 struct zfcp_port *port)
78{
79 struct fsf_qtcb_header *header = &req->qtcb->header;
80 dev_warn(&req->adapter->ccw_device->dev,
81 "Access denied to port 0x%016Lx\n",
82 (unsigned long long)port->wwpn);
83 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
84 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
85 zfcp_erp_port_access_denied(port, "fspad_1", req);
86 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
87}
88
89static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
90 struct zfcp_unit *unit)
91{
92 struct fsf_qtcb_header *header = &req->qtcb->header;
93 dev_warn(&req->adapter->ccw_device->dev,
94 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
95 (unsigned long long)unit->fcp_lun,
96 (unsigned long long)unit->port->wwpn);
97 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
98 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
99 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101}
102
103static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 64static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
104{ 65{
105 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 66 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
@@ -143,7 +104,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
143 read_unlock_irqrestore(&adapter->port_list_lock, flags); 104 read_unlock_irqrestore(&adapter->port_list_lock, flags);
144} 105}
145 106
146static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, 107static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
147 struct fsf_link_down_info *link_down) 108 struct fsf_link_down_info *link_down)
148{ 109{
149 struct zfcp_adapter *adapter = req->adapter; 110 struct zfcp_adapter *adapter = req->adapter;
@@ -223,7 +184,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
223 "the FC fabric is down\n"); 184 "the FC fabric is down\n");
224 } 185 }
225out: 186out:
226 zfcp_erp_adapter_failed(adapter, id, req); 187 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
227} 188}
228 189
229static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 190static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
@@ -234,13 +195,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
234 195
235 switch (sr_buf->status_subtype) { 196 switch (sr_buf->status_subtype) {
236 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 197 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); 198 zfcp_fsf_link_down_info_eval(req, ldi);
238 break; 199 break;
239 case FSF_STATUS_READ_SUB_FDISC_FAILED: 200 case FSF_STATUS_READ_SUB_FDISC_FAILED:
240 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); 201 zfcp_fsf_link_down_info_eval(req, ldi);
241 break; 202 break;
242 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 203 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
243 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); 204 zfcp_fsf_link_down_info_eval(req, NULL);
244 }; 205 };
245} 206}
246 207
@@ -281,9 +242,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
281 dev_info(&adapter->ccw_device->dev, 242 dev_info(&adapter->ccw_device->dev,
282 "The local link has been restored\n"); 243 "The local link has been restored\n");
283 /* All ports should be marked as ready to run again */ 244 /* All ports should be marked as ready to run again */
284 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, 245 zfcp_erp_set_adapter_status(adapter,
285 ZFCP_STATUS_COMMON_RUNNING, 246 ZFCP_STATUS_COMMON_RUNNING);
286 ZFCP_SET);
287 zfcp_erp_adapter_reopen(adapter, 247 zfcp_erp_adapter_reopen(adapter,
288 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
289 ZFCP_STATUS_COMMON_ERP_FAILED, 249 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -293,13 +253,12 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
293 break; 253 break;
294 case FSF_STATUS_READ_NOTIFICATION_LOST: 254 case FSF_STATUS_READ_NOTIFICATION_LOST:
295 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 255 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
296 zfcp_erp_adapter_access_changed(adapter, "fssrh_3", 256 zfcp_cfdc_adapter_access_changed(adapter);
297 req);
298 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
299 queue_work(adapter->work_queue, &adapter->scan_work); 258 queue_work(adapter->work_queue, &adapter->scan_work);
300 break; 259 break;
301 case FSF_STATUS_READ_CFDC_UPDATED: 260 case FSF_STATUS_READ_CFDC_UPDATED:
302 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); 261 zfcp_cfdc_adapter_access_changed(adapter);
303 break; 262 break;
304 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 263 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
305 adapter->adapter_features = sr_buf->payload.word[0]; 264 adapter->adapter_features = sr_buf->payload.word[0];
@@ -399,16 +358,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
399 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); 358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
400 break; 359 break;
401 case FSF_PROT_LINK_DOWN: 360 case FSF_PROT_LINK_DOWN:
402 zfcp_fsf_link_down_info_eval(req, "fspse_5", 361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
403 &psq->link_down_info);
404 /* go through reopen to flush pending requests */ 362 /* go through reopen to flush pending requests */
405 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
406 break; 364 break;
407 case FSF_PROT_REEST_QUEUE: 365 case FSF_PROT_REEST_QUEUE:
408 /* All ports should be marked as ready to run again */ 366 /* All ports should be marked as ready to run again */
409 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, 367 zfcp_erp_set_adapter_status(adapter,
410 ZFCP_STATUS_COMMON_RUNNING, 368 ZFCP_STATUS_COMMON_RUNNING);
411 ZFCP_SET);
412 zfcp_erp_adapter_reopen(adapter, 369 zfcp_erp_adapter_reopen(adapter,
413 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
414 ZFCP_STATUS_COMMON_ERP_FAILED, 371 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -578,7 +535,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
578 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 535 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
579 &adapter->status); 536 &adapter->status);
580 537
581 zfcp_fsf_link_down_info_eval(req, "fsecdh2", 538 zfcp_fsf_link_down_info_eval(req,
582 &qtcb->header.fsf_status_qual.link_down_info); 539 &qtcb->header.fsf_status_qual.link_down_info);
583 break; 540 break;
584 default: 541 default:
@@ -644,7 +601,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
644 break; 601 break;
645 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 602 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
646 zfcp_fsf_exchange_port_evaluate(req); 603 zfcp_fsf_exchange_port_evaluate(req);
647 zfcp_fsf_link_down_info_eval(req, "fsepdh1", 604 zfcp_fsf_link_down_info_eval(req,
648 &qtcb->header.fsf_status_qual.link_down_info); 605 &qtcb->header.fsf_status_qual.link_down_info);
649 break; 606 break;
650 } 607 }
@@ -771,7 +728,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
771 struct fsf_status_read_buffer *sr_buf; 728 struct fsf_status_read_buffer *sr_buf;
772 int retval = -EIO; 729 int retval = -EIO;
773 730
774 spin_lock_bh(&qdio->req_q_lock); 731 spin_lock_irq(&qdio->req_q_lock);
775 if (zfcp_qdio_sbal_get(qdio)) 732 if (zfcp_qdio_sbal_get(qdio))
776 goto out; 733 goto out;
777 734
@@ -805,13 +762,14 @@ failed_buf:
805 zfcp_fsf_req_free(req); 762 zfcp_fsf_req_free(req);
806 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); 763 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
807out: 764out:
808 spin_unlock_bh(&qdio->req_q_lock); 765 spin_unlock_irq(&qdio->req_q_lock);
809 return retval; 766 return retval;
810} 767}
811 768
812static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 769static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
813{ 770{
814 struct zfcp_unit *unit = req->data; 771 struct scsi_device *sdev = req->data;
772 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
815 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 773 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
816 774
817 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 775 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -820,14 +778,15 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
820 switch (req->qtcb->header.fsf_status) { 778 switch (req->qtcb->header.fsf_status) {
821 case FSF_PORT_HANDLE_NOT_VALID: 779 case FSF_PORT_HANDLE_NOT_VALID:
822 if (fsq->word[0] == fsq->word[1]) { 780 if (fsq->word[0] == fsq->word[1]) {
823 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 781 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
824 "fsafch1", req); 782 "fsafch1", req);
825 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 783 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
826 } 784 }
827 break; 785 break;
828 case FSF_LUN_HANDLE_NOT_VALID: 786 case FSF_LUN_HANDLE_NOT_VALID:
829 if (fsq->word[0] == fsq->word[1]) { 787 if (fsq->word[0] == fsq->word[1]) {
830 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); 788 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
789 req);
831 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 790 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
832 } 791 }
833 break; 792 break;
@@ -835,17 +794,23 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
835 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 794 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
836 break; 795 break;
837 case FSF_PORT_BOXED: 796 case FSF_PORT_BOXED:
838 zfcp_erp_port_boxed(unit->port, "fsafch3", req); 797 zfcp_erp_set_port_status(zfcp_sdev->port,
798 ZFCP_STATUS_COMMON_ACCESS_BOXED);
799 zfcp_erp_port_reopen(zfcp_sdev->port,
800 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
801 req);
839 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 802 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
840 break; 803 break;
841 case FSF_LUN_BOXED: 804 case FSF_LUN_BOXED:
842 zfcp_erp_unit_boxed(unit, "fsafch4", req); 805 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
806 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
807 "fsafch4", req);
843 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 808 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
844 break; 809 break;
845 case FSF_ADAPTER_STATUS_AVAILABLE: 810 case FSF_ADAPTER_STATUS_AVAILABLE:
846 switch (fsq->word[0]) { 811 switch (fsq->word[0]) {
847 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 812 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
848 zfcp_fc_test_link(unit->port); 813 zfcp_fc_test_link(zfcp_sdev->port);
849 /* fall through */ 814 /* fall through */
850 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 815 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
851 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 816 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -859,19 +824,20 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
859} 824}
860 825
861/** 826/**
862 * zfcp_fsf_abort_fcp_command - abort running SCSI command 827 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
863 * @old_req_id: unsigned long 828 * @scmnd: The SCSI command to abort
864 * @unit: pointer to struct zfcp_unit
865 * Returns: pointer to struct zfcp_fsf_req 829 * Returns: pointer to struct zfcp_fsf_req
866 */ 830 */
867 831
868struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, 832struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
869 struct zfcp_unit *unit)
870{ 833{
871 struct zfcp_fsf_req *req = NULL; 834 struct zfcp_fsf_req *req = NULL;
872 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 835 struct scsi_device *sdev = scmnd->device;
836 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
837 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
838 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
873 839
874 spin_lock_bh(&qdio->req_q_lock); 840 spin_lock_irq(&qdio->req_q_lock);
875 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
876 goto out; 842 goto out;
877 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
@@ -882,16 +848,16 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
882 goto out; 848 goto out;
883 } 849 }
884 850
885 if (unlikely(!(atomic_read(&unit->status) & 851 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
886 ZFCP_STATUS_COMMON_UNBLOCKED))) 852 ZFCP_STATUS_COMMON_UNBLOCKED)))
887 goto out_error_free; 853 goto out_error_free;
888 854
889 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 855 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
890 856
891 req->data = unit; 857 req->data = zfcp_sdev;
892 req->handler = zfcp_fsf_abort_fcp_command_handler; 858 req->handler = zfcp_fsf_abort_fcp_command_handler;
893 req->qtcb->header.lun_handle = unit->handle; 859 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
894 req->qtcb->header.port_handle = unit->port->handle; 860 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
895 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 861 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
896 862
897 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 863 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -902,7 +868,7 @@ out_error_free:
902 zfcp_fsf_req_free(req); 868 zfcp_fsf_req_free(req);
903 req = NULL; 869 req = NULL;
904out: 870out:
905 spin_unlock_bh(&qdio->req_q_lock); 871 spin_unlock_irq(&qdio->req_q_lock);
906 return req; 872 return req;
907} 873}
908 874
@@ -1041,7 +1007,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1041 struct zfcp_fsf_req *req; 1007 struct zfcp_fsf_req *req;
1042 int ret = -EIO; 1008 int ret = -EIO;
1043 1009
1044 spin_lock_bh(&qdio->req_q_lock); 1010 spin_lock_irq(&qdio->req_q_lock);
1045 if (zfcp_qdio_sbal_get(qdio)) 1011 if (zfcp_qdio_sbal_get(qdio))
1046 goto out; 1012 goto out;
1047 1013
@@ -1073,7 +1039,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1073failed_send: 1039failed_send:
1074 zfcp_fsf_req_free(req); 1040 zfcp_fsf_req_free(req);
1075out: 1041out:
1076 spin_unlock_bh(&qdio->req_q_lock); 1042 spin_unlock_irq(&qdio->req_q_lock);
1077 return ret; 1043 return ret;
1078} 1044}
1079 1045
@@ -1111,8 +1077,10 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1111 case FSF_RESPONSE_SIZE_TOO_LARGE: 1077 case FSF_RESPONSE_SIZE_TOO_LARGE:
1112 break; 1078 break;
1113 case FSF_ACCESS_DENIED: 1079 case FSF_ACCESS_DENIED:
1114 if (port) 1080 if (port) {
1115 zfcp_fsf_access_denied_port(req, port); 1081 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1082 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1083 }
1116 break; 1084 break;
1117 case FSF_SBAL_MISMATCH: 1085 case FSF_SBAL_MISMATCH:
1118 /* should never occure, avoided in zfcp_fsf_send_els */ 1086 /* should never occure, avoided in zfcp_fsf_send_els */
@@ -1137,7 +1105,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1137 struct zfcp_qdio *qdio = adapter->qdio; 1105 struct zfcp_qdio *qdio = adapter->qdio;
1138 int ret = -EIO; 1106 int ret = -EIO;
1139 1107
1140 spin_lock_bh(&qdio->req_q_lock); 1108 spin_lock_irq(&qdio->req_q_lock);
1141 if (zfcp_qdio_sbal_get(qdio)) 1109 if (zfcp_qdio_sbal_get(qdio))
1142 goto out; 1110 goto out;
1143 1111
@@ -1173,7 +1141,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1173failed_send: 1141failed_send:
1174 zfcp_fsf_req_free(req); 1142 zfcp_fsf_req_free(req);
1175out: 1143out:
1176 spin_unlock_bh(&qdio->req_q_lock); 1144 spin_unlock_irq(&qdio->req_q_lock);
1177 return ret; 1145 return ret;
1178} 1146}
1179 1147
@@ -1183,7 +1151,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1183 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1151 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1184 int retval = -EIO; 1152 int retval = -EIO;
1185 1153
1186 spin_lock_bh(&qdio->req_q_lock); 1154 spin_lock_irq(&qdio->req_q_lock);
1187 if (zfcp_qdio_sbal_get(qdio)) 1155 if (zfcp_qdio_sbal_get(qdio))
1188 goto out; 1156 goto out;
1189 1157
@@ -1215,7 +1183,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1215 erp_action->fsf_req_id = 0; 1183 erp_action->fsf_req_id = 0;
1216 } 1184 }
1217out: 1185out:
1218 spin_unlock_bh(&qdio->req_q_lock); 1186 spin_unlock_irq(&qdio->req_q_lock);
1219 return retval; 1187 return retval;
1220} 1188}
1221 1189
@@ -1225,7 +1193,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1225 struct zfcp_fsf_req *req = NULL; 1193 struct zfcp_fsf_req *req = NULL;
1226 int retval = -EIO; 1194 int retval = -EIO;
1227 1195
1228 spin_lock_bh(&qdio->req_q_lock); 1196 spin_lock_irq(&qdio->req_q_lock);
1229 if (zfcp_qdio_sbal_get(qdio)) 1197 if (zfcp_qdio_sbal_get(qdio))
1230 goto out_unlock; 1198 goto out_unlock;
1231 1199
@@ -1251,7 +1219,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1251 1219
1252 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1220 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1253 retval = zfcp_fsf_req_send(req); 1221 retval = zfcp_fsf_req_send(req);
1254 spin_unlock_bh(&qdio->req_q_lock); 1222 spin_unlock_irq(&qdio->req_q_lock);
1255 if (!retval) 1223 if (!retval)
1256 wait_for_completion(&req->completion); 1224 wait_for_completion(&req->completion);
1257 1225
@@ -1259,7 +1227,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1259 return retval; 1227 return retval;
1260 1228
1261out_unlock: 1229out_unlock:
1262 spin_unlock_bh(&qdio->req_q_lock); 1230 spin_unlock_irq(&qdio->req_q_lock);
1263 return retval; 1231 return retval;
1264} 1232}
1265 1233
@@ -1277,7 +1245,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1277 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1245 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1278 return -EOPNOTSUPP; 1246 return -EOPNOTSUPP;
1279 1247
1280 spin_lock_bh(&qdio->req_q_lock); 1248 spin_lock_irq(&qdio->req_q_lock);
1281 if (zfcp_qdio_sbal_get(qdio)) 1249 if (zfcp_qdio_sbal_get(qdio))
1282 goto out; 1250 goto out;
1283 1251
@@ -1304,7 +1272,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1304 erp_action->fsf_req_id = 0; 1272 erp_action->fsf_req_id = 0;
1305 } 1273 }
1306out: 1274out:
1307 spin_unlock_bh(&qdio->req_q_lock); 1275 spin_unlock_irq(&qdio->req_q_lock);
1308 return retval; 1276 return retval;
1309} 1277}
1310 1278
@@ -1323,7 +1291,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1323 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1291 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1324 return -EOPNOTSUPP; 1292 return -EOPNOTSUPP;
1325 1293
1326 spin_lock_bh(&qdio->req_q_lock); 1294 spin_lock_irq(&qdio->req_q_lock);
1327 if (zfcp_qdio_sbal_get(qdio)) 1295 if (zfcp_qdio_sbal_get(qdio))
1328 goto out_unlock; 1296 goto out_unlock;
1329 1297
@@ -1343,7 +1311,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1343 req->handler = zfcp_fsf_exchange_port_data_handler; 1311 req->handler = zfcp_fsf_exchange_port_data_handler;
1344 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1312 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1345 retval = zfcp_fsf_req_send(req); 1313 retval = zfcp_fsf_req_send(req);
1346 spin_unlock_bh(&qdio->req_q_lock); 1314 spin_unlock_irq(&qdio->req_q_lock);
1347 1315
1348 if (!retval) 1316 if (!retval)
1349 wait_for_completion(&req->completion); 1317 wait_for_completion(&req->completion);
@@ -1353,7 +1321,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1353 return retval; 1321 return retval;
1354 1322
1355out_unlock: 1323out_unlock:
1356 spin_unlock_bh(&qdio->req_q_lock); 1324 spin_unlock_irq(&qdio->req_q_lock);
1357 return retval; 1325 return retval;
1358} 1326}
1359 1327
@@ -1370,14 +1338,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1370 case FSF_PORT_ALREADY_OPEN: 1338 case FSF_PORT_ALREADY_OPEN:
1371 break; 1339 break;
1372 case FSF_ACCESS_DENIED: 1340 case FSF_ACCESS_DENIED:
1373 zfcp_fsf_access_denied_port(req, port); 1341 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1342 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1374 break; 1343 break;
1375 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1344 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1376 dev_warn(&req->adapter->ccw_device->dev, 1345 dev_warn(&req->adapter->ccw_device->dev,
1377 "Not enough FCP adapter resources to open " 1346 "Not enough FCP adapter resources to open "
1378 "remote port 0x%016Lx\n", 1347 "remote port 0x%016Lx\n",
1379 (unsigned long long)port->wwpn); 1348 (unsigned long long)port->wwpn);
1380 zfcp_erp_port_failed(port, "fsoph_1", req); 1349 zfcp_erp_set_port_status(port,
1350 ZFCP_STATUS_COMMON_ERP_FAILED);
1381 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1351 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1382 break; 1352 break;
1383 case FSF_ADAPTER_STATUS_AVAILABLE: 1353 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1437,7 +1407,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1437 struct zfcp_fsf_req *req; 1407 struct zfcp_fsf_req *req;
1438 int retval = -EIO; 1408 int retval = -EIO;
1439 1409
1440 spin_lock_bh(&qdio->req_q_lock); 1410 spin_lock_irq(&qdio->req_q_lock);
1441 if (zfcp_qdio_sbal_get(qdio)) 1411 if (zfcp_qdio_sbal_get(qdio))
1442 goto out; 1412 goto out;
1443 1413
@@ -1468,7 +1438,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1468 put_device(&port->dev); 1438 put_device(&port->dev);
1469 } 1439 }
1470out: 1440out:
1471 spin_unlock_bh(&qdio->req_q_lock); 1441 spin_unlock_irq(&qdio->req_q_lock);
1472 return retval; 1442 return retval;
1473} 1443}
1474 1444
@@ -1487,9 +1457,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1487 case FSF_ADAPTER_STATUS_AVAILABLE: 1457 case FSF_ADAPTER_STATUS_AVAILABLE:
1488 break; 1458 break;
1489 case FSF_GOOD: 1459 case FSF_GOOD:
1490 zfcp_erp_modify_port_status(port, "fscph_2", req, 1460 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1491 ZFCP_STATUS_COMMON_OPEN,
1492 ZFCP_CLEAR);
1493 break; 1461 break;
1494 } 1462 }
1495} 1463}
@@ -1505,7 +1473,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1505 struct zfcp_fsf_req *req; 1473 struct zfcp_fsf_req *req;
1506 int retval = -EIO; 1474 int retval = -EIO;
1507 1475
1508 spin_lock_bh(&qdio->req_q_lock); 1476 spin_lock_irq(&qdio->req_q_lock);
1509 if (zfcp_qdio_sbal_get(qdio)) 1477 if (zfcp_qdio_sbal_get(qdio))
1510 goto out; 1478 goto out;
1511 1479
@@ -1534,7 +1502,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1534 erp_action->fsf_req_id = 0; 1502 erp_action->fsf_req_id = 0;
1535 } 1503 }
1536out: 1504out:
1537 spin_unlock_bh(&qdio->req_q_lock); 1505 spin_unlock_irq(&qdio->req_q_lock);
1538 return retval; 1506 return retval;
1539} 1507}
1540 1508
@@ -1580,7 +1548,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1580 struct zfcp_fsf_req *req; 1548 struct zfcp_fsf_req *req;
1581 int retval = -EIO; 1549 int retval = -EIO;
1582 1550
1583 spin_lock_bh(&qdio->req_q_lock); 1551 spin_lock_irq(&qdio->req_q_lock);
1584 if (zfcp_qdio_sbal_get(qdio)) 1552 if (zfcp_qdio_sbal_get(qdio))
1585 goto out; 1553 goto out;
1586 1554
@@ -1605,7 +1573,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1605 if (retval) 1573 if (retval)
1606 zfcp_fsf_req_free(req); 1574 zfcp_fsf_req_free(req);
1607out: 1575out:
1608 spin_unlock_bh(&qdio->req_q_lock); 1576 spin_unlock_irq(&qdio->req_q_lock);
1609 return retval; 1577 return retval;
1610} 1578}
1611 1579
@@ -1633,7 +1601,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1633 struct zfcp_fsf_req *req; 1601 struct zfcp_fsf_req *req;
1634 int retval = -EIO; 1602 int retval = -EIO;
1635 1603
1636 spin_lock_bh(&qdio->req_q_lock); 1604 spin_lock_irq(&qdio->req_q_lock);
1637 if (zfcp_qdio_sbal_get(qdio)) 1605 if (zfcp_qdio_sbal_get(qdio))
1638 goto out; 1606 goto out;
1639 1607
@@ -1658,7 +1626,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1658 if (retval) 1626 if (retval)
1659 zfcp_fsf_req_free(req); 1627 zfcp_fsf_req_free(req);
1660out: 1628out:
1661 spin_unlock_bh(&qdio->req_q_lock); 1629 spin_unlock_irq(&qdio->req_q_lock);
1662 return retval; 1630 return retval;
1663} 1631}
1664 1632
@@ -1666,7 +1634,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1666{ 1634{
1667 struct zfcp_port *port = req->data; 1635 struct zfcp_port *port = req->data;
1668 struct fsf_qtcb_header *header = &req->qtcb->header; 1636 struct fsf_qtcb_header *header = &req->qtcb->header;
1669 struct zfcp_unit *unit; 1637 struct scsi_device *sdev;
1670 1638
1671 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1639 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1672 return; 1640 return;
@@ -1677,18 +1645,19 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1677 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1645 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1678 break; 1646 break;
1679 case FSF_ACCESS_DENIED: 1647 case FSF_ACCESS_DENIED:
1680 zfcp_fsf_access_denied_port(req, port); 1648 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1681 break; 1649 break;
1682 case FSF_PORT_BOXED: 1650 case FSF_PORT_BOXED:
1683 /* can't use generic zfcp_erp_modify_port_status because 1651 /* can't use generic zfcp_erp_modify_port_status because
1684 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1652 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1685 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1653 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1686 read_lock(&port->unit_list_lock); 1654 shost_for_each_device(sdev, port->adapter->scsi_host)
1687 list_for_each_entry(unit, &port->unit_list, list) 1655 if (sdev_to_zfcp(sdev)->port == port)
1688 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1656 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1689 &unit->status); 1657 &sdev_to_zfcp(sdev)->status);
1690 read_unlock(&port->unit_list_lock); 1658 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1691 zfcp_erp_port_boxed(port, "fscpph2", req); 1659 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1660 "fscpph2", req);
1692 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1661 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1693 break; 1662 break;
1694 case FSF_ADAPTER_STATUS_AVAILABLE: 1663 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1705,11 +1674,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1705 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1674 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1706 */ 1675 */
1707 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1676 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1708 read_lock(&port->unit_list_lock); 1677 shost_for_each_device(sdev, port->adapter->scsi_host)
1709 list_for_each_entry(unit, &port->unit_list, list) 1678 if (sdev_to_zfcp(sdev)->port == port)
1710 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1679 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1711 &unit->status); 1680 &sdev_to_zfcp(sdev)->status);
1712 read_unlock(&port->unit_list_lock);
1713 break; 1681 break;
1714 } 1682 }
1715} 1683}
@@ -1725,7 +1693,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1725 struct zfcp_fsf_req *req; 1693 struct zfcp_fsf_req *req;
1726 int retval = -EIO; 1694 int retval = -EIO;
1727 1695
1728 spin_lock_bh(&qdio->req_q_lock); 1696 spin_lock_irq(&qdio->req_q_lock);
1729 if (zfcp_qdio_sbal_get(qdio)) 1697 if (zfcp_qdio_sbal_get(qdio))
1730 goto out; 1698 goto out;
1731 1699
@@ -1754,69 +1722,57 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1754 erp_action->fsf_req_id = 0; 1722 erp_action->fsf_req_id = 0;
1755 } 1723 }
1756out: 1724out:
1757 spin_unlock_bh(&qdio->req_q_lock); 1725 spin_unlock_irq(&qdio->req_q_lock);
1758 return retval; 1726 return retval;
1759} 1727}
1760 1728
1761static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) 1729static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1762{ 1730{
1763 struct zfcp_adapter *adapter = req->adapter; 1731 struct zfcp_adapter *adapter = req->adapter;
1764 struct zfcp_unit *unit = req->data; 1732 struct scsi_device *sdev = req->data;
1733 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1765 struct fsf_qtcb_header *header = &req->qtcb->header; 1734 struct fsf_qtcb_header *header = &req->qtcb->header;
1766 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1735 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1767 struct fsf_queue_designator *queue_designator =
1768 &header->fsf_status_qual.fsf_queue_designator;
1769 int exclusive, readwrite;
1770 1736
1771 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1737 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1772 return; 1738 return;
1773 1739
1774 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1740 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1775 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1741 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1776 ZFCP_STATUS_UNIT_SHARED | 1742 ZFCP_STATUS_LUN_SHARED |
1777 ZFCP_STATUS_UNIT_READONLY, 1743 ZFCP_STATUS_LUN_READONLY,
1778 &unit->status); 1744 &zfcp_sdev->status);
1779 1745
1780 switch (header->fsf_status) { 1746 switch (header->fsf_status) {
1781 1747
1782 case FSF_PORT_HANDLE_NOT_VALID: 1748 case FSF_PORT_HANDLE_NOT_VALID:
1783 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); 1749 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
1784 /* fall through */ 1750 /* fall through */
1785 case FSF_LUN_ALREADY_OPEN: 1751 case FSF_LUN_ALREADY_OPEN:
1786 break; 1752 break;
1787 case FSF_ACCESS_DENIED: 1753 case FSF_ACCESS_DENIED:
1788 zfcp_fsf_access_denied_unit(req, unit); 1754 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1789 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1755 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1790 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1791 break; 1756 break;
1792 case FSF_PORT_BOXED: 1757 case FSF_PORT_BOXED:
1793 zfcp_erp_port_boxed(unit->port, "fsouh_2", req); 1758 zfcp_erp_set_port_status(zfcp_sdev->port,
1759 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1760 zfcp_erp_port_reopen(zfcp_sdev->port,
1761 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
1762 req);
1794 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1763 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1795 break; 1764 break;
1796 case FSF_LUN_SHARING_VIOLATION: 1765 case FSF_LUN_SHARING_VIOLATION:
1797 if (header->fsf_status_qual.word[0]) 1766 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1798 dev_warn(&adapter->ccw_device->dev,
1799 "LUN 0x%Lx on port 0x%Lx is already in "
1800 "use by CSS%d, MIF Image ID %x\n",
1801 (unsigned long long)unit->fcp_lun,
1802 (unsigned long long)unit->port->wwpn,
1803 queue_designator->cssid,
1804 queue_designator->hla);
1805 else
1806 zfcp_act_eval_err(adapter,
1807 header->fsf_status_qual.word[2]);
1808 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1809 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1810 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1811 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1767 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 break; 1768 break;
1813 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1769 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1814 dev_warn(&adapter->ccw_device->dev, 1770 dev_warn(&adapter->ccw_device->dev,
1815 "No handle is available for LUN " 1771 "No handle is available for LUN "
1816 "0x%016Lx on port 0x%016Lx\n", 1772 "0x%016Lx on port 0x%016Lx\n",
1817 (unsigned long long)unit->fcp_lun, 1773 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1818 (unsigned long long)unit->port->wwpn); 1774 (unsigned long long)zfcp_sdev->port->wwpn);
1819 zfcp_erp_unit_failed(unit, "fsouh_4", req); 1775 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1820 /* fall through */ 1776 /* fall through */
1821 case FSF_INVALID_COMMAND_OPTION: 1777 case FSF_INVALID_COMMAND_OPTION:
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1778 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1824,7 +1780,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1824 case FSF_ADAPTER_STATUS_AVAILABLE: 1780 case FSF_ADAPTER_STATUS_AVAILABLE:
1825 switch (header->fsf_status_qual.word[0]) { 1781 switch (header->fsf_status_qual.word[0]) {
1826 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1782 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1827 zfcp_fc_test_link(unit->port); 1783 zfcp_fc_test_link(zfcp_sdev->port);
1828 /* fall through */ 1784 /* fall through */
1829 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1785 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1830 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1786 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1833,70 +1789,26 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1833 break; 1789 break;
1834 1790
1835 case FSF_GOOD: 1791 case FSF_GOOD:
1836 unit->handle = header->lun_handle; 1792 zfcp_sdev->lun_handle = header->lun_handle;
1837 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1793 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1838 1794 zfcp_cfdc_open_lun_eval(sdev, bottom);
1839 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1840 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1841 !zfcp_ccw_priv_sch(adapter)) {
1842 exclusive = (bottom->lun_access_info &
1843 FSF_UNIT_ACCESS_EXCLUSIVE);
1844 readwrite = (bottom->lun_access_info &
1845 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1846
1847 if (!exclusive)
1848 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1849 &unit->status);
1850
1851 if (!readwrite) {
1852 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1853 &unit->status);
1854 dev_info(&adapter->ccw_device->dev,
1855 "SCSI device at LUN 0x%016Lx on port "
1856 "0x%016Lx opened read-only\n",
1857 (unsigned long long)unit->fcp_lun,
1858 (unsigned long long)unit->port->wwpn);
1859 }
1860
1861 if (exclusive && !readwrite) {
1862 dev_err(&adapter->ccw_device->dev,
1863 "Exclusive read-only access not "
1864 "supported (unit 0x%016Lx, "
1865 "port 0x%016Lx)\n",
1866 (unsigned long long)unit->fcp_lun,
1867 (unsigned long long)unit->port->wwpn);
1868 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1870 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1871 } else if (!exclusive && readwrite) {
1872 dev_err(&adapter->ccw_device->dev,
1873 "Shared read-write access not "
1874 "supported (unit 0x%016Lx, port "
1875 "0x%016Lx)\n",
1876 (unsigned long long)unit->fcp_lun,
1877 (unsigned long long)unit->port->wwpn);
1878 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1879 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1880 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1881 }
1882 }
1883 break; 1795 break;
1884 } 1796 }
1885} 1797}
1886 1798
1887/** 1799/**
1888 * zfcp_fsf_open_unit - open unit 1800 * zfcp_fsf_open_lun - open LUN
1889 * @erp_action: pointer to struct zfcp_erp_action 1801 * @erp_action: pointer to struct zfcp_erp_action
1890 * Returns: 0 on success, error otherwise 1802 * Returns: 0 on success, error otherwise
1891 */ 1803 */
1892int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1804int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1893{ 1805{
1894 struct zfcp_adapter *adapter = erp_action->adapter; 1806 struct zfcp_adapter *adapter = erp_action->adapter;
1895 struct zfcp_qdio *qdio = adapter->qdio; 1807 struct zfcp_qdio *qdio = adapter->qdio;
1896 struct zfcp_fsf_req *req; 1808 struct zfcp_fsf_req *req;
1897 int retval = -EIO; 1809 int retval = -EIO;
1898 1810
1899 spin_lock_bh(&qdio->req_q_lock); 1811 spin_lock_irq(&qdio->req_q_lock);
1900 if (zfcp_qdio_sbal_get(qdio)) 1812 if (zfcp_qdio_sbal_get(qdio))
1901 goto out; 1813 goto out;
1902 1814
@@ -1913,9 +1825,9 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1913 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1825 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1914 1826
1915 req->qtcb->header.port_handle = erp_action->port->handle; 1827 req->qtcb->header.port_handle = erp_action->port->handle;
1916 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; 1828 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1917 req->handler = zfcp_fsf_open_unit_handler; 1829 req->handler = zfcp_fsf_open_lun_handler;
1918 req->data = erp_action->unit; 1830 req->data = erp_action->sdev;
1919 req->erp_action = erp_action; 1831 req->erp_action = erp_action;
1920 erp_action->fsf_req_id = req->req_id; 1832 erp_action->fsf_req_id = req->req_id;
1921 1833
@@ -1929,34 +1841,40 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1929 erp_action->fsf_req_id = 0; 1841 erp_action->fsf_req_id = 0;
1930 } 1842 }
1931out: 1843out:
1932 spin_unlock_bh(&qdio->req_q_lock); 1844 spin_unlock_irq(&qdio->req_q_lock);
1933 return retval; 1845 return retval;
1934} 1846}
1935 1847
1936static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) 1848static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1937{ 1849{
1938 struct zfcp_unit *unit = req->data; 1850 struct scsi_device *sdev = req->data;
1851 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1939 1852
1940 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1853 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1941 return; 1854 return;
1942 1855
1943 switch (req->qtcb->header.fsf_status) { 1856 switch (req->qtcb->header.fsf_status) {
1944 case FSF_PORT_HANDLE_NOT_VALID: 1857 case FSF_PORT_HANDLE_NOT_VALID:
1945 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); 1858 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
1859 req);
1946 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1860 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1947 break; 1861 break;
1948 case FSF_LUN_HANDLE_NOT_VALID: 1862 case FSF_LUN_HANDLE_NOT_VALID:
1949 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); 1863 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
1950 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1864 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1951 break; 1865 break;
1952 case FSF_PORT_BOXED: 1866 case FSF_PORT_BOXED:
1953 zfcp_erp_port_boxed(unit->port, "fscuh_3", req); 1867 zfcp_erp_set_port_status(zfcp_sdev->port,
1868 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1869 zfcp_erp_port_reopen(zfcp_sdev->port,
1870 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
1871 req);
1954 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1872 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1955 break; 1873 break;
1956 case FSF_ADAPTER_STATUS_AVAILABLE: 1874 case FSF_ADAPTER_STATUS_AVAILABLE:
1957 switch (req->qtcb->header.fsf_status_qual.word[0]) { 1875 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1958 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1876 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1959 zfcp_fc_test_link(unit->port); 1877 zfcp_fc_test_link(zfcp_sdev->port);
1960 /* fall through */ 1878 /* fall through */
1961 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1879 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1962 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1880 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1964,23 +1882,24 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1964 } 1882 }
1965 break; 1883 break;
1966 case FSF_GOOD: 1884 case FSF_GOOD:
1967 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1885 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1968 break; 1886 break;
1969 } 1887 }
1970} 1888}
1971 1889
1972/** 1890/**
1973 * zfcp_fsf_close_unit - close zfcp unit 1891 * zfcp_fsf_close_LUN - close LUN
1974 * @erp_action: pointer to struct zfcp_unit 1892 * @erp_action: pointer to erp_action triggering the "close LUN"
1975 * Returns: 0 on success, error otherwise 1893 * Returns: 0 on success, error otherwise
1976 */ 1894 */
1977int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 1895int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1978{ 1896{
1979 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1897 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1898 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1980 struct zfcp_fsf_req *req; 1899 struct zfcp_fsf_req *req;
1981 int retval = -EIO; 1900 int retval = -EIO;
1982 1901
1983 spin_lock_bh(&qdio->req_q_lock); 1902 spin_lock_irq(&qdio->req_q_lock);
1984 if (zfcp_qdio_sbal_get(qdio)) 1903 if (zfcp_qdio_sbal_get(qdio))
1985 goto out; 1904 goto out;
1986 1905
@@ -1997,9 +1916,9 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1997 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1916 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1998 1917
1999 req->qtcb->header.port_handle = erp_action->port->handle; 1918 req->qtcb->header.port_handle = erp_action->port->handle;
2000 req->qtcb->header.lun_handle = erp_action->unit->handle; 1919 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2001 req->handler = zfcp_fsf_close_unit_handler; 1920 req->handler = zfcp_fsf_close_lun_handler;
2002 req->data = erp_action->unit; 1921 req->data = erp_action->sdev;
2003 req->erp_action = erp_action; 1922 req->erp_action = erp_action;
2004 erp_action->fsf_req_id = req->req_id; 1923 erp_action->fsf_req_id = req->req_id;
2005 1924
@@ -2010,7 +1929,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2010 erp_action->fsf_req_id = 0; 1929 erp_action->fsf_req_id = 0;
2011 } 1930 }
2012out: 1931out:
2013 spin_unlock_bh(&qdio->req_q_lock); 1932 spin_unlock_irq(&qdio->req_q_lock);
2014 return retval; 1933 return retval;
2015} 1934}
2016 1935
@@ -2025,7 +1944,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2025{ 1944{
2026 struct fsf_qual_latency_info *lat_in; 1945 struct fsf_qual_latency_info *lat_in;
2027 struct latency_cont *lat = NULL; 1946 struct latency_cont *lat = NULL;
2028 struct zfcp_unit *unit = req->unit; 1947 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
2029 struct zfcp_blk_drv_data blktrc; 1948 struct zfcp_blk_drv_data blktrc;
2030 int ticks = req->adapter->timer_ticks; 1949 int ticks = req->adapter->timer_ticks;
2031 1950
@@ -2048,24 +1967,24 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2048 case FSF_DATADIR_DIF_READ_STRIP: 1967 case FSF_DATADIR_DIF_READ_STRIP:
2049 case FSF_DATADIR_DIF_READ_CONVERT: 1968 case FSF_DATADIR_DIF_READ_CONVERT:
2050 case FSF_DATADIR_READ: 1969 case FSF_DATADIR_READ:
2051 lat = &unit->latencies.read; 1970 lat = &zfcp_sdev->latencies.read;
2052 break; 1971 break;
2053 case FSF_DATADIR_DIF_WRITE_INSERT: 1972 case FSF_DATADIR_DIF_WRITE_INSERT:
2054 case FSF_DATADIR_DIF_WRITE_CONVERT: 1973 case FSF_DATADIR_DIF_WRITE_CONVERT:
2055 case FSF_DATADIR_WRITE: 1974 case FSF_DATADIR_WRITE:
2056 lat = &unit->latencies.write; 1975 lat = &zfcp_sdev->latencies.write;
2057 break; 1976 break;
2058 case FSF_DATADIR_CMND: 1977 case FSF_DATADIR_CMND:
2059 lat = &unit->latencies.cmd; 1978 lat = &zfcp_sdev->latencies.cmd;
2060 break; 1979 break;
2061 } 1980 }
2062 1981
2063 if (lat) { 1982 if (lat) {
2064 spin_lock(&unit->latencies.lock); 1983 spin_lock(&zfcp_sdev->latencies.lock);
2065 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); 1984 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2066 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); 1985 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2067 lat->counter++; 1986 lat->counter++;
2068 spin_unlock(&unit->latencies.lock); 1987 spin_unlock(&zfcp_sdev->latencies.lock);
2069 } 1988 }
2070 } 1989 }
2071 1990
@@ -2073,12 +1992,88 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2073 sizeof(blktrc)); 1992 sizeof(blktrc));
2074} 1993}
2075 1994
2076static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 1995static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1996{
1997 struct scsi_cmnd *scmnd = req->data;
1998 struct scsi_device *sdev = scmnd->device;
1999 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2000 struct fsf_qtcb_header *header = &req->qtcb->header;
2001
2002 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2003 return;
2004
2005 switch (header->fsf_status) {
2006 case FSF_HANDLE_MISMATCH:
2007 case FSF_PORT_HANDLE_NOT_VALID:
2008 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
2009 req);
2010 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2011 break;
2012 case FSF_FCPLUN_NOT_VALID:
2013 case FSF_LUN_HANDLE_NOT_VALID:
2014 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
2015 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2016 break;
2017 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2018 zfcp_fsf_class_not_supp(req);
2019 break;
2020 case FSF_ACCESS_DENIED:
2021 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2022 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2023 break;
2024 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2025 dev_err(&req->adapter->ccw_device->dev,
2026 "Incorrect direction %d, LUN 0x%016Lx on port "
2027 "0x%016Lx closed\n",
2028 req->qtcb->bottom.io.data_direction,
2029 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2030 (unsigned long long)zfcp_sdev->port->wwpn);
2031 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2032 "fssfch3", req);
2033 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2034 break;
2035 case FSF_CMND_LENGTH_NOT_VALID:
2036 dev_err(&req->adapter->ccw_device->dev,
2037 "Incorrect CDB length %d, LUN 0x%016Lx on "
2038 "port 0x%016Lx closed\n",
2039 req->qtcb->bottom.io.fcp_cmnd_length,
2040 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2041 (unsigned long long)zfcp_sdev->port->wwpn);
2042 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2043 "fssfch4", req);
2044 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2045 break;
2046 case FSF_PORT_BOXED:
2047 zfcp_erp_set_port_status(zfcp_sdev->port,
2048 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2049 zfcp_erp_port_reopen(zfcp_sdev->port,
2050 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
2051 req);
2052 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2053 break;
2054 case FSF_LUN_BOXED:
2055 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2056 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2057 "fssfch6", req);
2058 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2059 break;
2060 case FSF_ADAPTER_STATUS_AVAILABLE:
2061 if (header->fsf_status_qual.word[0] ==
2062 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2063 zfcp_fc_test_link(zfcp_sdev->port);
2064 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2065 break;
2066 }
2067}
2068
2069static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2077{ 2070{
2078 struct scsi_cmnd *scpnt; 2071 struct scsi_cmnd *scpnt;
2079 struct fcp_resp_with_ext *fcp_rsp; 2072 struct fcp_resp_with_ext *fcp_rsp;
2080 unsigned long flags; 2073 unsigned long flags;
2081 2074
2075 zfcp_fsf_fcp_handler_common(req);
2076
2082 read_lock_irqsave(&req->adapter->abort_lock, flags); 2077 read_lock_irqsave(&req->adapter->abort_lock, flags);
2083 2078
2084 scpnt = req->data; 2079 scpnt = req->data;
@@ -2125,97 +2120,6 @@ skip_fsfstatus:
2125 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2120 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2126} 2121}
2127 2122
2128static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2129{
2130 struct fcp_resp_with_ext *fcp_rsp;
2131 struct fcp_resp_rsp_info *rsp_info;
2132
2133 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2134 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2135
2136 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2137 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2138 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2139}
2140
2141
2142static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2143{
2144 struct zfcp_unit *unit;
2145 struct fsf_qtcb_header *header = &req->qtcb->header;
2146
2147 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2148 unit = req->data;
2149 else
2150 unit = req->unit;
2151
2152 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2153 goto skip_fsfstatus;
2154
2155 switch (header->fsf_status) {
2156 case FSF_HANDLE_MISMATCH:
2157 case FSF_PORT_HANDLE_NOT_VALID:
2158 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2159 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2160 break;
2161 case FSF_FCPLUN_NOT_VALID:
2162 case FSF_LUN_HANDLE_NOT_VALID:
2163 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2164 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2165 break;
2166 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2167 zfcp_fsf_class_not_supp(req);
2168 break;
2169 case FSF_ACCESS_DENIED:
2170 zfcp_fsf_access_denied_unit(req, unit);
2171 break;
2172 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2173 dev_err(&req->adapter->ccw_device->dev,
2174 "Incorrect direction %d, unit 0x%016Lx on port "
2175 "0x%016Lx closed\n",
2176 req->qtcb->bottom.io.data_direction,
2177 (unsigned long long)unit->fcp_lun,
2178 (unsigned long long)unit->port->wwpn);
2179 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2180 req);
2181 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2182 break;
2183 case FSF_CMND_LENGTH_NOT_VALID:
2184 dev_err(&req->adapter->ccw_device->dev,
2185 "Incorrect CDB length %d, unit 0x%016Lx on "
2186 "port 0x%016Lx closed\n",
2187 req->qtcb->bottom.io.fcp_cmnd_length,
2188 (unsigned long long)unit->fcp_lun,
2189 (unsigned long long)unit->port->wwpn);
2190 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2191 req);
2192 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2193 break;
2194 case FSF_PORT_BOXED:
2195 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2196 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2197 break;
2198 case FSF_LUN_BOXED:
2199 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2200 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2201 break;
2202 case FSF_ADAPTER_STATUS_AVAILABLE:
2203 if (header->fsf_status_qual.word[0] ==
2204 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2205 zfcp_fc_test_link(unit->port);
2206 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2207 break;
2208 }
2209skip_fsfstatus:
2210 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2211 zfcp_fsf_send_fcp_ctm_handler(req);
2212 else {
2213 zfcp_fsf_send_fcp_command_task_handler(req);
2214 req->unit = NULL;
2215 put_device(&unit->dev);
2216 }
2217}
2218
2219static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2123static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2220{ 2124{
2221 switch (scsi_get_prot_op(scsi_cmnd)) { 2125 switch (scsi_get_prot_op(scsi_cmnd)) {
@@ -2255,22 +2159,22 @@ static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2255} 2159}
2256 2160
2257/** 2161/**
2258 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2162 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2259 * @unit: unit where command is sent to
2260 * @scsi_cmnd: scsi command to be sent 2163 * @scsi_cmnd: scsi command to be sent
2261 */ 2164 */
2262int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, 2165int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2263 struct scsi_cmnd *scsi_cmnd)
2264{ 2166{
2265 struct zfcp_fsf_req *req; 2167 struct zfcp_fsf_req *req;
2266 struct fcp_cmnd *fcp_cmnd; 2168 struct fcp_cmnd *fcp_cmnd;
2267 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2169 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2268 int real_bytes, retval = -EIO, dix_bytes = 0; 2170 int real_bytes, retval = -EIO, dix_bytes = 0;
2269 struct zfcp_adapter *adapter = unit->port->adapter; 2171 struct scsi_device *sdev = scsi_cmnd->device;
2172 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2173 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2270 struct zfcp_qdio *qdio = adapter->qdio; 2174 struct zfcp_qdio *qdio = adapter->qdio;
2271 struct fsf_qtcb_bottom_io *io; 2175 struct fsf_qtcb_bottom_io *io;
2272 2176
2273 if (unlikely(!(atomic_read(&unit->status) & 2177 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2274 ZFCP_STATUS_COMMON_UNBLOCKED))) 2178 ZFCP_STATUS_COMMON_UNBLOCKED)))
2275 return -EBUSY; 2179 return -EBUSY;
2276 2180
@@ -2295,11 +2199,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2295 2199
2296 io = &req->qtcb->bottom.io; 2200 io = &req->qtcb->bottom.io;
2297 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2201 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2298 req->unit = unit;
2299 req->data = scsi_cmnd; 2202 req->data = scsi_cmnd;
2300 req->handler = zfcp_fsf_send_fcp_command_handler; 2203 req->handler = zfcp_fsf_fcp_cmnd_handler;
2301 req->qtcb->header.lun_handle = unit->handle; 2204 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2302 req->qtcb->header.port_handle = unit->port->handle; 2205 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2303 io->service_class = FSF_CLASS_3; 2206 io->service_class = FSF_CLASS_3;
2304 io->fcp_cmnd_length = FCP_CMND_LEN; 2207 io->fcp_cmnd_length = FCP_CMND_LEN;
2305 2208
@@ -2310,8 +2213,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2310 2213
2311 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); 2214 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
2312 2215
2313 get_device(&unit->dev);
2314
2315 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2216 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2316 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2217 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2317 2218
@@ -2338,7 +2239,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2338 goto out; 2239 goto out;
2339 2240
2340failed_scsi_cmnd: 2241failed_scsi_cmnd:
2341 put_device(&unit->dev);
2342 zfcp_fsf_req_free(req); 2242 zfcp_fsf_req_free(req);
2343 scsi_cmnd->host_scribble = NULL; 2243 scsi_cmnd->host_scribble = NULL;
2344out: 2244out:
@@ -2346,23 +2246,40 @@ out:
2346 return retval; 2246 return retval;
2347} 2247}
2348 2248
2249static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2250{
2251 struct fcp_resp_with_ext *fcp_rsp;
2252 struct fcp_resp_rsp_info *rsp_info;
2253
2254 zfcp_fsf_fcp_handler_common(req);
2255
2256 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2257 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2258
2259 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2260 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2261 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2262}
2263
2349/** 2264/**
2350 * zfcp_fsf_send_fcp_ctm - send SCSI task management command 2265 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2351 * @unit: pointer to struct zfcp_unit 2266 * @scmnd: SCSI command to send the task management command for
2352 * @tm_flags: unsigned byte for task management flags 2267 * @tm_flags: unsigned byte for task management flags
2353 * Returns: on success pointer to struct fsf_req, NULL otherwise 2268 * Returns: on success pointer to struct fsf_req, NULL otherwise
2354 */ 2269 */
2355struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) 2270struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2271 u8 tm_flags)
2356{ 2272{
2357 struct zfcp_fsf_req *req = NULL; 2273 struct zfcp_fsf_req *req = NULL;
2358 struct fcp_cmnd *fcp_cmnd; 2274 struct fcp_cmnd *fcp_cmnd;
2359 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2275 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2276 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2360 2277
2361 if (unlikely(!(atomic_read(&unit->status) & 2278 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2362 ZFCP_STATUS_COMMON_UNBLOCKED))) 2279 ZFCP_STATUS_COMMON_UNBLOCKED)))
2363 return NULL; 2280 return NULL;
2364 2281
2365 spin_lock_bh(&qdio->req_q_lock); 2282 spin_lock_irq(&qdio->req_q_lock);
2366 if (zfcp_qdio_sbal_get(qdio)) 2283 if (zfcp_qdio_sbal_get(qdio))
2367 goto out; 2284 goto out;
2368 2285
@@ -2376,10 +2293,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2376 } 2293 }
2377 2294
2378 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2295 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2379 req->data = unit; 2296 req->data = scmnd;
2380 req->handler = zfcp_fsf_send_fcp_command_handler; 2297 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2381 req->qtcb->header.lun_handle = unit->handle; 2298 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2382 req->qtcb->header.port_handle = unit->port->handle; 2299 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2383 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2300 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2384 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2301 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2385 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2302 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
@@ -2387,7 +2304,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2387 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2304 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2388 2305
2389 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2306 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2390 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); 2307 zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
2391 2308
2392 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2309 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2393 if (!zfcp_fsf_req_send(req)) 2310 if (!zfcp_fsf_req_send(req))
@@ -2396,7 +2313,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2396 zfcp_fsf_req_free(req); 2313 zfcp_fsf_req_free(req);
2397 req = NULL; 2314 req = NULL;
2398out: 2315out:
2399 spin_unlock_bh(&qdio->req_q_lock); 2316 spin_unlock_irq(&qdio->req_q_lock);
2400 return req; 2317 return req;
2401} 2318}
2402 2319
@@ -2432,7 +2349,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2432 return ERR_PTR(-EINVAL); 2349 return ERR_PTR(-EINVAL);
2433 } 2350 }
2434 2351
2435 spin_lock_bh(&qdio->req_q_lock); 2352 spin_lock_irq(&qdio->req_q_lock);
2436 if (zfcp_qdio_sbal_get(qdio)) 2353 if (zfcp_qdio_sbal_get(qdio))
2437 goto out; 2354 goto out;
2438 2355
@@ -2459,7 +2376,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2459 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2376 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2460 retval = zfcp_fsf_req_send(req); 2377 retval = zfcp_fsf_req_send(req);
2461out: 2378out:
2462 spin_unlock_bh(&qdio->req_q_lock); 2379 spin_unlock_irq(&qdio->req_q_lock);
2463 2380
2464 if (!retval) { 2381 if (!retval) {
2465 wait_for_completion(&req->completion); 2382 wait_for_completion(&req->completion);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..60e6e5714eb9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
60 unsigned long long now, span; 60 unsigned long long now, span;
61 int used; 61 int used;
62 62
63 spin_lock(&qdio->stat_lock);
64 now = get_clock_monotonic(); 63 now = get_clock_monotonic();
65 span = (now - qdio->req_q_time) >> 12; 64 span = (now - qdio->req_q_time) >> 12;
66 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 65 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
67 qdio->req_q_util += used * span; 66 qdio->req_q_util += used * span;
68 qdio->req_q_time = now; 67 qdio->req_q_time = now;
69 spin_unlock(&qdio->stat_lock);
70} 68}
71 69
72static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, 70static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
84 /* cleanup all SBALs being program-owned now */ 82 /* cleanup all SBALs being program-owned now */
85 zfcp_qdio_zero_sbals(qdio->req_q, idx, count); 83 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
86 84
85 spin_lock_irq(&qdio->stat_lock);
87 zfcp_qdio_account(qdio); 86 zfcp_qdio_account(qdio);
87 spin_unlock_irq(&qdio->stat_lock);
88 atomic_add(count, &qdio->req_q_free); 88 atomic_add(count, &qdio->req_q_free);
89 wake_up(&qdio->req_q_wq); 89 wake_up(&qdio->req_q_wq);
90} 90}
@@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
201 201
202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
203{ 203{
204 spin_lock_bh(&qdio->req_q_lock); 204 spin_lock_irq(&qdio->req_q_lock);
205 if (atomic_read(&qdio->req_q_free) || 205 if (atomic_read(&qdio->req_q_free) ||
206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
207 return 1; 207 return 1;
208 spin_unlock_bh(&qdio->req_q_lock); 208 spin_unlock_irq(&qdio->req_q_lock);
209 return 0; 209 return 0;
210} 210}
211 211
@@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
223{ 223{
224 long ret; 224 long ret;
225 225
226 spin_unlock_bh(&qdio->req_q_lock); 226 spin_unlock_irq(&qdio->req_q_lock);
227 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 227 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
228 zfcp_qdio_sbal_check(qdio), 5 * HZ); 228 zfcp_qdio_sbal_check(qdio), 5 * HZ);
229 229
@@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); 239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
240 } 240 }
241 241
242 spin_lock_bh(&qdio->req_q_lock); 242 spin_lock_irq(&qdio->req_q_lock);
243 return -EIO; 243 return -EIO;
244} 244}
245 245
@@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
254 int retval; 254 int retval;
255 u8 sbal_number = q_req->sbal_number; 255 u8 sbal_number = q_req->sbal_number;
256 256
257 spin_lock(&qdio->stat_lock);
257 zfcp_qdio_account(qdio); 258 zfcp_qdio_account(qdio);
259 spin_unlock(&qdio->stat_lock);
258 260
259 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, 261 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
260 q_req->sbal_first, sbal_number); 262 q_req->sbal_first, sbal_number);
@@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
328 return; 330 return;
329 331
330 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 332 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
331 spin_lock_bh(&qdio->req_q_lock); 333 spin_lock_irq(&qdio->req_q_lock);
332 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 334 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
333 spin_unlock_bh(&qdio->req_q_lock); 335 spin_unlock_irq(&qdio->req_q_lock);
334 336
335 wake_up(&qdio->req_q_wq); 337 wake_up(&qdio->req_q_wq);
336 338
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index cb000c9833bb..50286d8707f3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
49 return sdev->queue_depth; 49 return sdev->queue_depth;
50} 50}
51 51
52static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 52static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
53{ 53{
54 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 54 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
55 unit->device = NULL; 55
56 put_device(&unit->dev); 56 zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
57 put_device(&zfcp_sdev->port->dev);
57} 58}
58 59
59static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 60static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
78static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 79static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
79 void (*done) (struct scsi_cmnd *)) 80 void (*done) (struct scsi_cmnd *))
80{ 81{
81 struct zfcp_unit *unit; 82 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
82 struct zfcp_adapter *adapter; 83 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
83 int status, scsi_result, ret;
84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); 84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
85 int status, scsi_result, ret;
85 86
86 /* reset the status for this request */ 87 /* reset the status for this request */
87 scpnt->result = 0; 88 scpnt->result = 0;
88 scpnt->host_scribble = NULL; 89 scpnt->host_scribble = NULL;
89 scpnt->scsi_done = done; 90 scpnt->scsi_done = done;
90 91
91 /*
92 * figure out adapter and target device
93 * (stored there by zfcp_scsi_slave_alloc)
94 */
95 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
96 unit = scpnt->device->hostdata;
97
98 scsi_result = fc_remote_port_chkready(rport); 92 scsi_result = fc_remote_port_chkready(rport);
99 if (unlikely(scsi_result)) { 93 if (unlikely(scsi_result)) {
100 scpnt->result = scsi_result; 94 scpnt->result = scsi_result;
@@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
103 return 0; 97 return 0;
104 } 98 }
105 99
106 status = atomic_read(&unit->status); 100 status = atomic_read(&zfcp_sdev->status);
107 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && 101 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
108 !(atomic_read(&unit->port->status) & 102 !(atomic_read(&zfcp_sdev->port->status) &
109 ZFCP_STATUS_COMMON_ERP_FAILED)) { 103 ZFCP_STATUS_COMMON_ERP_FAILED)) {
110 /* only unit access denied, but port is good 104 /* only LUN access denied, but port is good
111 * not covered by FC transport, have to fail here */ 105 * not covered by FC transport, have to fail here */
112 zfcp_scsi_command_fail(scpnt, DID_ERROR); 106 zfcp_scsi_command_fail(scpnt, DID_ERROR);
113 return 0; 107 return 0;
@@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115 109
116 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { 110 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
117 /* This could be either 111 /* This could be either
118 * open unit pending: this is temporary, will result in 112 * open LUN pending: this is temporary, will result in
119 * open unit or ERP_FAILED, so retry command 113 * open LUN or ERP_FAILED, so retry command
120 * call to rport_delete pending: mimic retry from 114 * call to rport_delete pending: mimic retry from
121 * fc_remote_port_chkready until rport is BLOCKED 115 * fc_remote_port_chkready until rport is BLOCKED
122 */ 116 */
@@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
124 return 0; 118 return 0;
125 } 119 }
126 120
127 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); 121 ret = zfcp_fsf_fcp_cmnd(scpnt);
128 if (unlikely(ret == -EBUSY)) 122 if (unlikely(ret == -EBUSY))
129 return SCSI_MLQUEUE_DEVICE_BUSY; 123 return SCSI_MLQUEUE_DEVICE_BUSY;
130 else if (unlikely(ret < 0)) 124 else if (unlikely(ret < 0))
@@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
133 return ret; 127 return ret;
134} 128}
135 129
136static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, 130static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
137 unsigned int id, u64 lun)
138{ 131{
139 unsigned long flags; 132 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
133 struct zfcp_adapter *adapter =
134 (struct zfcp_adapter *) sdev->host->hostdata[0];
135 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
140 struct zfcp_port *port; 136 struct zfcp_port *port;
141 struct zfcp_unit *unit = NULL; 137 struct zfcp_unit *unit;
142 138
143 read_lock_irqsave(&adapter->port_list_lock, flags); 139 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
144 list_for_each_entry(port, &adapter->port_list, list) { 140 if (!port)
145 if (!port->rport || (id != port->rport->scsi_target_id)) 141 return -ENXIO;
146 continue;
147 unit = zfcp_get_unit_by_lun(port, lun);
148 if (unit)
149 break;
150 }
151 read_unlock_irqrestore(&adapter->port_list_lock, flags);
152 142
153 return unit; 143 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
154} 144 if (unit)
145 put_device(&unit->dev);
155 146
156static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) 147 if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
157{ 148 put_device(&port->dev);
158 struct zfcp_adapter *adapter; 149 return -ENXIO;
159 struct zfcp_unit *unit; 150 }
160 u64 lun;
161 151
162 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 152 zfcp_sdev->port = port;
163 if (!adapter) 153 zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
164 goto out; 154 zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
155 zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
156 zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
157 zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
158 zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
159 spin_lock_init(&zfcp_sdev->latencies.lock);
165 160
166 int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); 161 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
167 unit = zfcp_unit_lookup(adapter, sdp->id, lun); 162 zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
168 if (unit) { 163 zfcp_erp_wait(port->adapter);
169 sdp->hostdata = unit; 164
170 unit->device = sdp; 165 return 0;
171 return 0;
172 }
173out:
174 return -ENXIO;
175} 166}
176 167
177static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 168static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
179 struct Scsi_Host *scsi_host = scpnt->device->host; 170 struct Scsi_Host *scsi_host = scpnt->device->host;
180 struct zfcp_adapter *adapter = 171 struct zfcp_adapter *adapter =
181 (struct zfcp_adapter *) scsi_host->hostdata[0]; 172 (struct zfcp_adapter *) scsi_host->hostdata[0];
182 struct zfcp_unit *unit = scpnt->device->hostdata;
183 struct zfcp_fsf_req *old_req, *abrt_req; 173 struct zfcp_fsf_req *old_req, *abrt_req;
184 unsigned long flags; 174 unsigned long flags;
185 unsigned long old_reqid = (unsigned long) scpnt->host_scribble; 175 unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
@@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
203 write_unlock_irqrestore(&adapter->abort_lock, flags); 193 write_unlock_irqrestore(&adapter->abort_lock, flags);
204 194
205 while (retry--) { 195 while (retry--) {
206 abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); 196 abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
207 if (abrt_req) 197 if (abrt_req)
208 break; 198 break;
209 199
@@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
238 228
239static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) 229static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
240{ 230{
241 struct zfcp_unit *unit = scpnt->device->hostdata; 231 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
242 struct zfcp_adapter *adapter = unit->port->adapter; 232 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
243 struct zfcp_fsf_req *fsf_req = NULL; 233 struct zfcp_fsf_req *fsf_req = NULL;
244 int retval = SUCCESS, ret; 234 int retval = SUCCESS, ret;
245 int retry = 3; 235 int retry = 3;
246 236
247 while (retry--) { 237 while (retry--) {
248 fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags); 238 fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
249 if (fsf_req) 239 if (fsf_req)
250 break; 240 break;
251 241
@@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
256 246
257 if (!(atomic_read(&adapter->status) & 247 if (!(atomic_read(&adapter->status) &
258 ZFCP_STATUS_COMMON_RUNNING)) { 248 ZFCP_STATUS_COMMON_RUNNING)) {
259 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); 249 zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
260 return SUCCESS; 250 return SUCCESS;
261 } 251 }
262 } 252 }
@@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
266 wait_for_completion(&fsf_req->completion); 256 wait_for_completion(&fsf_req->completion);
267 257
268 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 258 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
269 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); 259 zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
270 retval = FAILED; 260 retval = FAILED;
271 } else 261 } else
272 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); 262 zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
273 263
274 zfcp_fsf_req_free(fsf_req); 264 zfcp_fsf_req_free(fsf_req);
275 return retval; 265 return retval;
@@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
287 277
288static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 278static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
289{ 279{
290 struct zfcp_unit *unit = scpnt->device->hostdata; 280 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
291 struct zfcp_adapter *adapter = unit->port->adapter; 281 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
292 int ret; 282 int ret;
293 283
294 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 284 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
@@ -319,8 +309,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
319 } 309 }
320 310
321 /* tell the SCSI stack some characteristics of this adapter */ 311 /* tell the SCSI stack some characteristics of this adapter */
322 adapter->scsi_host->max_id = 1; 312 adapter->scsi_host->max_id = 511;
323 adapter->scsi_host->max_lun = 1; 313 adapter->scsi_host->max_lun = 0xFFFFFFFF;
324 adapter->scsi_host->max_channel = 0; 314 adapter->scsi_host->max_channel = 0;
325 adapter->scsi_host->unique_id = dev_id.devno; 315 adapter->scsi_host->unique_id = dev_id.devno;
326 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ 316 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
@@ -534,20 +524,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
534 } 524 }
535} 525}
536 526
537static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
538{
539 struct zfcp_unit *unit;
540
541 read_lock_irq(&port->unit_list_lock);
542 list_for_each_entry(unit, &port->unit_list, list) {
543 get_device(&unit->dev);
544 if (scsi_queue_work(port->adapter->scsi_host,
545 &unit->scsi_work) <= 0)
546 put_device(&unit->dev);
547 }
548 read_unlock_irq(&port->unit_list_lock);
549}
550
551static void zfcp_scsi_rport_register(struct zfcp_port *port) 527static void zfcp_scsi_rport_register(struct zfcp_port *port)
552{ 528{
553 struct fc_rport_identifiers ids; 529 struct fc_rport_identifiers ids;
@@ -574,7 +550,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
574 port->rport = rport; 550 port->rport = rport;
575 port->starget_id = rport->scsi_target_id; 551 port->starget_id = rport->scsi_target_id;
576 552
577 zfcp_scsi_queue_unit_register(port); 553 zfcp_unit_queue_scsi_scan(port);
578} 554}
579 555
580static void zfcp_scsi_rport_block(struct zfcp_port *port) 556static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -638,29 +614,6 @@ void zfcp_scsi_rport_work(struct work_struct *work)
638} 614}
639 615
640/** 616/**
641 * zfcp_scsi_scan - Register LUN with SCSI midlayer
642 * @unit: The LUN/unit to register
643 */
644void zfcp_scsi_scan(struct zfcp_unit *unit)
645{
646 struct fc_rport *rport = unit->port->rport;
647
648 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
649 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
650 scsilun_to_int((struct scsi_lun *)
651 &unit->fcp_lun), 0);
652}
653
654void zfcp_scsi_scan_work(struct work_struct *work)
655{
656 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
657 scsi_work);
658
659 zfcp_scsi_scan(unit);
660 put_device(&unit->dev);
661}
662
663/**
664 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host 617 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
665 * @adapter: The adapter where to configure DIF/DIX for the SCSI host 618 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
666 */ 619 */
@@ -681,6 +634,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
681 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { 634 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
682 mask |= SHOST_DIX_TYPE1_PROTECTION; 635 mask |= SHOST_DIX_TYPE1_PROTECTION;
683 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); 636 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
637 shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
684 shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; 638 shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
685 shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; 639 shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
686 } 640 }
@@ -734,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = {
734 .show_host_port_type = 1, 688 .show_host_port_type = 1,
735 .show_host_speed = 1, 689 .show_host_speed = 1,
736 .show_host_port_id = 1, 690 .show_host_port_id = 1,
737 .disable_target_scan = 1,
738 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), 691 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
739}; 692};
740 693
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b4561c86e230..2f2c54f4718f 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -68,63 +68,96 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
68 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 68 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
69 69
70ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", 70ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
71 atomic_read(&unit->status)); 71 zfcp_unit_sdev_status(unit));
72ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", 72ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
73 (atomic_read(&unit->status) & 73 (zfcp_unit_sdev_status(unit) &
74 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 74 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (atomic_read(&unit->status) & 76 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
78ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", 78ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
79 (atomic_read(&unit->status) & 79 (zfcp_unit_sdev_status(unit) &
80 ZFCP_STATUS_UNIT_SHARED) != 0); 80 ZFCP_STATUS_LUN_SHARED) != 0);
81ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", 81ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
82 (atomic_read(&unit->status) & 82 (zfcp_unit_sdev_status(unit) &
83 ZFCP_STATUS_UNIT_READONLY) != 0); 83 ZFCP_STATUS_LUN_READONLY) != 0);
84 84
85#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ 85static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
86static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ 86 struct device_attribute *attr,
87 struct device_attribute *attr, \ 87 char *buf)
88 char *buf) \ 88{
89{ \ 89 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
90 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ 90
91 \ 91 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 92 return sprintf(buf, "1\n");
93 return sprintf(buf, "1\n"); \ 93
94 else \ 94 return sprintf(buf, "0\n");
95 return sprintf(buf, "0\n"); \ 95}
96} \ 96
97static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ 97static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
98 struct device_attribute *attr,\ 98 struct device_attribute *attr,
99 const char *buf, size_t count)\ 99 const char *buf, size_t count)
100{ \ 100{
101 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ 101 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
102 unsigned long val; \ 102 unsigned long val;
103 int retval = 0; \ 103
104 \ 104 if (strict_strtoul(buf, 0, &val) || val != 0)
105 if (!(_feat && get_device(&_feat->dev))) \ 105 return -EINVAL;
106 return -EBUSY; \ 106
107 \ 107 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
108 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 108 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
109 retval = -EINVAL; \ 109 NULL);
110 goto out; \ 110 zfcp_erp_wait(port->adapter);
111 } \
112 \
113 zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
114 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
115 zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
116 _reopen_id, NULL); \
117 zfcp_erp_wait(_adapter); \
118out: \
119 put_device(&_feat->dev); \
120 return retval ? retval : (ssize_t) count; \
121} \
122static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
123 zfcp_sysfs_##_feat##_failed_show, \
124 zfcp_sysfs_##_feat##_failed_store);
125 111
126ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); 112 return count;
127ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); 113}
114static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
115 zfcp_sysfs_port_failed_show,
116 zfcp_sysfs_port_failed_store);
117
118static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
119 struct device_attribute *attr,
120 char *buf)
121{
122 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
123 struct scsi_device *sdev;
124 unsigned int status, failed = 1;
125
126 sdev = zfcp_unit_sdev(unit);
127 if (sdev) {
128 status = atomic_read(&sdev_to_zfcp(sdev)->status);
129 failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
130 scsi_device_put(sdev);
131 }
132
133 return sprintf(buf, "%d\n", failed);
134}
135
136static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
137 struct device_attribute *attr,
138 const char *buf, size_t count)
139{
140 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
141 unsigned long val;
142 struct scsi_device *sdev;
143
144 if (strict_strtoul(buf, 0, &val) || val != 0)
145 return -EINVAL;
146
147 sdev = zfcp_unit_sdev(unit);
148 if (sdev) {
149 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
150 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
151 "syufai2", NULL);
152 zfcp_erp_wait(unit->port->adapter);
153 } else
154 zfcp_unit_scsi_scan(unit);
155
156 return count;
157}
158static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
159 zfcp_sysfs_unit_failed_show,
160 zfcp_sysfs_unit_failed_store);
128 161
129static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, 162static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
130 struct device_attribute *attr, 163 struct device_attribute *attr,
@@ -163,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
163 goto out; 196 goto out;
164 } 197 }
165 198
166 zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, 199 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
167 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
168 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 200 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
169 "syafai2", NULL); 201 "syafai2", NULL);
170 zfcp_erp_wait(adapter); 202 zfcp_erp_wait(adapter);
@@ -257,28 +289,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
257 const char *buf, size_t count) 289 const char *buf, size_t count)
258{ 290{
259 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); 291 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
260 struct zfcp_unit *unit;
261 u64 fcp_lun; 292 u64 fcp_lun;
262 int retval = -EINVAL;
263
264 if (!(port && get_device(&port->dev)))
265 return -EBUSY;
266 293
267 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 294 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
268 goto out; 295 return -EINVAL;
269 296
270 unit = zfcp_unit_enqueue(port, fcp_lun); 297 if (zfcp_unit_add(port, fcp_lun))
271 if (IS_ERR(unit)) 298 return -EINVAL;
272 goto out;
273 else
274 retval = 0;
275 299
276 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 300 return count;
277 zfcp_erp_wait(unit->port->adapter);
278 zfcp_scsi_scan(unit);
279out:
280 put_device(&port->dev);
281 return retval ? retval : (ssize_t) count;
282} 301}
283static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 302static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
284 303
@@ -287,42 +306,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
287 const char *buf, size_t count) 306 const char *buf, size_t count)
288{ 307{
289 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); 308 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
290 struct zfcp_unit *unit;
291 u64 fcp_lun; 309 u64 fcp_lun;
292 int retval = -EINVAL;
293 struct scsi_device *sdev;
294
295 if (!(port && get_device(&port->dev)))
296 return -EBUSY;
297 310
298 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 311 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
299 goto out; 312 return -EINVAL;
300 313
301 unit = zfcp_get_unit_by_lun(port, fcp_lun); 314 if (zfcp_unit_remove(port, fcp_lun))
302 if (!unit) 315 return -EINVAL;
303 goto out;
304 else
305 retval = 0;
306
307 sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
308 port->starget_id,
309 scsilun_to_int((struct scsi_lun *)&fcp_lun));
310 if (sdev) {
311 scsi_remove_device(sdev);
312 scsi_device_put(sdev);
313 }
314
315 write_lock_irq(&port->unit_list_lock);
316 list_del(&unit->list);
317 write_unlock_irq(&port->unit_list_lock);
318
319 put_device(&unit->dev);
320 316
321 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); 317 return count;
322 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
323out:
324 put_device(&port->dev);
325 return retval ? retval : (ssize_t) count;
326} 318}
327static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 319static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
328 320
@@ -363,9 +355,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
363 struct device_attribute *attr, \ 355 struct device_attribute *attr, \
364 char *buf) { \ 356 char *buf) { \
365 struct scsi_device *sdev = to_scsi_device(dev); \ 357 struct scsi_device *sdev = to_scsi_device(dev); \
366 struct zfcp_unit *unit = sdev->hostdata; \ 358 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
367 struct zfcp_latencies *lat = &unit->latencies; \ 359 struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
368 struct zfcp_adapter *adapter = unit->port->adapter; \ 360 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
369 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ 361 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
370 \ 362 \
371 spin_lock_bh(&lat->lock); \ 363 spin_lock_bh(&lat->lock); \
@@ -394,8 +386,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
394 const char *buf, size_t count) \ 386 const char *buf, size_t count) \
395{ \ 387{ \
396 struct scsi_device *sdev = to_scsi_device(dev); \ 388 struct scsi_device *sdev = to_scsi_device(dev); \
397 struct zfcp_unit *unit = sdev->hostdata; \ 389 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
398 struct zfcp_latencies *lat = &unit->latencies; \ 390 struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
399 unsigned long flags; \ 391 unsigned long flags; \
400 \ 392 \
401 spin_lock_irqsave(&lat->lock, flags); \ 393 spin_lock_irqsave(&lat->lock, flags); \
@@ -423,19 +415,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
423 struct device_attribute *attr,\ 415 struct device_attribute *attr,\
424 char *buf) \ 416 char *buf) \
425{ \ 417{ \
426 struct scsi_device *sdev = to_scsi_device(dev); \ 418 struct scsi_device *sdev = to_scsi_device(dev); \
427 struct zfcp_unit *unit = sdev->hostdata; \ 419 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
420 struct zfcp_port *port = zfcp_sdev->port; \
428 \ 421 \
429 return sprintf(buf, _format, _value); \ 422 return sprintf(buf, _format, _value); \
430} \ 423} \
431static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); 424static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
432 425
433ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", 426ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
434 dev_name(&unit->port->adapter->ccw_device->dev)); 427 dev_name(&port->adapter->ccw_device->dev));
435ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", 428ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
436 (unsigned long long) unit->port->wwpn); 429 (unsigned long long) port->wwpn);
437ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", 430
438 (unsigned long long) unit->fcp_lun); 431static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
432 struct device_attribute *attr,
433 char *buf)
434{
435 struct scsi_device *sdev = to_scsi_device(dev);
436
437 return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
438}
439static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
439 440
440struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 441struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
441 &dev_attr_fcp_lun, 442 &dev_attr_fcp_lun,
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 000000000000..1119c535a667
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,244 @@
1/*
2 * zfcp device driver
3 *
4 * Tracking of manually configured LUNs and helper functions to
5 * register the LUNs with the SCSI midlayer.
6 *
7 * Copyright IBM Corporation 2010
8 */
9
10#include "zfcp_def.h"
11#include "zfcp_ext.h"
12
13/**
14 * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
15 * @unit: The zfcp LUN/unit to register
16 *
17 * When the SCSI midlayer is not allowed to automatically scan and
18 * attach SCSI devices, zfcp has to register the single devices with
19 * the SCSI midlayer.
20 */
21void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
22{
23 struct fc_rport *rport = unit->port->rport;
24 unsigned int lun;
25
26 lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
27
28 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
29 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
30}
31
32static void zfcp_unit_scsi_scan_work(struct work_struct *work)
33{
34 struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
35 scsi_work);
36
37 zfcp_unit_scsi_scan(unit);
38 put_device(&unit->dev);
39}
40
41/**
42 * zfcp_unit_queue_scsi_scan - Register configured units on port
43 * @port: The zfcp_port where to register units
44 *
45 * After opening a port, all units configured on this port have to be
46 * registered with the SCSI midlayer. This function should be called
47 * after calling fc_remote_port_add, so that the fc_rport is already
48 * ONLINE and the call to scsi_scan_target runs the same way as the
49 * call in the FC transport class.
50 */
51void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
52{
53 struct zfcp_unit *unit;
54
55 read_lock_irq(&port->unit_list_lock);
56 list_for_each_entry(unit, &port->unit_list, list) {
57 get_device(&unit->dev);
58 if (scsi_queue_work(port->adapter->scsi_host,
59 &unit->scsi_work) <= 0)
60 put_device(&unit->dev);
61 }
62 read_unlock_irq(&port->unit_list_lock);
63}
64
65static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
66{
67 struct zfcp_unit *unit;
68
69 list_for_each_entry(unit, &port->unit_list, list)
70 if (unit->fcp_lun == fcp_lun) {
71 get_device(&unit->dev);
72 return unit;
73 }
74
75 return NULL;
76}
77
78/**
79 * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
80 * @port: zfcp_port where to look for the unit
81 * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
82 *
83 * If zfcp_unit is found, a reference is acquired that has to be
84 * released later.
85 *
86 * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
87 * with the specified FCP LUN.
88 */
89struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
90{
91 struct zfcp_unit *unit;
92
93 read_lock_irq(&port->unit_list_lock);
94 unit = _zfcp_unit_find(port, fcp_lun);
95 read_unlock_irq(&port->unit_list_lock);
96 return unit;
97}
98
99/**
100 * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
101 * @dev: pointer to device in zfcp_unit
102 */
103static void zfcp_unit_release(struct device *dev)
104{
105 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
106
107 put_device(&unit->port->dev);
108 kfree(unit);
109}
110
111/**
112 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
113 * @port: pointer to port where unit is added
114 * @fcp_lun: FCP LUN of unit to be enqueued
115 * Returns: 0 success
116 *
117 * Sets up some unit internal structures and creates sysfs entry.
118 */
119int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
120{
121 struct zfcp_unit *unit;
122
123 unit = zfcp_unit_find(port, fcp_lun);
124 if (unit) {
125 put_device(&unit->dev);
126 return -EEXIST;
127 }
128
129 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
130 if (!unit)
131 return -ENOMEM;
132
133 unit->port = port;
134 unit->fcp_lun = fcp_lun;
135 unit->dev.parent = &port->dev;
136 unit->dev.release = zfcp_unit_release;
137 INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
138
139 if (dev_set_name(&unit->dev, "0x%016llx",
140 (unsigned long long) fcp_lun)) {
141 kfree(unit);
142 return -ENOMEM;
143 }
144
145 if (device_register(&unit->dev)) {
146 put_device(&unit->dev);
147 return -ENOMEM;
148 }
149
150 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
151 device_unregister(&unit->dev);
152 return -EINVAL;
153 }
154
155 get_device(&port->dev);
156
157 write_lock_irq(&port->unit_list_lock);
158 list_add_tail(&unit->list, &port->unit_list);
159 write_unlock_irq(&port->unit_list_lock);
160
161 zfcp_unit_scsi_scan(unit);
162
163 return 0;
164}
165
166/**
167 * zfcp_unit_sdev - Return SCSI device for zfcp_unit
168 * @unit: The zfcp_unit where to get the SCSI device for
169 *
170 * Returns: scsi_device pointer on success, NULL if there is no SCSI
171 * device for this zfcp_unit
172 *
173 * On success, the caller also holds a reference to the SCSI device
174 * that must be released with scsi_device_put.
175 */
176struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
177{
178 struct Scsi_Host *shost;
179 struct zfcp_port *port;
180 unsigned int lun;
181
182 lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
183 port = unit->port;
184 shost = port->adapter->scsi_host;
185 return scsi_device_lookup(shost, 0, port->starget_id, lun);
186}
187
188/**
189 * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
190 * @unit: The unit to lookup the SCSI device for
191 *
192 * Returns the zfcp LUN status field of the SCSI device if the SCSI device
193 * for the zfcp_unit exists, 0 otherwise.
194 */
195unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
196{
197 unsigned int status = 0;
198 struct scsi_device *sdev;
199 struct zfcp_scsi_dev *zfcp_sdev;
200
201 sdev = zfcp_unit_sdev(unit);
202 if (sdev) {
203 zfcp_sdev = sdev_to_zfcp(sdev);
204 status = atomic_read(&zfcp_sdev->status);
205 scsi_device_put(sdev);
206 }
207
208 return status;
209}
210
211/**
212 * zfcp_unit_remove - Remove entry from list of configured units
213 * @port: The port where to remove the unit from the configuration
214 * @fcp_lun: The 64 bit LUN of the unit to remove
215 *
216 * Returns: -EINVAL if a unit with the specified LUN does not exist,
217 * 0 on success.
218 */
219int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
220{
221 struct zfcp_unit *unit;
222 struct scsi_device *sdev;
223
224 write_lock_irq(&port->unit_list_lock);
225 unit = _zfcp_unit_find(port, fcp_lun);
226 if (unit)
227 list_del(&unit->list);
228 write_unlock_irq(&port->unit_list_lock);
229
230 if (!unit)
231 return -EINVAL;
232
233 sdev = zfcp_unit_sdev(unit);
234 if (sdev) {
235 scsi_remove_device(sdev);
236 scsi_device_put(sdev);
237 }
238
239 put_device(&unit->dev);
240
241 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
242
243 return 0;
244}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bbf91aec64f5..2e9632e2c98b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -316,7 +316,8 @@ config SCSI_ISCSI_ATTRS
316 316
317config SCSI_SAS_ATTRS 317config SCSI_SAS_ATTRS
318 tristate "SAS Transport Attributes" 318 tristate "SAS Transport Attributes"
319 depends on SCSI && BLK_DEV_BSG 319 depends on SCSI
320 select BLK_DEV_BSG
320 help 321 help
321 If you wish to export transport-specific information about 322 If you wish to export transport-specific information about
322 each attached SAS device to sysfs, say Y. 323 each attached SAS device to sysfs, say Y.
@@ -378,7 +379,7 @@ config ISCSI_BOOT_SYSFS
378 via sysfs to userspace. If you wish to export this information, 379 via sysfs to userspace. If you wish to export this information,
379 say Y. Otherwise, say N. 380 say Y. Otherwise, say N.
380 381
381source "drivers/scsi/cxgb3i/Kconfig" 382source "drivers/scsi/cxgbi/Kconfig"
382source "drivers/scsi/bnx2i/Kconfig" 383source "drivers/scsi/bnx2i/Kconfig"
383source "drivers/scsi/be2iscsi/Kconfig" 384source "drivers/scsi/be2iscsi/Kconfig"
384 385
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2703c6ec5e36..2e9a87e8e7d8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -133,7 +133,8 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
133obj-$(CONFIG_SCSI_STEX) += stex.o 133obj-$(CONFIG_SCSI_STEX) += stex.o
134obj-$(CONFIG_SCSI_MVSAS) += mvsas/ 134obj-$(CONFIG_SCSI_MVSAS) += mvsas/
135obj-$(CONFIG_PS3_ROM) += ps3rom.o 135obj-$(CONFIG_PS3_ROM) += ps3rom.o
136obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 136obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
137obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
137obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 138obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
138obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ 139obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
139obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o 140obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 1a5bf5724750..645ddd9d9b9e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
190 /* 190 /*
191 * Initialize the mutex used to wait for the next AIF. 191 * Initialize the mutex used to wait for the next AIF.
192 */ 192 */
193 init_MUTEX_LOCKED(&fibctx->wait_sem); 193 sema_init(&fibctx->wait_sem, 0);
194 fibctx->wait = 0; 194 fibctx->wait = 0;
195 /* 195 /*
196 * Initialize the fibs and set the count of fibs on 196 * Initialize the fibs and set the count of fibs on
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 70079146e203..afc9aeba5edb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev)
124 fibptr->hw_fib_va = hw_fib; 124 fibptr->hw_fib_va = hw_fib;
125 fibptr->data = (void *) fibptr->hw_fib_va->data; 125 fibptr->data = (void *) fibptr->hw_fib_va->data;
126 fibptr->next = fibptr+1; /* Forward chain the fibs */ 126 fibptr->next = fibptr+1; /* Forward chain the fibs */
127 init_MUTEX_LOCKED(&fibptr->event_wait); 127 sema_init(&fibptr->event_wait, 0);
128 spin_lock_init(&fibptr->event_lock); 128 spin_lock_init(&fibptr->event_lock);
129 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 129 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
130 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); 130 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 93984c9dfe14..aee73fafccc8 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2850,12 +2850,6 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
2850 aic_dev->r_total++; 2850 aic_dev->r_total++;
2851 ptr = aic_dev->r_bins; 2851 ptr = aic_dev->r_bins;
2852 } 2852 }
2853 if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER)
2854 {
2855 aic_dev->barrier_total++;
2856 if(scb->tag_action == MSG_ORDERED_Q_TAG)
2857 aic_dev->ordered_total++;
2858 }
2859 x = scb->sg_length; 2853 x = scb->sg_length;
2860 x >>= 10; 2854 x >>= 10;
2861 for(i=0; i<6; i++) 2855 for(i=0; i<6; i++)
@@ -10125,7 +10119,6 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10125 struct aic_dev_data *aic_dev = cmd->device->hostdata; 10119 struct aic_dev_data *aic_dev = cmd->device->hostdata;
10126 struct scsi_device *sdptr = cmd->device; 10120 struct scsi_device *sdptr = cmd->device;
10127 unsigned char tindex = TARGET_INDEX(cmd); 10121 unsigned char tindex = TARGET_INDEX(cmd);
10128 struct request *req = cmd->request;
10129 int use_sg; 10122 int use_sg;
10130 10123
10131 mask = (0x01 << tindex); 10124 mask = (0x01 << tindex);
@@ -10144,19 +10137,8 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10144 /* We always force TEST_UNIT_READY to untagged */ 10137 /* We always force TEST_UNIT_READY to untagged */
10145 if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) 10138 if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags)
10146 { 10139 {
10147 if (req->cmd_flags & REQ_HARDBARRIER) 10140 hscb->control |= MSG_SIMPLE_Q_TAG;
10148 { 10141 scb->tag_action = MSG_SIMPLE_Q_TAG;
10149 if(sdptr->ordered_tags)
10150 {
10151 hscb->control |= MSG_ORDERED_Q_TAG;
10152 scb->tag_action = MSG_ORDERED_Q_TAG;
10153 }
10154 }
10155 else
10156 {
10157 hscb->control |= MSG_SIMPLE_Q_TAG;
10158 scb->tag_action = MSG_SIMPLE_Q_TAG;
10159 }
10160 } 10142 }
10161 } 10143 }
10162 if ( !(aic_dev->dtr_pending) && 10144 if ( !(aic_dev->dtr_pending) &&
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index c8dc392edd57..05a78e515a24 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -878,8 +878,8 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
878 if (!error) { 878 if (!error) {
879 if (acb->devstate[id][lun] == ARECA_RAID_GONE) 879 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
880 acb->devstate[id][lun] = ARECA_RAID_GOOD; 880 acb->devstate[id][lun] = ARECA_RAID_GOOD;
881 ccb->pcmd->result = DID_OK << 16; 881 ccb->pcmd->result = DID_OK << 16;
882 arcmsr_ccb_complete(ccb); 882 arcmsr_ccb_complete(ccb);
883 }else{ 883 }else{
884 switch (ccb->arcmsr_cdb.DeviceStatus) { 884 switch (ccb->arcmsr_cdb.DeviceStatus) {
885 case ARCMSR_DEV_SELECT_TIMEOUT: { 885 case ARCMSR_DEV_SELECT_TIMEOUT: {
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 7c7537335c88..ad246369d373 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -335,7 +335,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
335 if (ready) 335 if (ready)
336 break; 336 break;
337 337
338 if (cnt > 6000000) { 338 if (cnt > 12000000) {
339 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n"); 339 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
340 return -EBUSY; 340 return -EBUSY;
341 } 341 }
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7f11f3e48e12..eaaa8813067d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -522,7 +522,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
522 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + 522 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
523 phba->params.cxns_per_ctrl * 2)) { 523 phba->params.cxns_per_ctrl * 2)) {
524 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 524 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
525 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
526 goto free_ep; 525 goto free_ep;
527 } 526 }
528 527
@@ -559,7 +558,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
559 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" 558 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
560 " status = %d extd_status = %d\n", 559 " status = %d extd_status = %d\n",
561 status, extd_status); 560 status, extd_status);
562 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
563 free_mcc_tag(&phba->ctrl, tag); 561 free_mcc_tag(&phba->ctrl, tag);
564 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 562 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
565 nonemb_cmd.va, nonemb_cmd.dma); 563 nonemb_cmd.va, nonemb_cmd.dma);
@@ -574,7 +572,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
574 beiscsi_ep->cid_vld = 1; 572 beiscsi_ep->cid_vld = 1;
575 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); 573 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
576 } 574 }
577 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
578 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 575 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
579 nonemb_cmd.va, nonemb_cmd.dma); 576 nonemb_cmd.va, nonemb_cmd.dma);
580 return 0; 577 return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 8220bde6c04c..75a85aa9e882 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -2040,7 +2040,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2040 unsigned int num_sg, struct beiscsi_io_task *io_task) 2040 unsigned int num_sg, struct beiscsi_io_task *io_task)
2041{ 2041{
2042 struct iscsi_sge *psgl; 2042 struct iscsi_sge *psgl;
2043 unsigned short sg_len, index; 2043 unsigned int sg_len, index;
2044 unsigned int sge_len = 0; 2044 unsigned int sge_len = 0;
2045 unsigned long long addr; 2045 unsigned long long addr;
2046 struct scatterlist *l_sg; 2046 struct scatterlist *l_sg;
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index ac3fdf02d5f6..d2eefd3e3bd5 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,15 +1,8 @@
1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o 1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o 3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
4bfa-y += bfad_debugfs.o 4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o 5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
9bfa-y += bfa_csdebug.o bfa_sm.o plog.o
10 7
11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o 8ccflags-y := -DBFA_PERF_BUILD
12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
14
15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
new file mode 100644
index 000000000000..ceaac65a91ff
--- /dev/null
+++ b/drivers/scsi/bfa/bfa.h
@@ -0,0 +1,438 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_H__
18#define __BFA_H__
19
20#include "bfa_os_inc.h"
21#include "bfa_cs.h"
22#include "bfa_plog.h"
23#include "bfa_defs_svc.h"
24#include "bfi.h"
25#include "bfa_ioc.h"
26
27struct bfa_s;
28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
31
32/**
33 * Interrupt message handlers
34 */
35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
36void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
37
38/**
39 * Request and response queue related defines
40 */
41#define BFA_REQQ_NELEMS_MIN (4)
42#define BFA_RSPQ_NELEMS_MIN (4)
43
44#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
45#define bfa_reqq_ci(__bfa, __reqq) \
46 (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
47
48#define bfa_reqq_full(__bfa, __reqq) \
49 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
50 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
51 bfa_reqq_ci(__bfa, __reqq))
52
53#define bfa_reqq_next(__bfa, __reqq) \
54 (bfa_reqq_full(__bfa, __reqq) ? NULL : \
55 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
56 + bfa_reqq_pi((__bfa), (__reqq)))))
57
58#define bfa_reqq_produce(__bfa, __reqq) do { \
59 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
60 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
61 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
62 bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
63 (__bfa)->iocfc.req_cq_pi[__reqq]); \
64 mmiowb(); \
65 } while (0)
66
67#define bfa_rspq_pi(__bfa, __rspq) \
68 (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
69
70#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
71#define bfa_rspq_elem(__bfa, __rspq, __ci) \
72 (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
73
74#define CQ_INCR(__index, __size) do { \
75 (__index)++; \
76 (__index) &= ((__size) - 1); \
77} while (0)
78
79/**
80 * Queue element to wait for room in request queue. FIFO order is
81 * maintained when fullfilling requests.
82 */
83struct bfa_reqq_wait_s {
84 struct list_head qe;
85 void (*qresume) (void *cbarg);
86 void *cbarg;
87};
88
89/**
90 * Circular queue usage assignments
91 */
92enum {
93 BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
94 BFA_REQQ_FCXP = 0, /* all FCXP messages */
95 BFA_REQQ_LPS = 0, /* all lport service msgs */
96 BFA_REQQ_PORT = 0, /* all port messages */
97 BFA_REQQ_FLASH = 0, /* for flash module */
98 BFA_REQQ_DIAG = 0, /* for diag module */
99 BFA_REQQ_RPORT = 0, /* all port messages */
100 BFA_REQQ_SBOOT = 0, /* all san boot messages */
101 BFA_REQQ_QOS_LO = 1, /* all low priority IO */
102 BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
103 BFA_REQQ_QOS_HI = 3, /* all high priority IO */
104};
105
106static inline void
107bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
108 void *cbarg)
109{
110 wqe->qresume = qresume;
111 wqe->cbarg = cbarg;
112}
113
114#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
115
116/**
117 * static inline void
118 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
119 */
120#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
121 \
122 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
123 \
124 bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
125 bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
126 \
127 list_add_tail(&(__wqe)->qe, waitq); \
128 } while (0)
129
130#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
131
132
133/**
134 * Generic BFA callback element.
135 */
136struct bfa_cb_qe_s {
137 struct list_head qe;
138 bfa_cb_cbfn_t cbfn;
139 bfa_boolean_t once;
140 u32 rsvd;
141 void *cbarg;
142};
143
144#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
145 (__hcb_qe)->cbfn = (__cbfn); \
146 (__hcb_qe)->cbarg = (__cbarg); \
147 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
148 } while (0)
149
150#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
151
152#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
153 (__hcb_qe)->cbfn = (__cbfn); \
154 (__hcb_qe)->cbarg = (__cbarg); \
155 if (!(__hcb_qe)->once) { \
156 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
157 (__hcb_qe)->once = BFA_TRUE; \
158 } \
159 } while (0)
160
161#define bfa_cb_queue_done(__hcb_qe) do { \
162 (__hcb_qe)->once = BFA_FALSE; \
163 } while (0)
164
165
166/**
167 * PCI devices supported by the current BFA
168 */
169struct bfa_pciid_s {
170 u16 device_id;
171 u16 vendor_id;
172};
173
174extern char bfa_version[];
175
176/**
177 * BFA memory resources
178 */
179enum bfa_mem_type {
180 BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */
181 BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
182 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
183};
184
185struct bfa_mem_elem_s {
186 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
187 u32 mem_len; /* Total Length in Bytes */
188 u8 *kva; /* kernel virtual address */
189 u64 dma; /* dma address if DMA memory */
190 u8 *kva_curp; /* kva allocation cursor */
191 u64 dma_curp; /* dma allocation cursor */
192};
193
194struct bfa_meminfo_s {
195 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
196};
197#define bfa_meminfo_kva(_m) \
198 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
199#define bfa_meminfo_dma_virt(_m) \
200 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
201#define bfa_meminfo_dma_phys(_m) \
202 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
203
204struct bfa_iocfc_regs_s {
205 bfa_os_addr_t intr_status;
206 bfa_os_addr_t intr_mask;
207 bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
208 bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
209 bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
210 bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
211 bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
212 bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
213 bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
214 bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
215};
216
217/**
218 * MSIX vector handlers
219 */
220#define BFA_MSIX_MAX_VECTORS 22
221typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
222struct bfa_msix_s {
223 int nvecs;
224 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
225};
226
227/**
228 * Chip specific interfaces
229 */
230struct bfa_hwif_s {
231 void (*hw_reginit)(struct bfa_s *bfa);
232 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
233 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
234 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
235 void (*hw_msix_install)(struct bfa_s *bfa);
236 void (*hw_msix_uninstall)(struct bfa_s *bfa);
237 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
238 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
239 u32 *nvecs, u32 *maxvec);
240 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
241 u32 *end);
242};
243typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
244
245struct bfa_iocfc_s {
246 struct bfa_s *bfa;
247 struct bfa_iocfc_cfg_s cfg;
248 int action;
249 u32 req_cq_pi[BFI_IOC_MAX_CQS];
250 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
251 struct bfa_cb_qe_s init_hcb_qe;
252 struct bfa_cb_qe_s stop_hcb_qe;
253 struct bfa_cb_qe_s dis_hcb_qe;
254 struct bfa_cb_qe_s stats_hcb_qe;
255 bfa_boolean_t cfgdone;
256
257 struct bfa_dma_s cfg_info;
258 struct bfi_iocfc_cfg_s *cfginfo;
259 struct bfa_dma_s cfgrsp_dma;
260 struct bfi_iocfc_cfgrsp_s *cfgrsp;
261 struct bfi_iocfc_cfg_reply_s *cfg_reply;
262 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
263 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
264 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
265 struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
266 struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
267 struct bfa_hwif_s hwif;
268 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
269 void *updateq_cbarg; /* bios callback arg */
270 u32 intr_mask;
271};
272
273#define bfa_lpuid(__bfa) \
274 bfa_ioc_portid(&(__bfa)->ioc)
275#define bfa_msix_init(__bfa, __nvecs) \
276 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
277#define bfa_msix_install(__bfa) \
278 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
279#define bfa_msix_uninstall(__bfa) \
280 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
281#define bfa_isr_mode_set(__bfa, __msix) \
282 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
283#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
284 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
285 __nvecs, __maxvec))
286#define bfa_msix_get_rme_range(__bfa, __start, __end) \
287 ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
288#define bfa_msix(__bfa, __vec) \
289 ((__bfa)->msix.handler[__vec](__bfa, __vec))
290
291/*
292 * FC specific IOC functions.
293 */
294void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
295 u32 *dm_len);
296void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
297 struct bfa_iocfc_cfg_s *cfg,
298 struct bfa_meminfo_s *meminfo,
299 struct bfa_pcidev_s *pcidev);
300void bfa_iocfc_detach(struct bfa_s *bfa);
301void bfa_iocfc_init(struct bfa_s *bfa);
302void bfa_iocfc_start(struct bfa_s *bfa);
303void bfa_iocfc_stop(struct bfa_s *bfa);
304void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
305void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
306bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
307void bfa_iocfc_reset_queues(struct bfa_s *bfa);
308
309void bfa_msix_all(struct bfa_s *bfa, int vec);
310void bfa_msix_reqq(struct bfa_s *bfa, int vec);
311void bfa_msix_rspq(struct bfa_s *bfa, int vec);
312void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
313
314void bfa_hwcb_reginit(struct bfa_s *bfa);
315void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
316void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
317void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
318void bfa_hwcb_msix_install(struct bfa_s *bfa);
319void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
320void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
321void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
322 u32 *maxvec);
323void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
324 u32 *end);
325void bfa_hwct_reginit(struct bfa_s *bfa);
326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
329void bfa_hwct_msix_install(struct bfa_s *bfa);
330void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
331void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
332void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
333 u32 *maxvec);
334void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
335 u32 *end);
336void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
337void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
338wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
339wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
340void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
341 struct bfa_boot_pbc_s *pbcfg);
342int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
343 struct bfi_pbc_vport_s *pbc_vport);
344
345
346/**
347 *----------------------------------------------------------------------
348 * BFA public interfaces
349 *----------------------------------------------------------------------
350 */
351#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
352#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
353 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
354#define bfa_ioc_clear_stats(__bfa) \
355 bfa_ioc_clr_stats(&(__bfa)->ioc)
356#define bfa_get_nports(__bfa) \
357 bfa_ioc_get_nports(&(__bfa)->ioc)
358#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
359 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
360#define bfa_get_adapter_model(__bfa, __model) \
361 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
362#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
363 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
364#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
365 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
366#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
367 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
368#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
369 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
370#define bfa_get_ioc_state(__bfa) \
371 bfa_ioc_get_state(&(__bfa)->ioc)
372#define bfa_get_type(__bfa) \
373 bfa_ioc_get_type(&(__bfa)->ioc)
374#define bfa_get_mac(__bfa) \
375 bfa_ioc_get_mac(&(__bfa)->ioc)
376#define bfa_get_mfg_mac(__bfa) \
377 bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
378#define bfa_get_fw_clock_res(__bfa) \
379 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
380
381void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
382void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
383void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
384void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
385 struct bfa_meminfo_s *meminfo);
386void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
387 struct bfa_meminfo_s *meminfo,
388 struct bfa_pcidev_s *pcidev);
389void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
390void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
391void bfa_detach(struct bfa_s *bfa);
392void bfa_init(struct bfa_s *bfa);
393void bfa_start(struct bfa_s *bfa);
394void bfa_stop(struct bfa_s *bfa);
395void bfa_attach_fcs(struct bfa_s *bfa);
396void bfa_cb_init(void *bfad, bfa_status_t status);
397void bfa_cb_updateq(void *bfad, bfa_status_t status);
398
399bfa_boolean_t bfa_intx(struct bfa_s *bfa);
400void bfa_intx_disable(struct bfa_s *bfa);
401void bfa_intx_enable(struct bfa_s *bfa);
402void bfa_isr_enable(struct bfa_s *bfa);
403void bfa_isr_disable(struct bfa_s *bfa);
404
405void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
406void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
407void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
408
409typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
410void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
411void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
412
413void bfa_adapter_get_attr(struct bfa_s *bfa,
414 struct bfa_adapter_attr_s *ad_attr);
415u64 bfa_adapter_get_id(struct bfa_s *bfa);
416
417bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
418 struct bfa_iocfc_intr_attr_s *attr);
419
420void bfa_iocfc_enable(struct bfa_s *bfa);
421void bfa_iocfc_disable(struct bfa_s *bfa);
422void bfa_chip_reset(struct bfa_s *bfa);
423void bfa_timer_tick(struct bfa_s *bfa);
424#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
425 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
426
427/*
428 * BFA debug API functions
429 */
430bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
431bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
432bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
433 u32 *offset, int *buflen);
434void bfa_debug_fwsave_clear(struct bfa_s *bfa);
435bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
436bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
437
438#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_callback_priv.h b/drivers/scsi/bfa/bfa_callback_priv.h
deleted file mode 100644
index 1e3265c9f7d4..000000000000
--- a/drivers/scsi/bfa/bfa_callback_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CALLBACK_PRIV_H__
19#define __BFA_CALLBACK_PRIV_H__
20
21#include <cs/bfa_q.h>
22
23typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
24
25/**
26 * Generic BFA callback element.
27 */
28struct bfa_cb_qe_s {
29 struct list_head qe;
30 bfa_cb_cbfn_t cbfn;
31 bfa_boolean_t once;
32 u32 rsvd;
33 void *cbarg;
34};
35
36#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
37 (__hcb_qe)->cbfn = (__cbfn); \
38 (__hcb_qe)->cbarg = (__cbarg); \
39 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
40} while (0)
41
42#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
43
44#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
45 (__hcb_qe)->cbfn = (__cbfn); \
46 (__hcb_qe)->cbarg = (__cbarg); \
47 if (!(__hcb_qe)->once) { \
48 list_add_tail((__hcb_qe), &(__bfa)->comp_q); \
49 (__hcb_qe)->once = BFA_TRUE; \
50 } \
51} while (0)
52
53#define bfa_cb_queue_done(__hcb_qe) do { \
54 (__hcb_qe)->once = BFA_FALSE; \
55} while (0)
56
57#endif /* __BFA_CALLBACK_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim.h
index 3906ed926966..a989a94c38da 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,37 +15,25 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18#ifndef __BFA_HCB_IOIM_H__
19 * bfa_cb_ioim_macros.h BFA IOIM driver interface macros. 19#define __BFA_HCB_IOIM_H__
20 */
21
22#ifndef __BFA_HCB_IOIM_MACROS_H__
23#define __BFA_HCB_IOIM_MACROS_H__
24
25#include <bfa_os_inc.h>
26/*
27 * #include <linux/dma-mapping.h>
28 *
29 * #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
30 * <scsi/scsi_device.h> #include <scsi/scsi_host.h>
31 */
32#include "bfad_im_compat.h"
33 20
21#include "bfa_os_inc.h"
34/* 22/*
35 * task attribute values in FCP-2 FCP_CMND IU 23 * task attribute values in FCP-2 FCP_CMND IU
36 */ 24 */
37#define SIMPLE_Q 0 25#define SIMPLE_Q 0
38#define HEAD_OF_Q 1 26#define HEAD_OF_Q 1
39#define ORDERED_Q 2 27#define ORDERED_Q 2
40#define ACA_Q 4 28#define ACA_Q 4
41#define UNTAGGED 5 29#define UNTAGGED 5
42 30
43static inline lun_t 31static inline lun_t
44bfad_int_to_lun(u32 luno) 32bfad_int_to_lun(u32 luno)
45{ 33{
46 union { 34 union {
47 u16 scsi_lun[4]; 35 u16 scsi_lun[4];
48 lun_t bfa_lun; 36 lun_t bfa_lun;
49 } lun; 37 } lun;
50 38
51 lun.bfa_lun = 0; 39 lun.bfa_lun = 0;
@@ -141,7 +129,7 @@ static inline u8
141bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) 129bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
142{ 130{
143 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 131 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
144 u8 task_attr = UNTAGGED; 132 u8 task_attr = UNTAGGED;
145 133
146 if (cmnd->device->tagged_supported) { 134 if (cmnd->device->tagged_supported) {
147 switch (cmnd->tag) { 135 switch (cmnd->tag) {
@@ -178,4 +166,4 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
178 */ 166 */
179#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE 167#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
180 168
181#endif /* __BFA_HCB_IOIM_MACROS_H__ */ 169#endif /* __BFA_HCB_IOIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
deleted file mode 100644
index 2b917792c6bc..000000000000
--- a/drivers/scsi/bfa/bfa_cee.c
+++ /dev/null
@@ -1,492 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <defs/bfa_defs_cee.h>
19#include <cs/bfa_trc.h>
20#include <cs/bfa_log.h>
21#include <cs/bfa_debug.h>
22#include <cee/bfa_cee.h>
23#include <bfi/bfi_cee.h>
24#include <bfi/bfi.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27
28BFA_TRC_FILE(CNA, CEE);
29
30#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
31#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32
33static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
34static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
35 *dcbcx_stats);
36static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
37 *lldp_stats);
38static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
39static void bfa_cee_format_cee_cfg(void *buffer);
40static void bfa_cee_format_cee_stats(void *buffer);
41
42static void
43bfa_cee_format_cee_stats(void *buffer)
44{
45 struct bfa_cee_stats_s *cee_stats = buffer;
46 bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
47 bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
48 bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
49}
50
51static void
52bfa_cee_format_cee_cfg(void *buffer)
53{
54 struct bfa_cee_attr_s *cee_cfg = buffer;
55 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
56}
57
58static void
59bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
60{
61 dcbcx_stats->subtlvs_unrecognized =
62 bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
63 dcbcx_stats->negotiation_failed =
64 bfa_os_ntohl(dcbcx_stats->negotiation_failed);
65 dcbcx_stats->remote_cfg_changed =
66 bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
67 dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
68 dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
69 dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
70 dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
71 dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
72 dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
73}
74
75static void
76bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
77{
78 lldp_stats->frames_transmitted =
79 bfa_os_ntohl(lldp_stats->frames_transmitted);
80 lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
81 lldp_stats->frames_discarded =
82 bfa_os_ntohl(lldp_stats->frames_discarded);
83 lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
84 lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
85 lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
86 lldp_stats->tlvs_unrecognized =
87 bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
88}
89
90static void
91bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
92{
93 cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
94 cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
95 cfg_stats->cee_hw_cfg_changed =
96 bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
97 cfg_stats->recvd_invalid_cfg =
98 bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
99}
100
101static void
102bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
103{
104 lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
105 lldp_cfg->enabled_system_cap =
106 bfa_os_ntohs(lldp_cfg->enabled_system_cap);
107}
108
109/**
110 * bfa_cee_attr_meminfo()
111 *
112 *
113 * @param[in] void
114 *
115 * @return Size of DMA region
116 */
117static u32
118bfa_cee_attr_meminfo(void)
119{
120 return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
121}
122
123/**
124 * bfa_cee_stats_meminfo()
125 *
126 *
127 * @param[in] void
128 *
129 * @return Size of DMA region
130 */
131static u32
132bfa_cee_stats_meminfo(void)
133{
134 return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
135}
136
137/**
138 * bfa_cee_get_attr_isr()
139 *
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
148{
149 cee->get_attr_status = status;
150 bfa_trc(cee, 0);
151 if (status == BFA_STATUS_OK) {
152 bfa_trc(cee, 0);
153 /*
154 * The requested data has been copied to the DMA area, *process
155 * it.
156 */
157 memcpy(cee->attr, cee->attr_dma.kva,
158 sizeof(struct bfa_cee_attr_s));
159 bfa_cee_format_cee_cfg(cee->attr);
160 }
161 cee->get_attr_pending = BFA_FALSE;
162 if (cee->cbfn.get_attr_cbfn) {
163 bfa_trc(cee, 0);
164 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
165 }
166 bfa_trc(cee, 0);
167}
168
169/**
170 * bfa_cee_get_attr_isr()
171 *
172 *
173 * @param[in] cee - Pointer to the CEE module
174 * status - Return status from the f/w
175 *
176 * @return void
177 */
178static void
179bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
180{
181 cee->get_stats_status = status;
182 bfa_trc(cee, 0);
183 if (status == BFA_STATUS_OK) {
184 bfa_trc(cee, 0);
185 /*
186 * The requested data has been copied to the DMA area, process
187 * it.
188 */
189 memcpy(cee->stats, cee->stats_dma.kva,
190 sizeof(struct bfa_cee_stats_s));
191 bfa_cee_format_cee_stats(cee->stats);
192 }
193 cee->get_stats_pending = BFA_FALSE;
194 bfa_trc(cee, 0);
195 if (cee->cbfn.get_stats_cbfn) {
196 bfa_trc(cee, 0);
197 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
198 }
199 bfa_trc(cee, 0);
200}
201
202/**
203 * bfa_cee_get_attr_isr()
204 *
205 *
206 * @param[in] cee - Pointer to the CEE module
207 * status - Return status from the f/w
208 *
209 * @return void
210 */
211static void
212bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
213{
214 cee->reset_stats_status = status;
215 cee->reset_stats_pending = BFA_FALSE;
216 if (cee->cbfn.reset_stats_cbfn)
217 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
218}
219
220/**
221 * bfa_cee_meminfo()
222 *
223 *
224 * @param[in] void
225 *
226 * @return Size of DMA region
227 */
228u32
229bfa_cee_meminfo(void)
230{
231 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
232}
233
234/**
235 * bfa_cee_mem_claim()
236 *
237 *
238 * @param[in] cee CEE module pointer
239 * dma_kva Kernel Virtual Address of CEE DMA Memory
240 * dma_pa Physical Address of CEE DMA Memory
241 *
242 * @return void
243 */
244void
245bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
246{
247 cee->attr_dma.kva = dma_kva;
248 cee->attr_dma.pa = dma_pa;
249 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
250 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
251 cee->attr = (struct bfa_cee_attr_s *)dma_kva;
252 cee->stats =
253 (struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
254}
255
256/**
257 * bfa_cee_get_attr()
258 *
259 * Send the request to the f/w to fetch CEE attributes.
260 *
261 * @param[in] Pointer to the CEE module data structure.
262 *
263 * @return Status
264 */
265
266bfa_status_t
267bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
268 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
269{
270 struct bfi_cee_get_req_s *cmd;
271
272 bfa_assert((cee != NULL) && (cee->ioc != NULL));
273 bfa_trc(cee, 0);
274 if (!bfa_ioc_is_operational(cee->ioc)) {
275 bfa_trc(cee, 0);
276 return BFA_STATUS_IOC_FAILURE;
277 }
278 if (cee->get_attr_pending == BFA_TRUE) {
279 bfa_trc(cee, 0);
280 return BFA_STATUS_DEVBUSY;
281 }
282 cee->get_attr_pending = BFA_TRUE;
283 cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
284 cee->attr = attr;
285 cee->cbfn.get_attr_cbfn = cbfn;
286 cee->cbfn.get_attr_cbarg = cbarg;
287 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
288 bfa_ioc_portid(cee->ioc));
289 bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
290 bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
291 bfa_trc(cee, 0);
292
293 return BFA_STATUS_OK;
294}
295
296/**
297 * bfa_cee_get_stats()
298 *
299 * Send the request to the f/w to fetch CEE statistics.
300 *
301 * @param[in] Pointer to the CEE module data structure.
302 *
303 * @return Status
304 */
305
306bfa_status_t
307bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
308 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
309{
310 struct bfi_cee_get_req_s *cmd;
311
312 bfa_assert((cee != NULL) && (cee->ioc != NULL));
313
314 if (!bfa_ioc_is_operational(cee->ioc)) {
315 bfa_trc(cee, 0);
316 return BFA_STATUS_IOC_FAILURE;
317 }
318 if (cee->get_stats_pending == BFA_TRUE) {
319 bfa_trc(cee, 0);
320 return BFA_STATUS_DEVBUSY;
321 }
322 cee->get_stats_pending = BFA_TRUE;
323 cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
324 cee->stats = stats;
325 cee->cbfn.get_stats_cbfn = cbfn;
326 cee->cbfn.get_stats_cbarg = cbarg;
327 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
328 bfa_ioc_portid(cee->ioc));
329 bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
330 bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
331 bfa_trc(cee, 0);
332
333 return BFA_STATUS_OK;
334}
335
336/**
337 * bfa_cee_reset_stats()
338 *
339 *
340 * @param[in] Pointer to the CEE module data structure.
341 *
342 * @return Status
343 */
344
345bfa_status_t
346bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
347 void *cbarg)
348{
349 struct bfi_cee_reset_stats_s *cmd;
350
351 bfa_assert((cee != NULL) && (cee->ioc != NULL));
352 if (!bfa_ioc_is_operational(cee->ioc)) {
353 bfa_trc(cee, 0);
354 return BFA_STATUS_IOC_FAILURE;
355 }
356 if (cee->reset_stats_pending == BFA_TRUE) {
357 bfa_trc(cee, 0);
358 return BFA_STATUS_DEVBUSY;
359 }
360 cee->reset_stats_pending = BFA_TRUE;
361 cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
362 cee->cbfn.reset_stats_cbfn = cbfn;
363 cee->cbfn.reset_stats_cbarg = cbarg;
364 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
365 bfa_ioc_portid(cee->ioc));
366 bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
367 bfa_trc(cee, 0);
368 return BFA_STATUS_OK;
369}
370
371/**
372 * bfa_cee_isrs()
373 *
374 *
375 * @param[in] Pointer to the CEE module data structure.
376 *
377 * @return void
378 */
379
380void
381bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
382{
383 union bfi_cee_i2h_msg_u *msg;
384 struct bfi_cee_get_rsp_s *get_rsp;
385 struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
386 msg = (union bfi_cee_i2h_msg_u *)m;
387 get_rsp = (struct bfi_cee_get_rsp_s *)m;
388 bfa_trc(cee, msg->mh.msg_id);
389 switch (msg->mh.msg_id) {
390 case BFI_CEE_I2H_GET_CFG_RSP:
391 bfa_trc(cee, get_rsp->cmd_status);
392 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
393 break;
394 case BFI_CEE_I2H_GET_STATS_RSP:
395 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
396 break;
397 case BFI_CEE_I2H_RESET_STATS_RSP:
398 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
399 break;
400 default:
401 bfa_assert(0);
402 }
403}
404
405/**
406 * bfa_cee_hbfail()
407 *
408 *
409 * @param[in] Pointer to the CEE module data structure.
410 *
411 * @return void
412 */
413
414void
415bfa_cee_hbfail(void *arg)
416{
417 struct bfa_cee_s *cee;
418 cee = (struct bfa_cee_s *)arg;
419
420 if (cee->get_attr_pending == BFA_TRUE) {
421 cee->get_attr_status = BFA_STATUS_FAILED;
422 cee->get_attr_pending = BFA_FALSE;
423 if (cee->cbfn.get_attr_cbfn) {
424 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
425 BFA_STATUS_FAILED);
426 }
427 }
428 if (cee->get_stats_pending == BFA_TRUE) {
429 cee->get_stats_status = BFA_STATUS_FAILED;
430 cee->get_stats_pending = BFA_FALSE;
431 if (cee->cbfn.get_stats_cbfn) {
432 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
433 BFA_STATUS_FAILED);
434 }
435 }
436 if (cee->reset_stats_pending == BFA_TRUE) {
437 cee->reset_stats_status = BFA_STATUS_FAILED;
438 cee->reset_stats_pending = BFA_FALSE;
439 if (cee->cbfn.reset_stats_cbfn) {
440 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
441 BFA_STATUS_FAILED);
442 }
443 }
444}
445
446/**
447 * bfa_cee_attach()
448 *
449 *
450 * @param[in] cee - Pointer to the CEE module data structure
451 * ioc - Pointer to the ioc module data structure
452 * dev - Pointer to the device driver module data structure
453 * The device driver specific mbox ISR functions have
454 * this pointer as one of the parameters.
455 * trcmod -
456 * logmod -
457 *
458 * @return void
459 */
460void
461bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
462 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
463{
464 bfa_assert(cee != NULL);
465 cee->dev = dev;
466 cee->trcmod = trcmod;
467 cee->logmod = logmod;
468 cee->ioc = ioc;
469
470 bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
471 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
472 bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
473 bfa_trc(cee, 0);
474}
475
476/**
477 * bfa_cee_detach()
478 *
479 *
480 * @param[in] cee - Pointer to the CEE module data structure
481 *
482 * @return void
483 */
484void
485bfa_cee_detach(struct bfa_cee_s *cee)
486{
487 /*
488 * For now, just check if there is some ioctl pending and mark that as
489 * failed?
490 */
491 /* bfa_cee_hbfail(cee); */
492}
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 76fa5c5b40dd..c2fa07f2485d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,27 +15,992 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_modules.h"
19#include <defs/bfa_defs_pci.h> 19#include "bfi_ctreg.h"
20#include <cs/bfa_debug.h> 20#include "bfad_drv.h"
21#include <bfa_iocfc.h>
22 21
23#define DEF_CFG_NUM_FABRICS 1 22BFA_TRC_FILE(HAL, CORE);
24#define DEF_CFG_NUM_LPORTS 256
25#define DEF_CFG_NUM_CQS 4
26#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
27#define DEF_CFG_NUM_TSKIM_REQS 128
28#define DEF_CFG_NUM_FCXP_REQS 64
29#define DEF_CFG_NUM_UF_BUFS 64
30#define DEF_CFG_NUM_RPORTS 1024
31#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
32#define DEF_CFG_NUM_TINS 256
33 23
34#define DEF_CFG_NUM_SGPGS 2048 24/**
35#define DEF_CFG_NUM_REQQ_ELEMS 256 25 * BFA IOC FC related definitions
36#define DEF_CFG_NUM_RSPQ_ELEMS 64 26 */
37#define DEF_CFG_NUM_SBOOT_TGTS 16 27
38#define DEF_CFG_NUM_SBOOT_LUNS 16 28/**
29 * IOC local definitions
30 */
31#define BFA_IOCFC_TOV 5000 /* msecs */
32
33enum {
34 BFA_IOCFC_ACT_NONE = 0,
35 BFA_IOCFC_ACT_INIT = 1,
36 BFA_IOCFC_ACT_STOP = 2,
37 BFA_IOCFC_ACT_DISABLE = 3,
38};
39
40#define DEF_CFG_NUM_FABRICS 1
41#define DEF_CFG_NUM_LPORTS 256
42#define DEF_CFG_NUM_CQS 4
43#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
44#define DEF_CFG_NUM_TSKIM_REQS 128
45#define DEF_CFG_NUM_FCXP_REQS 64
46#define DEF_CFG_NUM_UF_BUFS 64
47#define DEF_CFG_NUM_RPORTS 1024
48#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
49#define DEF_CFG_NUM_TINS 256
50
51#define DEF_CFG_NUM_SGPGS 2048
52#define DEF_CFG_NUM_REQQ_ELEMS 256
53#define DEF_CFG_NUM_RSPQ_ELEMS 64
54#define DEF_CFG_NUM_SBOOT_TGTS 16
55#define DEF_CFG_NUM_SBOOT_LUNS 16
56
57/**
58 * forward declaration for IOC FC functions
59 */
60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
61static void bfa_iocfc_disable_cbfn(void *bfa_arg);
62static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
63static void bfa_iocfc_reset_cbfn(void *bfa_arg);
64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
65
66/**
67 * BFA Interrupt handling functions
68 */
69static void
70bfa_msix_errint(struct bfa_s *bfa, u32 intr)
71{
72 bfa_ioc_error_isr(&bfa->ioc);
73}
74
75static void
76bfa_msix_lpu(struct bfa_s *bfa)
77{
78 bfa_ioc_mbox_isr(&bfa->ioc);
79}
80
81static void
82bfa_reqq_resume(struct bfa_s *bfa, int qid)
83{
84 struct list_head *waitq, *qe, *qen;
85 struct bfa_reqq_wait_s *wqe;
86
87 waitq = bfa_reqq(bfa, qid);
88 list_for_each_safe(qe, qen, waitq) {
89 /**
90 * Callback only as long as there is room in request queue
91 */
92 if (bfa_reqq_full(bfa, qid))
93 break;
94
95 list_del(qe);
96 wqe = (struct bfa_reqq_wait_s *) qe;
97 wqe->qresume(wqe->cbarg);
98 }
99}
100
101void
102bfa_msix_all(struct bfa_s *bfa, int vec)
103{
104 bfa_intx(bfa);
105}
106
107/**
108 * hal_intr_api
109 */
110bfa_boolean_t
111bfa_intx(struct bfa_s *bfa)
112{
113 u32 intr, qintr;
114 int queue;
115
116 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
117 if (!intr)
118 return BFA_FALSE;
119
120 /**
121 * RME completion queue interrupt
122 */
123 qintr = intr & __HFN_INT_RME_MASK;
124 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
125
126 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
127 if (intr & (__HFN_INT_RME_Q0 << queue))
128 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
129 }
130 intr &= ~qintr;
131 if (!intr)
132 return BFA_TRUE;
133
134 /**
135 * CPE completion queue interrupt
136 */
137 qintr = intr & __HFN_INT_CPE_MASK;
138 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
139
140 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
141 if (intr & (__HFN_INT_CPE_Q0 << queue))
142 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
143 }
144 intr &= ~qintr;
145 if (!intr)
146 return BFA_TRUE;
147
148 bfa_msix_lpu_err(bfa, intr);
149
150 return BFA_TRUE;
151}
152
153void
154bfa_intx_enable(struct bfa_s *bfa)
155{
156 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
157}
158
159void
160bfa_intx_disable(struct bfa_s *bfa)
161{
162 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
163}
164
165void
166bfa_isr_enable(struct bfa_s *bfa)
167{
168 u32 intr_unmask;
169 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
170
171 bfa_trc(bfa, pci_func);
172
173 bfa_msix_install(bfa);
174 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
175 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
176 __HFN_INT_LL_HALT);
177
178 if (pci_func == 0)
179 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
180 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
181 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
182 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
183 __HFN_INT_MBOX_LPU0);
184 else
185 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
186 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
187 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
188 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
189 __HFN_INT_MBOX_LPU1);
190
191 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
192 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
193 bfa->iocfc.intr_mask = ~intr_unmask;
194 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
195}
196
197void
198bfa_isr_disable(struct bfa_s *bfa)
199{
200 bfa_isr_mode_set(bfa, BFA_FALSE);
201 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
202 bfa_msix_uninstall(bfa);
203}
204
205void
206bfa_msix_reqq(struct bfa_s *bfa, int qid)
207{
208 struct list_head *waitq;
209
210 qid &= (BFI_IOC_MAX_CQS - 1);
211
212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
213
214 /**
215 * Resume any pending requests in the corresponding reqq.
216 */
217 waitq = bfa_reqq(bfa, qid);
218 if (!list_empty(waitq))
219 bfa_reqq_resume(bfa, qid);
220}
221
222void
223bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
224{
225 bfa_trc(bfa, m->mhdr.msg_class);
226 bfa_trc(bfa, m->mhdr.msg_id);
227 bfa_trc(bfa, m->mhdr.mtag.i2htok);
228 bfa_assert(0);
229 bfa_trc_stop(bfa->trcmod);
230}
231
232void
233bfa_msix_rspq(struct bfa_s *bfa, int qid)
234{
235 struct bfi_msg_s *m;
236 u32 pi, ci;
237 struct list_head *waitq;
238
239 bfa_trc_fp(bfa, qid);
240
241 qid &= (BFI_IOC_MAX_CQS - 1);
242
243 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
244
245 ci = bfa_rspq_ci(bfa, qid);
246 pi = bfa_rspq_pi(bfa, qid);
247
248 bfa_trc_fp(bfa, ci);
249 bfa_trc_fp(bfa, pi);
250
251 if (bfa->rme_process) {
252 while (ci != pi) {
253 m = bfa_rspq_elem(bfa, qid, ci);
254 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
255
256 bfa_isrs[m->mhdr.msg_class] (bfa, m);
257
258 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
259 }
260 }
261
262 /**
263 * update CI
264 */
265 bfa_rspq_ci(bfa, qid) = pi;
266 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
267 mmiowb();
268
269 /**
270 * Resume any pending requests in the corresponding reqq.
271 */
272 waitq = bfa_reqq(bfa, qid);
273 if (!list_empty(waitq))
274 bfa_reqq_resume(bfa, qid);
275}
276
277void
278bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
279{
280 u32 intr, curr_value;
281
282 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
283
284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
285 bfa_msix_lpu(bfa);
286
287 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
288 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
289
290 if (intr) {
291 if (intr & __HFN_INT_LL_HALT) {
292 /**
293 * If LL_HALT bit is set then FW Init Halt LL Port
294 * Register needs to be cleared as well so Interrupt
295 * Status Register will be cleared.
296 */
297 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
298 curr_value &= ~__FW_INIT_HALT_P;
299 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
300 }
301
302 if (intr & __HFN_INT_ERR_PSS) {
303 /**
304 * ERR_PSS bit needs to be cleared as well in case
305 * interrups are shared so driver's interrupt handler is
306 * still called eventhough it is already masked out.
307 */
308 curr_value = bfa_reg_read(
309 bfa->ioc.ioc_regs.pss_err_status_reg);
310 curr_value &= __PSS_ERR_STATUS_SET;
311 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
312 curr_value);
313 }
314
315 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
316 bfa_msix_errint(bfa, intr);
317 }
318}
319
320void
321bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
322{
323 bfa_isrs[mc] = isr_func;
324}
325
326/**
327 * BFA IOC FC related functions
328 */
329
330/**
331 * hal_ioc_pvt BFA IOC private functions
332 */
333
334static void
335bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
336{
337 int i, per_reqq_sz, per_rspq_sz;
338
339 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
340 BFA_DMA_ALIGN_SZ);
341 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
342 BFA_DMA_ALIGN_SZ);
343
344 /*
345 * Calculate CQ size
346 */
347 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
348 *dm_len = *dm_len + per_reqq_sz;
349 *dm_len = *dm_len + per_rspq_sz;
350 }
351
352 /*
353 * Calculate Shadow CI/PI size
354 */
355 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
356 *dm_len += (2 * BFA_CACHELINE_SZ);
357}
358
359static void
360bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
361{
362 *dm_len +=
363 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
364 *dm_len +=
365 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
366 BFA_CACHELINE_SZ);
367}
368
369/**
370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
371 */
372static void
373bfa_iocfc_send_cfg(void *bfa_arg)
374{
375 struct bfa_s *bfa = bfa_arg;
376 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
377 struct bfi_iocfc_cfg_req_s cfg_req;
378 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
379 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
380 int i;
381
382 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
383 bfa_trc(bfa, cfg->fwcfg.num_cqs);
384
385 bfa_iocfc_reset_queues(bfa);
386
387 /**
388 * initialize IOC configuration info
389 */
390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
391 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
392
393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
394 /**
395 * dma map REQ and RSP circular queues and shadow pointers
396 */
397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
398 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
399 iocfc->req_cq_ba[i].pa);
400 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
401 iocfc->req_cq_shadow_ci[i].pa);
402 cfg_info->req_cq_elems[i] =
403 bfa_os_htons(cfg->drvcfg.num_reqq_elems);
404
405 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
406 iocfc->rsp_cq_ba[i].pa);
407 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
408 iocfc->rsp_cq_shadow_pi[i].pa);
409 cfg_info->rsp_cq_elems[i] =
410 bfa_os_htons(cfg->drvcfg.num_rspq_elems);
411 }
412
413 /**
414 * Enable interrupt coalescing if it is driver init path
415 * and not ioc disable/enable path.
416 */
417 if (!iocfc->cfgdone)
418 cfg_info->intr_attr.coalesce = BFA_TRUE;
419
420 iocfc->cfgdone = BFA_FALSE;
421
422 /**
423 * dma map IOC configuration itself
424 */
425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
426 bfa_lpuid(bfa));
427 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
428
429 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
430 sizeof(struct bfi_iocfc_cfg_req_s));
431}
432
433static void
434bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
435 struct bfa_pcidev_s *pcidev)
436{
437 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
438
439 bfa->bfad = bfad;
440 iocfc->bfa = bfa;
441 iocfc->action = BFA_IOCFC_ACT_NONE;
442
443 bfa_os_assign(iocfc->cfg, *cfg);
444
445 /**
446 * Initialize chip specific handlers.
447 */
448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
449 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
450 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
451 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
452 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
453 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
454 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
455 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
456 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
457 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
458 } else {
459 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
460 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
461 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
462 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
463 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
464 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
465 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
466 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
467 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
468 }
469
470 iocfc->hwif.hw_reginit(bfa);
471 bfa->msix.nvecs = 0;
472}
473
474static void
475bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
476 struct bfa_meminfo_s *meminfo)
477{
478 u8 *dm_kva;
479 u64 dm_pa;
480 int i, per_reqq_sz, per_rspq_sz;
481 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
482 int dbgsz;
483
484 dm_kva = bfa_meminfo_dma_virt(meminfo);
485 dm_pa = bfa_meminfo_dma_phys(meminfo);
486
487 /*
488 * First allocate dma memory for IOC.
489 */
490 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
491 dm_kva += bfa_ioc_meminfo();
492 dm_pa += bfa_ioc_meminfo();
493
494 /*
495 * Claim DMA-able memory for the request/response queues and for shadow
496 * ci/pi registers
497 */
498 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
499 BFA_DMA_ALIGN_SZ);
500 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
501 BFA_DMA_ALIGN_SZ);
502
503 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
504 iocfc->req_cq_ba[i].kva = dm_kva;
505 iocfc->req_cq_ba[i].pa = dm_pa;
506 bfa_os_memset(dm_kva, 0, per_reqq_sz);
507 dm_kva += per_reqq_sz;
508 dm_pa += per_reqq_sz;
509
510 iocfc->rsp_cq_ba[i].kva = dm_kva;
511 iocfc->rsp_cq_ba[i].pa = dm_pa;
512 bfa_os_memset(dm_kva, 0, per_rspq_sz);
513 dm_kva += per_rspq_sz;
514 dm_pa += per_rspq_sz;
515 }
516
517 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
518 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
519 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
520 dm_kva += BFA_CACHELINE_SZ;
521 dm_pa += BFA_CACHELINE_SZ;
522
523 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
524 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
525 dm_kva += BFA_CACHELINE_SZ;
526 dm_pa += BFA_CACHELINE_SZ;
527 }
528
529 /*
530 * Claim DMA-able memory for the config info page
531 */
532 bfa->iocfc.cfg_info.kva = dm_kva;
533 bfa->iocfc.cfg_info.pa = dm_pa;
534 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
535 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
536 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
537
538 /*
539 * Claim DMA-able memory for the config response
540 */
541 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
542 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
543 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
544
545 dm_kva +=
546 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
547 BFA_CACHELINE_SZ);
548 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
549 BFA_CACHELINE_SZ);
550
551
552 bfa_meminfo_dma_virt(meminfo) = dm_kva;
553 bfa_meminfo_dma_phys(meminfo) = dm_pa;
554
555 dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
556 if (dbgsz > 0) {
557 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
558 bfa_meminfo_kva(meminfo) += dbgsz;
559 }
560}
561
562/**
563 * Start BFA submodules.
564 */
565static void
566bfa_iocfc_start_submod(struct bfa_s *bfa)
567{
568 int i;
569
570 bfa->rme_process = BFA_TRUE;
571
572 for (i = 0; hal_mods[i]; i++)
573 hal_mods[i]->start(bfa);
574}
575
576/**
577 * Disable BFA submodules.
578 */
579static void
580bfa_iocfc_disable_submod(struct bfa_s *bfa)
581{
582 int i;
583
584 for (i = 0; hal_mods[i]; i++)
585 hal_mods[i]->iocdisable(bfa);
586}
587
588static void
589bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
590{
591 struct bfa_s *bfa = bfa_arg;
592
593 if (complete) {
594 if (bfa->iocfc.cfgdone)
595 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
596 else
597 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
598 } else {
599 if (bfa->iocfc.cfgdone)
600 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
601 }
602}
603
604static void
605bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
606{
607 struct bfa_s *bfa = bfa_arg;
608 struct bfad_s *bfad = bfa->bfad;
609
610 if (compl)
611 complete(&bfad->comp);
612 else
613 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
614}
615
616static void
617bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
618{
619 struct bfa_s *bfa = bfa_arg;
620 struct bfad_s *bfad = bfa->bfad;
621
622 if (compl)
623 complete(&bfad->disable_comp);
624}
625
626/**
627 * Update BFA configuration from firmware configuration.
628 */
629static void
630bfa_iocfc_cfgrsp(struct bfa_s *bfa)
631{
632 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
633 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
634 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
635
636 fwcfg->num_cqs = fwcfg->num_cqs;
637 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
638 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
639 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
640 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
641 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
642
643 iocfc->cfgdone = BFA_TRUE;
644
645 /**
646 * Configuration is complete - initialize/start submodules
647 */
648 bfa_fcport_init(bfa);
649
650 if (iocfc->action == BFA_IOCFC_ACT_INIT)
651 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
652 else
653 bfa_iocfc_start_submod(bfa);
654}
655void
656bfa_iocfc_reset_queues(struct bfa_s *bfa)
657{
658 int q;
659
660 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
661 bfa_reqq_ci(bfa, q) = 0;
662 bfa_reqq_pi(bfa, q) = 0;
663 bfa_rspq_ci(bfa, q) = 0;
664 bfa_rspq_pi(bfa, q) = 0;
665 }
666}
667
668/**
669 * IOC enable request is complete
670 */
671static void
672bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
673{
674 struct bfa_s *bfa = bfa_arg;
675
676 if (status != BFA_STATUS_OK) {
677 bfa_isr_disable(bfa);
678 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
679 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
680 bfa_iocfc_init_cb, bfa);
681 return;
682 }
683
684 bfa_iocfc_send_cfg(bfa);
685}
686
687/**
688 * IOC disable request is complete
689 */
690static void
691bfa_iocfc_disable_cbfn(void *bfa_arg)
692{
693 struct bfa_s *bfa = bfa_arg;
694
695 bfa_isr_disable(bfa);
696 bfa_iocfc_disable_submod(bfa);
697
698 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
699 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
700 bfa);
701 else {
702 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
703 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
704 bfa);
705 }
706}
707
708/**
709 * Notify sub-modules of hardware failure.
710 */
711static void
712bfa_iocfc_hbfail_cbfn(void *bfa_arg)
713{
714 struct bfa_s *bfa = bfa_arg;
715
716 bfa->rme_process = BFA_FALSE;
717
718 bfa_isr_disable(bfa);
719 bfa_iocfc_disable_submod(bfa);
720
721 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
722 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
723 bfa);
724}
725
726/**
727 * Actions on chip-reset completion.
728 */
729static void
730bfa_iocfc_reset_cbfn(void *bfa_arg)
731{
732 struct bfa_s *bfa = bfa_arg;
733
734 bfa_iocfc_reset_queues(bfa);
735 bfa_isr_enable(bfa);
736}
737
738/**
739 * hal_ioc_public
740 */
741
742/**
743 * Query IOC memory requirement information.
744 */
745void
746bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
747 u32 *dm_len)
748{
749 /* dma memory for IOC */
750 *dm_len += bfa_ioc_meminfo();
751
752 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
753 bfa_iocfc_cqs_sz(cfg, dm_len);
754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
755}
756
757/**
758 * Query IOC memory requirement information.
759 */
760void
761bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
762 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
763{
764 int i;
765 struct bfa_ioc_s *ioc = &bfa->ioc;
766
767 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
768 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
769 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
770 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
771
772 ioc->trcmod = bfa->trcmod;
773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
774
775 /**
776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
777 */
778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
779 bfa_ioc_set_fcmode(&bfa->ioc);
780
781 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
782 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
783
784 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
785 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
786 bfa_timer_init(&bfa->timer_mod);
787
788 INIT_LIST_HEAD(&bfa->comp_q);
789 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
791}
792
793/**
794 * Query IOC memory requirement information.
795 */
796void
797bfa_iocfc_detach(struct bfa_s *bfa)
798{
799 bfa_ioc_detach(&bfa->ioc);
800}
801
802/**
803 * Query IOC memory requirement information.
804 */
805void
806bfa_iocfc_init(struct bfa_s *bfa)
807{
808 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
809 bfa_ioc_enable(&bfa->ioc);
810}
811
812/**
813 * IOC start called from bfa_start(). Called to start IOC operations
814 * at driver instantiation for this instance.
815 */
816void
817bfa_iocfc_start(struct bfa_s *bfa)
818{
819 if (bfa->iocfc.cfgdone)
820 bfa_iocfc_start_submod(bfa);
821}
822
823/**
824 * IOC stop called from bfa_stop(). Called only when driver is unloaded
825 * for this instance.
826 */
827void
828bfa_iocfc_stop(struct bfa_s *bfa)
829{
830 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
831
832 bfa->rme_process = BFA_FALSE;
833 bfa_ioc_disable(&bfa->ioc);
834}
835
836void
837bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
838{
839 struct bfa_s *bfa = bfaarg;
840 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
841 union bfi_iocfc_i2h_msg_u *msg;
842
843 msg = (union bfi_iocfc_i2h_msg_u *) m;
844 bfa_trc(bfa, msg->mh.msg_id);
845
846 switch (msg->mh.msg_id) {
847 case BFI_IOCFC_I2H_CFG_REPLY:
848 iocfc->cfg_reply = &msg->cfg_reply;
849 bfa_iocfc_cfgrsp(bfa);
850 break;
851 case BFI_IOCFC_I2H_UPDATEQ_RSP:
852 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
853 break;
854 default:
855 bfa_assert(0);
856 }
857}
858
859void
860bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
861{
862 bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
863}
864
865u64
866bfa_adapter_get_id(struct bfa_s *bfa)
867{
868 return bfa_ioc_get_adid(&bfa->ioc);
869}
870
871void
872bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
873{
874 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
875
876 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
877
878 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
879 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
880 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
881
882 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
883 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
884 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
885
886 attr->config = iocfc->cfg;
887}
888
889bfa_status_t
890bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
891{
892 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
893 struct bfi_iocfc_set_intr_req_s *m;
894
895 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
896 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
897 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
898
899 if (!bfa_iocfc_is_operational(bfa))
900 return BFA_STATUS_OK;
901
902 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
903 if (!m)
904 return BFA_STATUS_DEVBUSY;
905
906 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
907 bfa_lpuid(bfa));
908 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
909 m->delay = iocfc->cfginfo->intr_attr.delay;
910 m->latency = iocfc->cfginfo->intr_attr.latency;
911
912 bfa_trc(bfa, attr->delay);
913 bfa_trc(bfa, attr->latency);
914
915 bfa_reqq_produce(bfa, BFA_REQQ_IOC);
916 return BFA_STATUS_OK;
917}
918
919void
920bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
921{
922 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
923
924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
926}
927/**
928 * Enable IOC after it is disabled.
929 */
930void
931bfa_iocfc_enable(struct bfa_s *bfa)
932{
933 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
934 "IOC Enable");
935 bfa_ioc_enable(&bfa->ioc);
936}
937
938void
939bfa_iocfc_disable(struct bfa_s *bfa)
940{
941 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
942 "IOC Disable");
943 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
944
945 bfa->rme_process = BFA_FALSE;
946 bfa_ioc_disable(&bfa->ioc);
947}
948
949
950bfa_boolean_t
951bfa_iocfc_is_operational(struct bfa_s *bfa)
952{
953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
954}
955
956/**
957 * Return boot target port wwns -- read from boot information in flash.
958 */
959void
960bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
961{
962 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
963 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
964 int i;
965
966 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
967 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
968 *nwwns = cfgrsp->pbc_cfg.nbluns;
969 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
970 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
971
972 return;
973 }
974
975 *nwwns = cfgrsp->bootwwns.nwwns;
976 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
977}
978
979void
980bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
981{
982 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
983 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
984
985 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
986 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
987 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
988 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
989}
990
991int
992bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
993{
994 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
995 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
996
997 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
998 return cfgrsp->pbc_cfg.nvports;
999}
1000
1001/**
1002 * hal_api
1003 */
39 1004
40/** 1005/**
41 * Use this function query the memory requirement of the BFA library. 1006 * Use this function query the memory requirement of the BFA library.
@@ -45,16 +1010,16 @@
45 * This call will fail, if the cap is out of range compared to pre-defined 1010 * This call will fail, if the cap is out of range compared to pre-defined
46 * values within the BFA library 1011 * values within the BFA library
47 * 1012 *
48 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate 1013 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
49 * its configuration in this structure. 1014 * its configuration in this structure.
50 * The default values for struct bfa_iocfc_cfg_s can be 1015 * The default values for struct bfa_iocfc_cfg_s can be
51 * fetched using bfa_cfg_get_default() API. 1016 * fetched using bfa_cfg_get_default() API.
52 * 1017 *
53 * If cap's boundary check fails, the library will use 1018 * If cap's boundary check fails, the library will use
54 * the default bfa_cap_t values (and log a warning msg). 1019 * the default bfa_cap_t values (and log a warning msg).
55 * 1020 *
56 * @param[out] meminfo - pointer to bfa_meminfo_t. This content 1021 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
57 * indicates the memory type (see bfa_mem_type_t) and 1022 * indicates the memory type (see bfa_mem_type_t) and
58 * amount of memory required. 1023 * amount of memory required.
59 * 1024 *
60 * Driver should allocate the memory, populate the 1025 * Driver should allocate the memory, populate the
@@ -68,8 +1033,8 @@
68void 1033void
69bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) 1034bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
70{ 1035{
71 int i; 1036 int i;
72 u32 km_len = 0, dm_len = 0; 1037 u32 km_len = 0, dm_len = 0;
73 1038
74 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1039 bfa_assert((cfg != NULL) && (meminfo != NULL));
75 1040
@@ -90,26 +1055,6 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
90 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
91} 1056}
92 1057
93static void
94bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
95{
96 struct bfa_port_s *port = &bfa->modules.port;
97 uint32_t dm_len;
98 uint8_t *dm_kva;
99 uint64_t dm_pa;
100
101 dm_len = bfa_port_meminfo();
102 dm_kva = bfa_meminfo_dma_virt(mi);
103 dm_pa = bfa_meminfo_dma_phys(mi);
104
105 memset(port, 0, sizeof(struct bfa_port_s));
106 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
107 bfa_port_mem_claim(port, dm_kva, dm_pa);
108
109 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
110 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
111}
112
113/** 1058/**
114 * Use this function to do attach the driver instance with the BFA 1059 * Use this function to do attach the driver instance with the BFA
115 * library. This function will not trigger any HW initialization 1060 * library. This function will not trigger any HW initialization
@@ -119,14 +1064,14 @@ bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
119 * pre-defined values within the BFA library 1064 * pre-defined values within the BFA library
120 * 1065 *
121 * @param[out] bfa Pointer to bfa_t. 1066 * @param[out] bfa Pointer to bfa_t.
122 * @param[in] bfad Opaque handle back to the driver's IOC structure 1067 * @param[in] bfad Opaque handle back to the driver's IOC structure
123 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure 1068 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
124 * that was used in bfa_cfg_get_meminfo(). 1069 * that was used in bfa_cfg_get_meminfo().
125 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should 1070 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
126 * use the bfa_cfg_get_meminfo() call to 1071 * use the bfa_cfg_get_meminfo() call to
127 * find the memory blocks required, allocate the 1072 * find the memory blocks required, allocate the
128 * required memory and provide the starting addresses. 1073 * required memory and provide the starting addresses.
129 * @param[in] pcidev pointer to struct bfa_pcidev_s 1074 * @param[in] pcidev pointer to struct bfa_pcidev_s
130 * 1075 *
131 * @return 1076 * @return
132 * void 1077 * void
@@ -140,8 +1085,8 @@ void
140bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1085bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
141 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1086 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
142{ 1087{
143 int i; 1088 int i;
144 struct bfa_mem_elem_s *melem; 1089 struct bfa_mem_elem_s *melem;
145 1090
146 bfa->fcs = BFA_FALSE; 1091 bfa->fcs = BFA_FALSE;
147 1092
@@ -195,20 +1140,6 @@ bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
195 bfa->trcmod = trcmod; 1140 bfa->trcmod = trcmod;
196} 1141}
197 1142
198
199void
200bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod)
201{
202 bfa->logm = logmod;
203}
204
205
206void
207bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen)
208{
209 bfa->aen = aen;
210}
211
212void 1143void
213bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) 1144bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
214{ 1145{
@@ -254,14 +1185,14 @@ bfa_start(struct bfa_s *bfa)
254 1185
255/** 1186/**
 256 * Use this function to quiesce the IOC. This function will return immediately, 1187 * Use this function to quiesce the IOC. This function will return immediately,
257 * when the IOC is actually stopped, the bfa_cb_stop() will be called. 1188 * when the IOC is actually stopped, the bfad->comp will be set.
258 * 1189 *
259 * @param[in] bfa - pointer to bfa_t. 1190 * @param[in]bfa - pointer to bfa_t.
260 * 1191 *
261 * @return None 1192 * @return None
262 * 1193 *
263 * Special Considerations: 1194 * Special Considerations:
264 * bfa_cb_stop() could be called before or after bfa_stop() returns. 1195 * bfad->comp can be set before or after bfa_stop() returns.
265 * 1196 *
266 * @note 1197 * @note
267 * In case of any failure, we could handle it automatically by doing a 1198 * In case of any failure, we could handle it automatically by doing a
@@ -283,9 +1214,9 @@ bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
283void 1214void
284bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) 1215bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
285{ 1216{
286 struct list_head *qe; 1217 struct list_head *qe;
287 struct list_head *qen; 1218 struct list_head *qen;
288 struct bfa_cb_qe_s *hcb_qe; 1219 struct bfa_cb_qe_s *hcb_qe;
289 1220
290 list_for_each_safe(qe, qen, comp_q) { 1221 list_for_each_safe(qe, qen, comp_q) {
291 hcb_qe = (struct bfa_cb_qe_s *) qe; 1222 hcb_qe = (struct bfa_cb_qe_s *) qe;
@@ -296,8 +1227,8 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
296void 1227void
297bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) 1228bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
298{ 1229{
299 struct list_head *qe; 1230 struct list_head *qe;
300 struct bfa_cb_qe_s *hcb_qe; 1231 struct bfa_cb_qe_s *hcb_qe;
301 1232
302 while (!list_empty(comp_q)) { 1233 while (!list_empty(comp_q)) {
303 bfa_q_deq(comp_q, &qe); 1234 bfa_q_deq(comp_q, &qe);
@@ -321,7 +1252,6 @@ bfa_timer_tick(struct bfa_s *bfa)
321 bfa_timer_beat(&bfa->timer_mod); 1252 bfa_timer_beat(&bfa->timer_mod);
322} 1253}
323 1254
324#ifndef BFA_BIOS_BUILD
325/** 1255/**
326 * Return the list of PCI vendor/device id lists supported by this 1256 * Return the list of PCI vendor/device id lists supported by this
327 * BFA instance. 1257 * BFA instance.
@@ -336,7 +1266,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
336 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, 1266 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
337 }; 1267 };
338 1268
339 *npciids = ARRAY_SIZE(__pciids); 1269 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
340 *pciids = __pciids; 1270 *pciids = __pciids;
341} 1271}
342 1272
@@ -351,7 +1281,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
351 * void 1281 * void
352 * 1282 *
353 * Special Considerations: 1283 * Special Considerations:
354 * note 1284 * note
355 */ 1285 */
356void 1286void
357bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) 1287bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
@@ -389,7 +1319,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
389 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 1319 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
390 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; 1320 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
391 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; 1321 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
392 cfg->drvcfg.min_cfg = BFA_TRUE; 1322 cfg->drvcfg.min_cfg = BFA_TRUE;
393} 1323}
394 1324
395void 1325void
@@ -417,7 +1347,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
417} 1347}
418 1348
419/** 1349/**
420 * Fetch firmware trace data. 1350 * Fetch firmware trace data.
421 * 1351 *
422 * @param[in] bfa BFA instance 1352 * @param[in] bfa BFA instance
423 * @param[out] trcdata Firmware trace buffer 1353 * @param[out] trcdata Firmware trace buffer
@@ -433,6 +1363,22 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
433} 1363}
434 1364
435/** 1365/**
1366 * Dump firmware memory.
1367 *
1368 * @param[in] bfa BFA instance
1369 * @param[out] buf buffer for dump
1370 * @param[in,out] offset smem offset to start read
1371 * @param[in,out] buflen length of buffer
1372 *
1373 * @retval BFA_STATUS_OK Firmware memory is dumped.
1374 * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress.
1375 */
bfa_status_t
bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
{
	/* Thin wrapper: delegate the firmware smem dump to the IOC layer. */
	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
}
1381/**
436 * Reset hw semaphore & usage cnt regs and initialize. 1382 * Reset hw semaphore & usage cnt regs and initialize.
437 */ 1383 */
438void 1384void
@@ -441,4 +1387,23 @@ bfa_chip_reset(struct bfa_s *bfa)
441 bfa_ioc_ownership_reset(&bfa->ioc); 1387 bfa_ioc_ownership_reset(&bfa->ioc);
442 bfa_ioc_pll_init(&bfa->ioc); 1388 bfa_ioc_pll_init(&bfa->ioc);
443} 1389}
444#endif 1390
1391/**
1392 * Fetch firmware statistics data.
1393 *
1394 * @param[in] bfa BFA instance
1395 * @param[out] data Firmware stats buffer
1396 *
1397 * @retval BFA_STATUS_OK Firmware trace is fetched.
1398 */
1399bfa_status_t
1400bfa_fw_stats_get(struct bfa_s *bfa, void *data)
1401{
1402 return bfa_ioc_fw_stats_get(&bfa->ioc, data);
1403}
1404
/**
 * Clear firmware statistics.
 *
 * @param[in] bfa	BFA instance
 *
 * @return Status from the IOC layer (BFA_STATUS_OK on success).
 */
bfa_status_t
bfa_fw_stats_clear(struct bfa_s *bfa)
{
	return bfa_ioc_fw_stats_clear(&bfa->ioc);
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
new file mode 100644
index 000000000000..7260c74620f8
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -0,0 +1,364 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cs.h BFA common services
20 */
21
22#ifndef __BFA_CS_H__
23#define __BFA_CS_H__
24
25#include "bfa_os_inc.h"
26
27/**
28 * BFA TRC
29 */
30
31#ifndef BFA_TRC_MAX
32#define BFA_TRC_MAX (4 * 1024)
33#endif
34
35#ifndef BFA_TRC_TS
36#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
37#endif
38
/**
 * One trace record.  (fileno, line) are swapped under __BIGENDIAN so
 * they occupy the same 32-bit layout regardless of endianness.
 */
struct bfa_trc_s {
#ifdef __BIGENDIAN
	u16	fileno;		/* tracing file id (BFA_TRC_FILE) */
	u16	line;		/* source line of the trace call */
#else
	u16	line;
	u16	fileno;
#endif
	u32	timestamp;	/* tick count at capture (BFA_TRC_TS) */
	union {
		struct {
			u32	rsvd;
			u32	u32;	/* 32-bit payload */
		} u32;
		u64	u64;		/* 64-bit payload */
	} data;
};
56
/**
 * Trace module: a fixed-size ring buffer of trace records.
 */
struct bfa_trc_mod_s {
	u32	head;		/* index of oldest valid entry */
	u32	tail;		/* index of next slot to write */
	u32	ntrc;		/* number of ring entries (BFA_TRC_MAX) */
	u32	stopped;	/* non-zero: tracing is frozen */
	u32	ticks;		/* timestamp counter, bumped per record */
	u32	rsvd[3];
	struct bfa_trc_s trc[BFA_TRC_MAX];
};
66
67enum {
68 BFA_TRC_HAL = 1, /* BFA modules */
69 BFA_TRC_FCS = 2, /* BFA FCS modules */
70 BFA_TRC_LDRV = 3, /* Linux driver modules */
71 BFA_TRC_CNA = 4, /* Common modules */
72};
73#define BFA_TRC_MOD_SH 10
74#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
75
76/**
77 * Define a new tracing file (module). Module should match one defined above.
78 */
79#define BFA_TRC_FILE(__mod, __submod) \
80 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
81 BFA_TRC_MOD(__mod))
82
83
/*
 * Record a 32-bit / 64-bit trace entry tagged with the current file and
 * source line.  bfa_trc32() must route through __bfa_trc32() so the
 * payload lands in data.u32.u32; going through __bfa_trc() (as before)
 * widened the value and stored it in data.u64 instead, and left
 * __bfa_trc32() unused.
 */
#define bfa_trc32(_trcp, _data)	\
	__bfa_trc32((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
#define bfa_trc(_trcp, _data)	\
	__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
88
89static inline void
90bfa_trc_init(struct bfa_trc_mod_s *trcm)
91{
92 trcm->head = trcm->tail = trcm->stopped = 0;
93 trcm->ntrc = BFA_TRC_MAX;
94}
95
96static inline void
97bfa_trc_stop(struct bfa_trc_mod_s *trcm)
98{
99 trcm->stopped = 1;
100}
101
102#ifdef FWTRC
103extern void dc_flush(void *data);
104#else
105#define dc_flush(data)
106#endif
107
108
/**
 * Record a 64-bit trace entry.
 *
 * Writes at the current tail slot and advances tail; when tail catches
 * head, head is pushed forward so the ring keeps only the most recent
 * entries.  No-op once tracing has been stopped.
 */
static inline void
__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
{
	int tail = trcm->tail;
	struct bfa_trc_s *trc = &trcm->trc[tail];

	if (trcm->stopped)
		return;

	trc->fileno = (u16) fileno;
	trc->line = (u16) line;
	trc->data.u64 = data;
	trc->timestamp = BFA_TRC_TS(trcm);	/* also advances the tick counter */
	dc_flush(trc);		/* cache flush only in FWTRC builds */

	/* mask-based wrap assumes BFA_TRC_MAX is a power of two (default 4K) */
	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
	if (trcm->tail == trcm->head)
		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
	dc_flush(trcm);
}
129
130
131static inline void
132__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
133{
134 int tail = trcm->tail;
135 struct bfa_trc_s *trc = &trcm->trc[tail];
136
137 if (trcm->stopped)
138 return;
139
140 trc->fileno = (u16) fileno;
141 trc->line = (u16) line;
142 trc->data.u32.u32 = data;
143 trc->timestamp = BFA_TRC_TS(trcm);
144 dc_flush(trc);
145
146 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
147 if (trcm->tail == trcm->head)
148 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
149 dc_flush(trcm);
150}
151
152#ifndef BFA_PERF_BUILD
153#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
154#else
155#define bfa_trc_fp(_trcp, _data)
156#endif
157
158/**
159 * @ BFA LOG interfaces
160 */
/*
 * Log an assertion failure (non-fatal).  The format string previously
 * contained "\\n", which printed a literal backslash-n and never
 * terminated the log line; it must be a real newline.
 */
#define bfa_assert(__cond)	do {					\
	if (!(__cond)) {						\
		printk(KERN_ERR "assert(%s) failed at %s:%d\n",		\
			#__cond, __FILE__, __LINE__);			\
	}								\
} while (0)
167
168#define bfa_sm_fault(__mod, __event) do { \
169 bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
170 printk(KERN_ERR "Assertion failure: %s:%d: %d", \
171 __FILE__, __LINE__, (__event)); \
172} while (0)
173
174#ifndef BFA_PERF_BUILD
175#define bfa_assert_fp(__cond) bfa_assert(__cond)
176#else
177#define bfa_assert_fp(__cond)
178#endif
179
180/* BFA queue definitions */
181#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
182#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
183#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
184
185/*
186 * bfa_q_qe_init - to initialize a queue element
187 */
188#define bfa_q_qe_init(_qe) { \
189 bfa_q_next(_qe) = (struct list_head *) NULL; \
190 bfa_q_prev(_qe) = (struct list_head *) NULL; \
191}
192
/*
 * bfa_q_deq - dequeue an element from head of the queue.
 * On success *(_qe) points at the removed element (unlinked and, in
 * debug builds, link-poisoned via BFA_Q_DBG_INIT); on an empty queue
 * *(_qe) is set to NULL.
 */
#define bfa_q_deq(_q, _qe) {						\
	if (!list_empty(_q)) {						\
		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
				(struct list_head *) (_q);		\
		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
	} else {							\
		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
	}								\
}
207
/*
 * bfa_q_deq_tail - dequeue an element from tail of the queue.
 * Mirror image of bfa_q_deq(): on success *(_qe) points at the removed
 * last element; on an empty queue *(_qe) is set to NULL.
 */
#define bfa_q_deq_tail(_q, _qe) {					\
	if (!list_empty(_q)) {						\
		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
			(struct list_head *) (_q);			\
		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
	} else {							\
		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
	}								\
}
222
223static inline int
224bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
225{
226 struct list_head *tqe;
227
228 tqe = bfa_q_next(q);
229 while (tqe != q) {
230 if (tqe == qe)
231 return 1;
232 tqe = bfa_q_next(tqe);
233 if (tqe == NULL)
234 break;
235 }
236 return 0;
237}
238
239/*
240 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
241 * consistent across modules)
242 */
243#ifndef BFA_PERF_BUILD
244#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
245#else
246#define BFA_Q_DBG_INIT(_qe)
247#endif
248
249#define bfa_q_is_on_q(_q, _qe) \
250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
251
252/**
253 * @ BFA state machine interfaces
254 */
255
256typedef void (*bfa_sm_t)(void *sm, int event);
257
258/**
259 * oc - object class eg. bfa_ioc
260 * st - state, eg. reset
261 * otype - object type, eg. struct bfa_ioc_s
262 * etype - object type, eg. enum ioc_event
263 */
264#define bfa_sm_state_decl(oc, st, otype, etype) \
265 static void oc ## _sm_ ## st(otype * fsm, etype event)
266
267#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
268#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
269#define bfa_sm_get_state(_sm) ((_sm)->sm)
270#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
271
272/**
273 * For converting from state machine function to state encoding.
274 */
275struct bfa_sm_table_s {
276 bfa_sm_t sm; /* state machine function */
277 int state; /* state machine encoding */
278 char *name; /* state name for display */
279};
280#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
281
282/**
283 * State machine with entry actions.
284 */
285typedef void (*bfa_fsm_t)(void *fsm, int event);
286
287/**
288 * oc - object class eg. bfa_ioc
289 * st - state, eg. reset
290 * otype - object type, eg. struct bfa_ioc_s
291 * etype - object type, eg. enum ioc_event
292 */
293#define bfa_fsm_state_decl(oc, st, otype, etype) \
294 static void oc ## _sm_ ## st(otype * fsm, etype event); \
295 static void oc ## _sm_ ## st ## _entry(otype * fsm)
296
297#define bfa_fsm_set_state(_fsm, _state) do { \
298 (_fsm)->fsm = (bfa_fsm_t)(_state); \
299 _state ## _entry(_fsm); \
300} while (0)
301
302#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
303#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
304#define bfa_fsm_cmp_state(_fsm, _state) \
305 ((_fsm)->fsm == (bfa_fsm_t)(_state))
306
307static inline int
308bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
309{
310 int i = 0;
311
312 while (smt[i].sm && smt[i].sm != sm)
313 i++;
314 return smt[i].state;
315}
316
/**
 * @ Generic wait counter.
 */

/* Callback invoked once the wait count drops to zero. */
typedef void (*bfa_wc_resume_t) (void *cbarg);

struct bfa_wc_s {
	bfa_wc_resume_t wc_resume;	/* fires when wc_count hits zero */
	void		*wc_cbarg;	/* opaque argument for wc_resume */
	int		wc_count;	/* outstanding references */
};
328
329static inline void
330bfa_wc_up(struct bfa_wc_s *wc)
331{
332 wc->wc_count++;
333}
334
/**
 * Drop one reference on the wait counter; when the count reaches zero,
 * invoke the registered resume callback with its saved argument.
 */
static inline void
bfa_wc_down(struct bfa_wc_s *wc)
{
	wc->wc_count--;
	if (wc->wc_count == 0)
		wc->wc_resume(wc->wc_cbarg);
}
342
343/**
344 * Initialize a waiting counter.
345 */
346static inline void
347bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
348{
349 wc->wc_resume = wc_resume;
350 wc->wc_cbarg = wc_cbarg;
351 wc->wc_count = 0;
352 bfa_wc_up(wc);
353}
354
/**
 * Wait for counter to reach zero.
 *
 * Drops the reference taken by bfa_wc_init(); the resume callback
 * fires (possibly from inside this call) once all users are done.
 */
static inline void
bfa_wc_wait(struct bfa_wc_s *wc)
{
	bfa_wc_down(wc);
}
363
364#endif /* __BFA_CS_H__ */
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
deleted file mode 100644
index caeb1143a4e6..000000000000
--- a/drivers/scsi/bfa/bfa_csdebug.c
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_os_inc.h>
20#include <cs/bfa_q.h>
21#include <log/bfa_log_hal.h>
22
23/**
24 * cs_debug_api
25 */
26
27
28void
29bfa_panic(int line, char *file, char *panicstr)
30{
31 bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
32 bfa_os_panic();
33}
34
35void
36bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
37{
38 bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
39 bfa_os_panic();
40}
41
42int
43bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
44{
45 struct list_head *tqe;
46
47 tqe = bfa_q_next(q);
48 while (tqe != q) {
49 if (tqe == qe)
50 return 1;
51 tqe = bfa_q_next(tqe);
52 if (tqe == NULL)
53 break;
54 }
55 return 0;
56}
57
58
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
new file mode 100644
index 000000000000..d49877ff5140
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -0,0 +1,466 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_H__
19#define __BFA_DEFS_H__
20
21#include "bfa_fc.h"
22#include "bfa_os_inc.h"
23
24#define BFA_MFG_SERIALNUM_SIZE 11
25#define STRSZ(_n) (((_n) + 4) & ~3)
26
27/**
28 * Manufacturing card type
29 */
30enum {
31 BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
32 BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
33 BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
34 BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
35 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
36 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
37 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
38 BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
39 BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
40 BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */
41 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
42 BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */
43 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
44};
45
46#pragma pack(1)
47
48/**
49 * Check if Mezz card
50 */
51#define bfa_mfg_is_mezz(type) (( \
52 (type) == BFA_MFG_TYPE_JAYHAWK || \
53 (type) == BFA_MFG_TYPE_WANCHESE || \
54 (type) == BFA_MFG_TYPE_ASTRA || \
55 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
56 (type) == BFA_MFG_TYPE_LIGHTNING))
57
58/**
59 * Check if the card having old wwn/mac handling
60 */
61#define bfa_mfg_is_old_wwn_mac_model(type) (( \
62 (type) == BFA_MFG_TYPE_FC8P2 || \
63 (type) == BFA_MFG_TYPE_FC8P1 || \
64 (type) == BFA_MFG_TYPE_FC4P2 || \
65 (type) == BFA_MFG_TYPE_FC4P1 || \
66 (type) == BFA_MFG_TYPE_CNA10P2 || \
67 (type) == BFA_MFG_TYPE_CNA10P1 || \
68 (type) == BFA_MFG_TYPE_JAYHAWK || \
69 (type) == BFA_MFG_TYPE_WANCHESE))
70
/*
 * Add (i) to the last three octets of wwn/mac byte array (m), treating
 * them as one 24-bit big-endian counter; overflow beyond 24 bits is
 * discarded on write-back.
 */
#define bfa_mfg_increment_wwn_mac(m, i)                         \
do {                                                            \
	u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
		(u32)(m)[2];                                    \
	t += (i);                                               \
	(m)[0] = (t >> 16) & 0xFF;                              \
	(m)[1] = (t >> 8) & 0xFF;                               \
	(m)[2] = t & 0xFF;                                      \
} while (0)
80
81/**
82 * VPD data length
83 */
84#define BFA_MFG_VPD_LEN 512
85
86/**
87 * VPD vendor tag
88 */
89enum {
90 BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
91 BFA_MFG_VPD_IBM = 1, /* vendor IBM */
92 BFA_MFG_VPD_HP = 2, /* vendor HP */
93 BFA_MFG_VPD_DELL = 3, /* vendor DELL */
94 BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
95 BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
96 BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
97 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
98};
99
100/**
101 * All numerical fields are in big-endian format.
102 */
103struct bfa_mfg_vpd_s {
104 u8 version; /* vpd data version */
105 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
106 u8 chksum; /* u8 checksum */
107 u8 vendor; /* vendor */
108 u8 len; /* vpd data length excluding header */
109 u8 rsv;
110 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
111};
112
113#pragma pack()
114
115/**
116 * Status return values
117 */
118enum bfa_status {
119 BFA_STATUS_OK = 0, /* Success */
120 BFA_STATUS_FAILED = 1, /* Operation failed */
121 BFA_STATUS_EINVAL = 2, /* Invalid params Check input
122 * parameters */
123 BFA_STATUS_ENOMEM = 3, /* Out of resources */
124 BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
125 * contact support */
126 BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
127 BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
128 BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
129 BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
130 BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
131 BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */
132 BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */
133 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
134 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
135 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
136 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
137 BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
138 * contact support */
139 BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
140 BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
141 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
142 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
143 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
144 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
145 * configuration */
146 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
147 * this adapter */
148 BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
149 * the adapter */
150 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
151 BFA_STATUS_MAX_VAL /* Unknown error code */
152};
153#define bfa_status_t enum bfa_status
154
155enum bfa_eproto_status {
156 BFA_EPROTO_BAD_ACCEPT = 0,
157 BFA_EPROTO_UNKNOWN_RSP = 1
158};
159#define bfa_eproto_status_t enum bfa_eproto_status
160
161enum bfa_boolean {
162 BFA_FALSE = 0,
163 BFA_TRUE = 1
164};
165#define bfa_boolean_t enum bfa_boolean
166
167#define BFA_STRING_32 32
168#define BFA_VERSION_LEN 64
169
170/**
171 * ---------------------- adapter definitions ------------
172 */
173
174/**
175 * BFA adapter level attributes.
176 */
177enum {
178 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
179 /*
180 *!< adapter serial num length
181 */
182 BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
183 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
184 BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
185 BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
186 BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
187};
188
189struct bfa_adapter_attr_s {
190 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
191 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
192 u32 card_type;
193 char model[BFA_ADAPTER_MODEL_NAME_LEN];
194 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
195 wwn_t pwwn;
196 char node_symname[FC_SYMNAME_MAX];
197 char hw_ver[BFA_VERSION_LEN];
198 char fw_ver[BFA_VERSION_LEN];
199 char optrom_ver[BFA_VERSION_LEN];
200 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
201 struct bfa_mfg_vpd_s vpd;
202 struct mac_s mac;
203
204 u8 nports;
205 u8 max_speed;
206 u8 prototype;
207 char asic_rev;
208
209 u8 pcie_gen;
210 u8 pcie_lanes_orig;
211 u8 pcie_lanes;
212 u8 cna_capable;
213
214 u8 is_mezz;
215 u8 trunk_capable;
216};
217
218/**
219 * ---------------------- IOC definitions ------------
220 */
221
222enum {
223 BFA_IOC_DRIVER_LEN = 16,
224 BFA_IOC_CHIP_REV_LEN = 8,
225};
226
227/**
228 * Driver and firmware versions.
229 */
230struct bfa_ioc_driver_attr_s {
231 char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
232 char driver_ver[BFA_VERSION_LEN]; /* driver version */
233 char fw_ver[BFA_VERSION_LEN]; /* firmware version */
234 char bios_ver[BFA_VERSION_LEN]; /* bios version */
235 char efi_ver[BFA_VERSION_LEN]; /* EFI version */
236 char ob_ver[BFA_VERSION_LEN]; /* openboot version */
237};
238
239/**
240 * IOC PCI device attributes
241 */
242struct bfa_ioc_pci_attr_s {
243 u16 vendor_id; /* PCI vendor ID */
244 u16 device_id; /* PCI device ID */
245 u16 ssid; /* subsystem ID */
246 u16 ssvid; /* subsystem vendor ID */
247 u32 pcifn; /* PCI device function */
248 u32 rsvd; /* padding */
249 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
250};
251
252/**
253 * IOC states
254 */
255enum bfa_ioc_state {
256 BFA_IOC_UNINIT = 1, /* IOC is in uninit state */
257 BFA_IOC_RESET = 2, /* IOC is in reset state */
258 BFA_IOC_SEMWAIT = 3, /* Waiting for IOC h/w semaphore */
259 BFA_IOC_HWINIT = 4, /* IOC h/w is being initialized */
260 BFA_IOC_GETATTR = 5, /* IOC is being configured */
261 BFA_IOC_OPERATIONAL = 6, /* IOC is operational */
262 BFA_IOC_INITFAIL = 7, /* IOC hardware failure */
263 BFA_IOC_FAIL = 8, /* IOC heart-beat failure */
264 BFA_IOC_DISABLING = 9, /* IOC is being disabled */
265 BFA_IOC_DISABLED = 10, /* IOC is disabled */
266 BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */
267 BFA_IOC_ENABLING = 12, /* IOC is being enabled */
268};
269
270/**
271 * IOC firmware stats
272 */
273struct bfa_fw_ioc_stats_s {
274 u32 enable_reqs;
275 u32 disable_reqs;
276 u32 get_attr_reqs;
277 u32 dbg_sync;
278 u32 dbg_dump;
279 u32 unknown_reqs;
280};
281
282/**
283 * IOC driver stats
284 */
285struct bfa_ioc_drv_stats_s {
286 u32 ioc_isrs;
287 u32 ioc_enables;
288 u32 ioc_disables;
289 u32 ioc_hbfails;
290 u32 ioc_boots;
291 u32 stats_tmos;
292 u32 hb_count;
293 u32 disable_reqs;
294 u32 enable_reqs;
295 u32 disable_replies;
296 u32 enable_replies;
297};
298
299/**
300 * IOC statistics
301 */
302struct bfa_ioc_stats_s {
303 struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
304 struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
305};
306
307enum bfa_ioc_type_e {
308 BFA_IOC_TYPE_FC = 1,
309 BFA_IOC_TYPE_FCoE = 2,
310 BFA_IOC_TYPE_LL = 3,
311};
312
313/**
314 * IOC attributes returned in queries
315 */
316struct bfa_ioc_attr_s {
317 enum bfa_ioc_type_e ioc_type;
318 enum bfa_ioc_state state; /* IOC state */
319 struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
320 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
321 struct bfa_ioc_pci_attr_s pci_attr;
322 u8 port_id; /* port number */
323 u8 rsvd[7]; /* 64bit align */
324};
325
326/**
327 * ---------------------- mfg definitions ------------
328 */
329
330/**
331 * Checksum size
332 */
333#define BFA_MFG_CHKSUM_SIZE 16
334
335#define BFA_MFG_PARTNUM_SIZE 14
336#define BFA_MFG_SUPPLIER_ID_SIZE 10
337#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
338#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
339#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
340
341#pragma pack(1)
342
343/**
344 * All numerical fields are in big-endian format.
345 */
346struct bfa_mfg_block_s {
347 u8 version; /* manufacturing block version */
348 u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
349 u16 mfgsize; /* mfg block size */
350 u16 u16_chksum; /* old u16 checksum */
351 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
352 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
353 u8 mfg_day; /* manufacturing day */
354 u8 mfg_month; /* manufacturing month */
355 u16 mfg_year; /* manufacturing year */
356 wwn_t mfg_wwn; /* wwn base for this adapter */
357 u8 num_wwn; /* number of wwns assigned */
358 u8 mfg_speeds; /* speeds allowed for this adapter */
359 u8 rsv[2];
360 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
361 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
362 char
363 supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
364 char
365 supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
366 mac_t mfg_mac; /* mac address */
367 u8 num_mac; /* number of mac addresses */
368 u8 rsv2;
369 u32 mfg_type; /* card type */
370 u8 rsv3[108];
371 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
372};
373
374#pragma pack()
375
376/**
377 * ---------------------- pci definitions ------------
378 */
379
380/**
381 * PCI device and vendor ID information
382 */
383enum {
384 BFA_PCI_VENDOR_ID_BROCADE = 0x1657,
385 BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
386 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
387 BFA_PCI_DEVICE_ID_CT = 0x14,
388 BFA_PCI_DEVICE_ID_CT_FC = 0x21,
389};
390
391#define bfa_asic_id_ct(devid) \
392 ((devid) == BFA_PCI_DEVICE_ID_CT || \
393 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
394
395/**
396 * PCI sub-system device and vendor ID information
397 */
398enum {
399 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
400};
401
402/**
403 * Maximum number of device address ranges mapped through different BAR(s)
404 */
405#define BFA_PCI_ACCESS_RANGES 1
406
407/*
408 * Port speed settings. Each specific speed is a bit field. Use multiple
409 * bits to specify speeds to be selected for auto-negotiation.
410 */
411enum bfa_port_speed {
412 BFA_PORT_SPEED_UNKNOWN = 0,
413 BFA_PORT_SPEED_1GBPS = 1,
414 BFA_PORT_SPEED_2GBPS = 2,
415 BFA_PORT_SPEED_4GBPS = 4,
416 BFA_PORT_SPEED_8GBPS = 8,
417 BFA_PORT_SPEED_10GBPS = 10,
418 BFA_PORT_SPEED_16GBPS = 16,
419 BFA_PORT_SPEED_AUTO =
420 (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
421 BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
422};
423#define bfa_port_speed_t enum bfa_port_speed
424
425enum {
426 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
427 BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
428};
429
430#define BOOT_CFG_REV1 1
431#define BOOT_CFG_VLAN 1
432
433/**
434 * Boot options setting. Boot options setting determines from where
435 * to get the boot lun information
436 */
437enum bfa_boot_bootopt {
438 BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
439 BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
440 BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
441 BFA_BOOT_PBC = 3, /* Boot from pbc configured blun */
442};
443
444#pragma pack(1)
445/**
446 * Boot lun information.
447 */
448struct bfa_boot_bootlun_s {
449 wwn_t pwwn; /* port wwn of target */
450 lun_t lun; /* 64-bit lun */
451};
452#pragma pack()
453
454/**
455 * BOOT boot configuraton
456 */
457struct bfa_boot_pbc_s {
458 u8 enable; /* enable/disable SAN boot */
459 u8 speed; /* boot speed settings */
460 u8 topology; /* boot topology setting */
461 u8 rsvd1;
462 u32 nbluns; /* number of boot luns */
463 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
464};
465
466#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
new file mode 100644
index 000000000000..96905d301828
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -0,0 +1,457 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_FCS_H__
19#define __BFA_DEFS_FCS_H__
20
21#include "bfa_fc.h"
22#include "bfa_defs_svc.h"
23
24/**
25 * VF states
26 */
27enum bfa_vf_state {
28 BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
29 BFA_VF_LINK_DOWN = 1, /* link is down */
30 BFA_VF_FLOGI = 2, /* flogi is in progress */
31 BFA_VF_AUTH = 3, /* authentication in progress */
32 BFA_VF_NOFABRIC = 4, /* fabric is not present */
33 BFA_VF_ONLINE = 5, /* login to fabric is complete */
34 BFA_VF_EVFP = 6, /* EVFP is in progress */
35 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
36};
37
38/**
39 * VF statistics
40 */
41struct bfa_vf_stats_s {
42 u32 flogi_sent; /* Num FLOGIs sent */
43 u32 flogi_rsp_err; /* FLOGI response errors */
44 u32 flogi_acc_err; /* FLOGI accept errors */
45 u32 flogi_accepts; /* FLOGI accepts received */
46 u32 flogi_rejects; /* FLOGI rejects received */
47 u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
48 u32 flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */
49 u32 flogi_rcvd; /* FLOGIs received */
50 u32 flogi_rejected; /* Incoming FLOGIs rejected */
51 u32 fabric_onlines; /* Internal fabric online notification sent
52 * to other modules */
53 u32 fabric_offlines; /* Internal fabric offline notification sent
54 * to other modules */
55 u32 resvd; /* padding for 64 bit alignment */
56};
57
58/**
59 * VF attributes returned in queries
60 */
61struct bfa_vf_attr_s {
62 enum bfa_vf_state state; /* VF state */
63 u32 rsvd;
64 wwn_t fabric_name; /* fabric name */
65};
66
67#define BFA_FCS_MAX_LPORTS 256
68#define BFA_FCS_FABRIC_IPADDR_SZ 16
69
70/**
71 * symbolic names for base port/virtual port
72 */
73#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
74struct bfa_lport_symname_s {
75 char symname[BFA_SYMNAME_MAXLEN];
76};
77
78/**
79* Roles of FCS port:
80 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
81 * - Create multiple ports if both IM and TM functions required.
82 * - Atleast one role must be specified.
83 */
84enum bfa_lport_role {
85 BFA_LPORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
86 BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
87};
88
89/**
90 * FCS port configuration.
91 */
92struct bfa_lport_cfg_s {
93 wwn_t pwwn; /* port wwn */
94 wwn_t nwwn; /* node wwn */
95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
96 bfa_boolean_t preboot_vp; /* vport created from PBC */
97 enum bfa_lport_role roles; /* FCS port roles */
98 u8 tag[16]; /* opaque tag from application */
99};
100
101/**
102 * FCS port states
103 */
104enum bfa_lport_state {
105 BFA_LPORT_UNINIT = 0, /* PORT is not yet initialized */
106 BFA_LPORT_FDISC = 1, /* FDISC is in progress */
107 BFA_LPORT_ONLINE = 2, /* login to fabric is complete */
108 BFA_LPORT_OFFLINE = 3, /* No login to fabric */
109};
110
111/**
112 * FCS port type.
113 */
114enum bfa_lport_type {
115 BFA_LPORT_TYPE_PHYSICAL = 0,
116 BFA_LPORT_TYPE_VIRTUAL,
117};
118
119/**
120 * FCS port offline reason.
121 */
122enum bfa_lport_offline_reason {
123 BFA_LPORT_OFFLINE_UNKNOWN = 0,
124 BFA_LPORT_OFFLINE_LINKDOWN,
125 BFA_LPORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
126 * fabric */
127 BFA_LPORT_OFFLINE_FAB_NORESOURCES,
128 BFA_LPORT_OFFLINE_FAB_LOGOUT,
129};
130
131/**
132 * FCS lport info.
133 */
134struct bfa_lport_info_s {
135 u8 port_type; /* bfa_lport_type_t : physical or
136 * virtual */
137 u8 port_state; /* one of bfa_lport_state values */
138 u8 offline_reason; /* one of bfa_lport_offline_reason_t
139 * values */
140 wwn_t port_wwn;
141 wwn_t node_wwn;
142
143 /*
144 * following 4 feilds are valid for Physical Ports only
145 */
146 u32 max_vports_supp; /* Max supported vports */
147 u32 num_vports_inuse; /* Num of in use vports */
148 u32 max_rports_supp; /* Max supported rports */
149 u32 num_rports_inuse; /* Num of doscovered rports */
150
151};
152
153/**
154 * FCS port statistics
155 */
156struct bfa_lport_stats_s {
157 u32 ns_plogi_sent;
158 u32 ns_plogi_rsp_err;
159 u32 ns_plogi_acc_err;
160 u32 ns_plogi_accepts;
161 u32 ns_rejects; /* NS command rejects */
162 u32 ns_plogi_unknown_rsp;
163 u32 ns_plogi_alloc_wait;
164
165 u32 ns_retries; /* NS command retries */
166 u32 ns_timeouts; /* NS command timeouts */
167
168 u32 ns_rspnid_sent;
169 u32 ns_rspnid_accepts;
170 u32 ns_rspnid_rsp_err;
171 u32 ns_rspnid_rejects;
172 u32 ns_rspnid_alloc_wait;
173
174 u32 ns_rftid_sent;
175 u32 ns_rftid_accepts;
176 u32 ns_rftid_rsp_err;
177 u32 ns_rftid_rejects;
178 u32 ns_rftid_alloc_wait;
179
180 u32 ns_rffid_sent;
181 u32 ns_rffid_accepts;
182 u32 ns_rffid_rsp_err;
183 u32 ns_rffid_rejects;
184 u32 ns_rffid_alloc_wait;
185
186 u32 ns_gidft_sent;
187 u32 ns_gidft_accepts;
188 u32 ns_gidft_rsp_err;
189 u32 ns_gidft_rejects;
190 u32 ns_gidft_unknown_rsp;
191 u32 ns_gidft_alloc_wait;
192
193 /*
194 * Mgmt Server stats
195 */
196 u32 ms_retries; /* MS command retries */
197 u32 ms_timeouts; /* MS command timeouts */
198 u32 ms_plogi_sent;
199 u32 ms_plogi_rsp_err;
200 u32 ms_plogi_acc_err;
201 u32 ms_plogi_accepts;
202 u32 ms_rejects; /* MS command rejects */
203 u32 ms_plogi_unknown_rsp;
204 u32 ms_plogi_alloc_wait;
205
206 u32 num_rscn; /* Num of RSCN received */
207 u32 num_portid_rscn;/* Num portid format RSCN
208 * received */
209
210 u32 uf_recvs; /* Unsolicited recv frames */
211 u32 uf_recv_drops; /* Dropped received frames */
212
213 u32 plogi_rcvd; /* Received plogi */
214 u32 prli_rcvd; /* Received prli */
215 u32 adisc_rcvd; /* Received adisc */
216 u32 prlo_rcvd; /* Received prlo */
217 u32 logo_rcvd; /* Received logo */
218 u32 rpsc_rcvd; /* Received rpsc */
219 u32 un_handled_els_rcvd; /* Received unhandled ELS */
220 u32 rport_plogi_timeouts; /* Rport plogi retry timeout count */
221 u32 rport_del_max_plogi_retry; /* Deleted rport
222 * (max retry of plogi) */
223};
224
225/**
226 * BFA port attribute returned in queries
227 */
228struct bfa_lport_attr_s {
229 enum bfa_lport_state state; /* port state */
230 u32 pid; /* port ID */
231 struct bfa_lport_cfg_s port_cfg; /* port configuration */
232 enum bfa_port_type port_type; /* current topology */
233 u32 loopback; /* cable is externally looped back */
234 wwn_t fabric_name; /* attached switch's nwwn */
235 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
236 * fabric's ip addr */
237 mac_t fpma_mac; /* Lport's FPMA Mac address */
238 u16 authfail; /* auth failed state */
239};
240
241
242/**
243 * VPORT states
244 */
245enum bfa_vport_state {
246 BFA_FCS_VPORT_UNINIT = 0,
247 BFA_FCS_VPORT_CREATED = 1,
248 BFA_FCS_VPORT_OFFLINE = 1,
249 BFA_FCS_VPORT_FDISC_SEND = 2,
250 BFA_FCS_VPORT_FDISC = 3,
251 BFA_FCS_VPORT_FDISC_RETRY = 4,
252 BFA_FCS_VPORT_ONLINE = 5,
253 BFA_FCS_VPORT_DELETING = 6,
254 BFA_FCS_VPORT_CLEANUP = 6,
255 BFA_FCS_VPORT_LOGO_SEND = 7,
256 BFA_FCS_VPORT_LOGO = 8,
257 BFA_FCS_VPORT_ERROR = 9,
258 BFA_FCS_VPORT_MAX_STATE,
259};
260
261/**
262 * vport statistics
263 */
264struct bfa_vport_stats_s {
265 struct bfa_lport_stats_s port_stats; /* base class (port) stats */
266 /*
267 * TODO - remove
268 */
269
270 u32 fdisc_sent; /* num fdisc sent */
271 u32 fdisc_accepts; /* fdisc accepts */
272 u32 fdisc_retries; /* fdisc retries */
273 u32 fdisc_timeouts; /* fdisc timeouts */
274 u32 fdisc_rsp_err; /* fdisc response error */
275 u32 fdisc_acc_bad; /* bad fdisc accepts */
276 u32 fdisc_rejects; /* fdisc rejects */
277 u32 fdisc_unknown_rsp;
278 /*
279 *!< fdisc rsp unknown error
280 */
281 u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
282
283 u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
284 u32 logo_sent; /* logo sent */
285 u32 logo_accepts; /* logo accepts */
286 u32 logo_rejects; /* logo rejects */
287 u32 logo_rsp_err; /* logo rsp errors */
288 u32 logo_unknown_rsp;
289 /* logo rsp unknown errors */
290
291 u32 fab_no_npiv; /* fabric does not support npiv */
292
293 u32 fab_offline; /* offline events from fab SM */
294 u32 fab_online; /* online events from fab SM */
295 u32 fab_cleanup; /* cleanup request from fab SM */
296 u32 rsvd;
297};
298
299/**
300 * BFA vport attribute returned in queries
301 */
302struct bfa_vport_attr_s {
303 struct bfa_lport_attr_s port_attr; /* base class (port) attributes */
304 enum bfa_vport_state vport_state; /* vport state */
305 u32 rsvd;
306};
307
308/**
309 * FCS remote port states
310 */
311enum bfa_rport_state {
312 BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */
313 BFA_RPORT_OFFLINE = 1, /* rport is offline */
314 BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */
315 BFA_RPORT_ONLINE = 3, /* login to rport is complete */
316 BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */
317 BFA_RPORT_NSQUERY = 5, /* nameserver query */
318 BFA_RPORT_ADISC = 6, /* ADISC authentication */
319 BFA_RPORT_LOGO = 7, /* logging out with rport */
320 BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */
321 BFA_RPORT_NSDISC = 9, /* re-discover rport */
322};
323
324/**
325 * Rport Scsi Function : Initiator/Target.
326 */
327enum bfa_rport_function {
328 BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */
329 BFA_RPORT_TARGET = 0x02, /* SCSI Target */
330};
331
332/**
333 * port/node symbolic names for rport
334 */
335#define BFA_RPORT_SYMNAME_MAXLEN 255
336struct bfa_rport_symname_s {
337 char symname[BFA_RPORT_SYMNAME_MAXLEN];
338};
339
340/**
341 * FCS remote port statistics
342 */
343struct bfa_rport_stats_s {
344 u32 offlines; /* remote port offline count */
345 u32 onlines; /* remote port online count */
346 u32 rscns; /* RSCN affecting rport */
347 u32 plogis; /* plogis sent */
348 u32 plogi_accs; /* plogi accepts */
349 u32 plogi_timeouts; /* plogi timeouts */
350 u32 plogi_rejects; /* rcvd plogi rejects */
351 u32 plogi_failed; /* local failure */
352 u32 plogi_rcvd; /* plogis rcvd */
353 u32 prli_rcvd; /* inbound PRLIs */
354 u32 adisc_rcvd; /* ADISCs received */
355 u32 adisc_rejects; /* recvd ADISC rejects */
356 u32 adisc_sent; /* ADISC requests sent */
357 u32 adisc_accs; /* ADISC accepted by rport */
358 u32 adisc_failed; /* ADISC failed (no response) */
359 u32 adisc_rejected; /* ADISC rejected by us */
360 u32 logos; /* logos sent */
361 u32 logo_accs; /* LOGO accepts from rport */
362 u32 logo_failed; /* LOGO failures */
363 u32 logo_rejected; /* LOGO rejects from rport */
364 u32 logo_rcvd; /* LOGO from remote port */
365
366 u32 rpsc_rcvd; /* RPSC received */
367 u32 rpsc_rejects; /* recvd RPSC rejects */
368 u32 rpsc_sent; /* RPSC requests sent */
369 u32 rpsc_accs; /* RPSC accepted by rport */
370 u32 rpsc_failed; /* RPSC failed (no response) */
371 u32 rpsc_rejected; /* RPSC rejected by us */
372
373 u32 rjt_insuff_res; /* LS RJT with insuff resources */
374 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
375};
376
377/**
378 * FCS remote port attributes returned in queries
379 */
380struct bfa_rport_attr_s {
381 wwn_t nwwn; /* node wwn */
382 wwn_t pwwn; /* port wwn */
383 enum fc_cos cos_supported; /* supported class of services */
384 u32 pid; /* port ID */
385 u32 df_sz; /* Max payload size */
386 enum bfa_rport_state state; /* Rport State machine state */
387 enum fc_cos fc_cos; /* FC classes of services */
388 bfa_boolean_t cisc; /* CISC capable device */
389 struct bfa_rport_symname_s symname; /* Symbolic Name */
390 enum bfa_rport_function scsi_function; /* Initiator/Target */
391 struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
392 enum bfa_port_speed curr_speed; /* operating speed got from
393 * RPSC ELS. UNKNOWN, if RPSC
394 * is not supported */
395 bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */
396 enum bfa_port_speed assigned_speed; /* Speed assigned by the user.
397 * will be used if RPSC is not
398 * supported by the rport */
399};
400
401struct bfa_rport_remote_link_stats_s {
402 u32 lfc; /* Link Failure Count */
403 u32 lsyc; /* Loss of Synchronization Count */
404 u32 lsic; /* Loss of Signal Count */
405 u32 pspec; /* Primitive Sequence Protocol Error Count */
406 u32 itwc; /* Invalid Transmission Word Count */
407 u32 icc; /* Invalid CRC Count */
408};
409
410
411#define BFA_MAX_IO_INDEX 7
412#define BFA_NO_IO_INDEX 9
413
414/**
415 * FCS itnim states
416 */
417enum bfa_itnim_state {
418 BFA_ITNIM_OFFLINE = 0, /* offline */
419 BFA_ITNIM_PRLI_SEND = 1, /* prli send */
420 BFA_ITNIM_PRLI_SENT = 2, /* prli sent */
421 BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */
422 BFA_ITNIM_HCB_ONLINE = 4, /* online callback */
423 BFA_ITNIM_ONLINE = 5, /* online */
424 BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */
425 BFA_ITNIM_INITIATIOR = 7, /* initiator */
426};
427
428/**
429 * FCS remote port statistics
430 */
431struct bfa_itnim_stats_s {
432 u32 onlines; /* num rport online */
433 u32 offlines; /* num rport offline */
434 u32 prli_sent; /* num prli sent out */
435 u32 fcxp_alloc_wait;/* num fcxp alloc waits */
436 u32 prli_rsp_err; /* num prli rsp errors */
437 u32 prli_rsp_acc; /* num prli rsp accepts */
438 u32 initiator; /* rport is an initiator */
439 u32 prli_rsp_parse_err; /* prli rsp parsing errors */
440 u32 prli_rsp_rjt; /* num prli rsp rejects */
441 u32 timeout; /* num timeouts detected */
442 u32 sler; /* num sler notification from BFA */
443 u32 rsvd; /* padding for 64 bit alignment */
444};
445
446/**
447 * FCS itnim attributes returned in queries
448 */
449struct bfa_itnim_attr_s {
450 enum bfa_itnim_state state; /* FCS itnim state */
451 u8 retry; /* data retransmision support */
452 u8 task_retry_id; /* task retry ident support */
453 u8 rec_support; /* REC supported */
454 u8 conf_comp; /* confirmed completion supp */
455};
456
457#endif /* __BFA_DEFS_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
new file mode 100644
index 000000000000..56226fcf9470
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -0,0 +1,1081 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_SVC_H__
19#define __BFA_DEFS_SVC_H__
20
21#include "bfa_defs.h"
22#include "bfa_fc.h"
23#include "bfi.h"
24
25#define BFA_IOCFC_INTR_DELAY 1125
26#define BFA_IOCFC_INTR_LATENCY 225
27#define BFA_IOCFCOE_INTR_DELAY 25
28#define BFA_IOCFCOE_INTR_LATENCY 5
29
30/**
31 * Interrupt coalescing configuration.
32 */
33#pragma pack(1)
34struct bfa_iocfc_intr_attr_s {
35 u8 coalesce; /* enable/disable coalescing */
36 u8 rsvd[3];
37 u16 latency; /* latency in microseconds */
38 u16 delay; /* delay in microseconds */
39};
40
41/**
42 * IOC firmware configuraton
43 */
44struct bfa_iocfc_fwcfg_s {
45 u16 num_fabrics; /* number of fabrics */
46 u16 num_lports; /* number of local lports */
47 u16 num_rports; /* number of remote ports */
48 u16 num_ioim_reqs; /* number of IO reqs */
49 u16 num_tskim_reqs; /* task management requests */
50 u16 num_iotm_reqs; /* number of TM IO reqs */
51 u16 num_tsktm_reqs; /* TM task management requests*/
52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
53 u16 num_uf_bufs; /* unsolicited recv buffers */
54 u8 num_cqs;
55 u8 fw_tick_res; /* FW clock resolution in ms */
56 u8 rsvd[4];
57};
58#pragma pack()
59
60struct bfa_iocfc_drvcfg_s {
61 u16 num_reqq_elems; /* number of req queue elements */
62 u16 num_rspq_elems; /* number of rsp queue elements */
63 u16 num_sgpgs; /* number of total SG pages */
64 u16 num_sboot_tgts; /* number of SAN boot targets */
65 u16 num_sboot_luns; /* number of SAN boot luns */
66 u16 ioc_recover; /* IOC recovery mode */
67 u16 min_cfg; /* minimum configuration */
68 u16 path_tov; /* device path timeout */
69 bfa_boolean_t delay_comp; /* delay completion of
70 failed inflight IOs */
71 u32 rsvd;
72};
73
74/**
75 * IOC configuration
76 */
77struct bfa_iocfc_cfg_s {
78 struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
79 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
80};
81
82/**
83 * IOC firmware IO stats
84 */
85struct bfa_fw_io_stats_s {
86 u32 host_abort; /* IO aborted by host driver*/
87 u32 host_cleanup; /* IO clean up by host driver */
88
89 u32 fw_io_timeout; /* IOs timedout */
90 u32 fw_frm_parse; /* frame parsed by f/w */
91 u32 fw_frm_data; /* fcp_data frame parsed by f/w */
92 u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
93 u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
94 u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
95 u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
96 u32 fw_frm_unknown; /* unknown parsed by f/w */
97 u32 fw_data_dma; /* f/w DMA'ed the data frame */
98 u32 fw_frm_drop; /* f/w drop the frame */
99
100 u32 rec_timeout; /* FW rec timed out */
101 u32 error_rec; /* FW sending rec on
102 * an error condition*/
103 u32 wait_for_si; /* FW wait for SI */
104 u32 rec_rsp_inval; /* REC rsp invalid */
105 u32 seqr_io_abort; /* target does not know cmd so abort */
106 u32 seqr_io_retry; /* SEQR failed so retry IO */
107
108 u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
109 u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
110 u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
111
112 u32 fcp_data_lost; /* fcp data lost */
113
114 u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
115 u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
116 u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
117
118 u32 io_abort_timeout; /* ABTS timedout */
119 u32 sler_initiated; /* SLER initiated */
120
121 u32 unexp_fcp_rsp; /* fcp response in wrong state */
122
123 u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
124 u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
125 u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
126 u32 fcp_rsp_resid_inval; /* invalid residue */
127 u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
128 u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
129 u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
130 u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
131 u32 fcp_conf_req; /* FCP conf requested */
132
133 u32 tgt_aborted_io; /* target initiated abort */
134
135 u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
136 u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
137 u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
138 u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
139 u32 ioh_hit_class2_event; /* IOH hit class2 */
140 u32 ioh_miss_other_event; /* IOH miss other */
141 u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
142 u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
143 * bytes xfered */
144 u32 ioh_seq_len_err_event; /* IOH seq len error */
145 u32 ioh_data_oor_event; /* Data out of range */
146 u32 ioh_ro_ooo_event; /* Relative offset out of range */
147 u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
148 u32 ioh_unexp_frame_event; /* unexpected frame recieved
149 * count */
150 u32 ioh_err_int; /* IOH error int during data-phase
151 * for scsi write
152 */
153};
154
155/**
156 * IOC port firmware stats
157 */
158
159struct bfa_fw_port_fpg_stats_s {
160 u32 intr_evt;
161 u32 intr;
162 u32 intr_excess;
163 u32 intr_cause0;
164 u32 intr_other;
165 u32 intr_other_ign;
166 u32 sig_lost;
167 u32 sig_regained;
168 u32 sync_lost;
169 u32 sync_to;
170 u32 sync_regained;
171 u32 div2_overflow;
172 u32 div2_underflow;
173 u32 efifo_overflow;
174 u32 efifo_underflow;
175 u32 idle_rx;
176 u32 lrr_rx;
177 u32 lr_rx;
178 u32 ols_rx;
179 u32 nos_rx;
180 u32 lip_rx;
181 u32 arbf0_rx;
182 u32 arb_rx;
183 u32 mrk_rx;
184 u32 const_mrk_rx;
185 u32 prim_unknown;
186};
187
188
189struct bfa_fw_port_lksm_stats_s {
190 u32 hwsm_success; /* hwsm state machine success */
191 u32 hwsm_fails; /* hwsm fails */
192 u32 hwsm_wdtov; /* hwsm timed out */
193 u32 swsm_success; /* swsm success */
194 u32 swsm_fails; /* swsm fails */
195 u32 swsm_wdtov; /* swsm timed out */
196 u32 busybufs; /* link init failed due to busybuf */
197 u32 buf_waits; /* bufwait state entries */
198 u32 link_fails; /* link failures */
199 u32 psp_errors; /* primitive sequence protocol errors */
200 u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
201 u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
202 u32 lr_tx; /* No. of times LR tx started */
203 u32 lrr_tx; /* No. of times LRR tx started */
204 u32 ols_tx; /* No. of times OLS tx started */
205 u32 nos_tx; /* No. of times NOS tx started */
206 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
207 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
208};
209
210struct bfa_fw_port_snsm_stats_s {
211 u32 hwsm_success; /* Successful hwsm terminations */
212 u32 hwsm_fails; /* hwsm fail count */
213 u32 hwsm_wdtov; /* hwsm timed out */
214 u32 swsm_success; /* swsm success */
215 u32 swsm_wdtov; /* swsm timed out */
216 u32 error_resets; /* error resets initiated by upsm */
217 u32 sync_lost; /* Sync loss count */
218 u32 sig_lost; /* Signal loss count */
219};
220
221struct bfa_fw_port_physm_stats_s {
222 u32 module_inserts; /* Module insert count */
223 u32 module_xtracts; /* Module extracts count */
224 u32 module_invalids; /* Invalid module inserted count */
225 u32 module_read_ign; /* Module validation status ignored */
226 u32 laser_faults; /* Laser fault count */
227 u32 rsvd;
228};
229
230struct bfa_fw_fip_stats_s {
231 u32 vlan_req; /* vlan discovery requests */
232 u32 vlan_notify; /* vlan notifications */
233 u32 vlan_err; /* vlan response error */
234 u32 vlan_timeouts; /* vlan disvoery timeouts */
235 u32 vlan_invalids; /* invalid vlan in discovery advert. */
236 u32 disc_req; /* Discovery solicit requests */
237 u32 disc_rsp; /* Discovery solicit response */
238 u32 disc_err; /* Discovery advt. parse errors */
239 u32 disc_unsol; /* Discovery unsolicited */
240 u32 disc_timeouts; /* Discovery timeouts */
241 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
242 u32 linksvc_unsupp; /* Unsupported link service req */
243 u32 linksvc_err; /* Parse error in link service req */
244 u32 logo_req; /* FIP logos received */
245 u32 clrvlink_req; /* Clear virtual link req */
246 u32 op_unsupp; /* Unsupported FIP operation */
247 u32 untagged; /* Untagged frames (ignored) */
248 u32 invalid_version; /* Invalid FIP version */
249};
250
251struct bfa_fw_lps_stats_s {
252 u32 mac_invalids; /* Invalid mac assigned */
253 u32 rsvd;
254};
255
256struct bfa_fw_fcoe_stats_s {
257 u32 cee_linkups; /* CEE link up count */
258 u32 cee_linkdns; /* CEE link down count */
259 u32 fip_linkups; /* FIP link up count */
260 u32 fip_linkdns; /* FIP link up count */
261 u32 fip_fails; /* FIP fail count */
262 u32 mac_invalids; /* Invalid mac assigned */
263};
264
265/**
266 * IOC firmware FCoE port stats
267 */
268struct bfa_fw_fcoe_port_stats_s {
269 struct bfa_fw_fcoe_stats_s fcoe_stats;
270 struct bfa_fw_fip_stats_s fip_stats;
271};
272
273/**
274 * IOC firmware FC uport stats
275 */
276struct bfa_fw_fc_uport_stats_s {
277 struct bfa_fw_port_snsm_stats_s snsm_stats;
278 struct bfa_fw_port_lksm_stats_s lksm_stats;
279};
280
281/**
282 * IOC firmware FC port stats
283 */
284union bfa_fw_fc_port_stats_s {
285 struct bfa_fw_fc_uport_stats_s fc_stats;
286 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
287};
288
289/**
290 * IOC firmware port stats
291 */
292struct bfa_fw_port_stats_s {
293 struct bfa_fw_port_fpg_stats_s fpg_stats;
294 struct bfa_fw_port_physm_stats_s physm_stats;
295 union bfa_fw_fc_port_stats_s fc_port;
296};
297
298/**
299 * fcxchg module statistics
300 */
301struct bfa_fw_fcxchg_stats_s {
302 u32 ua_tag_inv;
303 u32 ua_state_inv;
304};
305
306struct bfa_fw_lpsm_stats_s {
307 u32 cls_rx;
308 u32 cls_tx;
309};
310
311/**
312 * Trunk statistics
313 */
314struct bfa_fw_trunk_stats_s {
315 u32 emt_recvd; /* Trunk EMT received */
316 u32 emt_accepted; /* Trunk EMT Accepted */
317 u32 emt_rejected; /* Trunk EMT rejected */
318 u32 etp_recvd; /* Trunk ETP received */
319 u32 etp_accepted; /* Trunk ETP Accepted */
320 u32 etp_rejected; /* Trunk ETP rejected */
321 u32 lr_recvd; /* Trunk LR received */
322 u32 rsvd; /* padding for 64 bit alignment */
323};
324
325struct bfa_fw_advsm_stats_s {
326 u32 flogi_sent; /* Flogi sent */
327 u32 flogi_acc_recvd; /* Flogi Acc received */
328 u32 flogi_rjt_recvd; /* Flogi rejects received */
329 u32 flogi_retries; /* Flogi retries */
330
331 u32 elp_recvd; /* ELP received */
332 u32 elp_accepted; /* ELP Accepted */
333 u32 elp_rejected; /* ELP rejected */
334 u32 elp_dropped; /* ELP dropped */
335};
336
337/**
338 * IOCFC firmware stats
339 */
340struct bfa_fw_iocfc_stats_s {
341 u32 cfg_reqs; /* cfg request */
342 u32 updq_reqs; /* update queue request */
343 u32 ic_reqs; /* interrupt coalesce reqs */
344 u32 unknown_reqs;
345 u32 set_intr_reqs; /* set interrupt reqs */
346};
347
348/**
349 * IOC attributes returned in queries
350 */
351struct bfa_iocfc_attr_s {
352 struct bfa_iocfc_cfg_s config; /* IOCFC config */
353 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
354};
355
356/**
357 * Eth_sndrcv mod stats
358 */
359struct bfa_fw_eth_sndrcv_stats_s {
360 u32 crc_err;
361 u32 rsvd; /* 64bit align */
362};
363
364/**
365 * CT MAC mod stats
366 */
367struct bfa_fw_mac_mod_stats_s {
368 u32 mac_on; /* MAC got turned-on */
369 u32 link_up; /* link-up */
370 u32 signal_off; /* lost signal */
371 u32 dfe_on; /* DFE on */
372 u32 mac_reset; /* # of MAC reset to bring lnk up */
373 u32 pcs_reset; /* # of PCS reset to bring lnk up */
374 u32 loopback; /* MAC got into serdes loopback */
375 u32 lb_mac_reset;
376 /* # of MAC reset to bring link up in loopback */
377 u32 lb_pcs_reset;
378 /* # of PCS reset to bring link up in loopback */
379 u32 rsvd; /* 64bit align */
380};
381
382/**
383 * CT MOD stats
384 */
385struct bfa_fw_ct_mod_stats_s {
386 u32 rxa_rds_undrun; /* RxA RDS underrun */
387 u32 rad_bpc_ovfl; /* RAD BPC overflow */
388 u32 rad_rlb_bpc_ovfl; /* RAD RLB BPC overflow */
389 u32 bpc_fcs_err; /* BPC FCS_ERR */
390 u32 txa_tso_hdr; /* TxA TSO header too long */
391 u32 rsvd; /* 64bit align */
392};
393
394/**
395 * IOC firmware stats
396 */
397struct bfa_fw_stats_s {
398 struct bfa_fw_ioc_stats_s ioc_stats;
399 struct bfa_fw_iocfc_stats_s iocfc_stats;
400 struct bfa_fw_io_stats_s io_stats;
401 struct bfa_fw_port_stats_s port_stats;
402 struct bfa_fw_fcxchg_stats_s fcxchg_stats;
403 struct bfa_fw_lpsm_stats_s lpsm_stats;
404 struct bfa_fw_lps_stats_s lps_stats;
405 struct bfa_fw_trunk_stats_s trunk_stats;
406 struct bfa_fw_advsm_stats_s advsm_stats;
407 struct bfa_fw_mac_mod_stats_s macmod_stats;
408 struct bfa_fw_ct_mod_stats_s ctmod_stats;
409 struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats;
410};
411
412#define BFA_IOCFC_PATHTOV_MAX 60
413#define BFA_IOCFC_QDEPTH_MAX 2000
414
415/**
416 * QoS states
417 */
418enum bfa_qos_state {
419 BFA_QOS_ONLINE = 1, /* QoS is online */
420 BFA_QOS_OFFLINE = 2, /* QoS is offline */
421};
422
423/**
424 * QoS Priority levels.
425 */
426enum bfa_qos_priority {
427 BFA_QOS_UNKNOWN = 0,
428 BFA_QOS_HIGH = 1, /* QoS Priority Level High */
429 BFA_QOS_MED = 2, /* QoS Priority Level Medium */
430 BFA_QOS_LOW = 3, /* QoS Priority Level Low */
431};
432
433/**
434 * QoS bandwidth allocation for each priority level
435 */
436enum bfa_qos_bw_alloc {
437 BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */
438 BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */
439 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
440};
441#pragma pack(1)
442/**
443 * QoS attribute returned in QoS Query
444 */
445struct bfa_qos_attr_s {
446 u8 state; /* QoS current state */
447 u8 rsvd[3];
448 u32 total_bb_cr; /* Total BB Credits */
449};
450
451/**
452 * These fields should be displayed only from the CLI.
453 * There will be a separate BFAL API (get_qos_vc_attr ?)
454 * to retrieve this.
455 *
456 */
457#define BFA_QOS_MAX_VC 16
458
459struct bfa_qos_vc_info_s {
460 u8 vc_credit;
461 u8 borrow_credit;
462 u8 priority;
463 u8 resvd;
464};
465
466struct bfa_qos_vc_attr_s {
467 u16 total_vc_count; /* Total VC Count */
468 u16 shared_credit;
469 u32 elp_opmode_flags;
470 struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
471 * total_vc_count */
472};
473
474/**
475 * QoS statistics
476 */
477struct bfa_qos_stats_s {
478 u32 flogi_sent; /* QoS Flogi sent */
479 u32 flogi_acc_recvd; /* QoS Flogi Acc received */
480 u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
481 u32 flogi_retries; /* QoS Flogi retries */
482
483 u32 elp_recvd; /* QoS ELP received */
484 u32 elp_accepted; /* QoS ELP Accepted */
485 u32 elp_rejected; /* QoS ELP rejected */
486 u32 elp_dropped; /* QoS ELP dropped */
487
488 u32 qos_rscn_recvd; /* QoS RSCN received */
489 u32 rsvd; /* padding for 64 bit alignment */
490};
491
492/**
493 * FCoE statistics
494 */
495struct bfa_fcoe_stats_s {
496 u64 secs_reset; /* Seconds since stats reset */
497 u64 cee_linkups; /* CEE link up */
498 u64 cee_linkdns; /* CEE link down */
499 u64 fip_linkups; /* FIP link up */
500 u64 fip_linkdns; /* FIP link down */
501 u64 fip_fails; /* FIP failures */
502 u64 mac_invalids; /* Invalid mac assignments */
503 u64 vlan_req; /* Vlan requests */
504 u64 vlan_notify; /* Vlan notifications */
505 u64 vlan_err; /* Vlan notification errors */
506 u64 vlan_timeouts; /* Vlan request timeouts */
507 u64 vlan_invalids; /* Vlan invalids */
508 u64 disc_req; /* Discovery requests */
509 u64 disc_rsp; /* Discovery responses */
510 u64 disc_err; /* Discovery error frames */
511 u64 disc_unsol; /* Discovery unsolicited */
512 u64 disc_timeouts; /* Discovery timeouts */
513 u64 disc_fcf_unavail; /* Discovery FCF not avail */
514 u64 linksvc_unsupp; /* FIP link service req unsupp. */
515 u64 linksvc_err; /* FIP link service req errors */
516 u64 logo_req; /* FIP logos received */
517 u64 clrvlink_req; /* Clear virtual link requests */
518 u64 op_unsupp; /* FIP operation unsupp. */
519 u64 untagged; /* FIP untagged frames */
520 u64 txf_ucast; /* Tx FCoE unicast frames */
521 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
522 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
523 u64 txf_mcast; /* Tx FCoE multicast frames */
524 u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
525 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
526 u64 txf_bcast; /* Tx FCoE broadcast frames */
527 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
528 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
529 u64 txf_timeout; /* Tx timeouts */
530 u64 txf_parity_errors; /* Transmit parity err */
531 u64 txf_fid_parity_errors; /* Transmit FID parity err */
532 u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
533 u64 rxf_ucast; /* Rx FCoE unicast frames */
534 u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
535 u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
536 u64 rxf_mcast; /* Rx FCoE multicast frames */
537 u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
538 u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
539 u64 rxf_bcast; /* Rx FCoE broadcast frames */
540 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
541};
542
543/**
544 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
545 */
546union bfa_fcport_stats_u {
547 struct bfa_qos_stats_s fcqos;
548 struct bfa_fcoe_stats_s fcoe;
549};
550#pragma pack()
551
552struct bfa_fcpim_del_itn_stats_s {
553 u32 del_itn_iocomp_aborted; /* Aborted IO requests */
554 u32 del_itn_iocomp_timedout; /* IO timeouts */
555 u32 del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery */
556 u32 del_itn_iocom_res_free; /* Delayed freeing of IO resources */
557 u32 del_itn_iocom_hostabrts; /* Host IO abort requests */
558 u32 del_itn_total_ios; /* Total IO count */
559 u32 del_io_iocdowns; /* IO cleaned-up due to IOC down */
560 u32 del_tm_iocdowns; /* TM cleaned-up due to IOC down */
561};
562
563struct bfa_itnim_iostats_s {
564
565 u32 total_ios; /* Total IO Requests */
566 u32 input_reqs; /* Data in-bound requests */
567 u32 output_reqs; /* Data out-bound requests */
568 u32 io_comps; /* Total IO Completions */
 569	u32	wr_throughput;	/* Write data transferred in bytes */
 570	u32	rd_throughput;	/* Read data transferred in bytes */
571
572 u32 iocomp_ok; /* Slowpath IO completions */
573 u32 iocomp_underrun; /* IO underrun */
574 u32 iocomp_overrun; /* IO overrun */
575 u32 qwait; /* IO Request-Q wait */
576 u32 qresumes; /* IO Request-Q wait done */
577 u32 no_iotags; /* No free IO tag */
578 u32 iocomp_timedout; /* IO timeouts */
579 u32 iocom_nexus_abort; /* IO failure due to target offline */
580 u32 iocom_proto_err; /* IO protocol errors */
581 u32 iocom_dif_err; /* IO SBC-3 protection errors */
582
583 u32 iocom_sqer_needed; /* fcp-2 error recovery failed */
584 u32 iocom_res_free; /* Delayed freeing of IO tag */
585
586
587 u32 io_aborts; /* Host IO abort requests */
588 u32 iocom_hostabrts; /* Host IO abort completions */
589 u32 io_cleanups; /* IO clean-up requests */
590 u32 path_tov_expired; /* IO path tov expired */
591 u32 iocomp_aborted; /* IO abort completions */
592 u32 io_iocdowns; /* IO cleaned-up due to IOC down */
593 u32 iocom_utags; /* IO comp with unknown tags */
594
595 u32 io_tmaborts; /* Abort request due to TM command */
596 u32 tm_io_comps; /* Abort completion due to TM command */
597
598 u32 creates; /* IT Nexus create requests */
599 u32 fw_create; /* IT Nexus FW create requests */
600 u32 create_comps; /* IT Nexus FW create completions */
601 u32 onlines; /* IT Nexus onlines */
602 u32 offlines; /* IT Nexus offlines */
603 u32 fw_delete; /* IT Nexus FW delete requests */
604 u32 delete_comps; /* IT Nexus FW delete completions */
605 u32 deletes; /* IT Nexus delete requests */
606 u32 sler_events; /* SLER events */
607 u32 ioc_disabled; /* Num IOC disables */
608 u32 cleanup_comps; /* IT Nexus cleanup completions */
609
610 u32 tm_cmnds; /* TM Requests */
611 u32 tm_fw_rsps; /* TM Completions */
612 u32 tm_success; /* TM initiated IO cleanup success */
613 u32 tm_failures; /* TM initiated IO cleanup failure */
614 u32 no_tskims; /* No free TM tag */
615 u32 tm_qwait; /* TM Request-Q wait */
616 u32 tm_qresumes; /* TM Request-Q wait done */
617
618 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
619 u32 tm_cleanups; /* TM cleanup requests */
620 u32 tm_cleanup_comps; /* TM cleanup completions */
621};
622
623/* Modify char* port_stt[] in bfal_port.c if a new state was added */
624enum bfa_port_states {
625 BFA_PORT_ST_UNINIT = 1,
626 BFA_PORT_ST_ENABLING_QWAIT = 2,
627 BFA_PORT_ST_ENABLING = 3,
628 BFA_PORT_ST_LINKDOWN = 4,
629 BFA_PORT_ST_LINKUP = 5,
630 BFA_PORT_ST_DISABLING_QWAIT = 6,
631 BFA_PORT_ST_DISABLING = 7,
632 BFA_PORT_ST_DISABLED = 8,
633 BFA_PORT_ST_STOPPED = 9,
634 BFA_PORT_ST_IOCDOWN = 10,
635 BFA_PORT_ST_IOCDIS = 11,
636 BFA_PORT_ST_FWMISMATCH = 12,
637 BFA_PORT_ST_PREBOOT_DISABLED = 13,
638 BFA_PORT_ST_TOGGLING_QWAIT = 14,
639 BFA_PORT_ST_MAX_STATE,
640};
641
642/**
643 * Port operational type (in sync with SNIA port type).
644 */
645enum bfa_port_type {
646 BFA_PORT_TYPE_UNKNOWN = 1, /* port type is unknown */
647 BFA_PORT_TYPE_NPORT = 5, /* P2P with switched fabric */
648 BFA_PORT_TYPE_NLPORT = 6, /* public loop */
649 BFA_PORT_TYPE_LPORT = 20, /* private loop */
650 BFA_PORT_TYPE_P2P = 21, /* P2P with no switched fabric */
651 BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
652};
653
654/**
655 * Port topology setting. A port's topology and fabric login status
656 * determine its operational type.
657 */
658enum bfa_port_topology {
659 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */
660 BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */
661 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
662 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
663};
664
665/**
666 * Physical port loopback types.
667 */
668enum bfa_port_opmode {
669 BFA_PORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
670 BFA_PORT_OPMODE_LB_INT = 0x01, /* internal loop back */
671 BFA_PORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
672 BFA_PORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
673 BFA_PORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
674 BFA_PORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
675};
676
677#define BFA_PORT_OPMODE_LB_HARD(_mode) \
678 ((_mode == BFA_PORT_OPMODE_LB_INT) || \
679 (_mode == BFA_PORT_OPMODE_LB_SLW) || \
680 (_mode == BFA_PORT_OPMODE_LB_EXT))
681
682/**
683 * Port link state
684 */
685enum bfa_port_linkstate {
686 BFA_PORT_LINKUP = 1, /* Physical port/Trunk link up */
687 BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
688};
689
690/**
691 * Port link state reason code
692 */
693enum bfa_port_linkstate_rsn {
694 BFA_PORT_LINKSTATE_RSN_NONE = 0,
695 BFA_PORT_LINKSTATE_RSN_DISABLED = 1,
696 BFA_PORT_LINKSTATE_RSN_RX_NOS = 2,
697 BFA_PORT_LINKSTATE_RSN_RX_OLS = 3,
698 BFA_PORT_LINKSTATE_RSN_RX_LIP = 4,
699 BFA_PORT_LINKSTATE_RSN_RX_LIPF7 = 5,
700 BFA_PORT_LINKSTATE_RSN_SFP_REMOVED = 6,
701 BFA_PORT_LINKSTATE_RSN_PORT_FAULT = 7,
702 BFA_PORT_LINKSTATE_RSN_RX_LOS = 8,
703 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
704 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
705 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11,
706
707
708
709 /* CEE related reason codes/errors */
710 CEE_LLDP_INFO_AGED_OUT = 20,
711 CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
712 CEE_PEER_NOT_ADVERTISE_DCBX = 22,
713 CEE_PEER_NOT_ADVERTISE_PG = 23,
714 CEE_PEER_NOT_ADVERTISE_PFC = 24,
715 CEE_PEER_NOT_ADVERTISE_FCOE = 25,
716 CEE_PG_NOT_COMPATIBLE = 26,
717 CEE_PFC_NOT_COMPATIBLE = 27,
718 CEE_FCOE_NOT_COMPATIBLE = 28,
719 CEE_BAD_PG_RCVD = 29,
720 CEE_BAD_BW_RCVD = 30,
721 CEE_BAD_PFC_RCVD = 31,
722 CEE_BAD_APP_PRI_RCVD = 32,
723 CEE_FCOE_PRI_PFC_OFF = 33,
724 CEE_DUP_CONTROL_TLV_RCVD = 34,
725 CEE_DUP_FEAT_TLV_RCVD = 35,
726 CEE_APPLY_NEW_CFG = 36, /* reason, not error */
727 CEE_PROTOCOL_INIT = 37, /* reason, not error */
728 CEE_PHY_LINK_DOWN = 38,
729 CEE_LLS_FCOE_ABSENT = 39,
730 CEE_LLS_FCOE_DOWN = 40,
731 CEE_ISCSI_NOT_COMPATIBLE = 41,
732 CEE_ISCSI_PRI_PFC_OFF = 42,
733 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
734};
735#pragma pack(1)
736/**
737 * Physical port configuration
738 */
739struct bfa_port_cfg_s {
740 u8 topology; /* bfa_port_topology */
741 u8 speed; /* enum bfa_port_speed */
742 u8 trunked; /* trunked or not */
743 u8 qos_enabled; /* qos enabled or not */
744 u8 cfg_hardalpa; /* is hard alpa configured */
745 u8 hardalpa; /* configured hard alpa */
746 u16 maxfrsize; /* maximum frame size */
747 u8 rx_bbcredit; /* receive buffer credits */
748 u8 tx_bbcredit; /* transmit buffer credits */
749 u8 ratelimit; /* ratelimit enabled or not */
750 u8 trl_def_speed; /* ratelimit default speed */
751 u16 path_tov; /* device path timeout */
752 u16 q_depth; /* SCSI Queue depth */
753};
754#pragma pack()
755
756/**
757 * Port attribute values.
758 */
759struct bfa_port_attr_s {
760 /*
761 * Static fields
762 */
763 wwn_t nwwn; /* node wwn */
764 wwn_t pwwn; /* port wwn */
765 wwn_t factorynwwn; /* factory node wwn */
766 wwn_t factorypwwn; /* factory port wwn */
767 enum fc_cos cos_supported; /* supported class of services */
768 u32 rsvd;
769 struct fc_symname_s port_symname; /* port symbolic name */
770 enum bfa_port_speed speed_supported; /* supported speeds */
771 bfa_boolean_t pbind_enabled;
772
773 /*
774 * Configured values
775 */
776 struct bfa_port_cfg_s pport_cfg; /* pport cfg */
777
778 /*
779 * Dynamic field - info from BFA
780 */
781 enum bfa_port_states port_state; /* current port state */
782 enum bfa_port_speed speed; /* current speed */
783 enum bfa_port_topology topology; /* current topology */
784 bfa_boolean_t beacon; /* current beacon status */
785 bfa_boolean_t link_e2e_beacon; /* link beacon is on */
786 bfa_boolean_t plog_enabled; /* portlog is enabled */
787
788 /*
789 * Dynamic field - info from FCS
790 */
791 u32 pid; /* port ID */
792 enum bfa_port_type port_type; /* current topology */
793 u32 loopback; /* external loopback */
794 u32 authfail; /* auth fail state */
795 bfa_boolean_t io_profile; /* get it from fcpim mod */
 796	u8			pad[4];		/* for 64-bit alignment */
797
798 /* FCoE specific */
799 u16 fcoe_vlan;
800 u8 rsvd1[6];
801};
802
803/**
804 * Port FCP mappings.
805 */
806struct bfa_port_fcpmap_s {
807 char osdevname[256];
808 u32 bus;
809 u32 target;
810 u32 oslun;
811 u32 fcid;
812 wwn_t nwwn;
813 wwn_t pwwn;
814 u64 fcplun;
815 char luid[256];
816};
817
818/**
819 * Port RNID info.
820 */
821struct bfa_port_rnid_s {
822 wwn_t wwn;
823 u32 unittype;
824 u32 portid;
825 u32 attached_nodes_num;
826 u16 ip_version;
827 u16 udp_port;
828 u8 ipaddr[16];
829 u16 rsvd;
830 u16 topologydiscoveryflags;
831};
832
833#pragma pack(1)
834struct bfa_fcport_fcf_s {
835 wwn_t name; /* FCF name */
836 wwn_t fabric_name; /* Fabric Name */
837 u8 fipenabled; /* FIP enabled or not */
838 u8 fipfailed; /* FIP failed or not */
839 u8 resv[2];
840 u8 pri; /* FCF priority */
841 u8 version; /* FIP version used */
842 u8 available; /* Available for login */
843 u8 fka_disabled; /* FKA is disabled */
844 u8 maxsz_verified; /* FCoE max size verified */
845 u8 fc_map[3]; /* FC map */
846 u16 vlan; /* FCoE vlan tag/priority */
847 u32 fka_adv_per; /* FIP ka advert. period */
848 mac_t mac; /* FCF mac */
849};
850
851/**
852 * Trunk states for BCU/BFAL
853 */
854enum bfa_trunk_state {
855 BFA_TRUNK_DISABLED = 0, /* Trunk is not configured */
856 BFA_TRUNK_ONLINE = 1, /* Trunk is online */
857 BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
858};
859
860/**
861 * VC attributes for trunked link
862 */
863struct bfa_trunk_vc_attr_s {
864 u32 bb_credit;
865 u32 elp_opmode_flags;
866 u32 req_credit;
867 u16 vc_credits[8];
868};
869
870/**
871 * Link state information
872 */
873struct bfa_port_link_s {
874 u8 linkstate; /* Link state bfa_port_linkstate */
875 u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
876 u8 topology; /* P2P/LOOP bfa_port_topology */
877 u8 speed; /* Link speed (1/2/4/8 G) */
878 u32 linkstate_opt; /* Linkstate optional data (debug) */
879 u8 trunked; /* Trunked or not (1 or 0) */
880 u8 resvd[3];
881 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
882 union {
883 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
884 struct bfa_trunk_vc_attr_s trunk_vc_attr;
885 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */
886 } vc_fcf;
887};
888#pragma pack()
889
890enum bfa_trunk_link_fctl {
891 BFA_TRUNK_LINK_FCTL_NORMAL,
892 BFA_TRUNK_LINK_FCTL_VC,
893 BFA_TRUNK_LINK_FCTL_VC_QOS,
894};
895
896enum bfa_trunk_link_state {
897 BFA_TRUNK_LINK_STATE_UP = 1, /* link part of trunk */
898 BFA_TRUNK_LINK_STATE_DN_LINKDN = 2, /* physical link down */
899 BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3, /* trunk group different */
900 BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4, /* speed mismatch */
901 BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5, /* remote port not trunked */
902};
903
904#define BFA_TRUNK_MAX_PORTS 2
905struct bfa_trunk_link_attr_s {
906 wwn_t trunk_wwn;
907 enum bfa_trunk_link_fctl fctl;
908 enum bfa_trunk_link_state link_state;
909 enum bfa_port_speed speed;
910 u32 deskew;
911};
912
913struct bfa_trunk_attr_s {
914 enum bfa_trunk_state state;
915 enum bfa_port_speed speed;
916 u32 port_id;
917 u32 rsvd;
918 struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS];
919};
920
921struct bfa_rport_hal_stats_s {
922 u32 sm_un_cr; /* uninit: create events */
923 u32 sm_un_unexp; /* uninit: exception events */
924 u32 sm_cr_on; /* created: online events */
925 u32 sm_cr_del; /* created: delete events */
926 u32 sm_cr_hwf; /* created: IOC down */
927 u32 sm_cr_unexp; /* created: exception events */
928 u32 sm_fwc_rsp; /* fw create: f/w responses */
929 u32 sm_fwc_del; /* fw create: delete events */
930 u32 sm_fwc_off; /* fw create: offline events */
931 u32 sm_fwc_hwf; /* fw create: IOC down */
932 u32 sm_fwc_unexp; /* fw create: exception events*/
933 u32 sm_on_off; /* online: offline events */
934 u32 sm_on_del; /* online: delete events */
935 u32 sm_on_hwf; /* online: IOC down events */
936 u32 sm_on_unexp; /* online: exception events */
937 u32 sm_fwd_rsp; /* fw delete: fw responses */
938 u32 sm_fwd_del; /* fw delete: delete events */
939 u32 sm_fwd_hwf; /* fw delete: IOC down events */
940 u32 sm_fwd_unexp; /* fw delete: exception events*/
941 u32 sm_off_del; /* offline: delete events */
942 u32 sm_off_on; /* offline: online events */
943 u32 sm_off_hwf; /* offline: IOC down events */
944 u32 sm_off_unexp; /* offline: exception events */
945 u32 sm_del_fwrsp; /* delete: fw responses */
946 u32 sm_del_hwf; /* delete: IOC down events */
947 u32 sm_del_unexp; /* delete: exception events */
948 u32 sm_delp_fwrsp; /* delete pend: fw responses */
949 u32 sm_delp_hwf; /* delete pend: IOC downs */
950 u32 sm_delp_unexp; /* delete pend: exceptions */
951 u32 sm_offp_fwrsp; /* off-pending: fw responses */
952 u32 sm_offp_del; /* off-pending: deletes */
953 u32 sm_offp_hwf; /* off-pending: IOC downs */
954 u32 sm_offp_unexp; /* off-pending: exceptions */
955 u32 sm_iocd_off; /* IOC down: offline events */
956 u32 sm_iocd_del; /* IOC down: delete events */
957 u32 sm_iocd_on; /* IOC down: online events */
958 u32 sm_iocd_unexp; /* IOC down: exceptions */
959 u32 rsvd;
960};
961#pragma pack(1)
962/**
963 * Rport's QoS attributes
964 */
965struct bfa_rport_qos_attr_s {
966 u8 qos_priority; /* rport's QoS priority */
967 u8 rsvd[3];
968 u32 qos_flow_id; /* QoS flow Id */
969};
970#pragma pack()
971
972#define BFA_IOBUCKET_MAX 14
973
974struct bfa_itnim_latency_s {
975 u32 min[BFA_IOBUCKET_MAX];
976 u32 max[BFA_IOBUCKET_MAX];
977 u32 count[BFA_IOBUCKET_MAX];
978 u32 avg[BFA_IOBUCKET_MAX];
979};
980
981struct bfa_itnim_ioprofile_s {
982 u32 clock_res_mul;
983 u32 clock_res_div;
984 u32 index;
985 u32 io_profile_start_time; /* IO profile start time */
986 u32 iocomps[BFA_IOBUCKET_MAX]; /* IO completed */
987 struct bfa_itnim_latency_s io_latency;
988};
989
990/**
991 * FC physical port statistics.
992 */
993struct bfa_port_fc_stats_s {
994 u64 secs_reset; /* Seconds since stats is reset */
995 u64 tx_frames; /* Tx frames */
996 u64 tx_words; /* Tx words */
997 u64 tx_lip; /* Tx LIP */
998 u64 tx_nos; /* Tx NOS */
999 u64 tx_ols; /* Tx OLS */
1000 u64 tx_lr; /* Tx LR */
1001 u64 tx_lrr; /* Tx LRR */
1002 u64 rx_frames; /* Rx frames */
1003 u64 rx_words; /* Rx words */
1004 u64 lip_count; /* Rx LIP */
1005 u64 nos_count; /* Rx NOS */
1006 u64 ols_count; /* Rx OLS */
1007 u64 lr_count; /* Rx LR */
1008 u64 lrr_count; /* Rx LRR */
1009 u64 invalid_crcs; /* Rx CRC err frames */
1010 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
1011 u64 undersized_frm; /* Rx undersized frames */
1012 u64 oversized_frm; /* Rx oversized frames */
1013 u64 bad_eof_frm; /* Rx frames with bad EOF */
1014 u64 error_frames; /* Errored frames */
1015 u64 dropped_frames; /* Dropped frames */
1016 u64 link_failures; /* Link Failure (LF) count */
1017 u64 loss_of_syncs; /* Loss of sync count */
1018 u64 loss_of_signals; /* Loss of signal count */
1019 u64 primseq_errs; /* Primitive sequence protocol err. */
1020 u64 bad_os_count; /* Invalid ordered sets */
1021 u64 err_enc_out; /* Encoding err nonframe_8b10b */
1022 u64 err_enc; /* Encoding err frame_8b10b */
1023};
1024
1025/**
1026 * Eth Physical Port statistics.
1027 */
1028struct bfa_port_eth_stats_s {
1029 u64 secs_reset; /* Seconds since stats is reset */
1030 u64 frame_64; /* Frames 64 bytes */
1031 u64 frame_65_127; /* Frames 65-127 bytes */
1032 u64 frame_128_255; /* Frames 128-255 bytes */
1033 u64 frame_256_511; /* Frames 256-511 bytes */
1034 u64 frame_512_1023; /* Frames 512-1023 bytes */
1035 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
1036 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
1037 u64 tx_bytes; /* Tx bytes */
1038 u64 tx_packets; /* Tx packets */
1039 u64 tx_mcast_packets; /* Tx multicast packets */
1040 u64 tx_bcast_packets; /* Tx broadcast packets */
1041 u64 tx_control_frame; /* Tx control frame */
1042 u64 tx_drop; /* Tx drops */
1043 u64 tx_jabber; /* Tx jabber */
1044 u64 tx_fcs_error; /* Tx FCS errors */
1045 u64 tx_fragments; /* Tx fragments */
1046 u64 rx_bytes; /* Rx bytes */
1047 u64 rx_packets; /* Rx packets */
1048 u64 rx_mcast_packets; /* Rx multicast packets */
1049 u64 rx_bcast_packets; /* Rx broadcast packets */
1050 u64 rx_control_frames; /* Rx control frames */
1051 u64 rx_unknown_opcode; /* Rx unknown opcode */
1052 u64 rx_drop; /* Rx drops */
1053 u64 rx_jabber; /* Rx jabber */
1054 u64 rx_fcs_error; /* Rx FCS errors */
1055 u64 rx_alignment_error; /* Rx alignment errors */
1056 u64 rx_frame_length_error; /* Rx frame len errors */
1057 u64 rx_code_error; /* Rx code errors */
1058 u64 rx_fragments; /* Rx fragments */
1059 u64 rx_pause; /* Rx pause */
1060 u64 rx_zero_pause; /* Rx zero pause */
1061 u64 tx_pause; /* Tx pause */
1062 u64 tx_zero_pause; /* Tx zero pause */
1063 u64 rx_fcoe_pause; /* Rx FCoE pause */
1064 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
1065 u64 tx_fcoe_pause; /* Tx FCoE pause */
1066 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
1067 u64 rx_iscsi_pause; /* Rx iSCSI pause */
1068 u64 rx_iscsi_zero_pause; /* Rx iSCSI zero pause */
1069 u64 tx_iscsi_pause; /* Tx iSCSI pause */
1070 u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
1071};
1072
1073/**
1074 * Port statistics.
1075 */
1076union bfa_port_stats_u {
1077 struct bfa_port_fc_stats_s fc;
1078 struct bfa_port_eth_stats_s eth;
1079};
1080
1081#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_drv.c
index a7fcc80c177e..14127646dc54 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -14,10 +14,8 @@
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17#include <bfa.h> 17
18#include <defs/bfa_defs_pci.h> 18#include "bfa_modules.h"
19#include <cs/bfa_debug.h>
20#include <bfa_iocfc.h>
21 19
22/** 20/**
23 * BFA module list terminated by NULL 21 * BFA module list terminated by NULL
@@ -30,9 +28,6 @@ struct bfa_module_s *hal_mods[] = {
30 &hal_mod_uf, 28 &hal_mod_uf,
31 &hal_mod_rport, 29 &hal_mod_rport,
32 &hal_mod_fcpim, 30 &hal_mod_fcpim,
33#ifdef BFA_CFG_PBIND
34 &hal_mod_pbind,
35#endif
36 NULL 31 NULL
37}; 32};
38 33
@@ -74,17 +69,39 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
74 bfa_isr_unhandled, /* --------- */ 69 bfa_isr_unhandled, /* --------- */
75}; 70};
76 71
72
77/** 73/**
78 * Message handlers for mailbox command classes 74 * Message handlers for mailbox command classes
79 */ 75 */
80bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
81 NULL, 77 NULL,
82 NULL, /* BFI_MC_IOC */ 78 NULL, /* BFI_MC_IOC */
83 NULL, /* BFI_MC_DIAG */ 79 NULL, /* BFI_MC_DIAG */
84 NULL, /* BFI_MC_FLASH */ 80 NULL, /* BFI_MC_FLASH */
85 NULL, /* BFI_MC_CEE */ 81 NULL, /* BFI_MC_CEE */
86 NULL, /* BFI_MC_PORT */ 82 NULL, /* BFI_MC_PORT */
87 bfa_iocfc_isr, /* BFI_MC_IOCFC */ 83 bfa_iocfc_isr, /* BFI_MC_IOCFC */
88 NULL, 84 NULL,
89}; 85};
90 86
87
88
89void
90bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
91{
92 struct bfa_port_s *port = &bfa->modules.port;
93 u32 dm_len;
94 u8 *dm_kva;
95 u64 dm_pa;
96
97 dm_len = bfa_port_meminfo();
98 dm_kva = bfa_meminfo_dma_virt(mi);
99 dm_pa = bfa_meminfo_dma_phys(mi);
100
101 memset(port, 0, sizeof(struct bfa_port_s));
102 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
103 bfa_port_mem_claim(port, dm_kva, dm_pa);
104
105 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
106 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
107}
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/bfa_fc.h
index 436dd7c5643a..6eff705564eb 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,13 +15,50 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#ifndef __FC_H__ 18#ifndef __BFA_FC_H__
19#define __FC_H__ 19#define __BFA_FC_H__
20 20
21#include <protocol/types.h> 21#include "bfa_os_inc.h"
22
23typedef u64 wwn_t;
24typedef u64 lun_t;
25
26#define WWN_NULL (0)
27#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
28#define FC_ALPA_MAX 128
22 29
23#pragma pack(1) 30#pragma pack(1)
24 31
32#define MAC_ADDRLEN (6)
33struct mac_s { u8 mac[MAC_ADDRLEN]; };
34#define mac_t struct mac_s
35
36/*
37 * generic SCSI cdb definition
38 */
39#define SCSI_MAX_CDBLEN 16
40struct scsi_cdb_s {
41 u8 scsi_cdb[SCSI_MAX_CDBLEN];
42};
43#define scsi_cdb_t struct scsi_cdb_s
44
45/* ------------------------------------------------------------
46 * SCSI status byte values
47 * ------------------------------------------------------------
48 */
49#define SCSI_STATUS_GOOD 0x00
50#define SCSI_STATUS_CHECK_CONDITION 0x02
51#define SCSI_STATUS_CONDITION_MET 0x04
52#define SCSI_STATUS_BUSY 0x08
53#define SCSI_STATUS_INTERMEDIATE 0x10
54#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
55#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
56#define SCSI_STATUS_COMMAND_TERMINATED 0x22
57#define SCSI_STATUS_QUEUE_FULL 0x28
58#define SCSI_STATUS_ACA_ACTIVE 0x30
59
 60#define SCSI_MAX_ALLOC_LEN		0xFF	/* maximum allocation length */
61
25/* 62/*
26 * Fibre Channel Header Structure (FCHS) definition 63 * Fibre Channel Header Structure (FCHS) definition
27 */ 64 */
@@ -51,9 +88,9 @@ struct fchs_s {
51 u32 ro; /* relative offset */ 88 u32 ro; /* relative offset */
52}; 89};
53 90
54#define FC_SOF_LEN 4 91#define FC_SOF_LEN 4
55#define FC_EOF_LEN 4 92#define FC_EOF_LEN 4
56#define FC_CRC_LEN 4 93#define FC_CRC_LEN 4
57 94
58/* 95/*
59 * Fibre Channel BB_E Header Structure 96 * Fibre Channel BB_E Header Structure
@@ -140,10 +177,12 @@ enum {
140 FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */ 177 FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */
141 FC_TYPE_FC_AL = 0x23, /* FC-AL */ 178 FC_TYPE_FC_AL = 0x23, /* FC-AL */
142 FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */ 179 FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */
180 FC_TYPE_FC_SPINFAB = 0xEE, /* SPINFAB */
181 FC_TYPE_FC_DIAG = 0xEF, /* DIAG */
143 FC_TYPE_MAX = 256, /* 256 FC-4 types */ 182 FC_TYPE_MAX = 256, /* 256 FC-4 types */
144}; 183};
145 184
146struct fc_fc4types_s{ 185struct fc_fc4types_s {
147 u8 bits[FC_TYPE_MAX / 8]; 186 u8 bits[FC_TYPE_MAX / 8];
148}; 187};
149 188
@@ -168,7 +207,7 @@ enum {
168 */ 207 */
169enum { 208enum {
170 FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0, 209 FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0,
171 FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00, 210 FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00,
172 FC_ALIAS_SERVER = 0xFFFFF8, 211 FC_ALIAS_SERVER = 0xFFFFF8,
173 FC_MGMT_SERVER = 0xFFFFFA, 212 FC_MGMT_SERVER = 0xFFFFFA,
174 FC_TIME_SERVER = 0xFFFFFB, 213 FC_TIME_SERVER = 0xFFFFFB,
@@ -201,7 +240,7 @@ enum {
201/* 240/*
202 * generic ELS command 241 * generic ELS command
203 */ 242 */
204struct fc_els_cmd_s{ 243struct fc_els_cmd_s {
205 u32 els_code:8; /* ELS Command Code */ 244 u32 els_code:8; /* ELS Command Code */
206 u32 reserved:24; 245 u32 reserved:24;
207}; 246};
@@ -233,6 +272,8 @@ enum {
233 FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */ 272 FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */
234 FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */ 273 FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */
235 FC_ELS_ADISC = 0x52, /* Discover Address. */ 274 FC_ELS_ADISC = 0x52, /* Discover Address. */
275 FC_ELS_FARP_REQ = 0x54, /* FARP Request. */
276 FC_ELS_FARP_REP = 0x55, /* FARP Reply. */
236 FC_ELS_FAN = 0x60, /* Fabric Address Notification */ 277 FC_ELS_FAN = 0x60, /* Fabric Address Notification */
237 FC_ELS_RSCN = 0x61, /* Reg State Change Notification */ 278 FC_ELS_RSCN = 0x61, /* Reg State Change Notification */
238 FC_ELS_SCR = 0x62, /* State Change Registration. */ 279 FC_ELS_SCR = 0x62, /* State Change Registration. */
@@ -272,7 +313,7 @@ enum {
272 * N_Port PLOGI Common Service Parameters. 313 * N_Port PLOGI Common Service Parameters.
273 * FC-PH-x. Figure-76. pg. 308. 314 * FC-PH-x. Figure-76. pg. 308.
274 */ 315 */
275struct fc_plogi_csp_s{ 316struct fc_plogi_csp_s {
276 u8 verhi; /* FC-PH high version */ 317 u8 verhi; /* FC-PH high version */
277 u8 verlo; /* FC-PH low version */ 318 u8 verlo; /* FC-PH low version */
278 u16 bbcred; /* BB_Credit */ 319 u16 bbcred; /* BB_Credit */
@@ -326,7 +367,7 @@ struct fc_plogi_csp_s{
326 * N_Port PLOGI Class Specific Parameters. 367 * N_Port PLOGI Class Specific Parameters.
327 * FC-PH-x. Figure 78. pg. 318. 368 * FC-PH-x. Figure 78. pg. 318.
328 */ 369 */
329struct fc_plogi_clp_s{ 370struct fc_plogi_clp_s {
330#ifdef __BIGENDIAN 371#ifdef __BIGENDIAN
331 u32 class_valid:1; 372 u32 class_valid:1;
332 u32 intermix:1; /* class intermix supported if set =1. 373 u32 intermix:1; /* class intermix supported if set =1.
@@ -361,29 +402,29 @@ struct fc_plogi_clp_s{
361 u32 reserved8:16; 402 u32 reserved8:16;
362}; 403};
363 404
364#define FLOGI_VVL_BRCD 0x42524344 /* ASCII value for each character in 405/* ASCII value for each character in string "BRCD" */
365 * string "BRCD" */ 406#define FLOGI_VVL_BRCD 0x42524344
366 407
367/* 408/*
368 * PLOGI els command and reply payload 409 * PLOGI els command and reply payload
369 */ 410 */
370struct fc_logi_s{ 411struct fc_logi_s {
371 struct fc_els_cmd_s els_cmd; /* ELS command code */ 412 struct fc_els_cmd_s els_cmd; /* ELS command code */
372 struct fc_plogi_csp_s csp; /* common service params */ 413 struct fc_plogi_csp_s csp; /* common service params */
373 wwn_t port_name; 414 wwn_t port_name;
374 wwn_t node_name; 415 wwn_t node_name;
375 struct fc_plogi_clp_s class1; /* class 1 service parameters */ 416 struct fc_plogi_clp_s class1; /* class 1 service parameters */
376 struct fc_plogi_clp_s class2; /* class 2 service parameters */ 417 struct fc_plogi_clp_s class2; /* class 2 service parameters */
377 struct fc_plogi_clp_s class3; /* class 3 service parameters */ 418 struct fc_plogi_clp_s class3; /* class 3 service parameters */
378 struct fc_plogi_clp_s class4; /* class 4 service parameters */ 419 struct fc_plogi_clp_s class4; /* class 4 service parameters */
379 u8 vvl[16]; /* vendor version level */ 420 u8 vvl[16]; /* vendor version level */
380}; 421};
381 422
382/* 423/*
383 * LOGO els command payload 424 * LOGO els command payload
384 */ 425 */
385struct fc_logo_s{ 426struct fc_logo_s {
386 struct fc_els_cmd_s els_cmd; /* ELS command code */ 427 struct fc_els_cmd_s els_cmd; /* ELS command code */
387 u32 res1:8; 428 u32 res1:8;
388 u32 nport_id:24; /* N_Port identifier of source */ 429 u32 nport_id:24; /* N_Port identifier of source */
389 wwn_t orig_port_name; /* Port name of the LOGO originator */ 430 wwn_t orig_port_name; /* Port name of the LOGO originator */
@@ -393,7 +434,7 @@ struct fc_logo_s{
393 * ADISC els command payload 434 * ADISC els command payload
394 */ 435 */
395struct fc_adisc_s { 436struct fc_adisc_s {
396 struct fc_els_cmd_s els_cmd; /* ELS command code */ 437 struct fc_els_cmd_s els_cmd; /* ELS command code */
397 u32 res1:8; 438 u32 res1:8;
398 u32 orig_HA:24; /* originator hard address */ 439 u32 orig_HA:24; /* originator hard address */
399 wwn_t orig_port_name; /* originator port name */ 440 wwn_t orig_port_name; /* originator port name */
@@ -405,7 +446,7 @@ struct fc_adisc_s {
405/* 446/*
406 * Exchange status block 447 * Exchange status block
407 */ 448 */
408struct fc_exch_status_blk_s{ 449struct fc_exch_status_blk_s {
409 u32 oxid:16; 450 u32 oxid:16;
410 u32 rxid:16; 451 u32 rxid:16;
411 u32 res1:8; 452 u32 res1:8;
@@ -423,7 +464,7 @@ struct fc_exch_status_blk_s{
423 * RES els command payload 464 * RES els command payload
424 */ 465 */
425struct fc_res_s { 466struct fc_res_s {
426 struct fc_els_cmd_s els_cmd; /* ELS command code */ 467 struct fc_els_cmd_s els_cmd; /* ELS command code */
427 u32 res1:8; 468 u32 res1:8;
428 u32 nport_id:24; /* N_Port identifier of source */ 469 u32 nport_id:24; /* N_Port identifier of source */
429 u32 oxid:16; 470 u32 oxid:16;
@@ -434,16 +475,16 @@ struct fc_res_s {
434/* 475/*
435 * RES els accept payload 476 * RES els accept payload
436 */ 477 */
437struct fc_res_acc_s{ 478struct fc_res_acc_s {
438 struct fc_els_cmd_s els_cmd; /* ELS command code */ 479 struct fc_els_cmd_s els_cmd; /* ELS command code */
439 struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */ 480 struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */
440}; 481};
441 482
442/* 483/*
443 * REC els command payload 484 * REC els command payload
444 */ 485 */
445struct fc_rec_s { 486struct fc_rec_s {
446 struct fc_els_cmd_s els_cmd; /* ELS command code */ 487 struct fc_els_cmd_s els_cmd; /* ELS command code */
447 u32 res1:8; 488 u32 res1:8;
448 u32 nport_id:24; /* N_Port identifier of source */ 489 u32 nport_id:24; /* N_Port identifier of source */
449 u32 oxid:16; 490 u32 oxid:16;
@@ -451,9 +492,9 @@ struct fc_rec_s {
451}; 492};
452 493
453#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */ 494#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */
454#define FC_REC_ESB_SI 0x40000000 /* SI is owned */ 495#define FC_REC_ESB_SI 0x40000000 /* SI is owned */
455#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */ 496#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */
456#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */ 497#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */
457#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */ 498#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */
458#define FC_REC_ESB_ERRP_MSK 0x03000000 499#define FC_REC_ESB_ERRP_MSK 0x03000000
459#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */ 500#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */
@@ -464,7 +505,7 @@ struct fc_rec_s {
464 * REC els accept payload 505 * REC els accept payload
465 */ 506 */
466struct fc_rec_acc_s { 507struct fc_rec_acc_s {
467 struct fc_els_cmd_s els_cmd; /* ELS command code */ 508 struct fc_els_cmd_s els_cmd; /* ELS command code */
468 u32 oxid:16; 509 u32 oxid:16;
469 u32 rxid:16; 510 u32 rxid:16;
470 u32 res1:8; 511 u32 res1:8;
@@ -479,7 +520,7 @@ struct fc_rec_acc_s {
479 * RSI els payload 520 * RSI els payload
480 */ 521 */
481struct fc_rsi_s { 522struct fc_rsi_s {
482 struct fc_els_cmd_s els_cmd; 523 struct fc_els_cmd_s els_cmd;
483 u32 res1:8; 524 u32 res1:8;
484 u32 orig_sid:24; 525 u32 orig_sid:24;
485 u32 oxid:16; 526 u32 oxid:16;
@@ -490,7 +531,7 @@ struct fc_rsi_s {
490 * structure for PRLI paramater pages, both request & response 531 * structure for PRLI paramater pages, both request & response
491 * see FC-PH-X table 113 & 115 for explanation also FCP table 8 532 * see FC-PH-X table 113 & 115 for explanation also FCP table 8
492 */ 533 */
493struct fc_prli_params_s{ 534struct fc_prli_params_s {
494 u32 reserved:16; 535 u32 reserved:16;
495#ifdef __BIGENDIAN 536#ifdef __BIGENDIAN
496 u32 reserved1:5; 537 u32 reserved1:5;
@@ -531,7 +572,7 @@ enum {
531 FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */ 572 FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */
532}; 573};
533 574
534struct fc_prli_params_page_s{ 575struct fc_prli_params_page_s {
535 u32 type:8; 576 u32 type:8;
536 u32 codext:8; 577 u32 codext:8;
537#ifdef __BIGENDIAN 578#ifdef __BIGENDIAN
@@ -551,13 +592,13 @@ struct fc_prli_params_page_s{
551 592
552 u32 origprocas; 593 u32 origprocas;
553 u32 rspprocas; 594 u32 rspprocas;
554 struct fc_prli_params_s servparams; 595 struct fc_prli_params_s servparams;
555}; 596};
556 597
557/* 598/*
558 * PRLI request and accept payload, FC-PH-X tables 112 & 114 599 * PRLI request and accept payload, FC-PH-X tables 112 & 114
559 */ 600 */
560struct fc_prli_s{ 601struct fc_prli_s {
561 u32 command:8; 602 u32 command:8;
562 u32 pglen:8; 603 u32 pglen:8;
563 u32 pagebytes:16; 604 u32 pagebytes:16;
@@ -567,7 +608,7 @@ struct fc_prli_s{
567/* 608/*
568 * PRLO logout params page 609 * PRLO logout params page
569 */ 610 */
570struct fc_prlo_params_page_s{ 611struct fc_prlo_params_page_s {
571 u32 type:8; 612 u32 type:8;
572 u32 type_ext:8; 613 u32 type_ext:8;
573#ifdef __BIGENDIAN 614#ifdef __BIGENDIAN
@@ -592,17 +633,17 @@ struct fc_prlo_params_page_s{
592/* 633/*
593 * PRLO els command payload 634 * PRLO els command payload
594 */ 635 */
595struct fc_prlo_s{ 636struct fc_prlo_s {
596 u32 command:8; 637 u32 command:8;
597 u32 page_len:8; 638 u32 page_len:8;
598 u32 payload_len:16; 639 u32 payload_len:16;
599 struct fc_prlo_params_page_s prlo_params[1]; 640 struct fc_prlo_params_page_s prlo_params[1];
600}; 641};
601 642
602/* 643/*
603 * PRLO Logout response parameter page 644 * PRLO Logout response parameter page
604 */ 645 */
605struct fc_prlo_acc_params_page_s{ 646struct fc_prlo_acc_params_page_s {
606 u32 type:8; 647 u32 type:8;
607 u32 type_ext:8; 648 u32 type_ext:8;
608 649
@@ -628,7 +669,7 @@ struct fc_prlo_acc_params_page_s{
628/* 669/*
629 * PRLO els command ACC payload 670 * PRLO els command ACC payload
630 */ 671 */
631struct fc_prlo_acc_s{ 672struct fc_prlo_acc_s {
632 u32 command:8; 673 u32 command:8;
633 u32 page_len:8; 674 u32 page_len:8;
634 u32 payload_len:16; 675 u32 payload_len:16;
@@ -650,7 +691,7 @@ enum {
650 FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01 691 FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
651}; 692};
652 693
653struct fc_scr_s{ 694struct fc_scr_s {
654 u32 command:8; 695 u32 command:8;
655 u32 res:24; 696 u32 res:24;
656 u32 vu_reg_func:8; /* Vendor Unique Registrations */ 697 u32 vu_reg_func:8; /* Vendor Unique Registrations */
@@ -674,7 +715,7 @@ enum {
674 * LS_RJT els reply payload 715 * LS_RJT els reply payload
675 */ 716 */
676struct fc_ls_rjt_s { 717struct fc_ls_rjt_s {
677 struct fc_els_cmd_s els_cmd; /* ELS command code */ 718 struct fc_els_cmd_s els_cmd; /* ELS command code */
678 u32 res1:8; 719 u32 res1:8;
679 u32 reason_code:8; /* Reason code for reject */ 720 u32 reason_code:8; /* Reason code for reject */
680 u32 reason_code_expl:8; /* Reason code explanation */ 721 u32 reason_code_expl:8; /* Reason code explanation */
@@ -722,8 +763,8 @@ enum {
722/* 763/*
723 * RRQ els command payload 764 * RRQ els command payload
724 */ 765 */
725struct fc_rrq_s{ 766struct fc_rrq_s {
726 struct fc_els_cmd_s els_cmd; /* ELS command code */ 767 struct fc_els_cmd_s els_cmd; /* ELS command code */
727 u32 res1:8; 768 u32 res1:8;
728 u32 s_id:24; /* exchange originator S_ID */ 769 u32 s_id:24; /* exchange originator S_ID */
729 770
@@ -736,7 +777,7 @@ struct fc_rrq_s{
736/* 777/*
737 * ABTS BA_ACC reply payload 778 * ABTS BA_ACC reply payload
738 */ 779 */
739struct fc_ba_acc_s{ 780struct fc_ba_acc_s {
740 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ 781 u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */
741 u32 seq_id:8; /* invalid for Abort Exchange */ 782 u32 seq_id:8; /* invalid for Abort Exchange */
742 u32 res2:16; 783 u32 res2:16;
@@ -749,7 +790,7 @@ struct fc_ba_acc_s{
749/* 790/*
750 * ABTS BA_RJT reject payload 791 * ABTS BA_RJT reject payload
751 */ 792 */
752struct fc_ba_rjt_s{ 793struct fc_ba_rjt_s {
753 u32 res1:8; /* Reserved */ 794 u32 res1:8; /* Reserved */
754 u32 reason_code:8; /* reason code for reject */ 795 u32 reason_code:8; /* reason code for reject */
755 u32 reason_expl:8; /* reason code explanation */ 796 u32 reason_expl:8; /* reason code explanation */
@@ -759,9 +800,9 @@ struct fc_ba_rjt_s{
759/* 800/*
760 * TPRLO logout parameter page 801 * TPRLO logout parameter page
761 */ 802 */
762struct fc_tprlo_params_page_s{ 803struct fc_tprlo_params_page_s {
763 u32 type:8; 804u32 type:8;
764 u32 type_ext:8; 805u32 type_ext:8;
765 806
766#ifdef __BIGENDIAN 807#ifdef __BIGENDIAN
767 u32 opa_valid:1; 808 u32 opa_valid:1;
@@ -787,7 +828,7 @@ struct fc_tprlo_params_page_s{
787/* 828/*
788 * TPRLO ELS command payload 829 * TPRLO ELS command payload
789 */ 830 */
790struct fc_tprlo_s{ 831struct fc_tprlo_s {
791 u32 command:8; 832 u32 command:8;
792 u32 page_len:8; 833 u32 page_len:8;
793 u32 payload_len:16; 834 u32 payload_len:16;
@@ -795,7 +836,7 @@ struct fc_tprlo_s{
795 struct fc_tprlo_params_page_s tprlo_params[1]; 836 struct fc_tprlo_params_page_s tprlo_params[1];
796}; 837};
797 838
798enum fc_tprlo_type{ 839enum fc_tprlo_type {
799 FC_GLOBAL_LOGO = 1, 840 FC_GLOBAL_LOGO = 1,
800 FC_TPR_LOGO 841 FC_TPR_LOGO
801}; 842};
@@ -803,7 +844,7 @@ enum fc_tprlo_type{
803/* 844/*
804 * TPRLO els command ACC payload 845 * TPRLO els command ACC payload
805 */ 846 */
806struct fc_tprlo_acc_s{ 847struct fc_tprlo_acc_s {
807 u32 command:8; 848 u32 command:8;
808 u32 page_len:8; 849 u32 page_len:8;
809 u32 payload_len:16; 850 u32 payload_len:16;
@@ -815,21 +856,21 @@ struct fc_tprlo_acc_s{
815 */ 856 */
816#define FC_RSCN_PGLEN 0x4 857#define FC_RSCN_PGLEN 0x4
817 858
818enum fc_rscn_format{ 859enum fc_rscn_format {
819 FC_RSCN_FORMAT_PORTID = 0x0, 860 FC_RSCN_FORMAT_PORTID = 0x0,
820 FC_RSCN_FORMAT_AREA = 0x1, 861 FC_RSCN_FORMAT_AREA = 0x1,
821 FC_RSCN_FORMAT_DOMAIN = 0x2, 862 FC_RSCN_FORMAT_DOMAIN = 0x2,
822 FC_RSCN_FORMAT_FABRIC = 0x3, 863 FC_RSCN_FORMAT_FABRIC = 0x3,
823}; 864};
824 865
825struct fc_rscn_event_s{ 866struct fc_rscn_event_s {
826 u32 format:2; 867 u32 format:2;
827 u32 qualifier:4; 868 u32 qualifier:4;
828 u32 resvd:2; 869 u32 resvd:2;
829 u32 portid:24; 870 u32 portid:24;
830}; 871};
831 872
832struct fc_rscn_pl_s{ 873struct fc_rscn_pl_s {
833 u8 command; 874 u8 command;
834 u8 pagelen; 875 u8 pagelen;
835 u16 payldlen; 876 u16 payldlen;
@@ -840,18 +881,18 @@ struct fc_rscn_pl_s{
840 * ECHO els command req payload 881 * ECHO els command req payload
841 */ 882 */
842struct fc_echo_s { 883struct fc_echo_s {
843 struct fc_els_cmd_s els_cmd; 884 struct fc_els_cmd_s els_cmd;
844}; 885};
845 886
846/* 887/*
847 * RNID els command 888 * RNID els command
848 */ 889 */
849 890
850#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 891#define RNID_NODEID_DATA_FORMAT_COMMON 0x00
851#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 892#define RNID_NODEID_DATA_FORMAT_FCP3 0x08
852#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF 893#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF
853 894
854#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 895#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001
855#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 896#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002
856#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 897#define RNID_ASSOCIATED_TYPE_HUB 0x00000003
857#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 898#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004
@@ -868,8 +909,8 @@ struct fc_echo_s {
868/* 909/*
869 * RNID els command payload 910 * RNID els command payload
870 */ 911 */
871struct fc_rnid_cmd_s{ 912struct fc_rnid_cmd_s {
872 struct fc_els_cmd_s els_cmd; 913 struct fc_els_cmd_s els_cmd;
873 u32 node_id_data_format:8; 914 u32 node_id_data_format:8;
874 u32 reserved:24; 915 u32 reserved:24;
875}; 916};
@@ -878,12 +919,12 @@ struct fc_rnid_cmd_s{
878 * RNID els response payload 919 * RNID els response payload
879 */ 920 */
880 921
881struct fc_rnid_common_id_data_s{ 922struct fc_rnid_common_id_data_s {
882 wwn_t port_name; 923 wwn_t port_name;
883 wwn_t node_name; 924 wwn_t node_name;
884}; 925};
885 926
886struct fc_rnid_general_topology_data_s{ 927struct fc_rnid_general_topology_data_s {
887 u32 vendor_unique[4]; 928 u32 vendor_unique[4];
888 u32 asso_type; 929 u32 asso_type;
889 u32 phy_port_num; 930 u32 phy_port_num;
@@ -896,8 +937,8 @@ struct fc_rnid_general_topology_data_s{
896 u32 vendor_specific:16; 937 u32 vendor_specific:16;
897}; 938};
898 939
899struct fc_rnid_acc_s{ 940struct fc_rnid_acc_s {
900 struct fc_els_cmd_s els_cmd; 941 struct fc_els_cmd_s els_cmd;
901 u32 node_id_data_format:8; 942 u32 node_id_data_format:8;
902 u32 common_id_data_length:8; 943 u32 common_id_data_length:8;
903 u32 reserved:8; 944 u32 reserved:8;
@@ -920,7 +961,7 @@ struct fc_rnid_acc_s{
920#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 961#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003
921#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF 962#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF
922 963
923enum fc_rpsc_speed_cap{ 964enum fc_rpsc_speed_cap {
924 RPSC_SPEED_CAP_1G = 0x8000, 965 RPSC_SPEED_CAP_1G = 0x8000,
925 RPSC_SPEED_CAP_2G = 0x4000, 966 RPSC_SPEED_CAP_2G = 0x4000,
926 RPSC_SPEED_CAP_4G = 0x2000, 967 RPSC_SPEED_CAP_4G = 0x2000,
@@ -931,7 +972,7 @@ enum fc_rpsc_speed_cap{
931 RPSC_SPEED_CAP_UNKNOWN = 0x0001, 972 RPSC_SPEED_CAP_UNKNOWN = 0x0001,
932}; 973};
933 974
934enum fc_rpsc_op_speed_s{ 975enum fc_rpsc_op_speed {
935 RPSC_OP_SPEED_1G = 0x8000, 976 RPSC_OP_SPEED_1G = 0x8000,
936 RPSC_OP_SPEED_2G = 0x4000, 977 RPSC_OP_SPEED_2G = 0x4000,
937 RPSC_OP_SPEED_4G = 0x2000, 978 RPSC_OP_SPEED_4G = 0x2000,
@@ -942,24 +983,24 @@ enum fc_rpsc_op_speed_s{
942 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ 983 RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */
943}; 984};
944 985
945struct fc_rpsc_speed_info_s{ 986struct fc_rpsc_speed_info_s {
946 u16 port_speed_cap; /*! see fc_rpsc_speed_cap_t */ 987 u16 port_speed_cap; /*! see enum fc_rpsc_speed_cap */
947 u16 port_op_speed; /*! see fc_rpsc_op_speed_t */ 988 u16 port_op_speed; /*! see enum fc_rpsc_op_speed */
948}; 989};
949 990
950enum link_e2e_beacon_subcmd{ 991enum link_e2e_beacon_subcmd {
951 LINK_E2E_BEACON_ON = 1, 992 LINK_E2E_BEACON_ON = 1,
952 LINK_E2E_BEACON_OFF = 2 993 LINK_E2E_BEACON_OFF = 2
953}; 994};
954 995
955enum beacon_type{ 996enum beacon_type {
956 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */ 997 BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */
957 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */ 998 BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */
958 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */ 999 BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */
959}; 1000};
960 1001
961struct link_e2e_beacon_param_s { 1002struct link_e2e_beacon_param_s {
962 u8 beacon_type; /* Beacon Type. See beacon_type_t */ 1003 u8 beacon_type; /* Beacon Type. See enum beacon_type */
963 u8 beacon_frequency; 1004 u8 beacon_frequency;
964 /* Beacon frequency. Number of blinks 1005 /* Beacon frequency. Number of blinks
965 * per 10 seconds 1006 * per 10 seconds
@@ -978,12 +1019,13 @@ struct link_e2e_beacon_param_s {
978}; 1019};
979 1020
980/* 1021/*
981 * Link E2E beacon request/good response format. For LS_RJTs use fc_ls_rjt_t 1022 * Link E2E beacon request/good response format.
1023 * For LS_RJTs use struct fc_ls_rjt_s
982 */ 1024 */
983struct link_e2e_beacon_req_s{ 1025struct link_e2e_beacon_req_s {
984 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests * 1026 u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests *
985 *or FC_ELS_ACC in good replies */ 1027 *or FC_ELS_ACC in good replies */
986 u32 ls_sub_cmd; /*! See link_e2e_beacon_subcmd_t */ 1028 u32 ls_sub_cmd; /*! See enum link_e2e_beacon_subcmd */
987 struct link_e2e_beacon_param_s beacon_parm; 1029 struct link_e2e_beacon_param_s beacon_parm;
988}; 1030};
989 1031
@@ -992,14 +1034,14 @@ struct link_e2e_beacon_req_s{
992 * all the ports within that domain (TODO - I don't think FOS implements 1034 * all the ports within that domain (TODO - I don't think FOS implements
993 * this...). 1035 * this...).
994 */ 1036 */
995struct fc_rpsc_cmd_s{ 1037struct fc_rpsc_cmd_s {
996 struct fc_els_cmd_s els_cmd; 1038 struct fc_els_cmd_s els_cmd;
997}; 1039};
998 1040
999/* 1041/*
1000 * RPSC Acc 1042 * RPSC Acc
1001 */ 1043 */
1002struct fc_rpsc_acc_s{ 1044struct fc_rpsc_acc_s {
1003 u32 command:8; 1045 u32 command:8;
1004 u32 rsvd:8; 1046 u32 rsvd:8;
1005 u32 num_entries:16; 1047 u32 num_entries:16;
@@ -1012,51 +1054,50 @@ struct fc_rpsc_acc_s{
1012 */ 1054 */
1013#define FC_BRCD_TOKEN 0x42524344 1055#define FC_BRCD_TOKEN 0x42524344
1014 1056
1015struct fc_rpsc2_cmd_s{ 1057struct fc_rpsc2_cmd_s {
1016 struct fc_els_cmd_s els_cmd; 1058 struct fc_els_cmd_s els_cmd;
1017 u32 token; 1059 u32 token;
1018 u16 resvd; 1060 u16 resvd;
1019 u16 num_pids; /* Number of pids in the request */ 1061 u16 num_pids; /* Number of pids in the request */
1020 struct { 1062 struct {
1021 u32 rsvd1:8; 1063 u32 rsvd1:8;
1022 u32 pid:24; /* port identifier */ 1064 u32 pid:24; /* port identifier */
1023 } pid_list[1]; 1065 } pid_list[1];
1024}; 1066};
1025 1067
1026enum fc_rpsc2_port_type{ 1068enum fc_rpsc2_port_type {
1027 RPSC2_PORT_TYPE_UNKNOWN = 0, 1069 RPSC2_PORT_TYPE_UNKNOWN = 0,
1028 RPSC2_PORT_TYPE_NPORT = 1, 1070 RPSC2_PORT_TYPE_NPORT = 1,
1029 RPSC2_PORT_TYPE_NLPORT = 2, 1071 RPSC2_PORT_TYPE_NLPORT = 2,
1030 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, 1072 RPSC2_PORT_TYPE_NPIV_PORT = 0x5f,
1031 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, 1073 RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f,
1032}; 1074};
1033
1034/* 1075/*
1035 * RPSC2 portInfo entry structure 1076 * RPSC2 portInfo entry structure
1036 */ 1077 */
1037struct fc_rpsc2_port_info_s{ 1078struct fc_rpsc2_port_info_s {
1038 u32 pid; /* PID */ 1079 u32 pid; /* PID */
1039 u16 resvd1; 1080 u16 resvd1;
1040 u16 index; /* port number / index */ 1081 u16 index; /* port number / index */
1041 u8 resvd2; 1082 u8 resvd2;
1042 u8 type; /* port type N/NL/... */ 1083 u8 type; /* port type N/NL/... */
1043 u16 speed; /* port Operating Speed */ 1084 u16 speed; /* port Operating Speed */
1044}; 1085};
1045 1086
1046/* 1087/*
1047 * RPSC2 Accept payload 1088 * RPSC2 Accept payload
1048 */ 1089 */
1049struct fc_rpsc2_acc_s{ 1090struct fc_rpsc2_acc_s {
1050 u8 els_cmd; 1091 u8 els_cmd;
1051 u8 resvd; 1092 u8 resvd;
1052 u16 num_pids; /* Number of pids in the request */ 1093 u16 num_pids; /* Number of pids in the request */
1053 struct fc_rpsc2_port_info_s port_info[1]; /* port information */ 1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */
1054}; 1095};
1055 1096
1056/** 1097/**
1057 * bit fields so that multiple classes can be specified 1098 * bit fields so that multiple classes can be specified
1058 */ 1099 */
1059enum fc_cos{ 1100enum fc_cos {
1060 FC_CLASS_2 = 0x04, 1101 FC_CLASS_2 = 0x04,
1061 FC_CLASS_3 = 0x08, 1102 FC_CLASS_3 = 0x08,
1062 FC_CLASS_2_3 = 0x0C, 1103 FC_CLASS_2_3 = 0x0C,
@@ -1065,11 +1106,11 @@ enum fc_cos{
1065/* 1106/*
1066 * symbolic name 1107 * symbolic name
1067 */ 1108 */
1068struct fc_symname_s{ 1109struct fc_symname_s {
1069 u8 symname[FC_SYMNAME_MAX]; 1110 u8 symname[FC_SYMNAME_MAX];
1070}; 1111};
1071 1112
1072struct fc_alpabm_s{ 1113struct fc_alpabm_s {
1073 u8 alpa_bm[FC_ALPA_MAX / 8]; 1114 u8 alpa_bm[FC_ALPA_MAX / 8];
1074}; 1115};
1075 1116
@@ -1094,7 +1135,7 @@ struct fc_alpabm_s{
1094 * Virtual Fabric Tagging header format 1135 * Virtual Fabric Tagging header format
1095 * @caution This is defined only in BIG ENDIAN format. 1136 * @caution This is defined only in BIG ENDIAN format.
1096 */ 1137 */
1097struct fc_vft_s{ 1138struct fc_vft_s {
1098 u32 r_ctl:8; 1139 u32 r_ctl:8;
1099 u32 ver:2; 1140 u32 ver:2;
1100 u32 type:4; 1141 u32 type:4;
@@ -1106,6 +1147,770 @@ struct fc_vft_s{
1106 u32 res_c:24; 1147 u32 res_c:24;
1107}; 1148};
1108 1149
1109#pragma pack() 1150/*
1151 * FCP
1152 */
1153enum {
1154 FCP_RJT = 0x01000000, /* SRR reject */
1155 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
1156 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
1157};
1158
1159/*
1160 * SRR FC-4 LS payload
1161 */
1162struct fc_srr_s {
1163 u32 ls_cmd;
1164 u32 ox_id:16; /* ox-id */
1165 u32 rx_id:16; /* rx-id */
1166 u32 ro; /* relative offset */
1167 u32 r_ctl:8; /* R_CTL for I.U. */
1168 u32 res:24;
1169};
1170
1171
1172/*
1173 * FCP_CMND definitions
1174 */
1175#define FCP_CMND_CDB_LEN 16
1176#define FCP_CMND_LUN_LEN 8
1177
1178struct fcp_cmnd_s {
1179 lun_t lun; /* 64-bit LU number */
1180 u8 crn; /* command reference number */
1181#ifdef __BIGENDIAN
1182 u8 resvd:1,
1183 priority:4, /* FCP-3: SAM-3 priority */
1184 taskattr:3; /* scsi task attribute */
1185#else
1186 u8 taskattr:3, /* scsi task attribute */
1187 priority:4, /* FCP-3: SAM-3 priority */
1188 resvd:1;
1189#endif
1190 u8 tm_flags; /* task management flags */
1191#ifdef __BIGENDIAN
1192 u8 addl_cdb_len:6, /* additional CDB length words */
1193 iodir:2; /* read/write FCP_DATA IUs */
1194#else
1195 u8 iodir:2, /* read/write FCP_DATA IUs */
1196 addl_cdb_len:6; /* additional CDB length */
1197#endif
1198 scsi_cdb_t cdb;
1199
1200 /*
1201 * !!! additional cdb bytes follows here!!!
1202 */
1203 u32 fcp_dl; /* bytes to be transferred */
1204};
1205
1206#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
1207#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
1110 1208
1209/*
1210 * struct fcp_cmnd_s .iodir field values
1211 */
1212enum fcp_iodir {
1213 FCP_IODIR_NONE = 0,
1214 FCP_IODIR_WRITE = 1,
1215 FCP_IODIR_READ = 2,
1216 FCP_IODIR_RW = 3,
1217};
1218
1219/*
1220 * Task attribute field
1221 */
1222enum {
1223 FCP_TASK_ATTR_SIMPLE = 0,
1224 FCP_TASK_ATTR_HOQ = 1,
1225 FCP_TASK_ATTR_ORDERED = 2,
1226 FCP_TASK_ATTR_ACA = 4,
1227 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
1228};
1229
1230/*
1231 * Task management flags field - only one bit shall be set
1232 */
1233enum fcp_tm_cmnd {
1234 FCP_TM_ABORT_TASK_SET = BIT(1),
1235 FCP_TM_CLEAR_TASK_SET = BIT(2),
1236 FCP_TM_LUN_RESET = BIT(4),
1237 FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */
1238 FCP_TM_CLEAR_ACA = BIT(6),
1239};
1240
1241/*
1242 * FCP_XFER_RDY IU defines
1243 */
1244struct fcp_xfer_rdy_s {
1245 u32 data_ro;
1246 u32 burst_len;
1247 u32 reserved;
1248};
1249
1250/*
1251 * FCP_RSP residue flags
1252 */
1253enum fcp_residue {
1254 FCP_NO_RESIDUE = 0, /* no residue */
1255 FCP_RESID_OVER = 1, /* more data left that was not sent */
1256 FCP_RESID_UNDER = 2, /* less data than requested */
1257};
1258
1259enum {
1260 FCP_RSPINFO_GOOD = 0,
1261 FCP_RSPINFO_DATALEN_MISMATCH = 1,
1262 FCP_RSPINFO_CMND_INVALID = 2,
1263 FCP_RSPINFO_ROLEN_MISMATCH = 3,
1264 FCP_RSPINFO_TM_NOT_SUPP = 4,
1265 FCP_RSPINFO_TM_FAILED = 5,
1266};
1267
1268struct fcp_rspinfo_s {
1269 u32 res0:24;
1270 u32 rsp_code:8; /* response code (as above) */
1271 u32 res1;
1272};
1273
1274struct fcp_resp_s {
1275 u32 reserved[2]; /* 2 words reserved */
1276 u16 reserved2;
1277#ifdef __BIGENDIAN
1278 u8 reserved3:3;
1279 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1280 u8 resid_flags:2; /* underflow/overflow */
1281 u8 sns_len_valid:1;/* sense len is valid */
1282 u8 rsp_len_valid:1;/* response len is valid */
1283#else
1284 u8 rsp_len_valid:1;/* response len is valid */
1285 u8 sns_len_valid:1;/* sense len is valid */
1286 u8 resid_flags:2; /* underflow/overflow */
1287 u8 fcp_conf_req:1; /* FCP_CONF is requested */
1288 u8 reserved3:3;
1111#endif 1289#endif
1290 u8 scsi_status; /* one byte SCSI status */
1291 u32 residue; /* residual data bytes */
1292 u32 sns_len; /* length od sense info */
1293 u32 rsp_len; /* length of response info */
1294};
1295
1296#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
1297 (__fcprsp)->sns_len : 0)
1298#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
1299 (__fcprsp)->rsp_len : 0)
1300#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
1301#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
1302 fcp_rsplen(__fcprsp))
1303
1304struct fcp_cmnd_fr_s {
1305 struct fchs_s fchs;
1306 struct fcp_cmnd_s fcp;
1307};
1308
1309/*
1310 * CT
1311 */
1312struct ct_hdr_s {
1313 u32 rev_id:8; /* Revision of the CT */
1314 u32 in_id:24; /* Initiator Id */
1315 u32 gs_type:8; /* Generic service Type */
1316 u32 gs_sub_type:8; /* Generic service sub type */
1317 u32 options:8; /* options */
1318 u32 rsvrd:8; /* reserved */
1319 u32 cmd_rsp_code:16;/* ct command/response code */
1320 u32 max_res_size:16;/* maximum/residual size */
1321 u32 frag_id:8; /* fragment ID */
1322 u32 reason_code:8; /* reason code */
1323 u32 exp_code:8; /* explanation code */
1324 u32 vendor_unq:8; /* vendor unique */
1325};
1326
1327/*
1328 * defines for the Revision
1329 */
1330enum {
1331 CT_GS3_REVISION = 0x01,
1332};
1333
1334/*
1335 * defines for gs_type
1336 */
1337enum {
1338 CT_GSTYPE_KEYSERVICE = 0xF7,
1339 CT_GSTYPE_ALIASSERVICE = 0xF8,
1340 CT_GSTYPE_MGMTSERVICE = 0xFA,
1341 CT_GSTYPE_TIMESERVICE = 0xFB,
1342 CT_GSTYPE_DIRSERVICE = 0xFC,
1343};
1344
1345/*
1346 * defines for gs_sub_type for gs type directory service
1347 */
1348enum {
1349 CT_GSSUBTYPE_NAMESERVER = 0x02,
1350};
1351
1352/*
1353 * defines for gs_sub_type for gs type management service
1354 */
1355enum {
1356 CT_GSSUBTYPE_CFGSERVER = 0x01,
1357 CT_GSSUBTYPE_UNZONED_NS = 0x02,
1358 CT_GSSUBTYPE_ZONESERVER = 0x03,
1359 CT_GSSUBTYPE_LOCKSERVER = 0x04,
1360 CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
1361};
1362
1363/*
1364 * defines for CT response code field
1365 */
1366enum {
1367 CT_RSP_REJECT = 0x8001,
1368 CT_RSP_ACCEPT = 0x8002,
1369};
1370
1371/*
1372 * defintions for CT reason code
1373 */
1374enum {
1375 CT_RSN_INV_CMD = 0x01,
1376 CT_RSN_INV_VER = 0x02,
1377 CT_RSN_LOGIC_ERR = 0x03,
1378 CT_RSN_INV_SIZE = 0x04,
1379 CT_RSN_LOGICAL_BUSY = 0x05,
1380 CT_RSN_PROTO_ERR = 0x07,
1381 CT_RSN_UNABLE_TO_PERF = 0x09,
1382 CT_RSN_NOT_SUPP = 0x0B,
1383 CT_RSN_SERVER_NOT_AVBL = 0x0D,
1384 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
1385 CT_RSN_VENDOR_SPECIFIC = 0xFF,
1386
1387};
1388
1389/*
1390 * definitions for explanations code for Name server
1391 */
1392enum {
1393 CT_NS_EXP_NOADDITIONAL = 0x00,
1394 CT_NS_EXP_ID_NOT_REG = 0x01,
1395 CT_NS_EXP_PN_NOT_REG = 0x02,
1396 CT_NS_EXP_NN_NOT_REG = 0x03,
1397 CT_NS_EXP_CS_NOT_REG = 0x04,
1398 CT_NS_EXP_IPN_NOT_REG = 0x05,
1399 CT_NS_EXP_IPA_NOT_REG = 0x06,
1400 CT_NS_EXP_FT_NOT_REG = 0x07,
1401 CT_NS_EXP_SPN_NOT_REG = 0x08,
1402 CT_NS_EXP_SNN_NOT_REG = 0x09,
1403 CT_NS_EXP_PT_NOT_REG = 0x0A,
1404 CT_NS_EXP_IPP_NOT_REG = 0x0B,
1405 CT_NS_EXP_FPN_NOT_REG = 0x0C,
1406 CT_NS_EXP_HA_NOT_REG = 0x0D,
1407 CT_NS_EXP_FD_NOT_REG = 0x0E,
1408 CT_NS_EXP_FF_NOT_REG = 0x0F,
1409 CT_NS_EXP_ACCESSDENIED = 0x10,
1410 CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
1411 CT_NS_EXP_DATABASEEMPTY = 0x12,
1412 CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
1413 CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
1414 CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
1415 CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
1416};
1417
1418/*
1419 * defintions for the explanation code for all servers
1420 */
1421enum {
1422 CT_EXP_AUTH_EXCEPTION = 0xF1,
1423 CT_EXP_DB_FULL = 0xF2,
1424 CT_EXP_DB_EMPTY = 0xF3,
1425 CT_EXP_PROCESSING_REQ = 0xF4,
1426 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
1427 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
1428};
1429
1430/*
1431 * Command codes for Name server
1432 */
1433enum {
1434 GS_GID_PN = 0x0121, /* Get Id on port name */
1435 GS_GPN_ID = 0x0112, /* Get port name on ID */
1436 GS_GNN_ID = 0x0113, /* Get node name on ID */
1437 GS_GID_FT = 0x0171, /* Get Id on FC4 type */
1438 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
1439 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
1440 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
1441 GS_RPN_ID = 0x0212, /* Register port name */
1442 GS_RNN_ID = 0x0213, /* Register node name */
1443 GS_RCS_ID = 0x0214, /* Register class of service */
1444 GS_RPT_ID = 0x021A, /* Register port type */
1445 GS_GA_NXT = 0x0100, /* Get all next */
1446 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
1447};
1448
1449struct fcgs_id_req_s{
1450 u32 rsvd:8;
1451 u32 dap:24; /* port identifier */
1452};
1453#define fcgs_gpnid_req_t struct fcgs_id_req_s
1454#define fcgs_gnnid_req_t struct fcgs_id_req_s
1455#define fcgs_gspnid_req_t struct fcgs_id_req_s
1456
1457struct fcgs_gidpn_req_s {
1458 wwn_t port_name; /* port wwn */
1459};
1460
1461struct fcgs_gidpn_resp_s {
1462 u32 rsvd:8;
1463 u32 dap:24; /* port identifier */
1464};
1465
1466/**
1467 * RFT_ID
1468 */
1469struct fcgs_rftid_req_s {
1470 u32 rsvd:8;
1471 u32 dap:24; /* port identifier */
1472 u32 fc4_type[8]; /* fc4 types */
1473};
1474
1475/**
1476 * RFF_ID : Register FC4 features.
1477 */
1478
1479#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
1480#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
1481
1482struct fcgs_rffid_req_s {
1483 u32 rsvd:8;
1484 u32 dap:24; /* port identifier */
1485 u32 rsvd1:16;
1486 u32 fc4ftr_bits:8; /* fc4 feature bits */
1487 u32 fc4_type:8; /* corresponding FC4 Type */
1488};
1489
1490/**
1491 * GID_FT Request
1492 */
1493struct fcgs_gidft_req_s {
1494 u8 reserved;
1495 u8 domain_id; /* domain, 0 - all fabric */
1496 u8 area_id; /* area, 0 - whole domain */
1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
1498}; /* GID_FT Request */
1499
1500/**
1501 * GID_FT Response
1502 */
1503struct fcgs_gidft_resp_s {
1504 u8 last:1; /* last port identifier flag */
1505 u8 reserved:7;
1506 u32 pid:24; /* port identifier */
1507}; /* GID_FT Response */
1508
1509/**
1510 * RSPN_ID
1511 */
1512struct fcgs_rspnid_req_s {
1513 u32 rsvd:8;
1514 u32 dap:24; /* port identifier */
1515 u8 spn_len; /* symbolic port name length */
1516 u8 spn[256]; /* symbolic port name */
1517};
1518
1519/**
1520 * RPN_ID
1521 */
1522struct fcgs_rpnid_req_s {
1523 u32 rsvd:8;
1524 u32 port_id:24;
1525 wwn_t port_name;
1526};
1527
1528/**
1529 * RNN_ID
1530 */
1531struct fcgs_rnnid_req_s {
1532 u32 rsvd:8;
1533 u32 port_id:24;
1534 wwn_t node_name;
1535};
1536
1537/**
1538 * RCS_ID
1539 */
1540struct fcgs_rcsid_req_s {
1541 u32 rsvd:8;
1542 u32 port_id:24;
1543 u32 cos;
1544};
1545
1546/**
1547 * RPT_ID
1548 */
1549struct fcgs_rptid_req_s {
1550 u32 rsvd:8;
1551 u32 port_id:24;
1552 u32 port_type:8;
1553 u32 rsvd1:24;
1554};
1555
1556/**
1557 * GA_NXT Request
1558 */
1559struct fcgs_ganxt_req_s {
1560 u32 rsvd:8;
1561 u32 port_id:24;
1562};
1563
1564/**
1565 * GA_NXT Response
1566 */
1567struct fcgs_ganxt_rsp_s {
1568 u32 port_type:8; /* Port Type */
1569 u32 port_id:24; /* Port Identifier */
1570 wwn_t port_name; /* Port Name */
1571 u8 spn_len; /* Length of Symbolic Port Name */
1572 char spn[255]; /* Symbolic Port Name */
1573 wwn_t node_name; /* Node Name */
1574 u8 snn_len; /* Length of Symbolic Node Name */
1575 char snn[255]; /* Symbolic Node Name */
1576 u8 ipa[8]; /* Initial Process Associator */
1577 u8 ip[16]; /* IP Address */
1578 u32 cos; /* Class of Service */
1579 u32 fc4types[8]; /* FC-4 TYPEs */
1580 wwn_t fabric_port_name;
1581 /* Fabric Port Name */
1582 u32 rsvd:8; /* Reserved */
1583 u32 hard_addr:24; /* Hard Address */
1584};
1585
1586/*
1587 * Fabric Config Server
1588 */
1589
1590/*
1591 * Command codes for Fabric Configuration Server
1592 */
1593enum {
1594 GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
1595 GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
1596 GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
1597 GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
1598};
1599
1600/*
1601 * Source or Destination Port Tags.
1602 */
1603enum {
1604 GS_FTRACE_TAG_NPORT_ID = 1,
1605 GS_FTRACE_TAG_NPORT_NAME = 2,
1606};
1607
1608/*
1609* Port Value : Could be a Port id or wwn
1610 */
1611union fcgs_port_val_u {
1612 u32 nport_id;
1613 wwn_t nport_wwn;
1614};
1615
1616#define GS_FTRACE_MAX_HOP_COUNT 20
1617#define GS_FTRACE_REVISION 1
1618
1619/*
1620 * Ftrace Related Structures.
1621 */
1622
1623/*
1624 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
1625 */
1626enum {
1627 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
1628 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
1629 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
1630 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
1631 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
1632 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
1633 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
1634 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
1635 GS_FTRACE_STR_NO_ADDL_EXPLN,
1636 GS_FTRACE_STR_FABRIC_BUSY,
1637 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
1638 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
1639 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
1640};
1641
1642/*
1643 * Ftrace Request
1644 */
1645struct fcgs_ftrace_req_s {
1646 u32 revision;
1647 u16 src_port_tag; /* Source Port tag */
1648 u16 src_port_len; /* Source Port len */
1649 union fcgs_port_val_u src_port_val; /* Source Port value */
1650 u16 dst_port_tag; /* Destination Port tag */
1651 u16 dst_port_len; /* Destination Port len */
1652 union fcgs_port_val_u dst_port_val; /* Destination Port value */
1653 u32 token;
1654 u8 vendor_id[8]; /* T10 Vendor Identifier */
1655 u8 vendor_info[8]; /* Vendor specific Info */
1656 u32 max_hop_cnt; /* Max Hop Count */
1657};
1658
1659/*
1660 * Path info structure
1661 */
1662struct fcgs_ftrace_path_info_s {
1663 wwn_t switch_name; /* Switch WWN */
1664 u32 domain_id;
1665 wwn_t ingress_port_name; /* Ingress ports wwn */
1666 u32 ingress_phys_port_num; /* Ingress ports physical port
1667 * number
1668 */
1669 wwn_t egress_port_name; /* Ingress ports wwn */
1670 u32 egress_phys_port_num; /* Ingress ports physical port
1671 * number
1672 */
1673};
1674
1675/*
1676 * Ftrace Acc Response
1677 */
1678struct fcgs_ftrace_resp_s {
1679 u32 revision;
1680 u32 token;
1681 u8 vendor_id[8]; /* T10 Vendor Identifier */
1682 u8 vendor_info[8]; /* Vendor specific Info */
1683 u32 str_rej_reason_code; /* STR Reject Reason Code */
1684 u32 num_path_info_entries; /* No. of path info entries */
1685 /*
1686 * path info entry/entries.
1687 */
1688 struct fcgs_ftrace_path_info_s path_info[1];
1689
1690};
1691
1692/*
1693* Fabric Config Server : FCPing
1694 */
1695
1696/*
1697 * FC Ping Request
1698 */
1699struct fcgs_fcping_req_s {
1700 u32 revision;
1701 u16 port_tag;
1702 u16 port_len; /* Port len */
1703 union fcgs_port_val_u port_val; /* Port value */
1704 u32 token;
1705};
1706
1707/*
1708 * FC Ping Response
1709 */
1710struct fcgs_fcping_resp_s {
1711 u32 token;
1712};
1713
1714/*
1715 * Command codes for zone server query.
1716 */
1717enum {
1718 ZS_GZME = 0x0124, /* Get zone member extended */
1719};
1720
1721/*
1722 * ZS GZME request
1723 */
1724#define ZS_GZME_ZNAMELEN 32
1725struct zs_gzme_req_s {
1726 u8 znamelen;
1727 u8 rsvd[3];
1728 u8 zname[ZS_GZME_ZNAMELEN];
1729};
1730
1731enum zs_mbr_type {
1732 ZS_MBR_TYPE_PWWN = 1,
1733 ZS_MBR_TYPE_DOMPORT = 2,
1734 ZS_MBR_TYPE_PORTID = 3,
1735 ZS_MBR_TYPE_NWWN = 4,
1736};
1737
1738struct zs_mbr_wwn_s {
1739 u8 mbr_type;
1740 u8 rsvd[3];
1741 wwn_t wwn;
1742};
1743
1744struct zs_query_resp_s {
1745 u32 nmbrs; /* number of zone members */
1746 struct zs_mbr_wwn_s mbr[1];
1747};
1748
1749/*
1750 * GMAL Command ( Get ( interconnect Element) Management Address List)
1751 * To retrieve the IP Address of a Switch.
1752 */
1753
1754#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
1755#define CT_GMAL_RESP_PREFIX_HTTP "http://"
1756
1757/* GMAL/GFN request */
1758struct fcgs_req_s {
1759 wwn_t wwn; /* PWWN/NWWN */
1760};
1761
1762#define fcgs_gmal_req_t struct fcgs_req_s
1763#define fcgs_gfn_req_t struct fcgs_req_s
1764
1765/* Accept Response to GMAL */
1766struct fcgs_gmal_resp_s {
1767 u32 ms_len; /* Num of entries */
1768 u8 ms_ma[256];
1769};
1770
1771struct fcgs_gmal_entry_s {
1772 u8 len;
1773 u8 prefix[7]; /* like "http://" */
1774 u8 ip_addr[248];
1775};
1776
1777/*
1778 * FDMI
1779 */
1780/*
1781 * FDMI Command Codes
1782 */
1783#define FDMI_GRHL 0x0100
1784#define FDMI_GHAT 0x0101
1785#define FDMI_GRPL 0x0102
1786#define FDMI_GPAT 0x0110
1787#define FDMI_RHBA 0x0200
1788#define FDMI_RHAT 0x0201
1789#define FDMI_RPRT 0x0210
1790#define FDMI_RPA 0x0211
1791#define FDMI_DHBA 0x0300
1792#define FDMI_DPRT 0x0310
1793
1794/*
1795 * FDMI reason codes
1796 */
1797#define FDMI_NO_ADDITIONAL_EXP 0x00
1798#define FDMI_HBA_ALREADY_REG 0x10
1799#define FDMI_HBA_ATTRIB_NOT_REG 0x11
1800#define FDMI_HBA_ATTRIB_MULTIPLE 0x12
1801#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13
1802#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14
1803#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15
1804#define FDMI_PORT_HBA_NOT_IN_LIST 0x16
1805#define FDMI_PORT_ATTRIB_NOT_REG 0x20
1806#define FDMI_PORT_NOT_REG 0x21
1807#define FDMI_PORT_ATTRIB_MULTIPLE 0x22
1808#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23
1809#define FDMI_PORT_ALREADY_REGISTEREED 0x24
1810
1811/*
1812 * FDMI Transmission Speed Mask values
1813 */
1814#define FDMI_TRANS_SPEED_1G 0x00000001
1815#define FDMI_TRANS_SPEED_2G 0x00000002
1816#define FDMI_TRANS_SPEED_10G 0x00000004
1817#define FDMI_TRANS_SPEED_4G 0x00000008
1818#define FDMI_TRANS_SPEED_8G 0x00000010
1819#define FDMI_TRANS_SPEED_16G 0x00000020
1820#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
1821
1822/*
1823 * FDMI HBA attribute types
1824 */
1825enum fdmi_hba_attribute_type {
1826 FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
1827 FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
1828 FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
1829 FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
1830 FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
1831 FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
1832 FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
1833 FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
1834 FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
1835 FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
1836 FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
1837
1838 FDMI_HBA_ATTRIB_MAX_TYPE
1839};
1840
1841/*
1842 * FDMI Port attribute types
1843 */
1844enum fdmi_port_attribute_type {
1845 FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
1846 FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
1847 FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
1848 FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
1849 FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
1850 FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
1851
1852 FDMI_PORT_ATTR_MAX_TYPE
1853};
1854
1855/*
1856 * FDMI attribute
1857 */
1858struct fdmi_attr_s {
1859 u16 type;
1860 u16 len;
1861 u8 value[1];
1862};
1863
1864/*
1865 * HBA Attribute Block
1866 */
1867struct fdmi_hba_attr_s {
1868 u32 attr_count; /* # of attributes */
1869 struct fdmi_attr_s hba_attr; /* n attributes */
1870};
1871
1872/*
1873 * Registered Port List
1874 */
1875struct fdmi_port_list_s {
1876 u32 num_ports; /* number Of Port Entries */
1877 wwn_t port_entry; /* one or more */
1878};
1879
1880/*
1881 * Port Attribute Block
1882 */
1883struct fdmi_port_attr_s {
1884 u32 attr_count; /* # of attributes */
1885 struct fdmi_attr_s port_attr; /* n attributes */
1886};
1887
1888/*
1889 * FDMI Register HBA Attributes
1890 */
1891struct fdmi_rhba_s {
1892 wwn_t hba_id; /* HBA Identifier */
1893 struct fdmi_port_list_s port_list; /* Registered Port List */
1894 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
1895};
1896
1897/*
1898 * FDMI Register Port
1899 */
1900struct fdmi_rprt_s {
1901 wwn_t hba_id; /* HBA Identifier */
1902 wwn_t port_name; /* Port wwn */
1903 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1904};
1905
1906/*
1907 * FDMI Register Port Attributes
1908 */
1909struct fdmi_rpa_s {
1910 wwn_t port_name; /* port wwn */
1911 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
1912};
1913
1914#pragma pack()
1915
1916#endif /* __BFA_FC_H__ */
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index fee5456451cb..b7d2657ca82a 100644
--- a/drivers/scsi/bfa/fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,25 +18,25 @@
18 * fcbuild.c - FC link service frame building and parsing routines 18 * fcbuild.c - FC link service frame building and parsing routines
19 */ 19 */
20 20
21#include <bfa_os_inc.h> 21#include "bfa_os_inc.h"
22#include "fcbuild.h" 22#include "bfa_fcbuild.h"
23 23
24/* 24/*
25 * static build functions 25 * static build functions
26 */ 26 */
27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 27static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
28 u16 ox_id); 28 u16 ox_id);
29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 29static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
30 u16 ox_id); 30 u16 ox_id);
31static struct fchs_s fc_els_req_tmpl; 31static struct fchs_s fc_els_req_tmpl;
32static struct fchs_s fc_els_rsp_tmpl; 32static struct fchs_s fc_els_rsp_tmpl;
33static struct fchs_s fc_bls_req_tmpl; 33static struct fchs_s fc_bls_req_tmpl;
34static struct fchs_s fc_bls_rsp_tmpl; 34static struct fchs_s fc_bls_rsp_tmpl;
35static struct fc_ba_acc_s ba_acc_tmpl; 35static struct fc_ba_acc_s ba_acc_tmpl;
36static struct fc_logi_s plogi_tmpl; 36static struct fc_logi_s plogi_tmpl;
37static struct fc_prli_s prli_tmpl; 37static struct fc_prli_s prli_tmpl;
38static struct fc_rrq_s rrq_tmpl; 38static struct fc_rrq_s rrq_tmpl;
39static struct fchs_s fcp_fchs_tmpl; 39static struct fchs_s fcp_fchs_tmpl;
40 40
41void 41void
42fcbuild_init(void) 42fcbuild_init(void)
@@ -123,7 +123,7 @@ fcbuild_init(void)
123 rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; 123 rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
124 124
125 /* 125 /*
126 * fcp_fchs_tmpl 126 * fcp_struct fchs_s mpl
127 */ 127 */
128 fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; 128 fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; 129 fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
@@ -135,8 +135,7 @@ fcbuild_init(void)
135} 135}
136 136
137static void 137static void
138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
139 u32 ox_id)
140{ 139{
141 bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); 140 bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
142 141
@@ -158,8 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
158} 157}
159 158
160void 159void
161fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
162 u16 ox_id)
163{ 161{
164 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 162 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
165 fchs->d_id = (d_id); 163 fchs->d_id = (d_id);
@@ -168,8 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
168} 166}
169 167
170static void 168static void
171fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
172 u16 ox_id)
173{ 170{
174 bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 171 bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
175 fchs->d_id = d_id; 172 fchs->d_id = d_id;
@@ -180,8 +177,8 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
180enum fc_parse_status 177enum fc_parse_status
181fc_els_rsp_parse(struct fchs_s *fchs, int len) 178fc_els_rsp_parse(struct fchs_s *fchs, int len)
182{ 179{
183 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 180 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
184 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd; 181 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
185 182
186 len = len; 183 len = len;
187 184
@@ -199,8 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
199} 196}
200 197
201static void 198static void
202fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
203 u16 ox_id)
204{ 200{
205 bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 201 bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
206 fchs->d_id = d_id; 202 fchs->d_id = d_id;
@@ -213,7 +209,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
213 u16 ox_id, wwn_t port_name, wwn_t node_name, 209 u16 ox_id, wwn_t port_name, wwn_t node_name,
214 u16 pdu_size, u8 els_code) 210 u16 pdu_size, u8 els_code)
215{ 211{
216 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
217 213
218 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 214 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
219 215
@@ -233,12 +229,11 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
233 229
234u16 230u16
235fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 231fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
236 u16 ox_id, wwn_t port_name, wwn_t node_name, 232 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
237 u16 pdu_size, u8 set_npiv, u8 set_auth, 233 u8 set_npiv, u8 set_auth, u16 local_bb_credits)
238 u16 local_bb_credits)
239{ 234{
240 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 235 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
241 u32 *vvl_info; 236 u32 *vvl_info;
242 237
243 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 238 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
244 239
@@ -292,8 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
292 287
293u16 288u16
294fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 289fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
295 u16 ox_id, wwn_t port_name, wwn_t node_name, 290 u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
296 u16 pdu_size)
297{ 291{
298 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
299 293
@@ -330,9 +324,9 @@ fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
330enum fc_parse_status 324enum fc_parse_status
331fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) 325fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
332{ 326{
333 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 327 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
334 struct fc_logi_s *plogi; 328 struct fc_logi_s *plogi;
335 struct fc_ls_rjt_s *ls_rjt; 329 struct fc_ls_rjt_s *ls_rjt;
336 330
337 switch (els_cmd->els_code) { 331 switch (els_cmd->els_code) {
338 case FC_ELS_LS_RJT: 332 case FC_ELS_LS_RJT:
@@ -364,7 +358,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
364enum fc_parse_status 358enum fc_parse_status
365fc_plogi_parse(struct fchs_s *fchs) 359fc_plogi_parse(struct fchs_s *fchs)
366{ 360{
367 struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); 361 struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
368 362
369 if (plogi->class3.class_valid != 1) 363 if (plogi->class3.class_valid != 1)
370 return FC_PARSE_FAILURE; 364 return FC_PARSE_FAILURE;
@@ -381,7 +375,7 @@ u16
381fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 375fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
382 u16 ox_id) 376 u16 ox_id)
383{ 377{
384 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 378 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
385 379
386 fc_els_req_build(fchs, d_id, s_id, ox_id); 380 fc_els_req_build(fchs, d_id, s_id, ox_id);
387 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 381 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
@@ -398,19 +392,16 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
398 392
399u16 393u16
400fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 394fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
401 u16 ox_id, enum bfa_port_role role) 395 u16 ox_id, enum bfa_lport_role role)
402{ 396{
403 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
404 398
405 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 399 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
406 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 400 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
407 401
408 prli->command = FC_ELS_ACC; 402 prli->command = FC_ELS_ACC;
409 403
410 if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM) 404 prli->parampage.servparams.initiator = 1;
411 prli->parampage.servparams.target = 1;
412 else
413 prli->parampage.servparams.initiator = 1;
414 405
415 prli->parampage.rspcode = FC_PRLI_ACC_XQTD; 406 prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
416 407
@@ -452,12 +443,12 @@ fc_prli_parse(struct fc_prli_s *prli)
452} 443}
453 444
454u16 445u16
455fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, 446fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
456 u32 s_id, u16 ox_id, wwn_t port_name) 447 u16 ox_id, wwn_t port_name)
457{ 448{
458 fc_els_req_build(fchs, d_id, s_id, ox_id); 449 fc_els_req_build(fchs, d_id, s_id, ox_id);
459 450
460 memset(logo, '\0', sizeof(struct fc_logo_s)); 451 bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
461 logo->els_cmd.els_code = FC_ELS_LOGO; 452 logo->els_cmd.els_code = FC_ELS_LOGO;
462 logo->nport_id = (s_id); 453 logo->nport_id = (s_id);
463 logo->orig_port_name = port_name; 454 logo->orig_port_name = port_name;
@@ -470,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
470 u32 s_id, u16 ox_id, wwn_t port_name, 461 u32 s_id, u16 ox_id, wwn_t port_name,
471 wwn_t node_name, u8 els_code) 462 wwn_t node_name, u8 els_code)
472{ 463{
473 memset(adisc, '\0', sizeof(struct fc_adisc_s)); 464 bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
474 465
475 adisc->els_cmd.els_code = els_code; 466 adisc->els_cmd.els_code = els_code;
476 467
@@ -489,8 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
489 480
490u16 481u16
491fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, 482fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
492 u32 s_id, u16 ox_id, wwn_t port_name, 483 u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
493 wwn_t node_name)
494{ 484{
495 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, 485 return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
496 node_name, FC_ELS_ADISC); 486 node_name, FC_ELS_ADISC);
@@ -523,10 +513,10 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
523} 513}
524 514
525enum fc_parse_status 515enum fc_parse_status
526fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, 516fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
527 wwn_t node_name, wwn_t port_name) 517 wwn_t port_name)
528{ 518{
529 struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; 519 struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
530 520
531 if (adisc->els_cmd.els_code != FC_ELS_ACC) 521 if (adisc->els_cmd.els_code != FC_ELS_ACC)
532 return FC_PARSE_FAILURE; 522 return FC_PARSE_FAILURE;
@@ -542,13 +532,13 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
542enum fc_parse_status 532enum fc_parse_status
543fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) 533fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
544{ 534{
545 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 535 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
546 536
547 if (pdisc->class3.class_valid != 1) 537 if (pdisc->class3.class_valid != 1)
548 return FC_PARSE_FAILURE; 538 return FC_PARSE_FAILURE;
549 539
550 if ((bfa_os_ntohs(pdisc->class3.rxsz) < 540 if ((bfa_os_ntohs(pdisc->class3.rxsz) <
551 (FC_MIN_PDUSZ - sizeof(struct fchs_s))) 541 (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
552 || (pdisc->class3.rxsz == 0)) 542 || (pdisc->class3.rxsz == 0))
553 return FC_PARSE_FAILURE; 543 return FC_PARSE_FAILURE;
554 544
@@ -584,8 +574,8 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
584} 574}
585 575
586u16 576u16
587fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, 577fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
588 u32 s_id, u16 ox_id, u16 rrq_oxid) 578 u16 ox_id, u16 rrq_oxid)
589{ 579{
590 fc_els_req_build(fchs, d_id, s_id, ox_id); 580 fc_els_req_build(fchs, d_id, s_id, ox_id);
591 581
@@ -604,11 +594,11 @@ u16
604fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 594fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
605 u16 ox_id) 595 u16 ox_id)
606{ 596{
607 struct fc_els_cmd_s *acc = pld; 597 struct fc_els_cmd_s *acc = pld;
608 598
609 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 599 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
610 600
611 memset(acc, 0, sizeof(struct fc_els_cmd_s)); 601 bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
612 acc->els_code = FC_ELS_ACC; 602 acc->els_code = FC_ELS_ACC;
613 603
614 return sizeof(struct fc_els_cmd_s); 604 return sizeof(struct fc_els_cmd_s);
@@ -620,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
620 u8 reason_code_expl) 610 u8 reason_code_expl)
621{ 611{
622 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 612 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
623 memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); 613 bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
624 614
625 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; 615 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
626 ls_rjt->reason_code = reason_code; 616 ls_rjt->reason_code = reason_code;
@@ -647,11 +637,11 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
647} 637}
648 638
649u16 639u16
650fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, 640fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
651 u32 d_id, u32 s_id, u16 ox_id) 641 u32 s_id, u16 ox_id)
652{ 642{
653 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 643 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
654 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 644 bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
655 els_cmd->els_code = FC_ELS_ACC; 645 els_cmd->els_code = FC_ELS_ACC;
656 646
657 return sizeof(struct fc_els_cmd_s); 647 return sizeof(struct fc_els_cmd_s);
@@ -661,8 +651,8 @@ int
661fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) 651fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
662{ 652{
663 int num_pages = 0; 653 int num_pages = 0;
664 struct fc_prlo_s *prlo; 654 struct fc_prlo_s *prlo;
665 struct fc_tprlo_s *tprlo; 655 struct fc_tprlo_s *tprlo;
666 656
667 if (els_code == FC_ELS_PRLO) { 657 if (els_code == FC_ELS_PRLO) {
668 prlo = (struct fc_prlo_s *) (fc_frame + 1); 658 prlo = (struct fc_prlo_s *) (fc_frame + 1);
@@ -676,14 +666,13 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
676 666
677u16 667u16
678fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, 668fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
679 u32 d_id, u32 s_id, u16 ox_id, 669 u32 d_id, u32 s_id, u16 ox_id, int num_pages)
680 int num_pages)
681{ 670{
682 int page; 671 int page;
683 672
684 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 673 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
685 674
686 memset(tprlo_acc, 0, (num_pages * 16) + 4); 675 bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
687 tprlo_acc->command = FC_ELS_ACC; 676 tprlo_acc->command = FC_ELS_ACC;
688 677
689 tprlo_acc->page_len = 0x10; 678 tprlo_acc->page_len = 0x10;
@@ -700,15 +689,14 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
700} 689}
701 690
702u16 691u16
703fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, 692fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
704 u32 d_id, u32 s_id, u16 ox_id, 693 u32 s_id, u16 ox_id, int num_pages)
705 int num_pages)
706{ 694{
707 int page; 695 int page;
708 696
709 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 697 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
710 698
711 memset(prlo_acc, 0, (num_pages * 16) + 4); 699 bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
712 prlo_acc->command = FC_ELS_ACC; 700 prlo_acc->command = FC_ELS_ACC;
713 prlo_acc->page_len = 0x10; 701 prlo_acc->page_len = 0x10;
714 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 702 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -726,11 +714,11 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
726 714
727u16 715u16
728fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, 716fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
729 u32 s_id, u16 ox_id, u32 data_format) 717 u32 s_id, u16 ox_id, u32 data_format)
730{ 718{
731 fc_els_req_build(fchs, d_id, s_id, ox_id); 719 fc_els_req_build(fchs, d_id, s_id, ox_id);
732 720
733 memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); 721 bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
734 722
735 rnid->els_cmd.els_code = FC_ELS_RNID; 723 rnid->els_cmd.els_code = FC_ELS_RNID;
736 rnid->node_id_data_format = data_format; 724 rnid->node_id_data_format = data_format;
@@ -739,13 +727,12 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
739} 727}
740 728
741u16 729u16
742fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, 730fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
743 u32 d_id, u32 s_id, u16 ox_id, 731 u32 s_id, u16 ox_id, u32 data_format,
744 u32 data_format, 732 struct fc_rnid_common_id_data_s *common_id_data,
745 struct fc_rnid_common_id_data_s *common_id_data, 733 struct fc_rnid_general_topology_data_s *gen_topo_data)
746 struct fc_rnid_general_topology_data_s *gen_topo_data)
747{ 734{
748 memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); 735 bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
749 736
750 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 737 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
751 738
@@ -769,27 +756,26 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
769 756
770u16 757u16
771fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, 758fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
772 u32 s_id, u16 ox_id) 759 u32 s_id, u16 ox_id)
773{ 760{
774 fc_els_req_build(fchs, d_id, s_id, ox_id); 761 fc_els_req_build(fchs, d_id, s_id, ox_id);
775 762
776 memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); 763 bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
777 764
778 rpsc->els_cmd.els_code = FC_ELS_RPSC; 765 rpsc->els_cmd.els_code = FC_ELS_RPSC;
779 return sizeof(struct fc_rpsc_cmd_s); 766 return sizeof(struct fc_rpsc_cmd_s);
780} 767}
781 768
782u16 769u16
783fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, 770fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
784 u32 d_id, u32 s_id, u32 *pid_list, 771 u32 s_id, u32 *pid_list, u16 npids)
785 u16 npids)
786{ 772{
787 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); 773 u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
788 int i = 0; 774 int i = 0;
789 775
790 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); 776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
791 777
792 memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 778 bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
793 779
794 rpsc2->els_cmd.els_code = FC_ELS_RPSC; 780 rpsc2->els_cmd.els_code = FC_ELS_RPSC;
795 rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); 781 rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
@@ -797,16 +783,15 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
797 for (i = 0; i < npids; i++) 783 for (i = 0; i < npids; i++)
798 rpsc2->pid_list[i].pid = pid_list[i]; 784 rpsc2->pid_list[i].pid = pid_list[i];
799 785
800 return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * 786 return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
801 (sizeof(u32)));
802} 787}
803 788
804u16 789u16
805fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, 790fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
806 u32 d_id, u32 s_id, u16 ox_id, 791 u32 d_id, u32 s_id, u16 ox_id,
807 struct fc_rpsc_speed_info_s *oper_speed) 792 struct fc_rpsc_speed_info_s *oper_speed)
808{ 793{
809 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 794 bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
810 795
811 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 796 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
812 797
@@ -820,7 +805,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
820 bfa_os_htons(oper_speed->port_op_speed); 805 bfa_os_htons(oper_speed->port_op_speed);
821 806
822 return sizeof(struct fc_rpsc_acc_s); 807 return sizeof(struct fc_rpsc_acc_s);
823
824} 808}
825 809
826/* 810/*
@@ -831,7 +815,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
831u16 815u16
832fc_logo_rsp_parse(struct fchs_s *fchs, int len) 816fc_logo_rsp_parse(struct fchs_s *fchs, int len)
833{ 817{
834 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 818 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
835 819
836 len = len; 820 len = len;
837 if (els_cmd->els_code != FC_ELS_ACC) 821 if (els_cmd->els_code != FC_ELS_ACC)
@@ -841,11 +825,10 @@ fc_logo_rsp_parse(struct fchs_s *fchs, int len)
841} 825}
842 826
843u16 827u16
844fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 828fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
845 u16 ox_id, wwn_t port_name, wwn_t node_name, 829 wwn_t port_name, wwn_t node_name, u16 pdu_size)
846 u16 pdu_size)
847{ 830{
848 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 831 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
849 832
850 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 833 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
851 834
@@ -862,7 +845,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
862u16 845u16
863fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) 846fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
864{ 847{
865 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 848 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
866 849
867 if (len < sizeof(struct fc_logi_s)) 850 if (len < sizeof(struct fc_logi_s))
868 return FC_PARSE_LEN_INVAL; 851 return FC_PARSE_LEN_INVAL;
@@ -886,11 +869,11 @@ u16
886fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, 869fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
887 int num_pages) 870 int num_pages)
888{ 871{
889 struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); 872 struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
890 int page; 873 int page;
891 874
892 fc_els_req_build(fchs, d_id, s_id, ox_id); 875 fc_els_req_build(fchs, d_id, s_id, ox_id);
893 memset(prlo, 0, (num_pages * 16) + 4); 876 bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
894 prlo->command = FC_ELS_PRLO; 877 prlo->command = FC_ELS_PRLO;
895 prlo->page_len = 0x10; 878 prlo->page_len = 0x10;
896 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 879 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -909,7 +892,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
909u16 892u16
910fc_prlo_rsp_parse(struct fchs_s *fchs, int len) 893fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
911{ 894{
912 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1); 895 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
913 int num_pages = 0; 896 int num_pages = 0;
914 int page = 0; 897 int page = 0;
915 898
@@ -941,15 +924,14 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
941} 924}
942 925
943u16 926u16
944fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 927fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
945 u16 ox_id, int num_pages, 928 int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
946 enum fc_tprlo_type tprlo_type, u32 tpr_id)
947{ 929{
948 struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); 930 struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
949 int page; 931 int page;
950 932
951 fc_els_req_build(fchs, d_id, s_id, ox_id); 933 fc_els_req_build(fchs, d_id, s_id, ox_id);
952 memset(tprlo, 0, (num_pages * 16) + 4); 934 bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
953 tprlo->command = FC_ELS_TPRLO; 935 tprlo->command = FC_ELS_TPRLO;
954 tprlo->page_len = 0x10; 936 tprlo->page_len = 0x10;
955 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 937 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -1003,7 +985,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
1003enum fc_parse_status 985enum fc_parse_status
1004fc_rrq_rsp_parse(struct fchs_s *fchs, int len) 986fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1005{ 987{
1006 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 988 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1007 989
1008 len = len; 990 len = len;
1009 if (els_cmd->els_code != FC_ELS_ACC) 991 if (els_cmd->els_code != FC_ELS_ACC)
@@ -1013,11 +995,10 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1013} 995}
1014 996
1015u16 997u16
1016fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 998fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
1017 u16 ox_id, u32 reason_code, 999 u32 reason_code, u32 reason_expl)
1018 u32 reason_expl)
1019{ 1000{
1020 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); 1001 struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
1021 1002
1022 fc_bls_rsp_build(fchs, d_id, s_id, ox_id); 1003 fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
1023 1004
@@ -1062,10 +1043,8 @@ u16
1062fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1043fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1063 wwn_t port_name) 1044 wwn_t port_name)
1064{ 1045{
1065 1046 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1066 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1047 struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
1067 struct fcgs_gidpn_req_s *gidpn =
1068 (struct fcgs_gidpn_req_s *) (cthdr + 1);
1069 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1048 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1070 1049
1071 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1080,8 +1059,7 @@ u16
1080fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1059fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1081 u32 port_id) 1060 u32 port_id)
1082{ 1061{
1083 1062 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1084 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1085 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); 1063 fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
1086 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1064 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1087 1065
@@ -1097,8 +1075,7 @@ u16
1097fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1075fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1098 u32 port_id) 1076 u32 port_id)
1099{ 1077{
1100 1078 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1101 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1102 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); 1079 fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
1103 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1080 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1104 1081
@@ -1124,8 +1101,8 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
1124} 1101}
1125 1102
1126u16 1103u16
1127fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, 1104fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
1128 u32 s_id, u16 ox_id) 1105 u8 set_br_reg, u32 s_id, u16 ox_id)
1129{ 1106{
1130 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1107 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1131 1108
@@ -1141,8 +1118,8 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
1141} 1118}
1142 1119
1143u16 1120u16
1144fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, 1121fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
1145 u16 ox_id) 1122 u32 s_id, u16 ox_id)
1146{ 1123{
1147 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); 1124 u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
1148 u16 payldlen; 1125 u16 payldlen;
@@ -1162,11 +1139,10 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
1162 1139
1163u16 1140u16
1164fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1141fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1165 enum bfa_port_role roles) 1142 enum bfa_lport_role roles)
1166{ 1143{
1167 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1144 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1168 struct fcgs_rftid_req_s *rftid = 1145 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1169 (struct fcgs_rftid_req_s *) (cthdr + 1);
1170 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); 1146 u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
1171 u8 index; 1147 u8 index;
1172 1148
@@ -1182,23 +1158,15 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1182 type_value = 1 << (FC_TYPE_FCP % 32); 1158 type_value = 1 << (FC_TYPE_FCP % 32);
1183 rftid->fc4_type[index] = bfa_os_htonl(type_value); 1159 rftid->fc4_type[index] = bfa_os_htonl(type_value);
1184 1160
1185 if (roles & BFA_PORT_ROLE_FCP_IPFC) {
1186 index = FC_TYPE_IP >> 5;
1187 type_value = 1 << (FC_TYPE_IP % 32);
1188 rftid->fc4_type[index] |= bfa_os_htonl(type_value);
1189 }
1190
1191 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1161 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
1192} 1162}
1193 1163
1194u16 1164u16
1195fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, 1165fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1196 u16 ox_id, u8 *fc4_bitmap, 1166 u8 *fc4_bitmap, u32 bitmap_size)
1197 u32 bitmap_size)
1198{ 1167{
1199 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1168 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1200 struct fcgs_rftid_req_s *rftid = 1169 struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
1201 (struct fcgs_rftid_req_s *) (cthdr + 1);
1202 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1170 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1203 1171
1204 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1208,7 +1176,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
1208 1176
1209 rftid->dap = s_id; 1177 rftid->dap = s_id;
1210 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, 1178 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
1211 (bitmap_size < 32 ? bitmap_size : 32)); 1179 (bitmap_size < 32 ? bitmap_size : 32));
1212 1180
1213 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1181 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
1214} 1182}
@@ -1217,9 +1185,8 @@ u16
1217fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, 1185fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1218 u8 fc4_type, u8 fc4_ftrs) 1186 u8 fc4_type, u8 fc4_ftrs)
1219{ 1187{
1220 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1188 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1221 struct fcgs_rffid_req_s *rffid = 1189 struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
1222 (struct fcgs_rffid_req_s *) (cthdr + 1);
1223 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1190 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1224 1191
1225 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1227,9 +1194,9 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1227 1194
1228 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); 1195 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
1229 1196
1230 rffid->dap = s_id; 1197 rffid->dap = s_id;
1231 rffid->fc4ftr_bits = fc4_ftrs; 1198 rffid->fc4ftr_bits = fc4_ftrs;
1232 rffid->fc4_type = fc4_type; 1199 rffid->fc4_type = fc4_type;
1233 1200
1234 return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); 1201 return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
1235} 1202}
@@ -1239,9 +1206,9 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1239 u8 *name) 1206 u8 *name)
1240{ 1207{
1241 1208
1242 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1209 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1243 struct fcgs_rspnid_req_s *rspnid = 1210 struct fcgs_rspnid_req_s *rspnid =
1244 (struct fcgs_rspnid_req_s *) (cthdr + 1); 1211 (struct fcgs_rspnid_req_s *)(cthdr + 1);
1245 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1212 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1246 1213
1247 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1257,13 +1224,11 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1257} 1224}
1258 1225
1259u16 1226u16
1260fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, 1227fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
1261 u8 fc4_type)
1262{ 1228{
1263 1229
1264 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1230 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1265 struct fcgs_gidft_req_s *gidft = 1231 struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
1266 (struct fcgs_gidft_req_s *) (cthdr + 1);
1267 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1232 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1268 1233
1269 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1234 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1282,9 +1247,8 @@ u16
1282fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1247fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1283 wwn_t port_name) 1248 wwn_t port_name)
1284{ 1249{
1285 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1250 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1286 struct fcgs_rpnid_req_s *rpnid = 1251 struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
1287 (struct fcgs_rpnid_req_s *) (cthdr + 1);
1288 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1252 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1289 1253
1290 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1301,9 +1265,8 @@ u16
1301fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1265fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1302 wwn_t node_name) 1266 wwn_t node_name)
1303{ 1267{
1304 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1268 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1305 struct fcgs_rnnid_req_s *rnnid = 1269 struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
1306 (struct fcgs_rnnid_req_s *) (cthdr + 1);
1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1270 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1308 1271
1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1320,7 +1283,7 @@ u16
1320fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1283fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1321 u32 cos) 1284 u32 cos)
1322{ 1285{
1323 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1286 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1324 struct fcgs_rcsid_req_s *rcsid = 1287 struct fcgs_rcsid_req_s *rcsid =
1325 (struct fcgs_rcsid_req_s *) (cthdr + 1); 1288 (struct fcgs_rcsid_req_s *) (cthdr + 1);
1326 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1289 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
@@ -1339,9 +1302,8 @@ u16
1339fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, 1302fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1340 u8 port_type) 1303 u8 port_type)
1341{ 1304{
1342 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1305 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1343 struct fcgs_rptid_req_s *rptid = 1306 struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
1344 (struct fcgs_rptid_req_s *) (cthdr + 1);
1345 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1307 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1346 1308
1347 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1357,9 +1319,8 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1357u16 1319u16
1358fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) 1320fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
1359{ 1321{
1360 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1322 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1361 struct fcgs_ganxt_req_s *ganxt = 1323 struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
1362 (struct fcgs_ganxt_req_s *) (cthdr + 1);
1363 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); 1324 u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
1364 1325
1365 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1379,7 +1340,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1379 u16 cmd_code) 1340 u16 cmd_code)
1380{ 1341{
1381 1342
1382 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1343 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1383 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1344 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1384 1345
1385 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1346 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1409,12 +1370,12 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
1409} 1370}
1410 1371
1411/* 1372/*
1412 * GMAL Request 1373 * GMAL Request
1413 */ 1374 */
1414u16 1375u16
1415fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) 1376fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1416{ 1377{
1417 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1378 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1418 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); 1379 fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
1419 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1380 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1420 1381
@@ -1434,7 +1395,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1434u16 1395u16
1435fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) 1396fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1436{ 1397{
1437 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 1398 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1438 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); 1399 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
1439 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); 1400 u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
1440 1401
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
new file mode 100644
index 000000000000..73abd02e53cc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -0,0 +1,316 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.h - FC link service frame building and parsing routines
19 */
20
21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__
23
24#include "bfa_os_inc.h"
25#include "bfa_fc.h"
26#include "bfa_defs_fcs.h"
27
28/*
29 * Utility Macros/functions
30 */
31
32#define wwn_is_equal(_wwn1, _wwn2) \
33 (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
34
35#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
36
37/*
38 * Given the fc response length, this routine will return
39 * the length of the actual payload bytes following the CT header.
40 *
41 * Assumes the input response length does not include the crc, eof, etc.
42 */
43static inline u32
44fc_get_ctresp_pyld_len(u32 resp_len)
45{
46 return resp_len - sizeof(struct ct_hdr_s);
47}
48
49/*
50 * Convert bfa speed to rpsc speed value.
51 */
52static inline enum bfa_port_speed
53fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
54{
55 switch (speed) {
56
57 case RPSC_OP_SPEED_1G:
58 return BFA_PORT_SPEED_1GBPS;
59
60 case RPSC_OP_SPEED_2G:
61 return BFA_PORT_SPEED_2GBPS;
62
63 case RPSC_OP_SPEED_4G:
64 return BFA_PORT_SPEED_4GBPS;
65
66 case RPSC_OP_SPEED_8G:
67 return BFA_PORT_SPEED_8GBPS;
68
69 case RPSC_OP_SPEED_10G:
70 return BFA_PORT_SPEED_10GBPS;
71
72 default:
73 return BFA_PORT_SPEED_UNKNOWN;
74 }
75}
76
77/*
78 * Convert RPSC speed to bfa speed value.
79 */
80static inline enum fc_rpsc_op_speed
81fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
82{
83 switch (op_speed) {
84
85 case BFA_PORT_SPEED_1GBPS:
86 return RPSC_OP_SPEED_1G;
87
88 case BFA_PORT_SPEED_2GBPS:
89 return RPSC_OP_SPEED_2G;
90
91 case BFA_PORT_SPEED_4GBPS:
92 return RPSC_OP_SPEED_4G;
93
94 case BFA_PORT_SPEED_8GBPS:
95 return RPSC_OP_SPEED_8G;
96
97 case BFA_PORT_SPEED_10GBPS:
98 return RPSC_OP_SPEED_10G;
99
100 default:
101 return RPSC_OP_SPEED_NOT_EST;
102 }
103}
104
105enum fc_parse_status {
106 FC_PARSE_OK = 0,
107 FC_PARSE_FAILURE = 1,
108 FC_PARSE_BUSY = 2,
109 FC_PARSE_LEN_INVAL,
110 FC_PARSE_ACC_INVAL,
111 FC_PARSE_PWWN_NOT_EQUAL,
112 FC_PARSE_NWWN_NOT_EQUAL,
113 FC_PARSE_RXSZ_INVAL,
114 FC_PARSE_NOT_FCP,
115 FC_PARSE_OPAFLAG_INVAL,
116 FC_PARSE_RPAFLAG_INVAL,
117 FC_PARSE_OPA_INVAL,
118 FC_PARSE_RPA_INVAL,
119
120};
121
122struct fc_templates_s {
123 struct fchs_s fc_els_req;
124 struct fchs_s fc_bls_req;
125 struct fc_logi_s plogi;
126 struct fc_rrq_s rrq;
127};
128
129void fcbuild_init(void);
130
131u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
132 u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
133 u16 pdu_size, u8 set_npiv, u8 set_auth,
134 u16 local_bb_credits);
135
136u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
137 u16 ox_id, wwn_t port_name, wwn_t node_name,
138 u16 pdu_size);
139
140u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
141 u32 s_id, u16 ox_id,
142 wwn_t port_name, wwn_t node_name,
143 u16 pdu_size,
144 u16 local_bb_credits);
145
146u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
147 u32 s_id, u16 ox_id, wwn_t port_name,
148 wwn_t node_name, u16 pdu_size);
149
150enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
151
152u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
153 u16 ox_id);
154
155enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
156
157u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
158 u32 s_id, u16 ox_id, u16 rrq_oxid);
159enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
160
161u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
162 u16 ox_id, u8 *name);
163
164u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
165 u16 ox_id, enum bfa_lport_role role);
166
167u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
168 u16 ox_id, u8 *fc4_bitmap,
169 u32 bitmap_size);
170
171u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
172 u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
173
174u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
175 u16 ox_id, wwn_t port_name);
176
177u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
178 u16 ox_id, u32 port_id);
179
180u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
181 u8 set_br_reg, u32 s_id, u16 ox_id);
182
183u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
184 u32 s_id, u16 ox_id,
185 wwn_t port_name, wwn_t node_name,
186 u16 pdu_size);
187
188u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
189 u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
190 wwn_t node_name);
191
192enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
193 u32 host_dap, wwn_t node_name, wwn_t port_name);
194
195enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
196 wwn_t port_name, wwn_t node_name);
197
198u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
199 u32 d_id, u32 s_id, u16 ox_id,
200 wwn_t port_name, wwn_t node_name);
201u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
202 u32 d_id, u32 s_id, u16 ox_id,
203 u8 reason_code, u8 reason_code_expl);
204u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
205 u32 d_id, u32 s_id, u16 ox_id);
206u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
207 u32 s_id, u16 ox_id);
208
209enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
210
211u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
212 u32 s_id, u16 ox_id,
213 enum bfa_lport_role role);
214
215u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
216 u32 d_id, u32 s_id, u16 ox_id,
217 u32 data_format);
218
219u16 fc_rnid_acc_build(struct fchs_s *fchs,
220 struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
221 u16 ox_id, u32 data_format,
222 struct fc_rnid_common_id_data_s *common_id_data,
223 struct fc_rnid_general_topology_data_s *gen_topo_data);
224
225u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
226 u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
227u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
228 u32 d_id, u32 s_id, u16 ox_id);
229u16 fc_rpsc_acc_build(struct fchs_s *fchs,
230 struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
231 u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
232u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
233 u8 fc4_type);
234
235u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
236 u32 port_id, wwn_t port_name);
237
238u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
239 u32 port_id, wwn_t node_name);
240
241u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
242 u32 port_id, u32 cos);
243
244u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
245 u32 port_id, u8 port_type);
246
247u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
248 u32 port_id);
249
250u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
251 u32 s_id, u16 ox_id, wwn_t port_name);
252
253u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
254 u32 s_id, u16 ox_id);
255
256u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
257 u16 cmd_code);
258u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
259u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
260
261void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
262
263void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
264 u16 ox_id);
265
266enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
267
268enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
269 wwn_t port_name);
270
271enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
272
273enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
274 wwn_t port_name);
275
276u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
277 u32 s_id, u16 ox_id, u16 rx_id);
278
279int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
280
281u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
282 u32 d_id, u32 s_id, u16 ox_id, int num_pages);
283
284u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
285 u32 d_id, u32 s_id, u16 ox_id, int num_pages);
286
287u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
288
289u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
290 u16 ox_id, wwn_t port_name, wwn_t node_name,
291 u16 pdu_size);
292
293u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
294
295u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
296 u16 ox_id, int num_pages);
297
298u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
299
300u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
301 u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
302 u32 tpr_id);
303
304u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
305
306u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
307 u16 ox_id, u32 reason_code, u32 reason_expl);
308
309u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
310 u32 port_id);
311
312u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
313
314u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
315 u16 ox_id);
316#endif
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 8c703d8dc94b..33c8dd51f474 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,18 +15,291 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_modules.h"
19#include <log/bfa_log_hal.h> 19#include "bfa_cb_ioim.h"
20 20
21BFA_TRC_FILE(HAL, FCPIM); 21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim); 22BFA_MODULE(fcpim);
23 23
24
25#define bfa_fcpim_add_iostats(__l, __r, __stats) \
26 (__l->__stats += __r->__stats)
27
28
29/**
30 * BFA ITNIM Related definitions
31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
33
34#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
35 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
36
37#define bfa_fcpim_additn(__itnim) \
38 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
39#define bfa_fcpim_delitn(__itnim) do { \
40 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
41 bfa_itnim_update_del_itn_stats(__itnim); \
42 list_del(&(__itnim)->qe); \
43 bfa_assert(list_empty(&(__itnim)->io_q)); \
44 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
45 bfa_assert(list_empty(&(__itnim)->pending_q)); \
46} while (0)
47
48#define bfa_itnim_online_cb(__itnim) do { \
49 if ((__itnim)->bfa->fcs) \
50 bfa_cb_itnim_online((__itnim)->ditn); \
51 else { \
52 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
53 __bfa_cb_itnim_online, (__itnim)); \
54 } \
55} while (0)
56
57#define bfa_itnim_offline_cb(__itnim) do { \
58 if ((__itnim)->bfa->fcs) \
59 bfa_cb_itnim_offline((__itnim)->ditn); \
60 else { \
61 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
62 __bfa_cb_itnim_offline, (__itnim)); \
63 } \
64} while (0)
65
66#define bfa_itnim_sler_cb(__itnim) do { \
67 if ((__itnim)->bfa->fcs) \
68 bfa_cb_itnim_sler((__itnim)->ditn); \
69 else { \
70 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
71 __bfa_cb_itnim_sler, (__itnim)); \
72 } \
73} while (0)
74
75/**
76 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
80enum bfa_itnim_event {
81 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
82 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
83 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
84 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
85 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
86 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
87 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
88 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90};
91
92/**
93 * BFA IOIM related definitions
94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \
96 list_del(&(__ioim)->qe); \
97 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
98} while (0)
99
100
101#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
102 if ((__fcpim)->profile_comp) \
103 (__fcpim)->profile_comp(__ioim); \
104} while (0)
105
106#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \
109} while (0)
110/**
111 * hal_ioim_sm
112 */
113
114/**
115 * IO state machine events
116 */
117enum bfa_ioim_event {
118 BFA_IOIM_SM_START = 1, /* io start request from host */
119 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
120 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
121 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
122 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
123 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
124 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
125 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
126 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
127 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
128 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
129 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
130 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
131 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
132 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
133 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
134 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
135 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
136};
137
138
139/**
140 * BFA TSKIM related definitions
141 */
142
143/**
144 * task management completion handling
145 */
146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
147 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
148 bfa_tskim_notify_comp(__tskim); \
149} while (0)
150
151#define bfa_tskim_notify_comp(__tskim) do { \
152 if ((__tskim)->notify) \
153 bfa_itnim_tskdone((__tskim)->itnim); \
154} while (0)
155
156
157enum bfa_tskim_event {
158 BFA_TSKIM_SM_START = 1, /* TM command start */
159 BFA_TSKIM_SM_DONE = 2, /* TM completion */
160 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
161 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
162 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
163 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
164 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
166};
167
168/**
169 * forward declaration for BFA ITNIM functions
170 */
171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
172static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
173static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
174static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
175static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
176static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
177static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
178static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
179static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov(void *itnim_arg);
182static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
185
186/**
187 * forward declaration of ITNIM state machine
188 */
189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
190 enum bfa_itnim_event event);
191static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
192 enum bfa_itnim_event event);
193static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
194 enum bfa_itnim_event event);
195static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
196 enum bfa_itnim_event event);
197static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
198 enum bfa_itnim_event event);
199static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
200 enum bfa_itnim_event event);
201static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
202 enum bfa_itnim_event event);
203static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
204 enum bfa_itnim_event event);
205static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
206 enum bfa_itnim_event event);
207static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
208 enum bfa_itnim_event event);
209static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
210 enum bfa_itnim_event event);
211static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
212 enum bfa_itnim_event event);
213static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
214 enum bfa_itnim_event event);
215static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
216 enum bfa_itnim_event event);
217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
218 enum bfa_itnim_event event);
219
220/**
221 * forward declaration for BFA IOIM functions
222 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
224static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
225static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
226static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
227static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
228static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
229static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
230static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
231static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
232static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234
235
236/**
237 * forward declaration of BFA IO state machine
238 */
239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
260 enum bfa_ioim_event event);
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event);
263
264/**
265 * forward declaration for BFA TSKIM functions
266 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
268static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
269static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
270 lun_t lun);
271static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
272static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
273static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
274static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
275static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277
278
279/**
280 * forward declaration of BFA TSKIM state machine
281 */
282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
293 enum bfa_tskim_event event);
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event);
296
24/** 297/**
25 * hal_fcpim_mod BFA FCP Initiator Mode module 298 * hal_fcpim_mod BFA FCP Initiator Mode module
26 */ 299 */
27 300
28/** 301/**
29 * Compute and return memory needed by FCP(im) module. 302 * Compute and return memory needed by FCP(im) module.
30 */ 303 */
31static void 304static void
32bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 305bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@ -58,7 +331,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
58 331
59static void 332static void
60bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 333bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
61 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 334 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
62{ 335{
63 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 336 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
64 337
@@ -67,12 +340,14 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
67 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); 340 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
68 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); 341 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
69 342
70 fcpim->bfa = bfa; 343 fcpim->bfa = bfa;
71 fcpim->num_itnims = cfg->fwcfg.num_rports; 344 fcpim->num_itnims = cfg->fwcfg.num_rports;
72 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 345 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
73 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; 346 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
74 fcpim->path_tov = cfg->drvcfg.path_tov; 347 fcpim->path_tov = cfg->drvcfg.path_tov;
75 fcpim->delay_comp = cfg->drvcfg.delay_comp; 348 fcpim->delay_comp = cfg->drvcfg.delay_comp;
349 fcpim->profile_comp = NULL;
350 fcpim->profile_start = NULL;
76 351
77 bfa_itnim_attach(fcpim, meminfo); 352 bfa_itnim_attach(fcpim, meminfo);
78 bfa_tskim_attach(fcpim, meminfo); 353 bfa_tskim_attach(fcpim, meminfo);
@@ -103,7 +378,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
103{ 378{
104 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 379 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
105 struct bfa_itnim_s *itnim; 380 struct bfa_itnim_s *itnim;
106 struct list_head *qe, *qen; 381 struct list_head *qe, *qen;
107 382
108 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 383 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
109 itnim = (struct bfa_itnim_s *) qe; 384 itnim = (struct bfa_itnim_s *) qe;
@@ -112,6 +387,56 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
112} 387}
113 388
114void 389void
390bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
391 struct bfa_itnim_iostats_s *rstats)
392{
393 bfa_fcpim_add_iostats(lstats, rstats, total_ios);
394 bfa_fcpim_add_iostats(lstats, rstats, qresumes);
395 bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
396 bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
397 bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
398 bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
399 bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
400 bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
401 bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
402 bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
403 bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
404 bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
405 bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
406 bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
407 bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
408 bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
409 bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
410 bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
411 bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
412 bfa_fcpim_add_iostats(lstats, rstats, onlines);
413 bfa_fcpim_add_iostats(lstats, rstats, offlines);
414 bfa_fcpim_add_iostats(lstats, rstats, creates);
415 bfa_fcpim_add_iostats(lstats, rstats, deletes);
416 bfa_fcpim_add_iostats(lstats, rstats, create_comps);
417 bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
418 bfa_fcpim_add_iostats(lstats, rstats, sler_events);
419 bfa_fcpim_add_iostats(lstats, rstats, fw_create);
420 bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
421 bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
422 bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
423 bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
424 bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
425 bfa_fcpim_add_iostats(lstats, rstats, tm_success);
426 bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
427 bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
428 bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
429 bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
430 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
431 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
432 bfa_fcpim_add_iostats(lstats, rstats, io_comps);
433 bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
434 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
435 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
436 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
437}
438
439void
115bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) 440bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
116{ 441{
117 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 442 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -130,21 +455,113 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
130} 455}
131 456
132bfa_status_t 457bfa_status_t
133bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats) 458bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
459 u8 lp_tag)
460{
461 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
462 struct list_head *qe, *qen;
463 struct bfa_itnim_s *itnim;
464
465 /* accumulate IO stats from itnim */
466 bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
467 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
468 itnim = (struct bfa_itnim_s *) qe;
469 if (itnim->rport->rport_info.lp_tag != lp_tag)
470 continue;
471 bfa_fcpim_add_stats(stats, &(itnim->stats));
472 }
473 return BFA_STATUS_OK;
474}
475bfa_status_t
476bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
477{
478 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
479 struct list_head *qe, *qen;
480 struct bfa_itnim_s *itnim;
481
482 /* accumulate IO stats from itnim */
483 bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
484 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
485 itnim = (struct bfa_itnim_s *) qe;
486 bfa_fcpim_add_stats(modstats, &(itnim->stats));
487 }
488 return BFA_STATUS_OK;
489}
490
491bfa_status_t
492bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
493 struct bfa_fcpim_del_itn_stats_s *modstats)
134{ 494{
135 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 495 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
136 496
137 *modstats = fcpim->stats; 497 *modstats = fcpim->del_itn_stats;
138 498
139 return BFA_STATUS_OK; 499 return BFA_STATUS_OK;
140} 500}
141 501
502
503bfa_status_t
504bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
505{
506 struct bfa_itnim_s *itnim;
507 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
508 struct list_head *qe, *qen;
509
510 /* accumulate IO stats from itnim */
511 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
512 itnim = (struct bfa_itnim_s *) qe;
513 bfa_itnim_clear_stats(itnim);
514 }
515 fcpim->io_profile = BFA_TRUE;
516 fcpim->io_profile_start_time = time;
517 fcpim->profile_comp = bfa_ioim_profile_comp;
518 fcpim->profile_start = bfa_ioim_profile_start;
519
520 return BFA_STATUS_OK;
521}
522bfa_status_t
523bfa_fcpim_profile_off(struct bfa_s *bfa)
524{
525 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
526 fcpim->io_profile = BFA_FALSE;
527 fcpim->io_profile_start_time = 0;
528 fcpim->profile_comp = NULL;
529 fcpim->profile_start = NULL;
530 return BFA_STATUS_OK;
531}
532
533bfa_status_t
534bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
535{
536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
537 struct list_head *qe, *qen;
538 struct bfa_itnim_s *itnim;
539
540 /* clear IO stats from all active itnims */
541 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
542 itnim = (struct bfa_itnim_s *) qe;
543 if (itnim->rport->rport_info.lp_tag != lp_tag)
544 continue;
545 bfa_itnim_clear_stats(itnim);
546 }
547 return BFA_STATUS_OK;
548
549}
550
142bfa_status_t 551bfa_status_t
143bfa_fcpim_clr_modstats(struct bfa_s *bfa) 552bfa_fcpim_clr_modstats(struct bfa_s *bfa)
144{ 553{
145 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 554 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
555 struct list_head *qe, *qen;
556 struct bfa_itnim_s *itnim;
146 557
147 memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s)); 558 /* clear IO stats from all active itnims */
559 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
560 itnim = (struct bfa_itnim_s *) qe;
561 bfa_itnim_clear_stats(itnim);
562 }
563 bfa_os_memset(&fcpim->del_itn_stats, 0,
564 sizeof(struct bfa_fcpim_del_itn_stats_s));
148 565
149 return BFA_STATUS_OK; 566 return BFA_STATUS_OK;
150} 567}
@@ -176,14 +593,6 @@ bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
176 * IO redirection is turned off when QoS is enabled and vice versa 593 * IO redirection is turned off when QoS is enabled and vice versa
177 */ 594 */
178 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE; 595 ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
179
180 /*
181 * Notify the bfad module of a possible state change in
182 * IO redirection capability, due to a QoS state change. bfad will
183 * check on the support for io redirection and update the
184 * fcpim's ioredirect state accordingly.
185 */
186 bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect);
187} 596}
188 597
189void 598void
@@ -192,3 +601,3012 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
192 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 601 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
193 fcpim->ioredirect = state; 602 fcpim->ioredirect = state;
194} 603}
604
605
606
607/**
608 * BFA ITNIM module state machine functions
609 */
610
611/**
612 * Beginning/unallocated state - no events expected.
613 */
614static void
615bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
616{
617 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
618 bfa_trc(itnim->bfa, event);
619
620 switch (event) {
621 case BFA_ITNIM_SM_CREATE:
622 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
623 itnim->is_online = BFA_FALSE;
624 bfa_fcpim_additn(itnim);
625 break;
626
627 default:
628 bfa_sm_fault(itnim->bfa, event);
629 }
630}
631
632/**
633 * Beginning state, only online event expected.
634 */
635static void
636bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
637{
638 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
639 bfa_trc(itnim->bfa, event);
640
641 switch (event) {
642 case BFA_ITNIM_SM_ONLINE:
643 if (bfa_itnim_send_fwcreate(itnim))
644 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
645 else
646 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
647 break;
648
649 case BFA_ITNIM_SM_DELETE:
650 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
651 bfa_fcpim_delitn(itnim);
652 break;
653
654 case BFA_ITNIM_SM_HWFAIL:
655 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
656 break;
657
658 default:
659 bfa_sm_fault(itnim->bfa, event);
660 }
661}
662
663/**
664 * Waiting for itnim create response from firmware.
665 */
666static void
667bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
668{
669 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
670 bfa_trc(itnim->bfa, event);
671
672 switch (event) {
673 case BFA_ITNIM_SM_FWRSP:
674 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
675 itnim->is_online = BFA_TRUE;
676 bfa_itnim_iotov_online(itnim);
677 bfa_itnim_online_cb(itnim);
678 break;
679
680 case BFA_ITNIM_SM_DELETE:
681 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
682 break;
683
684 case BFA_ITNIM_SM_OFFLINE:
685 if (bfa_itnim_send_fwdelete(itnim))
686 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
687 else
688 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
689 break;
690
691 case BFA_ITNIM_SM_HWFAIL:
692 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
693 break;
694
695 default:
696 bfa_sm_fault(itnim->bfa, event);
697 }
698}
699
700static void
701bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
702 enum bfa_itnim_event event)
703{
704 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
705 bfa_trc(itnim->bfa, event);
706
707 switch (event) {
708 case BFA_ITNIM_SM_QRESUME:
709 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
710 bfa_itnim_send_fwcreate(itnim);
711 break;
712
713 case BFA_ITNIM_SM_DELETE:
714 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
715 bfa_reqq_wcancel(&itnim->reqq_wait);
716 bfa_fcpim_delitn(itnim);
717 break;
718
719 case BFA_ITNIM_SM_OFFLINE:
720 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
721 bfa_reqq_wcancel(&itnim->reqq_wait);
722 bfa_itnim_offline_cb(itnim);
723 break;
724
725 case BFA_ITNIM_SM_HWFAIL:
726 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
727 bfa_reqq_wcancel(&itnim->reqq_wait);
728 break;
729
730 default:
731 bfa_sm_fault(itnim->bfa, event);
732 }
733}
734
735/**
736 * Waiting for itnim create response from firmware, a delete is pending.
737 */
738static void
739bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
740 enum bfa_itnim_event event)
741{
742 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
743 bfa_trc(itnim->bfa, event);
744
745 switch (event) {
746 case BFA_ITNIM_SM_FWRSP:
747 if (bfa_itnim_send_fwdelete(itnim))
748 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
749 else
750 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
751 break;
752
753 case BFA_ITNIM_SM_HWFAIL:
754 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
755 bfa_fcpim_delitn(itnim);
756 break;
757
758 default:
759 bfa_sm_fault(itnim->bfa, event);
760 }
761}
762
763/**
764 * Online state - normal parking state.
765 */
766static void
767bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
768{
769 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
770 bfa_trc(itnim->bfa, event);
771
772 switch (event) {
773 case BFA_ITNIM_SM_OFFLINE:
774 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
775 itnim->is_online = BFA_FALSE;
776 bfa_itnim_iotov_start(itnim);
777 bfa_itnim_cleanup(itnim);
778 break;
779
780 case BFA_ITNIM_SM_DELETE:
781 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
782 itnim->is_online = BFA_FALSE;
783 bfa_itnim_cleanup(itnim);
784 break;
785
786 case BFA_ITNIM_SM_SLER:
787 bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
788 itnim->is_online = BFA_FALSE;
789 bfa_itnim_iotov_start(itnim);
790 bfa_itnim_sler_cb(itnim);
791 break;
792
793 case BFA_ITNIM_SM_HWFAIL:
794 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
795 itnim->is_online = BFA_FALSE;
796 bfa_itnim_iotov_start(itnim);
797 bfa_itnim_iocdisable_cleanup(itnim);
798 break;
799
800 default:
801 bfa_sm_fault(itnim->bfa, event);
802 }
803}
804
805/**
806 * Second level error recovery need.
807 */
808static void
809bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
810{
811 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
812 bfa_trc(itnim->bfa, event);
813
814 switch (event) {
815 case BFA_ITNIM_SM_OFFLINE:
816 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
817 bfa_itnim_cleanup(itnim);
818 break;
819
820 case BFA_ITNIM_SM_DELETE:
821 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
822 bfa_itnim_cleanup(itnim);
823 bfa_itnim_iotov_delete(itnim);
824 break;
825
826 case BFA_ITNIM_SM_HWFAIL:
827 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
828 bfa_itnim_iocdisable_cleanup(itnim);
829 break;
830
831 default:
832 bfa_sm_fault(itnim->bfa, event);
833 }
834}
835
836/**
837 * Going offline. Waiting for active IO cleanup.
838 */
839static void
840bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
841 enum bfa_itnim_event event)
842{
843 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
844 bfa_trc(itnim->bfa, event);
845
846 switch (event) {
847 case BFA_ITNIM_SM_CLEANUP:
848 if (bfa_itnim_send_fwdelete(itnim))
849 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
850 else
851 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
852 break;
853
854 case BFA_ITNIM_SM_DELETE:
855 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
856 bfa_itnim_iotov_delete(itnim);
857 break;
858
859 case BFA_ITNIM_SM_HWFAIL:
860 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
861 bfa_itnim_iocdisable_cleanup(itnim);
862 bfa_itnim_offline_cb(itnim);
863 break;
864
865 case BFA_ITNIM_SM_SLER:
866 break;
867
868 default:
869 bfa_sm_fault(itnim->bfa, event);
870 }
871}
872
873/**
874 * Deleting itnim. Waiting for active IO cleanup.
875 */
876static void
877bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
878 enum bfa_itnim_event event)
879{
880 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
881 bfa_trc(itnim->bfa, event);
882
883 switch (event) {
884 case BFA_ITNIM_SM_CLEANUP:
885 if (bfa_itnim_send_fwdelete(itnim))
886 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
887 else
888 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
889 break;
890
891 case BFA_ITNIM_SM_HWFAIL:
892 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
893 bfa_itnim_iocdisable_cleanup(itnim);
894 break;
895
896 default:
897 bfa_sm_fault(itnim->bfa, event);
898 }
899}
900
901/**
902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
903 */
904static void
905bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
906{
907 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
908 bfa_trc(itnim->bfa, event);
909
910 switch (event) {
911 case BFA_ITNIM_SM_FWRSP:
912 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
913 bfa_itnim_offline_cb(itnim);
914 break;
915
916 case BFA_ITNIM_SM_DELETE:
917 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
918 break;
919
920 case BFA_ITNIM_SM_HWFAIL:
921 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
922 bfa_itnim_offline_cb(itnim);
923 break;
924
925 default:
926 bfa_sm_fault(itnim->bfa, event);
927 }
928}
929
930static void
931bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
932 enum bfa_itnim_event event)
933{
934 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
935 bfa_trc(itnim->bfa, event);
936
937 switch (event) {
938 case BFA_ITNIM_SM_QRESUME:
939 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
940 bfa_itnim_send_fwdelete(itnim);
941 break;
942
943 case BFA_ITNIM_SM_DELETE:
944 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
945 break;
946
947 case BFA_ITNIM_SM_HWFAIL:
948 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
949 bfa_reqq_wcancel(&itnim->reqq_wait);
950 bfa_itnim_offline_cb(itnim);
951 break;
952
953 default:
954 bfa_sm_fault(itnim->bfa, event);
955 }
956}
957
958/**
959 * Offline state.
960 */
961static void
962bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
963{
964 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
965 bfa_trc(itnim->bfa, event);
966
967 switch (event) {
968 case BFA_ITNIM_SM_DELETE:
969 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
970 bfa_itnim_iotov_delete(itnim);
971 bfa_fcpim_delitn(itnim);
972 break;
973
974 case BFA_ITNIM_SM_ONLINE:
975 if (bfa_itnim_send_fwcreate(itnim))
976 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
977 else
978 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
979 break;
980
981 case BFA_ITNIM_SM_HWFAIL:
982 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
983 break;
984
985 default:
986 bfa_sm_fault(itnim->bfa, event);
987 }
988}
989
990/**
991 * IOC h/w failed state.
992 */
993static void
994bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
995 enum bfa_itnim_event event)
996{
997 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
998 bfa_trc(itnim->bfa, event);
999
1000 switch (event) {
1001 case BFA_ITNIM_SM_DELETE:
1002 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1003 bfa_itnim_iotov_delete(itnim);
1004 bfa_fcpim_delitn(itnim);
1005 break;
1006
1007 case BFA_ITNIM_SM_OFFLINE:
1008 bfa_itnim_offline_cb(itnim);
1009 break;
1010
1011 case BFA_ITNIM_SM_ONLINE:
1012 if (bfa_itnim_send_fwcreate(itnim))
1013 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
1014 else
1015 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
1016 break;
1017
1018 case BFA_ITNIM_SM_HWFAIL:
1019 break;
1020
1021 default:
1022 bfa_sm_fault(itnim->bfa, event);
1023 }
1024}
1025
1026/**
1027 * Itnim is deleted, waiting for firmware response to delete.
1028 */
1029static void
1030bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
1031{
1032 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1033 bfa_trc(itnim->bfa, event);
1034
1035 switch (event) {
1036 case BFA_ITNIM_SM_FWRSP:
1037 case BFA_ITNIM_SM_HWFAIL:
1038 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1039 bfa_fcpim_delitn(itnim);
1040 break;
1041
1042 default:
1043 bfa_sm_fault(itnim->bfa, event);
1044 }
1045}
1046
1047static void
1048bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1049 enum bfa_itnim_event event)
1050{
1051 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1052 bfa_trc(itnim->bfa, event);
1053
1054 switch (event) {
1055 case BFA_ITNIM_SM_QRESUME:
1056 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
1057 bfa_itnim_send_fwdelete(itnim);
1058 break;
1059
1060 case BFA_ITNIM_SM_HWFAIL:
1061 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1062 bfa_reqq_wcancel(&itnim->reqq_wait);
1063 bfa_fcpim_delitn(itnim);
1064 break;
1065
1066 default:
1067 bfa_sm_fault(itnim->bfa, event);
1068 }
1069}
1070
1071/**
1072 * Initiate cleanup of all IOs on an IOC failure.
1073 */
1074static void
1075bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1076{
1077 struct bfa_tskim_s *tskim;
1078 struct bfa_ioim_s *ioim;
1079 struct list_head *qe, *qen;
1080
1081 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1082 tskim = (struct bfa_tskim_s *) qe;
1083 bfa_tskim_iocdisable(tskim);
1084 }
1085
1086 list_for_each_safe(qe, qen, &itnim->io_q) {
1087 ioim = (struct bfa_ioim_s *) qe;
1088 bfa_ioim_iocdisable(ioim);
1089 }
1090
1091 /**
1092 * For IO request in pending queue, we pretend an early timeout.
1093 */
1094 list_for_each_safe(qe, qen, &itnim->pending_q) {
1095 ioim = (struct bfa_ioim_s *) qe;
1096 bfa_ioim_tov(ioim);
1097 }
1098
1099 list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
1100 ioim = (struct bfa_ioim_s *) qe;
1101 bfa_ioim_iocdisable(ioim);
1102 }
1103}
1104
1105/**
1106 * IO cleanup completion
1107 */
1108static void
1109bfa_itnim_cleanp_comp(void *itnim_cbarg)
1110{
1111 struct bfa_itnim_s *itnim = itnim_cbarg;
1112
1113 bfa_stats(itnim, cleanup_comps);
1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
1115}
1116
1117/**
1118 * Initiate cleanup of all IOs.
1119 */
1120static void
1121bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
1122{
1123 struct bfa_ioim_s *ioim;
1124 struct bfa_tskim_s *tskim;
1125 struct list_head *qe, *qen;
1126
1127 bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
1128
1129 list_for_each_safe(qe, qen, &itnim->io_q) {
1130 ioim = (struct bfa_ioim_s *) qe;
1131
1132 /**
1133 * Move IO to a cleanup queue from active queue so that a later
1134 * TM will not pickup this IO.
1135 */
1136 list_del(&ioim->qe);
1137 list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
1138
1139 bfa_wc_up(&itnim->wc);
1140 bfa_ioim_cleanup(ioim);
1141 }
1142
1143 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1144 tskim = (struct bfa_tskim_s *) qe;
1145 bfa_wc_up(&itnim->wc);
1146 bfa_tskim_cleanup(tskim);
1147 }
1148
1149 bfa_wc_wait(&itnim->wc);
1150}
1151
1152static void
1153__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1154{
1155 struct bfa_itnim_s *itnim = cbarg;
1156
1157 if (complete)
1158 bfa_cb_itnim_online(itnim->ditn);
1159}
1160
1161static void
1162__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1163{
1164 struct bfa_itnim_s *itnim = cbarg;
1165
1166 if (complete)
1167 bfa_cb_itnim_offline(itnim->ditn);
1168}
1169
1170static void
1171__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1172{
1173 struct bfa_itnim_s *itnim = cbarg;
1174
1175 if (complete)
1176 bfa_cb_itnim_sler(itnim->ditn);
1177}
1178
1179/**
1180 * Call to resume any I/O requests waiting for room in request queue.
1181 */
1182static void
1183bfa_itnim_qresume(void *cbarg)
1184{
1185 struct bfa_itnim_s *itnim = cbarg;
1186
1187 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
1188}
1189
1190
1191
1192
1193/**
1194 * bfa_itnim_public
1195 */
1196
1197void
1198bfa_itnim_iodone(struct bfa_itnim_s *itnim)
1199{
1200 bfa_wc_down(&itnim->wc);
1201}
1202
1203void
1204bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
1205{
1206 bfa_wc_down(&itnim->wc);
1207}
1208
1209void
1210bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1211 u32 *dm_len)
1212{
1213 /**
1214 * ITN memory
1215 */
1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
1217}
1218
1219void
1220bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1221{
1222 struct bfa_s *bfa = fcpim->bfa;
1223 struct bfa_itnim_s *itnim;
1224 int i, j;
1225
1226 INIT_LIST_HEAD(&fcpim->itnim_q);
1227
1228 itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
1229 fcpim->itnim_arr = itnim;
1230
1231 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
1232 bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
1233 itnim->bfa = bfa;
1234 itnim->fcpim = fcpim;
1235 itnim->reqq = BFA_REQQ_QOS_LO;
1236 itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
1237 itnim->iotov_active = BFA_FALSE;
1238 bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1239
1240 INIT_LIST_HEAD(&itnim->io_q);
1241 INIT_LIST_HEAD(&itnim->io_cleanup_q);
1242 INIT_LIST_HEAD(&itnim->pending_q);
1243 INIT_LIST_HEAD(&itnim->tsk_q);
1244 INIT_LIST_HEAD(&itnim->delay_comp_q);
1245 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1246 itnim->ioprofile.io_latency.min[j] = ~0;
1247 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1248 }
1249
1250 bfa_meminfo_kva(minfo) = (u8 *) itnim;
1251}
1252
1253void
1254bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
1255{
1256 bfa_stats(itnim, ioc_disabled);
1257 bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
1258}
1259
1260static bfa_boolean_t
1261bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1262{
1263 struct bfi_itnim_create_req_s *m;
1264
1265 itnim->msg_no++;
1266
1267 /**
1268 * check for room in queue to send request now
1269 */
1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1271 if (!m) {
1272 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1273 return BFA_FALSE;
1274 }
1275
1276 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
1277 bfa_lpuid(itnim->bfa));
1278 m->fw_handle = itnim->rport->fw_handle;
1279 m->class = FC_CLASS_3;
1280 m->seq_rec = itnim->seq_rec;
1281 m->msg_no = itnim->msg_no;
1282 bfa_stats(itnim, fw_create);
1283
1284 /**
1285 * queue I/O message to firmware
1286 */
1287 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1288 return BFA_TRUE;
1289}
1290
1291static bfa_boolean_t
1292bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1293{
1294 struct bfi_itnim_delete_req_s *m;
1295
1296 /**
1297 * check for room in queue to send request now
1298 */
1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1300 if (!m) {
1301 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1302 return BFA_FALSE;
1303 }
1304
1305 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
1306 bfa_lpuid(itnim->bfa));
1307 m->fw_handle = itnim->rport->fw_handle;
1308 bfa_stats(itnim, fw_delete);
1309
1310 /**
1311 * queue I/O message to firmware
1312 */
1313 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1314 return BFA_TRUE;
1315}
1316
1317/**
1318 * Cleanup all pending failed inflight requests.
1319 */
1320static void
1321bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1322{
1323 struct bfa_ioim_s *ioim;
1324 struct list_head *qe, *qen;
1325
1326 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1327 ioim = (struct bfa_ioim_s *)qe;
1328 bfa_ioim_delayed_comp(ioim, iotov);
1329 }
1330}
1331
1332/**
1333 * Start all pending IO requests.
1334 */
1335static void
1336bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1337{
1338 struct bfa_ioim_s *ioim;
1339
1340 bfa_itnim_iotov_stop(itnim);
1341
1342 /**
1343 * Abort all inflight IO requests in the queue
1344 */
1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1346
1347 /**
1348 * Start all pending IO requests.
1349 */
1350 while (!list_empty(&itnim->pending_q)) {
1351 bfa_q_deq(&itnim->pending_q, &ioim);
1352 list_add_tail(&ioim->qe, &itnim->io_q);
1353 bfa_ioim_start(ioim);
1354 }
1355}
1356
1357/**
1358 * Fail all pending IO requests
1359 */
1360static void
1361bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1362{
1363 struct bfa_ioim_s *ioim;
1364
1365 /**
1366 * Fail all inflight IO requests in the queue
1367 */
1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1369
1370 /**
1371 * Fail any pending IO requests.
1372 */
1373 while (!list_empty(&itnim->pending_q)) {
1374 bfa_q_deq(&itnim->pending_q, &ioim);
1375 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1376 bfa_ioim_tov(ioim);
1377 }
1378}
1379
1380/**
1381 * IO TOV timer callback. Fail any pending IO requests.
1382 */
1383static void
1384bfa_itnim_iotov(void *itnim_arg)
1385{
1386 struct bfa_itnim_s *itnim = itnim_arg;
1387
1388 itnim->iotov_active = BFA_FALSE;
1389
1390 bfa_cb_itnim_tov_begin(itnim->ditn);
1391 bfa_itnim_iotov_cleanup(itnim);
1392 bfa_cb_itnim_tov(itnim->ditn);
1393}
1394
1395/**
1396 * Start IO TOV timer for failing back pending IO requests in offline state.
1397 */
1398static void
1399bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1400{
1401 if (itnim->fcpim->path_tov > 0) {
1402
1403 itnim->iotov_active = BFA_TRUE;
1404 bfa_assert(bfa_itnim_hold_io(itnim));
1405 bfa_timer_start(itnim->bfa, &itnim->timer,
1406 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
1407 }
1408}
1409
1410/**
1411 * Stop IO TOV timer.
1412 */
1413static void
1414bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1415{
1416 if (itnim->iotov_active) {
1417 itnim->iotov_active = BFA_FALSE;
1418 bfa_timer_stop(&itnim->timer);
1419 }
1420}
1421
1422/**
1423 * Stop IO TOV timer.
1424 */
1425static void
1426bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1427{
1428 bfa_boolean_t pathtov_active = BFA_FALSE;
1429
1430 if (itnim->iotov_active)
1431 pathtov_active = BFA_TRUE;
1432
1433 bfa_itnim_iotov_stop(itnim);
1434 if (pathtov_active)
1435 bfa_cb_itnim_tov_begin(itnim->ditn);
1436 bfa_itnim_iotov_cleanup(itnim);
1437 if (pathtov_active)
1438 bfa_cb_itnim_tov(itnim->ditn);
1439}
1440
1441static void
1442bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1443{
1444 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1445 fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1446 itnim->stats.iocomp_aborted;
1447 fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1448 itnim->stats.iocomp_timedout;
1449 fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1450 itnim->stats.iocom_sqer_needed;
1451 fcpim->del_itn_stats.del_itn_iocom_res_free +=
1452 itnim->stats.iocom_res_free;
1453 fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1454 itnim->stats.iocom_hostabrts;
1455 fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1456 fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1457 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1458}
1459
1460
1461
1462/**
1463 * bfa_itnim_public
1464 */
1465
/**
 * Itnim interrupt processing.
 *
 * Dispatches a firmware (I2H) itnim message to the owning itnim's state
 * machine. The target itnim is looked up from the bfa_handle carried in
 * the message.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		/* firmware session-create completed */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		/* firmware session-delete completed */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		/* sequence-level error recovery event from firmware */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* unexpected message id: trace it and assert */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1509
1510
1511
1512/**
1513 * bfa_itnim_api
1514 */
1515
/**
 * Bind the itnim associated with @rport (looked up by rport_tag) to the
 * driver context @ditn and start firmware session creation.
 *
 * @param[in] bfa   - bfa instance
 * @param[in] rport - base rport this itnim belongs to
 * @param[in] ditn  - driver itnim context, saved for completion callbacks
 *
 * @return the itnim bound to @rport
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1532
/**
 * Start deletion of an itnim; actual teardown is driven by the itnim
 * state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1539
/**
 * Bring an itnim online.
 *
 * @param[in] itnim   - itnim to bring online
 * @param[in] seq_rec - enable FC-tape sequence-level error recovery for
 *                      I/O requests to this target
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1547
/**
 * Take an itnim offline; cleanup of outstanding I/O is driven by the
 * itnim state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1554
/**
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 *
 * I/O is held only while path TOV is configured, the path-tov window is
 * active, and the itnim sits in one of the transient (not fully online)
 * states listed below.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1570
/**
 * Snapshot the per-itnim I/O latency profile.
 *
 * @param[in]  itnim     - itnim to query
 * @param[out] ioprofile - receives a copy of the profile
 *
 * @return BFA_STATUS_OK, or BFA_STATUS_IOPROFILE_OFF when I/O profiling
 *         is not enabled on the fcpim module
 */
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
			struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	/* fill in the metadata fields before copying the profile out */
	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
1588
/**
 * Copy out the per-itnim I/O statistics.
 */
void
bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
	struct bfa_itnim_iostats_s *stats)
{
	*stats = itnim->stats;
}
1595
1596void
1597bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1598{
1599 int j;
1600 bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
1601 bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1602 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1603 itnim->ioprofile.io_latency.min[j] = ~0;
1604}
1605
1606/**
1607 * BFA IO module state machine functions
1608 */
1609
/**
 * IO is not started (unallocated).
 *
 * Initial state of every ioim; BFA_IOIM_SM_START drives it either to
 * the firmware (active), to an SG-page wait, to a request-queue wait,
 * or completes it immediately when the target is offline.
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/*
				 * Target offline and IO not held:
				 * complete immediately with path-tov
				 * status.
				 */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/*
				 * Park the IO on the itnim pending queue
				 * until the path-tov window resolves.
				 */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			/* extra SG pages needed beyond the inline SGEs */
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			/* request CQ full; wait for room */
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* path TOV expired while IO was pending */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1674
/**
 * IO is waiting for SG pages.
 *
 * Any exit path other than SGALLOCED must cancel the pending SG-page
 * wait element before completing the IO.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages arrived; try to send the IO request */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1721
/**
 * IO is active.
 *
 * IO request has been queued to firmware; waiting for a completion,
 * an abort/cleanup request from above, or an IOC failure.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/*
		 * Completed, but the IO tag is not yet reusable: go to
		 * hcb_free so the resource is released only after the
		 * firmware frees it.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* explicit abort requested by the driver */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* no room in request CQ; wait for it */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort due to itnim offline/cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1805
/**
 * IO is retried with new tag.
 *
 * Entered after a sequence-retry: waiting for the old IO tag resource
 * to be freed by firmware before resending the request.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim going away; turn the retry into a cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/** in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1859
/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* stale IO completions while the abort is outstanding */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* explicit abort is superseded by itnim cleanup */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1921
/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* stale IO completions while the cleanup is outstanding */
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1982
/**
 * IO is waiting for room in request CQ
 *
 * Every exit path other than QRESUME must cancel the request-queue
 * wait element before completing the IO.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the IO request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2026
/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the abort request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * Explicit abort superseded by itnim cleanup; keep
		 * waiting for queue room, but as an implicit abort.
		 */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2077
/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room became available; send the cleanup abort now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the cleanup abort could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2127
/**
 * IO bfa callback is pending.
 *
 * Completion has been queued to the driver; once the callback has run
 * (BFA_IOIM_SM_HCB) the IO can be freed immediately.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* already completing; nothing more to do */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2154
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 *
 * Both the driver callback (HCB) and the firmware resource free (FREE)
 * must happen, in either order, before the IO can be released.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* callback done first; park on resfree queue for FREE */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* resource freed first; only the callback remains */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2187
/**
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* already waiting for free; nothing more to do */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2214
2215
2216
2217/**
2218 * hal_ioim_private
2219 */
2220
2221static void
2222__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2223{
2224 struct bfa_ioim_s *ioim = cbarg;
2225
2226 if (!complete) {
2227 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2228 return;
2229 }
2230
2231 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2232}
2233
/**
 * Deferred callback for an IO completion that needs the full response:
 * extracts sense data and residue from the saved firmware response and
 * reports the completion to the driver.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* response message was saved by bfa_ioim_isr() while active */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 * (underrun positive, overrun negative)
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = bfa_os_ntohl(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2276
2277static void
2278__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2279{
2280 struct bfa_ioim_s *ioim = cbarg;
2281
2282 if (!complete) {
2283 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2284 return;
2285 }
2286
2287 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2288 0, 0, NULL, 0);
2289}
2290
2291static void
2292__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2293{
2294 struct bfa_ioim_s *ioim = cbarg;
2295
2296 bfa_stats(ioim->itnim, path_tov_expired);
2297 if (!complete) {
2298 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2299 return;
2300 }
2301
2302 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2303 0, 0, NULL, 0);
2304}
2305
2306static void
2307__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2308{
2309 struct bfa_ioim_s *ioim = cbarg;
2310
2311 if (!complete) {
2312 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2313 return;
2314 }
2315
2316 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2317}
2318
/**
 * Callback invoked when the SG pages this IO was waiting for become
 * available: claim them, build the SGE chain, and resume the state
 * machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* move the allocated pages from the wait element to this IO */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2329
/**
 * Send I/O request to firmware.
 *
 * Builds the BFI IO request message in place on the request queue:
 * inline SGE, FCP command block, and message header. When the request
 * queue is full, arms a queue-wait element and returns BFA_FALSE so the
 * state machine can park the IO in its qfull state.
 *
 * @return BFA_TRUE if the request was queued, BFA_FALSE on queue full.
 */
static	bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here: first scatterlist entry goes
	 * directly into the request message
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	/*
	 * Second inline SGE: link to the SG pages when more elements
	 * follow, otherwise a terminating page-data-length SGE.
	 */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);

	/**
	 * set up I/O message header: opcode selects the firmware
	 * fast path (READ/WRITE) or the generic path (IO)
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - bidirectional uses the generic opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence recovery or unaligned size forces the generic path */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2450
/**
 * Setup any additional SG pages needed.Inline SG element is setup
 * at queuing time.
 *
 * @return BFA_TRUE when SG pages are ready (or none are needed),
 *         BFA_FALSE when the IO must wait for SG page allocation
 *         (a wait element has been armed).
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* no pages available now; wait for them */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}
2480
/**
 * Populate the allocated SG pages with SGEs for all scatterlist
 * elements beyond the inline ones, chaining pages together with link
 * SGEs and terminating the chain with a PGDLEN SGE.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int	sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32	pgcumsz;
	u64	addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/* first BFI_SGE_INLINE elements were placed in the request msg */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	/* skip past the scatterlist entry consumed by the inline SGE */
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags: mid-page, end-of-page, or very
			 * last data element
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sge);
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page: either a link SGE
		 * pointing at the next page, or a terminating PGDLEN SGE
		 * carrying the page's cumulative data length
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;

		bfa_sge_to_le(sge);
	} while (sgeid < ioim->nsges);
}
2542
/**
 * Send I/O abort request to firmware.
 *
 * The opcode distinguishes a driver-requested (explicit) abort from an
 * implicit cleanup abort, per iosp->abort_explicit.
 *
 * @return BFA_TRUE if the abort was queued, BFA_FALSE on queue full
 *         (caller is responsible for arming a queue-wait element).
 */
static	bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = bfa_os_htons(ioim->iotag);
	/* bump the abort tag so stale abort completions can be detected */
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2577
/**
 * Call to resume any I/O requests waiting for room in request queue.
 *
 * Installed as the request-queue wait callback for each ioim; simply
 * forwards a QRESUME event to the IO state machine.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
2589
2590
2591static void
2592bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2593{
2594 /**
2595 * Move IO from itnim queue to fcpim global queue since itnim will be
2596 * freed.
2597 */
2598 list_del(&ioim->qe);
2599 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2600
2601 if (!ioim->iosp->tskim) {
2602 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2603 bfa_cb_dequeue(&ioim->hcb_qe);
2604 list_del(&ioim->qe);
2605 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2606 }
2607 bfa_itnim_iodone(ioim->itnim);
2608 } else
2609 bfa_tskim_iodone(ioim->iosp->tskim);
2610}
2611
/**
 * Return BFA_TRUE if the IO is in a state where an abort can be issued.
 *
 * Aborts are refused when the IO has not started (and is not held on
 * the itnim pending queue), is already being aborted, or is already
 * completing/freeing.
 */
static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}
2626
2627/**
2628 * or after the link comes back.
2629 */
2630void
2631bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2632{
2633 /**
2634 * If path tov timer expired, failback with PATHTOV status - these
2635 * IO requests are not normally retried by IO stack.
2636 *
2637 * Otherwise device cameback online and fail it with normal failed
2638 * status so that IO stack retries these failed IO requests.
2639 */
2640 if (iotov)
2641 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2642 else {
2643 ioim->io_cbfn = __bfa_cb_ioim_failed;
2644 bfa_stats(ioim->itnim, iocom_nexus_abort);
2645 }
2646 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2647
2648 /**
2649 * Move IO to fcpim global queue since itnim will be
2650 * freed.
2651 */
2652 list_del(&ioim->qe);
2653 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2654}
2655
2656
2657
2658/**
2659 * hal_ioim_friend
2660 */
2661
/**
 * Memory allocation and initialization.
 *
 * Claims KVA for the ioim and ioim_sp arrays and DMA-able memory for
 * per-IO sense buffers from the pre-sized meminfo pools, then
 * initializes every ioim and places it on the free queue.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8			*snsinfo;
	u32		snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		/* each IO gets its own BFI_IOIM_SNSLEN slice of sense DMA */
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2725
/**
 * Driver detach time call.
 *
 * Intentionally empty: IOIM memory was carved out of the module meminfo
 * block in bfa_ioim_attach(), so there is nothing to release here.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
2733
/**
 * Handle an IO completion response from firmware: look up the IOIM by
 * tag, map the firmware completion status to an IOIM state machine
 * event, and deliver it.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* stash the response for completion processing while still active */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	/*
	 * reuse_io_tag == 0 means firmware has released the IO tag (DONE);
	 * nonzero means the tag is still held by firmware (COMP).
	 */
	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fallthrough - timed-out IOs are completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* firmware asks for a sequence-level retry of this IO */
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		/* stale abort completion for an earlier abort request */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2817
/**
 * Fast-path handler for good IO completions from firmware (separate
 * message from the full-status path handled in bfa_ioim_isr()).
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	/* latency-profiling hook; no-op unless profiling is enabled */
	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
2836
/**
 * Record the IO start timestamp for latency profiling.
 */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = bfa_os_get_clock();
}
2842
2843void
2844bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2845{
2846 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2847 u32 index = bfa_ioim_get_index(fcp_dl);
2848 u64 end_time = bfa_os_get_clock();
2849 struct bfa_itnim_latency_s *io_lat =
2850 &(ioim->itnim->ioprofile.io_latency);
2851 u32 val = (u32)(end_time - ioim->start_time);
2852
2853 bfa_itnim_ioprofile_update(ioim->itnim, index);
2854
2855 io_lat->count[index]++;
2856 io_lat->min[index] = (io_lat->min[index] < val) ?
2857 io_lat->min[index] : val;
2858 io_lat->max[index] = (io_lat->max[index] > val) ?
2859 io_lat->max[index] : val;
2860 io_lat->avg[index] += val;
2861}
/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;	/* cleanup not driven by a TM command */
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2874
/**
 * Clean up an IO on behalf of a task management command; the owning
 * @tskim is recorded so it can be informed of this IO's completion.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2884
/**
 * IOC failure handling: fail this IO via the HWFAIL event since the
 * controller can no longer complete it.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2895
/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2905
2906
2907
2908/**
2909 * hal_ioim_api
2910 */
2911
/**
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * Returns NULL (and counts no_iotags) if the free pool is exhausted;
 * otherwise the IOIM is bound to @dio/@itnim and queued on the itnim's
 * active IO list.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;	/* SG pages allocated later if needed */

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2944
/**
 * Return an IOIM to the free pool, releasing any SG pages it holds.
 * Must only be called once the state machine is back in uninit.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	/* sgpg_q may only be populated for IOs that exceeded inline SGEs */
	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
		      (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2965
2966void
2967bfa_ioim_start(struct bfa_ioim_s *ioim)
2968{
2969 bfa_trc_fp(ioim->bfa, ioim->iotag);
2970
2971 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2972
2973 /**
2974 * Obtain the queue over which this request has to be issued
2975 */
2976 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2977 bfa_cb_ioim_get_reqq(ioim->dio) :
2978 bfa_itnim_get_reqq(ioim);
2979
2980 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2981}
2982
2983/**
2984 * Driver I/O abort request.
2985 */
2986bfa_status_t
2987bfa_ioim_abort(struct bfa_ioim_s *ioim)
2988{
2989
2990 bfa_trc(ioim->bfa, ioim->iotag);
2991
2992 if (!bfa_ioim_is_abortable(ioim))
2993 return BFA_STATUS_FAILED;
2994
2995 bfa_stats(ioim->itnim, io_aborts);
2996 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2997
2998 return BFA_STATUS_OK;
2999}
3000
3001
3002/**
3003 * BFA TSKIM state machine functions
3004 */
3005
/**
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/**
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		/* no room in the request queue: park until it drains */
		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3042
/**
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* ITN went offline: abort the in-flight TM on the wire */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3079
/**
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/**
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3111
/**
 * TM command completed on the wire; waiting for cleanup of the IOs in
 * its scope before notifying the driver of TM completion.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3140
/**
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3175
/**
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
			   enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/**
		 * TM completed while we waited for CQ space; still send the
		 * abort to clean up.
		 *
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3210
/**
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* completion already queued; nothing more to do */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3236
3237
3238
3239/**
3240 * hal_tskim_private
3241 */
3242
3243static void
3244__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3245{
3246 struct bfa_tskim_s *tskim = cbarg;
3247
3248 if (!complete) {
3249 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3250 return;
3251 }
3252
3253 bfa_stats(tskim->itnim, tm_success);
3254 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3255}
3256
3257static void
3258__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3259{
3260 struct bfa_tskim_s *tskim = cbarg;
3261
3262 if (!complete) {
3263 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3264 return;
3265 }
3266
3267 bfa_stats(tskim->itnim, tm_failures);
3268 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3269 BFI_TSKIM_STS_FAILED);
3270}
3271
3272static bfa_boolean_t
3273bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3274{
3275 switch (tskim->tm_cmnd) {
3276 case FCP_TM_TARGET_RESET:
3277 return BFA_TRUE;
3278
3279 case FCP_TM_ABORT_TASK_SET:
3280 case FCP_TM_CLEAR_TASK_SET:
3281 case FCP_TM_LUN_RESET:
3282 case FCP_TM_CLEAR_ACA:
3283 return (tskim->lun == lun);
3284
3285 default:
3286 bfa_assert(0);
3287 }
3288
3289 return BFA_FALSE;
3290}
3291
/**
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	INIT_LIST_HEAD(&tskim->io_q);

	/**
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			/* in-scope active IO: move to this TM's io_q */
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/**
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			/* never sent to firmware: fail with IOTOV now */
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
3329
/**
 * IO cleanup completion: waiting-counter callback fired once every IO in
 * the TM's scope has finished cleanup.
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
3341
/**
 * Clean up every IO gathered into the TM command's scope, using a
 * waiting counter so bfa_tskim_cleanp_comp() fires when all are done.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);	/* one count per IO being cleaned */
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	/* drops the initial count; completes immediately if io_q was empty */
	bfa_wc_wait(&tskim->wc);
}
3361
/**
 * Send task management request to firmware.
 *
 * Returns BFA_FALSE when the request queue is full (caller must wait
 * and retry via qresume).
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3396
/**
 * Send abort request to cleanup an active TM to firmware.
 *
 * Returns BFA_FALSE when the request queue is full (caller must wait
 * and retry via qresume).
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3427
/**
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3439
3440/**
3441 * Cleanup IOs associated with a task mangement command on IOC failures.
3442 */
3443static void
3444bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3445{
3446 struct bfa_ioim_s *ioim;
3447 struct list_head *qe, *qen;
3448
3449 list_for_each_safe(qe, qen, &tskim->io_q) {
3450 ioim = (struct bfa_ioim_s *) qe;
3451 bfa_ioim_iocdisable(ioim);
3452 }
3453}
3454
3455
3456
3457/**
3458 * hal_tskim_friend
3459 */
3460
/**
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/* one cleaned-up IO down; last one triggers bfa_tskim_cleanp_comp */
	bfa_wc_down(&tskim->wc);
}
3469
/**
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;	/* no itnim notification on this path */
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3480
/**
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;	/* itnim wants notification on TM comp */
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3491
/**
 * Memory allocation and initialization.
 *
 * Carves the TSKIM array out of the KVA region of @minfo, initializes
 * each entry and places it on the free queue; the KVA cursor is advanced
 * past the array at the end.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;	/* tag doubles as index into tskim_arr */
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
			       tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	/* tskim now points one past the array: claim the KVA */
	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}
3524
/**
 * Driver detach time call — intentionally empty for now.
 */
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/**
	 * @todo
	 */
}
3532
/**
 * Handle a TM completion message from firmware: look up the TSKIM by
 * tag and translate the firmware status into a state machine event.
 */
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/**
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
3558
3559
3560
3561/**
3562 * hal_tskim_api
3563 */
3564
3565
3566struct bfa_tskim_s *
3567bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3568{
3569 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3570 struct bfa_tskim_s *tskim;
3571
3572 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3573
3574 if (tskim)
3575 tskim->dtsk = dtsk;
3576
3577 return tskim;
3578}
3579
/**
 * Return a TM command instance to the free pool; it must currently be
 * linked on its itnim's active TM queue.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3587
/**
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	t_secs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;	/* set later if itnim needs TM comp notify */
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
new file mode 100644
index 000000000000..3bf343160aac
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -0,0 +1,401 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_H__
19#define __BFA_FCPIM_H__
20
21#include "bfa.h"
22#include "bfa_svc.h"
23#include "bfi_ms.h"
24#include "bfa_defs_svc.h"
25#include "bfa_cs.h"
26
27
28#define BFA_ITNIM_MIN 32
29#define BFA_ITNIM_MAX 1024
30
31#define BFA_IOIM_MIN 8
32#define BFA_IOIM_MAX 2000
33
34#define BFA_TSKIM_MIN 4
35#define BFA_TSKIM_MAX 512
36#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
37#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
38
39
40#define bfa_itnim_ioprofile_update(__itnim, __index) \
41 (__itnim->ioprofile.iocomps[__index]++)
42
43#define BFA_IOIM_RETRY_TAG_OFFSET 11
44#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
45#define BFA_IOIM_RETRY_MAX 7
46
47/* Buckets are are 512 bytes to 2MB */
48static inline u32
49bfa_ioim_get_index(u32 n) {
50 int pos = 0;
51 if (n >= (1UL)<<22)
52 return BFA_IOBUCKET_MAX - 1;
53 n >>= 8;
54 if (n >= (1UL)<<16)
55 n >>= 16; pos += 16;
56 if (n >= 1 << 8)
57 n >>= 8; pos += 8;
58 if (n >= 1 << 4)
59 n >>= 4; pos += 4;
60 if (n >= 1 << 2)
61 n >>= 2; pos += 2;
62 if (n >= 1 << 1)
63 pos += 1;
64
65 return (n == 0) ? (0) : pos;
66}
67
68/*
69 * forward declarations
70 */
71struct bfa_ioim_s;
72struct bfa_tskim_s;
73struct bfad_ioim_s;
74struct bfad_tskim_s;
75
76typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
77
/**
 * FCP initiator-mode module instance: owns the itnim/ioim/tskim resource
 * arrays and the module-global IO queues.
 */
struct bfa_fcpim_mod_s {
	struct bfa_s		*bfa;		/* owning BFA instance */
	struct bfa_itnim_s	*itnim_arr;	/* itnim resource array */
	struct bfa_ioim_s	*ioim_arr;	/* ioim resource array */
	struct bfa_ioim_sp_s	*ioim_sp_arr;	/* ioim slow-path array */
	struct bfa_tskim_s	*tskim_arr;	/* TM command array */
	struct bfa_dma_s	snsbase;	/* DMA base of per-IO sense bufs */
	int			num_itnims;
	int			num_ioim_reqs;	/* sizes ioim_arr/ioim_sp_arr */
	int			num_tskim_reqs;	/* sizes tskim_arr */
	u32			path_tov;	/* path TOV — presumably millisecs, cf. BFA_FCPIM_PATHTOV_* */
	u16			q_depth;
	u8			reqq;		/* Request queue to be used */
	u8			rsvd;
	struct list_head	itnim_q;	/* queue of active itnim */
	struct list_head	ioim_free_q;	/* free IO resources */
	struct list_head	ioim_resfree_q; /* IOs waiting for f/w */
	struct list_head	ioim_comp_q;	/* IO global comp Q */
	struct list_head	tskim_free_q;	/* free TM command instances */
	u32			ios_active;	/* current active IOs */
	u32			delay_comp;
	struct bfa_fcpim_del_itn_stats_s del_itn_stats;	/* stats of deleted itnims */
	bfa_boolean_t		ioredirect;	/* IO redirection enabled? */
	bfa_boolean_t		io_profile;	/* IO latency profiling on? */
	u32			io_profile_start_time;
	bfa_fcpim_profile_t	profile_comp;	/* profiling completion hook */
	bfa_fcpim_profile_t	profile_start;	/* profiling start hook */
};
106
/**
 * BFA IO (initiator mode)
 */
struct bfa_ioim_s {
	struct list_head	qe;		/* queue element */
	bfa_sm_t		sm;		/* BFA ioim state machine */
	struct bfa_s		*bfa;		/* BFA module */
	struct bfa_fcpim_mod_s	*fcpim;		/* parent fcpim module */
	struct bfa_itnim_s	*itnim;		/* i-t-n nexus for this IO */
	struct bfad_ioim_s	*dio;		/* driver IO handle */
	u16			iotag;		/* FWI IO tag */
	u16			abort_tag;	/* unique abort request tag */
	u16			nsges;		/* number of SG elements */
	u16			nsgpgs;		/* number of SG pages */
	struct bfa_sgpg_s	*sgpg;		/* first SG page */
	struct list_head	sgpg_q;		/* allocated SG pages */
	struct bfa_cb_qe_s	hcb_qe;		/* bfa callback qelem */
	bfa_cb_cbfn_t		io_cbfn;	/* IO completion handler */
	struct bfa_ioim_sp_s	*iosp;		/* slow-path IO handling */
	u8			reqq;		/* Request queue for I/O */
	u64			start_time;	/* IO's Profile start val */
};
129
130
/* Slow-path state kept out of the hot bfa_ioim_s structure. */
struct bfa_ioim_sp_s {
	struct bfi_msg_s	comp_rspmsg;	/* IO comp f/w response */
	u8			*snsinfo;	/* sense info for this IO */
	struct bfa_sgpg_wqe_s	sgpg_wqe;	/* waitq elem for sgpg */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	bfa_boolean_t		abort_explicit;	/* aborted by OS */
	struct bfa_tskim_s	*tskim;		/* Relevant TM cmd */
};
139
/**
 * BFA Task management command (initiator mode)
 */
struct bfa_tskim_s {
	struct list_head	qe;		/* queue element */
	bfa_sm_t		sm;		/* BFA tskim state machine */
	struct bfa_s		*bfa;		/* BFA module  */
	struct bfa_fcpim_mod_s	*fcpim;		/* parent fcpim module */
	struct bfa_itnim_s	*itnim;		/* i-t-n nexus for this IO  */
	struct bfad_tskim_s	*dtsk;		/* driver task mgmt cmnd */
	bfa_boolean_t		notify;		/* notify itnim on TM comp  */
	lun_t			lun;		/* lun if applicable */
	enum fcp_tm_cmnd	tm_cmnd;	/* task management command  */
	u16			tsk_tag;	/* FWI IO tag */
	u8			tsecs;		/* timeout in seconds */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	struct list_head	io_q;		/* queue of affected IOs */
	struct bfa_wc_s		wc;		/* waiting counter */
	struct bfa_cb_qe_s	hcb_qe;		/* bfa callback qelem */
	enum bfi_tskim_status	tsk_status;	/* TM status */
};
161
162
/**
 * BFA i-t-n (initiator mode)
 */
struct bfa_itnim_s {
	struct list_head	qe;		/* queue element */
	bfa_sm_t		sm;		/* i-t-n im BFA state machine */
	struct bfa_s		*bfa;		/* bfa instance */
	struct bfa_rport_s	*rport;		/* bfa rport */
	void			*ditn;		/* driver i-t-n structure */
	struct bfi_mhdr_s	mhdr;		/* pre-built mhdr */
	u8			msg_no;		/* itnim/rport firmware handle */
	u8			reqq;		/* CQ for requests */
	struct bfa_cb_qe_s	hcb_qe;		/* bfa callback qelem */
	struct list_head	pending_q;	/* queue of pending IO requests */
	struct list_head	io_q;		/* queue of active IO requests */
	struct list_head	io_cleanup_q;	/* IO being cleaned up */
	struct list_head	tsk_q;		/* queue of active TM commands */
	struct list_head	delay_comp_q;	/* queue of failed inflight cmds */
	bfa_boolean_t		seq_rec;	/* SQER supported */
	bfa_boolean_t		is_online;	/* itnim is ONLINE for IO */
	bfa_boolean_t		iotov_active;	/* IO TOV timer is active */
	struct bfa_wc_s		wc;		/* waiting counter */
	struct bfa_timer_s	timer;		/* pending IO TOV */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	struct bfa_fcpim_mod_s	*fcpim;		/* fcpim module */
	struct bfa_itnim_iostats_s	stats;	/* per-itnim IO statistics */
	struct bfa_itnim_ioprofile_s ioprofile;	/* per-itnim latency profile */
};
191
192
193#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
194#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
/*
 * Tag-to-resource lookup macros.  These previously expanded to the
 * caller-scope variable `fcpim` instead of their own `_fcpim` argument
 * (an unhygienic macro that compiled only because every caller happened
 * to name its local `fcpim`); they now use the parameter, parenthesized.
 */
#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)				\
	(&(_fcpim)->ioim_arr[((_iotag) & BFA_IOIM_RETRY_TAG_MASK)])
#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)				\
	(&(_fcpim)->tskim_arr[(_tmtag) & ((_fcpim)->num_tskim_reqs - 1)])
199
200#define bfa_io_profile_start_time(_bfa) \
201 (_bfa->modules.fcpim_mod.io_profile_start_time)
202#define bfa_fcpim_get_io_profile(_bfa) \
203 (_bfa->modules.fcpim_mod.io_profile)
204
205static inline bfa_boolean_t
206bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
207{
208 u16 k = ioim->iotag;
209
210 k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
211
212 if (k > BFA_IOIM_RETRY_MAX)
213 return BFA_FALSE;
214 ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
215 ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
216 return BFA_TRUE;
217}
218/*
219 * function prototypes
220 */
221void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
222 struct bfa_meminfo_s *minfo);
223void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
224void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
225void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
226 struct bfi_msg_s *msg);
227void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
228void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
229 struct bfa_tskim_s *tskim);
230void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
231void bfa_ioim_tov(struct bfa_ioim_s *ioim);
232
233void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
234 struct bfa_meminfo_s *minfo);
235void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
236void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
237void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
238void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
239void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
240
241void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
242 u32 *dm_len);
243void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
244 struct bfa_meminfo_s *minfo);
245void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
246void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
247void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
248void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
249void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
250bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
251void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
252void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
253
254
255/*
256 * bfa fcpim module API functions
257 */
258void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
259u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
260void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
261u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
262bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
263 struct bfa_itnim_iostats_s *modstats);
264bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
265 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
266bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
267 struct bfa_fcpim_del_itn_stats_s *modstats);
268bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
269void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
270 struct bfa_itnim_iostats_s *itnim_stats);
271bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
272void bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
273 bfa_boolean_t state);
274void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
275bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
276bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa)				\
	(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)

/*
 * Round-robin selection of the next request queue.  Wrapped in
 * do { } while (0) so the multi-statement macro behaves as a single
 * statement and is safe inside an unbraced if/else (the previous bare
 * { } form left a stray ';' after expansion).
 */
#define bfa_fcpim_get_next_reqq(__bfa, __qid)				\
do {									\
	struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa);		\
	__fcpim->reqq++;						\
	__fcpim->reqq &= (BFI_IOC_MAX_CQS - 1);				\
	*(__qid) = __fcpim->reqq;					\
} while (0)

/*
 * Map a message tag to its CQ id.  Expansion is an expression (no
 * embedded ';'), so the caller supplies the statement terminator.
 */
#define bfa_iocfc_map_msg_to_qid(__msg, __qid)				\
	(*(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1)))
290/*
291 * bfa itnim API functions
292 */
293struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
294 struct bfa_rport_s *rport, void *itnim);
295void bfa_itnim_delete(struct bfa_itnim_s *itnim);
296void bfa_itnim_online(struct bfa_itnim_s *itnim,
297 bfa_boolean_t seq_rec);
298void bfa_itnim_offline(struct bfa_itnim_s *itnim);
299void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
300 struct bfa_itnim_iostats_s *stats);
301void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
302bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
303 struct bfa_itnim_ioprofile_s *ioprofile);
304#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
305
306/**
307 * BFA completion callback for bfa_itnim_online().
308 *
309 * @param[in] itnim FCS or driver itnim instance
310 *
311 * return None
312 */
313void bfa_cb_itnim_online(void *itnim);
314
315/**
316 * BFA completion callback for bfa_itnim_offline().
317 *
318 * @param[in] itnim FCS or driver itnim instance
319 *
320 * return None
321 */
322void bfa_cb_itnim_offline(void *itnim);
323void bfa_cb_itnim_tov_begin(void *itnim);
324void bfa_cb_itnim_tov(void *itnim);
325
326/**
327 * BFA notification to FCS/driver for second level error recovery.
328 *
329 * Atleast one I/O request has timedout and target is unresponsive to
330 * repeated abort requests. Second level error recovery should be initiated
331 * by starting implicit logout and recovery procedures.
332 *
333 * @param[in] itnim FCS or driver itnim instance
334 *
335 * return None
336 */
337void bfa_cb_itnim_sler(void *itnim);
338
339/*
340 * bfa ioim API functions
341 */
342struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
343 struct bfad_ioim_s *dio,
344 struct bfa_itnim_s *itnim,
345 u16 nsgles);
346
347void bfa_ioim_free(struct bfa_ioim_s *ioim);
348void bfa_ioim_start(struct bfa_ioim_s *ioim);
349bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim);
350void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
351 bfa_boolean_t iotov);
352
353
354/**
355 * I/O completion notification.
356 *
357 * @param[in] dio driver IO structure
358 * @param[in] io_status IO completion status
359 * @param[in] scsi_status SCSI status returned by target
360 * @param[in] sns_len SCSI sense length, 0 if none
361 * @param[in] sns_info SCSI sense data, if any
362 * @param[in] residue Residual length
363 *
364 * @return None
365 */
366void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
367 enum bfi_ioim_status io_status,
368 u8 scsi_status, int sns_len,
369 u8 *sns_info, s32 residue);
370
371/**
372 * I/O good completion notification.
373 *
374 * @param[in] dio driver IO structure
375 *
376 * @return None
377 */
378void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
379
380/**
381 * I/O abort completion notification
382 *
383 * @param[in] dio driver IO that was aborted
384 *
385 * @return None
386 */
387void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
388
389/*
390 * bfa tskim API functions
391 */
392struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
393 struct bfad_tskim_s *dtsk);
394void bfa_tskim_free(struct bfa_tskim_s *tskim);
395void bfa_tskim_start(struct bfa_tskim_s *tskim,
396 struct bfa_itnim_s *itnim, lun_t lun,
397 enum fcp_tm_cmnd tm, u8 t_secs);
398void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
399 enum bfi_tskim_status tsk_status);
400
401#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
deleted file mode 100644
index 762516cb5cb2..000000000000
--- a/drivers/scsi/bfa/bfa_fcpim_priv.h
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_PRIV_H__
19#define __BFA_FCPIM_PRIV_H__
20
21#include <bfa_fcpim.h>
22#include <defs/bfa_defs_fcpim.h>
23#include <cs/bfa_wc.h>
24#include "bfa_sgpg_priv.h"
25
26#define BFA_ITNIM_MIN 32
27#define BFA_ITNIM_MAX 1024
28
29#define BFA_IOIM_MIN 8
30#define BFA_IOIM_MAX 2000
31
32#define BFA_TSKIM_MIN 4
33#define BFA_TSKIM_MAX 512
34#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
35#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
36
37#define bfa_fcpim_stats(__fcpim, __stats) \
38 ((__fcpim)->stats.__stats++)
39
/**
 * FCP initiator-mode module state for one BFA instance: owns the
 * itnim/ioim/tskim resource pools, their free/active queues, and
 * module-wide configuration (path TOV, queue depth, stats).
 */
struct bfa_fcpim_mod_s {
	struct bfa_s *bfa;
	struct bfa_itnim_s *itnim_arr;		/* itnim resource pool */
	struct bfa_ioim_s *ioim_arr;		/* IO request pool */
	struct bfa_ioim_sp_s *ioim_sp_arr;	/* slow-path state, one per ioim */
	struct bfa_tskim_s *tskim_arr;		/* task-mgmt request pool */
	struct bfa_dma_s snsbase;		/* DMA base for sense buffers */
	int num_itnims;
	int num_ioim_reqs;
	int num_tskim_reqs;
	u32 path_tov;		/* path timeout value, millisecs */
	u16 q_depth;
	u8 reqq;		/* Request queue to be used */
	u8 rsvd;		/* pad to keep following fields aligned */
	struct list_head itnim_q;	/* queue of active itnim */
	struct list_head ioim_free_q;	/* free IO resources */
	struct list_head ioim_resfree_q; /* IOs waiting for f/w */
	struct list_head ioim_comp_q;	/* IO global comp Q */
	struct list_head tskim_free_q;
	u32 ios_active;		/* current active IOs */
	u32 delay_comp;
	struct bfa_fcpim_stats_s stats;
	bfa_boolean_t ioredirect;	/* IO redirection enabled? */
};
64
65struct bfa_ioim_s;
66struct bfa_tskim_s;
67
/**
 * BFA IO (initiator mode) — tracks one SCSI IO request through the
 * firmware: its i-t-n nexus, SG resources, and completion callback.
 */
struct bfa_ioim_s {
	struct list_head qe;		/* queue elememt */
	bfa_sm_t sm;			/* BFA ioim state machine */
	struct bfa_s *bfa;		/* BFA module */
	struct bfa_fcpim_mod_s *fcpim;	/* parent fcpim module */
	struct bfa_itnim_s *itnim;	/* i-t-n nexus for this IO */
	struct bfad_ioim_s *dio;	/* driver IO handle */
	u16 iotag;			/* FWI IO tag */
	u16 abort_tag;			/* unqiue abort request tag */
	u16 nsges;			/* number of SG elements */
	u16 nsgpgs;			/* number of SG pages */
	struct bfa_sgpg_s *sgpg;	/* first SG page */
	struct list_head sgpg_q;	/* allocated SG pages */
	struct bfa_cb_qe_s hcb_qe;	/* bfa callback qelem */
	bfa_cb_cbfn_t io_cbfn;		/* IO completion handler */
	struct bfa_ioim_sp_s *iosp;	/* slow-path IO handling */
	u8 reqq;			/* Request queue for I/O */
};
89
/**
 * Slow-path companion state for one bfa_ioim_s; kept out of the hot
 * struct so the common completion path stays compact.
 */
struct bfa_ioim_sp_s {
	struct bfi_msg_s comp_rspmsg;	/* IO comp f/w response */
	u8 *snsinfo;			/* sense info for this IO */
	struct bfa_sgpg_wqe_s sgpg_wqe;	/* waitq elem for sgpg */
	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
	bfa_boolean_t abort_explicit;	/* aborted by OS */
	struct bfa_tskim_s *tskim;	/* Relevant TM cmd */
};
98
/**
 * BFA Task management command (initiator mode) — one TM request
 * (e.g. LUN reset) plus the queue of IOs it affects.
 */
struct bfa_tskim_s {
	struct list_head qe;
	bfa_sm_t sm;			/* tskim state machine */
	struct bfa_s *bfa;		/* BFA module */
	struct bfa_fcpim_mod_s *fcpim;	/* parent fcpim module */
	struct bfa_itnim_s *itnim;	/* i-t-n nexus for this IO */
	struct bfad_tskim_s *dtsk;	/* driver task mgmt cmnd */
	bfa_boolean_t notify;		/* notify itnim on TM comp */
	lun_t lun;			/* lun if applicable */
	enum fcp_tm_cmnd tm_cmnd;	/* task management command */
	u16 tsk_tag;			/* FWI IO tag */
	u8 tsecs;			/* timeout in seconds */
	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
	struct list_head io_q;		/* queue of affected IOs */
	struct bfa_wc_s wc;		/* waiting counter */
	struct bfa_cb_qe_s hcb_qe;	/* bfa callback qelem */
	enum bfi_tskim_status tsk_status; /* TM status */
};
120
/**
 * BFA i-t-n (initiator mode) — one initiator/target nexus: its state
 * machine, pending/active/cleanup IO queues, and per-nexus stats.
 */
struct bfa_itnim_s {
	struct list_head qe;		/* queue element */
	bfa_sm_t sm;			/* i-t-n im BFA state machine */
	struct bfa_s *bfa;		/* bfa instance */
	struct bfa_rport_s *rport;	/* bfa rport */
	void *ditn;			/* driver i-t-n structure */
	struct bfi_mhdr_s mhdr;		/* pre-built mhdr */
	u8 msg_no;			/* itnim/rport firmware handle */
	u8 reqq;			/* CQ for requests */
	struct bfa_cb_qe_s hcb_qe;	/* bfa callback qelem */
	struct list_head pending_q;	/* queue of pending IO requests*/
	struct list_head io_q;		/* queue of active IO requests */
	struct list_head io_cleanup_q;	/* IO being cleaned up */
	struct list_head tsk_q;		/* queue of active TM commands */
	struct list_head delay_comp_q;	/* queue of failed inflight cmds */
	bfa_boolean_t seq_rec;		/* SQER supported */
	bfa_boolean_t is_online;	/* itnim is ONLINE for IO */
	bfa_boolean_t iotov_active;	/* IO TOV timer is active */
	struct bfa_wc_s wc;		/* waiting counter */
	struct bfa_timer_s timer;	/* pending IO TOV */
	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
	struct bfa_fcpim_mod_s *fcpim;	/* fcpim module */
	struct bfa_itnim_hal_stats_s stats;
	struct bfa_itnim_latency_s io_latency;
};
149
#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)

/*
 * Tag -> resource lookups.  Both macros previously expanded the bare
 * identifier 'fcpim' instead of the (_fcpim) parameter, so they only
 * compiled when the caller's local variable happened to be named
 * 'fcpim'.  Use the parameter and parenthesize every argument.
 */
#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)				\
	(&(_fcpim)->ioim_arr[(_iotag)])
#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)				\
	(&(_fcpim)->tskim_arr[(_tmtag) & ((_fcpim)->num_tskim_reqs - 1)])
156
157/*
158 * function prototypes
159 */
160void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
161 struct bfa_meminfo_s *minfo);
162void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
163void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
164void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
165 struct bfi_msg_s *msg);
166void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
167void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
168 struct bfa_tskim_s *tskim);
169void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
170void bfa_ioim_tov(struct bfa_ioim_s *ioim);
171
172void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
173 struct bfa_meminfo_s *minfo);
174void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
175void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
176void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
177void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
178void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
179
180void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
181 u32 *dm_len);
182void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
183 struct bfa_meminfo_s *minfo);
184void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
185void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
186void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
187void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
188void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
189bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
190
191#endif /* __BFA_FCPIM_PRIV_H__ */
192
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
deleted file mode 100644
index 76867b5577fa..000000000000
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ /dev/null
@@ -1,1962 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <bfi/bfi_pport.h>
21#include <bfi/bfi_pbc.h>
22#include <cs/bfa_debug.h>
23#include <aen/bfa_aen.h>
24#include <cs/bfa_plog.h>
25#include <aen/bfa_aen_port.h>
26
27BFA_TRC_FILE(HAL, FCPORT);
28BFA_MODULE(fcport);
29
30/*
31 * The port is considered disabled if corresponding physical port or IOC are
32 * disabled explicitly
33 */
34#define BFA_PORT_IS_DISABLED(bfa) \
35 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
36 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
37
38/*
39 * forward declarations
40 */
41static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
42static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
43static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
44static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
45static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
46static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
47static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
48 enum bfa_pport_linkstate event);
49static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
50 enum bfa_pport_linkstate event);
51static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
52static void bfa_fcport_stats_get_timeout(void *cbarg);
53static void bfa_fcport_stats_clr_timeout(void *cbarg);
54
55/**
56 * bfa_pport_private
57 */
58
/**
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START = 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP = 2,		/* stop port state machine */
	BFA_FCPORT_SM_ENABLE = 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP = 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN = 7,	/* firmware link down event */
	BFA_FCPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
};
73
/**
 * BFA port link notification state machine events
 *
 * Driven by the link-up/down events and by the "notification done"
 * acknowledgement from the queued callback.
 */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP = 1,	/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN = 2,	/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
};
83
84static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
85 enum bfa_fcport_sm_event event);
86static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
87 enum bfa_fcport_sm_event event);
88static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
89 enum bfa_fcport_sm_event event);
90static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
91 enum bfa_fcport_sm_event event);
92static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
93 enum bfa_fcport_sm_event event);
94static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
95 enum bfa_fcport_sm_event event);
96static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
97 enum bfa_fcport_sm_event event);
98static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
99 enum bfa_fcport_sm_event event);
100static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
101 enum bfa_fcport_sm_event event);
102static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
103 enum bfa_fcport_sm_event event);
104static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
105 enum bfa_fcport_sm_event event);
106
107static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
108 enum bfa_fcport_ln_sm_event event);
109static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
110 enum bfa_fcport_ln_sm_event event);
111static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
112 enum bfa_fcport_ln_sm_event event);
113static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
114 enum bfa_fcport_ln_sm_event event);
115static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
116 enum bfa_fcport_ln_sm_event event);
117static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
118 enum bfa_fcport_ln_sm_event event);
119static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
120 enum bfa_fcport_ln_sm_event event);
121
/* Maps each port SM handler to its externally visible port state. */
static struct bfa_sm_table_s hal_pport_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
	/* iocfail is reported as IOCDOWN as well */
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
135
136static void
137bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
138{
139 union bfa_aen_data_u aen_data;
140 struct bfa_log_mod_s *logmod = fcport->bfa->logm;
141 wwn_t pwwn = fcport->pwwn;
142 char pwwn_ptr[BFA_STRING_32];
143
144 memset(&aen_data, 0, sizeof(aen_data));
145 wwn2str(pwwn_ptr, pwwn);
146 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
147
148 aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
149 aen_data.port.pwwn = pwwn;
150}
151
152static void
153bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
154 enum bfa_fcport_sm_event event)
155{
156 bfa_trc(fcport->bfa, event);
157
158 switch (event) {
159 case BFA_FCPORT_SM_START:
160 /**
161 * Start event after IOC is configured and BFA is started.
162 */
163 if (bfa_fcport_send_enable(fcport))
164 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
165 else
166 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
167 break;
168
169 case BFA_FCPORT_SM_ENABLE:
170 /**
171 * Port is persistently configured to be in enabled state. Do
172 * not change state. Port enabling is done when START event is
173 * received.
174 */
175 break;
176
177 case BFA_FCPORT_SM_DISABLE:
178 /**
179 * If a port is persistently configured to be disabled, the
180 * first event will a port disable request.
181 */
182 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
183 break;
184
185 case BFA_FCPORT_SM_HWFAIL:
186 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
187 break;
188
189 default:
190 bfa_sm_fault(fcport->bfa, event);
191 }
192}
193
194static void
195bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
196 enum bfa_fcport_sm_event event)
197{
198 bfa_trc(fcport->bfa, event);
199
200 switch (event) {
201 case BFA_FCPORT_SM_QRESUME:
202 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
203 bfa_fcport_send_enable(fcport);
204 break;
205
206 case BFA_FCPORT_SM_STOP:
207 bfa_reqq_wcancel(&fcport->reqq_wait);
208 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
209 break;
210
211 case BFA_FCPORT_SM_ENABLE:
212 /**
213 * Already enable is in progress.
214 */
215 break;
216
217 case BFA_FCPORT_SM_DISABLE:
218 /**
219 * Just send disable request to firmware when room becomes
220 * available in request queue.
221 */
222 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
223 bfa_reqq_wcancel(&fcport->reqq_wait);
224 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
225 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
226 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
227 break;
228
229 case BFA_FCPORT_SM_LINKUP:
230 case BFA_FCPORT_SM_LINKDOWN:
231 /**
232 * Possible to get link events when doing back-to-back
233 * enable/disables.
234 */
235 break;
236
237 case BFA_FCPORT_SM_HWFAIL:
238 bfa_reqq_wcancel(&fcport->reqq_wait);
239 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
240 break;
241
242 default:
243 bfa_sm_fault(fcport->bfa, event);
244 }
245}
246
247static void
248bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
249 enum bfa_fcport_sm_event event)
250{
251 bfa_trc(fcport->bfa, event);
252
253 switch (event) {
254 case BFA_FCPORT_SM_FWRSP:
255 case BFA_FCPORT_SM_LINKDOWN:
256 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
257 break;
258
259 case BFA_FCPORT_SM_LINKUP:
260 bfa_fcport_update_linkinfo(fcport);
261 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
262
263 bfa_assert(fcport->event_cbfn);
264 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
265 break;
266
267 case BFA_FCPORT_SM_ENABLE:
268 /**
269 * Already being enabled.
270 */
271 break;
272
273 case BFA_FCPORT_SM_DISABLE:
274 if (bfa_fcport_send_disable(fcport))
275 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
276 else
277 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
278
279 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
280 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
281 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
282 break;
283
284 case BFA_FCPORT_SM_STOP:
285 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
286 break;
287
288 case BFA_FCPORT_SM_HWFAIL:
289 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
290 break;
291
292 default:
293 bfa_sm_fault(fcport->bfa, event);
294 }
295}
296
297static void
298bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
299 enum bfa_fcport_sm_event event)
300{
301 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
302 bfa_trc(fcport->bfa, event);
303
304 switch (event) {
305 case BFA_FCPORT_SM_LINKUP:
306 bfa_fcport_update_linkinfo(fcport);
307 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
308 bfa_assert(fcport->event_cbfn);
309 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
310 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
311
312 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
313
314 bfa_trc(fcport->bfa,
315 pevent->link_state.vc_fcf.fcf.fipenabled);
316 bfa_trc(fcport->bfa,
317 pevent->link_state.vc_fcf.fcf.fipfailed);
318
319 if (pevent->link_state.vc_fcf.fcf.fipfailed)
320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
321 BFA_PL_EID_FIP_FCF_DISC, 0,
322 "FIP FCF Discovery Failed");
323 else
324 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
325 BFA_PL_EID_FIP_FCF_DISC, 0,
326 "FIP FCF Discovered");
327 }
328
329 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
330 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
331 /**
332 * If QoS is enabled and it is not online,
333 * Send a separate event.
334 */
335 if ((fcport->cfg.qos_enabled)
336 && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
337 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
338
339 break;
340
341 case BFA_FCPORT_SM_LINKDOWN:
342 /**
343 * Possible to get link down event.
344 */
345 break;
346
347 case BFA_FCPORT_SM_ENABLE:
348 /**
349 * Already enabled.
350 */
351 break;
352
353 case BFA_FCPORT_SM_DISABLE:
354 if (bfa_fcport_send_disable(fcport))
355 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
356 else
357 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
358
359 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
360 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
361 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
362 break;
363
364 case BFA_FCPORT_SM_STOP:
365 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
366 break;
367
368 case BFA_FCPORT_SM_HWFAIL:
369 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
370 break;
371
372 default:
373 bfa_sm_fault(fcport->bfa, event);
374 }
375}
376
377static void
378bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
379 enum bfa_fcport_sm_event event)
380{
381 bfa_trc(fcport->bfa, event);
382
383 switch (event) {
384 case BFA_FCPORT_SM_ENABLE:
385 /**
386 * Already enabled.
387 */
388 break;
389
390 case BFA_FCPORT_SM_DISABLE:
391 if (bfa_fcport_send_disable(fcport))
392 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
393 else
394 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
395
396 bfa_fcport_reset_linkinfo(fcport);
397 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
398 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
399 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
400 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
401 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
402 break;
403
404 case BFA_FCPORT_SM_LINKDOWN:
405 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
406 bfa_fcport_reset_linkinfo(fcport);
407 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
408 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
409 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
410 if (BFA_PORT_IS_DISABLED(fcport->bfa))
411 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
412 else
413 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
414 break;
415
416 case BFA_FCPORT_SM_STOP:
417 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
418 bfa_fcport_reset_linkinfo(fcport);
419 if (BFA_PORT_IS_DISABLED(fcport->bfa))
420 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
421 else
422 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
423 break;
424
425 case BFA_FCPORT_SM_HWFAIL:
426 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
427 bfa_fcport_reset_linkinfo(fcport);
428 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
429 if (BFA_PORT_IS_DISABLED(fcport->bfa))
430 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
431 else
432 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
433 break;
434
435 default:
436 bfa_sm_fault(fcport->bfa, event);
437 }
438}
439
440static void
441bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
442 enum bfa_fcport_sm_event event)
443{
444 bfa_trc(fcport->bfa, event);
445
446 switch (event) {
447 case BFA_FCPORT_SM_QRESUME:
448 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
449 bfa_fcport_send_disable(fcport);
450 break;
451
452 case BFA_FCPORT_SM_STOP:
453 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
454 bfa_reqq_wcancel(&fcport->reqq_wait);
455 break;
456
457 case BFA_FCPORT_SM_DISABLE:
458 /**
459 * Already being disabled.
460 */
461 break;
462
463 case BFA_FCPORT_SM_LINKUP:
464 case BFA_FCPORT_SM_LINKDOWN:
465 /**
466 * Possible to get link events when doing back-to-back
467 * enable/disables.
468 */
469 break;
470
471 case BFA_FCPORT_SM_HWFAIL:
472 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
473 bfa_reqq_wcancel(&fcport->reqq_wait);
474 break;
475
476 default:
477 bfa_sm_fault(fcport->bfa, event);
478 }
479}
480
481static void
482bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
483 enum bfa_fcport_sm_event event)
484{
485 bfa_trc(fcport->bfa, event);
486
487 switch (event) {
488 case BFA_FCPORT_SM_FWRSP:
489 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
490 break;
491
492 case BFA_FCPORT_SM_DISABLE:
493 /**
494 * Already being disabled.
495 */
496 break;
497
498 case BFA_FCPORT_SM_ENABLE:
499 if (bfa_fcport_send_enable(fcport))
500 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
501 else
502 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
503
504 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
505 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
506 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
507 break;
508
509 case BFA_FCPORT_SM_STOP:
510 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
511 break;
512
513 case BFA_FCPORT_SM_LINKUP:
514 case BFA_FCPORT_SM_LINKDOWN:
515 /**
516 * Possible to get link events when doing back-to-back
517 * enable/disables.
518 */
519 break;
520
521 case BFA_FCPORT_SM_HWFAIL:
522 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
523 break;
524
525 default:
526 bfa_sm_fault(fcport->bfa, event);
527 }
528}
529
530static void
531bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
532 enum bfa_fcport_sm_event event)
533{
534 bfa_trc(fcport->bfa, event);
535
536 switch (event) {
537 case BFA_FCPORT_SM_START:
538 /**
539 * Ignore start event for a port that is disabled.
540 */
541 break;
542
543 case BFA_FCPORT_SM_STOP:
544 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
545 break;
546
547 case BFA_FCPORT_SM_ENABLE:
548 if (bfa_fcport_send_enable(fcport))
549 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
550 else
551 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
552
553 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
554 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
555 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
556 break;
557
558 case BFA_FCPORT_SM_DISABLE:
559 /**
560 * Already disabled.
561 */
562 break;
563
564 case BFA_FCPORT_SM_HWFAIL:
565 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
566 break;
567
568 default:
569 bfa_sm_fault(fcport->bfa, event);
570 }
571}
572
573static void
574bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
575 enum bfa_fcport_sm_event event)
576{
577 bfa_trc(fcport->bfa, event);
578
579 switch (event) {
580 case BFA_FCPORT_SM_START:
581 if (bfa_fcport_send_enable(fcport))
582 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
583 else
584 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
585 break;
586
587 default:
588 /**
589 * Ignore all other events.
590 */
591 ;
592 }
593}
594
595/**
596 * Port is enabled. IOC is down/failed.
597 */
598static void
599bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
600 enum bfa_fcport_sm_event event)
601{
602 bfa_trc(fcport->bfa, event);
603
604 switch (event) {
605 case BFA_FCPORT_SM_START:
606 if (bfa_fcport_send_enable(fcport))
607 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
608 else
609 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
610 break;
611
612 default:
613 /**
614 * Ignore all events.
615 */
616 ;
617 }
618}
619
620/**
621 * Port is disabled. IOC is down/failed.
622 */
623static void
624bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
625 enum bfa_fcport_sm_event event)
626{
627 bfa_trc(fcport->bfa, event);
628
629 switch (event) {
630 case BFA_FCPORT_SM_START:
631 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
632 break;
633
634 case BFA_FCPORT_SM_ENABLE:
635 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
636 break;
637
638 default:
639 /**
640 * Ignore all events.
641 */
642 ;
643 }
644}
645
646/**
647 * Link state is down
648 */
649static void
650bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
651 enum bfa_fcport_ln_sm_event event)
652{
653 bfa_trc(ln->fcport->bfa, event);
654
655 switch (event) {
656 case BFA_FCPORT_LN_SM_LINKUP:
657 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
658 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
659 break;
660
661 default:
662 bfa_sm_fault(ln->fcport->bfa, event);
663 }
664}
665
666/**
667 * Link state is waiting for down notification
668 */
669static void
670bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
671 enum bfa_fcport_ln_sm_event event)
672{
673 bfa_trc(ln->fcport->bfa, event);
674
675 switch (event) {
676 case BFA_FCPORT_LN_SM_LINKUP:
677 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
678 break;
679
680 case BFA_FCPORT_LN_SM_NOTIFICATION:
681 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
682 break;
683
684 default:
685 bfa_sm_fault(ln->fcport->bfa, event);
686 }
687}
688
689/**
690 * Link state is waiting for down notification and there is a pending up
691 */
692static void
693bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
694 enum bfa_fcport_ln_sm_event event)
695{
696 bfa_trc(ln->fcport->bfa, event);
697
698 switch (event) {
699 case BFA_FCPORT_LN_SM_LINKDOWN:
700 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
701 break;
702
703 case BFA_FCPORT_LN_SM_NOTIFICATION:
704 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
705 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
706 break;
707
708 default:
709 bfa_sm_fault(ln->fcport->bfa, event);
710 }
711}
712
713/**
714 * Link state is up
715 */
716static void
717bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
718 enum bfa_fcport_ln_sm_event event)
719{
720 bfa_trc(ln->fcport->bfa, event);
721
722 switch (event) {
723 case BFA_FCPORT_LN_SM_LINKDOWN:
724 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
725 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
726 break;
727
728 default:
729 bfa_sm_fault(ln->fcport->bfa, event);
730 }
731}
732
733/**
734 * Link state is waiting for up notification
735 */
736static void
737bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
738 enum bfa_fcport_ln_sm_event event)
739{
740 bfa_trc(ln->fcport->bfa, event);
741
742 switch (event) {
743 case BFA_FCPORT_LN_SM_LINKDOWN:
744 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
745 break;
746
747 case BFA_FCPORT_LN_SM_NOTIFICATION:
748 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
749 break;
750
751 default:
752 bfa_sm_fault(ln->fcport->bfa, event);
753 }
754}
755
756/**
757 * Link state is waiting for up notification and there is a pending down
758 */
759static void
760bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
761 enum bfa_fcport_ln_sm_event event)
762{
763 bfa_trc(ln->fcport->bfa, event);
764
765 switch (event) {
766 case BFA_FCPORT_LN_SM_LINKUP:
767 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
768 break;
769
770 case BFA_FCPORT_LN_SM_NOTIFICATION:
771 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
772 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
773 break;
774
775 default:
776 bfa_sm_fault(ln->fcport->bfa, event);
777 }
778}
779
780/**
781 * Link state is waiting for up notification and there are pending down and up
782 */
783static void
784bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
785 enum bfa_fcport_ln_sm_event event)
786{
787 bfa_trc(ln->fcport->bfa, event);
788
789 switch (event) {
790 case BFA_FCPORT_LN_SM_LINKDOWN:
791 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
792 break;
793
794 case BFA_FCPORT_LN_SM_NOTIFICATION:
795 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
796 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
797 break;
798
799 default:
800 bfa_sm_fault(ln->fcport->bfa, event);
801 }
802}
803
804/**
805 * bfa_pport_private
806 */
807
808static void
809__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
810{
811 struct bfa_fcport_ln_s *ln = cbarg;
812
813 if (complete)
814 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
815 else
816 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
817}
818
819static void
820bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
821{
822 if (fcport->bfa->fcs) {
823 fcport->event_cbfn(fcport->event_cbarg, event);
824 return;
825 }
826
827 switch (event) {
828 case BFA_PPORT_LINKUP:
829 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
830 break;
831 case BFA_PPORT_LINKDOWN:
832 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
833 break;
834 default:
835 bfa_assert(0);
836 }
837}
838
/* Stash the event on the link-notify struct and queue the deferred
 * __bfa_cb_fcport_event callback that will deliver it. */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
{
	ln->ln_event = event;
	bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
}
845
/* DMA memory needed for the firmware stats block, cacheline aligned */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
							BFA_CACHELINE_SZ))

/* Report this module's DMA memory requirement to the IOC framework. */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
855
856static void
857bfa_fcport_qresume(void *cbarg)
858{
859 struct bfa_fcport_s *fcport = cbarg;
860
861 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
862}
863
864static void
865bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
866{
867 u8 *dm_kva;
868 u64 dm_pa;
869
870 dm_kva = bfa_meminfo_dma_virt(meminfo);
871 dm_pa = bfa_meminfo_dma_phys(meminfo);
872
873 fcport->stats_kva = dm_kva;
874 fcport->stats_pa = dm_pa;
875 fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
876
877 dm_kva += FCPORT_STATS_DMA_SZ;
878 dm_pa += FCPORT_STATS_DMA_SZ;
879
880 bfa_meminfo_dma_virt(meminfo) = dm_kva;
881 bfa_meminfo_dma_phys(meminfo) = dm_pa;
882}
883
884/**
885 * Memory initialization.
886 */
887static void
888bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
889 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
890{
891 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
892 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
893 struct bfa_fcport_ln_s *ln = &fcport->ln;
894 struct bfa_timeval_s tv;
895
896 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
897 fcport->bfa = bfa;
898 ln->fcport = fcport;
899
900 bfa_fcport_mem_claim(fcport, meminfo);
901
902 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
903 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
904
905 /**
906 * initialize time stamp for stats reset
907 */
908 bfa_os_gettimeofday(&tv);
909 fcport->stats_reset_time = tv.tv_sec;
910
911 /**
912 * initialize and set default configuration
913 */
914 port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
915 port_cfg->speed = BFA_PPORT_SPEED_AUTO;
916 port_cfg->trunked = BFA_FALSE;
917 port_cfg->maxfrsize = 0;
918
919 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
920
921 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
922}
923
/* Module detach hook: intentionally empty, fcport holds no resources
 * that need explicit teardown beyond the shared DMA region. */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
928
929/**
930 * Called when IOC is ready.
931 */
932static void
933bfa_fcport_start(struct bfa_s *bfa)
934{
935 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
936}
937
938/**
939 * Called before IOC is stopped.
940 */
941static void
942bfa_fcport_stop(struct bfa_s *bfa)
943{
944 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
945}
946
947/**
948 * Called when IOC failure is detected.
949 */
950static void
951bfa_fcport_iocdisable(struct bfa_s *bfa)
952{
953 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
954}
955
/* Capture negotiated link parameters (speed, topology, QoS attributes)
 * from the firmware link-up event saved in event_arg. */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* loop ALPA is reset here; presumably filled in later — confirm */
	if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/*
	 * QoS Details
	 */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr,
		pevent->link_state.vc_fcf.qos_vc_attr);


	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
978
/* Forget negotiated link parameters on link-down/disable. */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
}
985
986/**
987 * Send port enable message to firmware.
988 */
989static bfa_boolean_t
990bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
991{
992 struct bfi_fcport_enable_req_s *m;
993
994 /**
995 * Increment message tag before queue check, so that responses to old
996 * requests are discarded.
997 */
998 fcport->msgtag++;
999
1000 /**
1001 * check for room in queue to send request now
1002 */
1003 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1004 if (!m) {
1005 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1006 &fcport->reqq_wait);
1007 return BFA_FALSE;
1008 }
1009
1010 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
1011 bfa_lpuid(fcport->bfa));
1012 m->nwwn = fcport->nwwn;
1013 m->pwwn = fcport->pwwn;
1014 m->port_cfg = fcport->cfg;
1015 m->msgtag = fcport->msgtag;
1016 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
1017 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
1018 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
1019 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
1020
1021 /**
1022 * queue I/O message to firmware
1023 */
1024 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1025 return BFA_TRUE;
1026}
1027
1028/**
1029 * Send port disable message to firmware.
1030 */
1031static bfa_boolean_t
1032bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
1033{
1034 struct bfi_fcport_req_s *m;
1035
1036 /**
1037 * Increment message tag before queue check, so that responses to old
1038 * requests are discarded.
1039 */
1040 fcport->msgtag++;
1041
1042 /**
1043 * check for room in queue to send request now
1044 */
1045 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1046 if (!m) {
1047 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1048 &fcport->reqq_wait);
1049 return BFA_FALSE;
1050 }
1051
1052 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
1053 bfa_lpuid(fcport->bfa));
1054 m->msgtag = fcport->msgtag;
1055
1056 /**
1057 * queue I/O message to firmware
1058 */
1059 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1060
1061 return BFA_TRUE;
1062}
1063
/* Pull the port/node WWNs from IOC hardware data into the port config. */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
1073
/* Push the configured TX BB-credit to firmware. Best effort: if the
 * request queue is full the update is dropped (only traced). */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
1099
1100static void
1101bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
1102 struct bfa_qos_stats_s *s)
1103{
1104 u32 *dip = (u32 *) d;
1105 u32 *sip = (u32 *) s;
1106 int i;
1107
1108 /* Now swap the 32 bit fields */
1109 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
1110 dip[i] = bfa_os_ntohl(sip[i]);
1111}
1112
/* Copy FCoE stats from firmware to host order. Counters are 64-bit:
 * each pair of 32-bit words is byte-swapped, and on little-endian hosts
 * the two halves are additionally exchanged. */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		/* swap word order as well to reassemble the 64-bit value */
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}
}
1132
/* Deferred completion for a stats fetch: on success convert the raw
 * firmware stats (FC QoS or FCoE depending on IOC mode) into the
 * caller's buffer, then invoke the caller's callback. On cancellation
 * just clear the busy state. */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
1163
/* Stats-fetch watchdog: cancel any pending request-queue wait and
 * complete the fetch with BFA_STATUS_ETIMER via the deferred callback. */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	/* ETIMER also tells the ISR to ignore a late firmware response */
	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
1180
/* Queue a STATS_GET request to firmware. If the request queue is full,
 * arm a wait entry that re-invokes this function when space frees up. */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
		bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
1204
/* Deferred completion for a stats clear: on success restart the
 * stats-reset timestamp and invoke the caller's callback; on
 * cancellation just clear the busy state. */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct bfa_timeval_s tv;

		/**
		 * re-initialize time stamp for stats reset
		 */
		bfa_os_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
1225
/* Stats-clear watchdog: cancel any pending request-queue wait and
 * complete the clear with BFA_STATUS_ETIMER via the deferred callback. */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	/* ETIMER also tells the ISR to ignore a late firmware response */
	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
1242
/* Queue a STATS_CLEAR request to firmware. If the request queue is full,
 * arm a wait entry that re-invokes this function when space frees up. */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
1266
1267/**
1268 * bfa_pport_public
1269 */
1270
1271/**
1272 * Called to initialize port attributes
1273 */
1274void
1275bfa_fcport_init(struct bfa_s *bfa)
1276{
1277 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1278
1279 /**
1280 * Initialize port attributes from IOC hardware data.
1281 */
1282 bfa_fcport_set_wwns(fcport);
1283 if (fcport->cfg.maxfrsize == 0)
1284 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
1285 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
1286 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
1287
1288 bfa_assert(fcport->cfg.maxfrsize);
1289 bfa_assert(fcport->cfg.rx_bbcredit);
1290 bfa_assert(fcport->speed_sup);
1291}
1292
1293
1294/**
1295 * Firmware message handler.
1296 */
1297void
1298bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1299{
1300 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1301 union bfi_fcport_i2h_msg_u i2hmsg;
1302
1303 i2hmsg.msg = msg;
1304 fcport->event_arg.i2hmsg = i2hmsg;
1305
1306 switch (msg->mhdr.msg_id) {
1307 case BFI_FCPORT_I2H_ENABLE_RSP:
1308 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
1309 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
1310 break;
1311
1312 case BFI_FCPORT_I2H_DISABLE_RSP:
1313 if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag)
1314 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
1315 break;
1316
1317 case BFI_FCPORT_I2H_EVENT:
1318 switch (i2hmsg.event->link_state.linkstate) {
1319 case BFA_PPORT_LINKUP:
1320 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
1321 break;
1322 case BFA_PPORT_LINKDOWN:
1323 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
1324 break;
1325 case BFA_PPORT_TRUNK_LINKDOWN:
1326 /** todo: event notification */
1327 break;
1328 }
1329 break;
1330
1331 case BFI_FCPORT_I2H_STATS_GET_RSP:
1332 /*
1333 * check for timer pop before processing the rsp
1334 */
1335 if (fcport->stats_busy == BFA_FALSE ||
1336 fcport->stats_status == BFA_STATUS_ETIMER)
1337 break;
1338
1339 bfa_timer_stop(&fcport->timer);
1340 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
1341 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1342 __bfa_cb_fcport_stats_get, fcport);
1343 break;
1344
1345 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
1346 /*
1347 * check for timer pop before processing the rsp
1348 */
1349 if (fcport->stats_busy == BFA_FALSE ||
1350 fcport->stats_status == BFA_STATUS_ETIMER)
1351 break;
1352
1353 bfa_timer_stop(&fcport->timer);
1354 fcport->stats_status = BFA_STATUS_OK;
1355 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1356 __bfa_cb_fcport_stats_clr, fcport);
1357 break;
1358
1359 default:
1360 bfa_assert(0);
1361 break;
1362 }
1363}
1364
1365/**
1366 * bfa_pport_api
1367 */
1368
1369/**
1370 * Registered callback for port events.
1371 */
1372void
1373bfa_fcport_event_register(struct bfa_s *bfa,
1374 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
1375 void *cbarg)
1376{
1377 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1378
1379 fcport->event_cbfn = cbfn;
1380 fcport->event_cbarg = cbarg;
1381}
1382
1383bfa_status_t
1384bfa_fcport_enable(struct bfa_s *bfa)
1385{
1386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1387 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1388 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1389
1390 /* if port is PBC disabled, return error */
1391 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1392 bfa_trc(bfa, fcport->pwwn);
1393 return BFA_STATUS_PBC;
1394 }
1395
1396 if (bfa_ioc_is_disabled(&bfa->ioc))
1397 return BFA_STATUS_IOC_DISABLED;
1398
1399 if (fcport->diag_busy)
1400 return BFA_STATUS_DIAG_BUSY;
1401 else if (bfa_sm_cmp_state
1402 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
1403 return BFA_STATUS_DEVBUSY;
1404
1405 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
1406 return BFA_STATUS_OK;
1407}
1408
1409bfa_status_t
1410bfa_fcport_disable(struct bfa_s *bfa)
1411{
1412 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1413 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1414 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1415
1416 /* if port is PBC disabled, return error */
1417 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
1418 bfa_trc(bfa, fcport->pwwn);
1419 return BFA_STATUS_PBC;
1420 }
1421
1422 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1423 return BFA_STATUS_OK;
1424}
1425
1426/**
1427 * Configure port speed.
1428 */
1429bfa_status_t
1430bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1431{
1432 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1433
1434 bfa_trc(bfa, speed);
1435
1436 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
1437 bfa_trc(bfa, fcport->speed_sup);
1438 return BFA_STATUS_UNSUPP_SPEED;
1439 }
1440
1441 fcport->cfg.speed = speed;
1442
1443 return BFA_STATUS_OK;
1444}
1445
1446/**
1447 * Get current speed.
1448 */
1449enum bfa_pport_speed
1450bfa_fcport_get_speed(struct bfa_s *bfa)
1451{
1452 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1453
1454 return fcport->speed;
1455}
1456
1457/**
1458 * Configure port topology.
1459 */
1460bfa_status_t
1461bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1462{
1463 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1464
1465 bfa_trc(bfa, topology);
1466 bfa_trc(bfa, fcport->cfg.topology);
1467
1468 switch (topology) {
1469 case BFA_PPORT_TOPOLOGY_P2P:
1470 case BFA_PPORT_TOPOLOGY_LOOP:
1471 case BFA_PPORT_TOPOLOGY_AUTO:
1472 break;
1473
1474 default:
1475 return BFA_STATUS_EINVAL;
1476 }
1477
1478 fcport->cfg.topology = topology;
1479 return BFA_STATUS_OK;
1480}
1481
1482/**
1483 * Get current topology.
1484 */
1485enum bfa_pport_topology
1486bfa_fcport_get_topology(struct bfa_s *bfa)
1487{
1488 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1489
1490 return fcport->topology;
1491}
1492
1493bfa_status_t
1494bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1495{
1496 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1497
1498 bfa_trc(bfa, alpa);
1499 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1500 bfa_trc(bfa, fcport->cfg.hardalpa);
1501
1502 fcport->cfg.cfg_hardalpa = BFA_TRUE;
1503 fcport->cfg.hardalpa = alpa;
1504
1505 return BFA_STATUS_OK;
1506}
1507
1508bfa_status_t
1509bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
1510{
1511 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1512
1513 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1514 bfa_trc(bfa, fcport->cfg.hardalpa);
1515
1516 fcport->cfg.cfg_hardalpa = BFA_FALSE;
1517 return BFA_STATUS_OK;
1518}
1519
1520bfa_boolean_t
1521bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1522{
1523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1524
1525 *alpa = fcport->cfg.hardalpa;
1526 return fcport->cfg.cfg_hardalpa;
1527}
1528
1529u8
1530bfa_fcport_get_myalpa(struct bfa_s *bfa)
1531{
1532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1533
1534 return fcport->myalpa;
1535}
1536
1537bfa_status_t
1538bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1539{
1540 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1541
1542 bfa_trc(bfa, maxfrsize);
1543 bfa_trc(bfa, fcport->cfg.maxfrsize);
1544
1545 /*
1546 * with in range
1547 */
1548 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1549 return BFA_STATUS_INVLD_DFSZ;
1550
1551 /*
1552 * power of 2, if not the max frame size of 2112
1553 */
1554 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1555 return BFA_STATUS_INVLD_DFSZ;
1556
1557 fcport->cfg.maxfrsize = maxfrsize;
1558 return BFA_STATUS_OK;
1559}
1560
1561u16
1562bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
1563{
1564 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1565
1566 return fcport->cfg.maxfrsize;
1567}
1568
1569u32
1570bfa_fcport_mypid(struct bfa_s *bfa)
1571{
1572 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1573
1574 return fcport->mypid;
1575}
1576
1577u8
1578bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
1579{
1580 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1581
1582 return fcport->cfg.rx_bbcredit;
1583}
1584
/* Set the transmit BB-credit and push it to firmware (best effort;
 * note the value is truncated to 8 bits for storage). */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
1593
1594/**
1595 * Get port attributes.
1596 */
1597
1598wwn_t
1599bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1600{
1601 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1602 if (node)
1603 return fcport->nwwn;
1604 else
1605 return fcport->pwwn;
1606}
1607
/* Fill *attr with a snapshot of port configuration and runtime state:
 * WWNs, speed, topology, beaconing, and a port state derived from the
 * state machine with IOC-disabled / FW-mismatch / PBC-disabled
 * overrides. */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
	attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);

	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_pport_cfg_s));
	/*
	 * speed attributes
	 */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/*
	 * topology attributes
	 */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;

	/*
	 * beacon attributes
	 */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);

	/* PBC Disabled State */
	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED)
		attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED;
	else {
		attr->port_state = bfa_sm_to_state(
				hal_pport_sm_table, fcport->sm);
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PPORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PPORT_ST_FWMISMATCH;
	}
}
1661
/* timeout (presumably milliseconds — confirm bfa_timer units) for
 * stats get/clear requests */
#define BFA_FCPORT_STATS_TOV	1000

/**
 * Fetch port attributes (FCQoS or FCoE).
 *
 * Asynchronous: cbfn(cbarg, status) is invoked on completion or
 * timeout. Only one stats operation may be outstanding at a time.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* watchdog in case firmware never responds */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
1689
1690/**
1691 * Reset port statistics (FCQoS or FCoE).
1692 */
1693bfa_status_t
1694bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1695{
1696 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1697
1698 if (fcport->stats_busy) {
1699 bfa_trc(bfa, fcport->stats_busy);
1700 return BFA_STATUS_DEVBUSY;
1701 }
1702
1703 fcport->stats_busy = BFA_TRUE;
1704 fcport->stats_cbfn = cbfn;
1705 fcport->stats_cbarg = cbarg;
1706
1707 bfa_fcport_send_stats_clear(fcport);
1708
1709 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
1710 fcport, BFA_FCPORT_STATS_TOV);
1711 return BFA_STATUS_OK;
1712}
1713
1714/**
1715 * Fetch FCQoS port statistics
1716 */
1717bfa_status_t
1718bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1719 bfa_cb_pport_t cbfn, void *cbarg)
1720{
1721 /* Meaningful only for FC mode */
1722 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1723
1724 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1725}
1726
1727/**
1728 * Reset FCoE port statistics
1729 */
1730bfa_status_t
1731bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1732{
1733 /* Meaningful only for FC mode */
1734 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1735
1736 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1737}
1738
1739/**
1740 * Fetch FCQoS port statistics
1741 */
1742bfa_status_t
1743bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1744 bfa_cb_pport_t cbfn, void *cbarg)
1745{
1746 /* Meaningful only for FCoE mode */
1747 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1748
1749 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1750}
1751
1752/**
1753 * Reset FCoE port statistics
1754 */
1755bfa_status_t
1756bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1757{
1758 /* Meaningful only for FCoE mode */
1759 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1760
1761 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1762}
1763
/* Enable trunking on the port set given by bitmap. The bitmap must
 * have exactly one bit set ((bitmap & (bitmap - 1)) clears the lowest
 * set bit, so the expression is zero only for powers of two). */
bfa_status_t
bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, bitmap);
	bfa_trc(bfa, fcport->cfg.trunked);
	bfa_trc(bfa, fcport->cfg.trunk_ports);

	if (!bitmap || (bitmap & (bitmap - 1)))
		return BFA_STATUS_EINVAL;

	fcport->cfg.trunked = BFA_TRUE;
	fcport->cfg.trunk_ports = bitmap;

	return BFA_STATUS_OK;
}
1781
/* Copy QoS attributes to the caller, converting the firmware's
 * big-endian fields to host order. */
void
bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
}
1790
1791void
1792bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
1793 struct bfa_qos_vc_attr_s *qos_vc_attr)
1794{
1795 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1796 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
1797 u32 i = 0;
1798
1799 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
1800 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
1801 qos_vc_attr->elp_opmode_flags =
1802 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
1803
1804 /*
1805 * Individual VC info
1806 */
1807 while (i < qos_vc_attr->total_vc_count) {
1808 qos_vc_attr->vc_info[i].vc_credit =
1809 bfa_vc_attr->vc_info[i].vc_credit;
1810 qos_vc_attr->vc_info[i].borrow_credit =
1811 bfa_vc_attr->vc_info[i].borrow_credit;
1812 qos_vc_attr->vc_info[i].priority =
1813 bfa_vc_attr->vc_info[i].priority;
1814 ++i;
1815 }
1816}
1817
1818/**
1819 * Fetch port attributes.
1820 */
1821bfa_status_t
1822bfa_fcport_trunk_disable(struct bfa_s *bfa)
1823{
1824 return BFA_STATUS_OK;
1825}
1826
1827bfa_boolean_t
1828bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1829{
1830 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1831
1832 *bitmap = fcport->cfg.trunk_ports;
1833 return fcport->cfg.trunked;
1834}
1835
1836bfa_boolean_t
1837bfa_fcport_is_disabled(struct bfa_s *bfa)
1838{
1839 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1840
1841 return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
1842 BFA_PPORT_ST_DISABLED;
1843
1844}
1845
1846bfa_boolean_t
1847bfa_fcport_is_ratelim(struct bfa_s *bfa)
1848{
1849 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1850
1851 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1852
1853}
1854
/* Enable/disable QoS on the port. Applied only for FC-mode IOCs;
 * fcpim is notified so I/O redirection can be re-evaluated. */
void
bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, fcport->cfg.qos_enabled);

	bfa_trc(bfa, ioc_type);

	if (ioc_type == BFA_IOC_TYPE_FC) {
		fcport->cfg.qos_enabled = on_off;
		/**
		 * Notify fcpim of the change in QoS state
		 */
		bfa_fcpim_update_ioredirect(bfa);
	}
}
1874
/* Enable/disable target rate limiting; ensures a valid default trunk
 * rate-limit speed (1G) is in place when none was configured. */
void
bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, fcport->cfg.ratelimit);

	fcport->cfg.ratelimit = on_off;
	if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
		fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
}
1887
1888/**
1889 * Configure default minimum ratelim speed
1890 */
1891bfa_status_t
1892bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1893{
1894 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1895
1896 bfa_trc(bfa, speed);
1897
1898 /*
1899 * Auto and speeds greater than the supported speed, are invalid
1900 */
1901 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
1902 bfa_trc(bfa, fcport->speed_sup);
1903 return BFA_STATUS_UNSUPP_SPEED;
1904 }
1905
1906 fcport->cfg.trl_def_speed = speed;
1907
1908 return BFA_STATUS_OK;
1909}
1910
1911/**
1912 * Get default minimum ratelim speed
1913 */
1914enum bfa_pport_speed
1915bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
1916{
1917 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1918
1919 bfa_trc(bfa, fcport->cfg.trl_def_speed);
1920 return fcport->cfg.trl_def_speed;
1921
1922}
1923
/* Mark the port busy/free for diagnostics; bfa_fcport_enable() refuses
 * to run while diag_busy is set. */
void
bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, status);
	bfa_trc(bfa, fcport->diag_busy);

	fcport->diag_busy = status;
}
1934
/* Record the port beaconing and link end-to-end beaconing settings. */
void
bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
		bfa_boolean_t link_e2e_beacon)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, fcport->beacon);
	bfa_trc(bfa, fcport->link_e2e_beacon);

	fcport->beacon = beacon;
	fcport->link_e2e_beacon = link_e2e_beacon;
}
1949
/* True when the port state machine is in the linkup state. */
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
	return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
}
1955
1956bfa_boolean_t
1957bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
1958{
1959 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1960
1961 return fcport->cfg.qos_enabled;
1962}
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index d1a99209bf5f..9cebbe30a678 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,35 +19,28 @@
19 * bfa_fcs.c BFA FCS main 19 * bfa_fcs.c BFA FCS main
20 */ 20 */
21 21
22#include <fcs/bfa_fcs.h> 22#include "bfa_fcs.h"
23#include "fcs_port.h" 23#include "bfa_fcbuild.h"
24#include "fcs_uf.h"
25#include "fcs_vport.h"
26#include "fcs_rport.h"
27#include "fcs_fabric.h"
28#include "fcs_fcpim.h"
29#include "fcs_fcptm.h"
30#include "fcbuild.h"
31#include "fcs.h"
32#include "bfad_drv.h" 24#include "bfad_drv.h"
33#include <fcb/bfa_fcb.h> 25
26BFA_TRC_FILE(FCS, FCS);
34 27
35/** 28/**
36 * FCS sub-modules 29 * FCS sub-modules
37 */ 30 */
38struct bfa_fcs_mod_s { 31struct bfa_fcs_mod_s {
39 void (*attach) (struct bfa_fcs_s *fcs); 32 void (*attach) (struct bfa_fcs_s *fcs);
40 void (*modinit) (struct bfa_fcs_s *fcs); 33 void (*modinit) (struct bfa_fcs_s *fcs);
41 void (*modexit) (struct bfa_fcs_s *fcs); 34 void (*modexit) (struct bfa_fcs_s *fcs);
42}; 35};
43 36
44#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } 37#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
45 38
46static struct bfa_fcs_mod_s fcs_modules[] = { 39static struct bfa_fcs_mod_s fcs_modules[] = {
47 { bfa_fcs_pport_attach, NULL, NULL }, 40 { bfa_fcs_port_attach, NULL, NULL },
48 { bfa_fcs_uf_attach, NULL, NULL }, 41 { bfa_fcs_uf_attach, NULL, NULL },
49 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, 42 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50 bfa_fcs_fabric_modexit }, 43 bfa_fcs_fabric_modexit },
51}; 44};
52 45
53/** 46/**
@@ -57,8 +50,8 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
57static void 50static void
58bfa_fcs_exit_comp(void *fcs_cbarg) 51bfa_fcs_exit_comp(void *fcs_cbarg)
59{ 52{
60 struct bfa_fcs_s *fcs = fcs_cbarg; 53 struct bfa_fcs_s *fcs = fcs_cbarg;
61 struct bfad_s *bfad = fcs->bfad; 54 struct bfad_s *bfad = fcs->bfad;
62 55
63 complete(&bfad->comp); 56 complete(&bfad->comp);
64} 57}
@@ -74,9 +67,9 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
74 */ 67 */
75void 68void
76bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 69bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
77 bfa_boolean_t min_cfg) 70 bfa_boolean_t min_cfg)
78{ 71{
79 int i; 72 int i;
80 struct bfa_fcs_mod_s *mod; 73 struct bfa_fcs_mod_s *mod;
81 74
82 fcs->bfa = bfa; 75 fcs->bfa = bfa;
@@ -86,7 +79,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
86 bfa_attach_fcs(bfa); 79 bfa_attach_fcs(bfa);
87 fcbuild_init(); 80 fcbuild_init();
88 81
89 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 82 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
90 mod = &fcs_modules[i]; 83 mod = &fcs_modules[i];
91 if (mod->attach) 84 if (mod->attach)
92 mod->attach(fcs); 85 mod->attach(fcs);
@@ -99,11 +92,11 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
99void 92void
100bfa_fcs_init(struct bfa_fcs_s *fcs) 93bfa_fcs_init(struct bfa_fcs_s *fcs)
101{ 94{
102 int i, npbc_vports; 95 int i, npbc_vports;
103 struct bfa_fcs_mod_s *mod; 96 struct bfa_fcs_mod_s *mod;
104 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; 97 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
105 98
106 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 99 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
107 mod = &fcs_modules[i]; 100 mod = &fcs_modules[i];
108 if (mod->modinit) 101 if (mod->modinit)
109 mod->modinit(fcs); 102 mod->modinit(fcs);
@@ -111,7 +104,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
111 /* Initialize pbc vports */ 104 /* Initialize pbc vports */
112 if (!fcs->min_cfg) { 105 if (!fcs->min_cfg) {
113 npbc_vports = 106 npbc_vports =
114 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); 107 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
115 for (i = 0; i < npbc_vports; i++) 108 for (i = 0; i < npbc_vports; i++)
116 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); 109 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
117 } 110 }
@@ -127,12 +120,13 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
127} 120}
128 121
129/** 122/**
130 * FCS driver details initialization. 123 * brief
124 * FCS driver details initialization.
131 * 125 *
132 * param[in] fcs FCS instance 126 * param[in] fcs FCS instance
133 * param[in] driver_info Driver Details 127 * param[in] driver_info Driver Details
134 * 128 *
135 * return None 129 * return None
136 */ 130 */
137void 131void
138bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 132bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
@@ -145,13 +139,13 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
145} 139}
146 140
147/** 141/**
148 * @brief 142 * brief
149 * FCS FDMI Driver Parameter Initialization 143 * FCS FDMI Driver Parameter Initialization
150 * 144 *
151 * @param[in] fcs FCS instance 145 * param[in] fcs FCS instance
152 * @param[in] fdmi_enable TRUE/FALSE 146 * param[in] fdmi_enable TRUE/FALSE
153 * 147 *
154 * @return None 148 * return None
155 */ 149 */
156void 150void
157bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) 151bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
@@ -160,22 +154,24 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
160 fcs->fdmi_enabled = fdmi_enable; 154 fcs->fdmi_enabled = fdmi_enable;
161 155
162} 156}
163
164/** 157/**
165 * FCS instance cleanup and exit. 158 * brief
159 * FCS instance cleanup and exit.
166 * 160 *
167 * param[in] fcs FCS instance 161 * param[in] fcs FCS instance
168 * return None 162 * return None
169 */ 163 */
170void 164void
171bfa_fcs_exit(struct bfa_fcs_s *fcs) 165bfa_fcs_exit(struct bfa_fcs_s *fcs)
172{ 166{
173 struct bfa_fcs_mod_s *mod; 167 struct bfa_fcs_mod_s *mod;
174 int i; 168 int nmods, i;
175 169
176 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); 170 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
177 171
178 for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { 172 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
173
174 for (i = 0; i < nmods; i++) {
179 175
180 mod = &fcs_modules[i]; 176 mod = &fcs_modules[i];
181 if (mod->modexit) { 177 if (mod->modexit) {
@@ -194,24 +190,1547 @@ bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
194 fcs->trcmod = trcmod; 190 fcs->trcmod = trcmod;
195} 191}
196 192
193void
194bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
195{
196 bfa_wc_down(&fcs->wc);
197}
198
199/**
200 * Fabric module implementation.
201 */
202
203#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */
204#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
205
206#define bfa_fcs_fabric_set_opertype(__fabric) do { \
207 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
208 == BFA_PORT_TOPOLOGY_P2P) \
209 (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \
210 else \
211 (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
212} while (0)
213
214/*
215 * forward declarations
216 */
217static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
218static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
219static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
220static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
221static void bfa_fcs_fabric_delay(void *cbarg);
222static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
223static void bfa_fcs_fabric_delete_comp(void *cbarg);
224static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
225 struct fchs_s *fchs, u16 len);
226static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
227 struct fchs_s *fchs, u16 len);
228static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
229static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
230 struct bfa_fcxp_s *fcxp, void *cbarg,
231 bfa_status_t status,
232 u32 rsp_len,
233 u32 resid_len,
234 struct fchs_s *rspfchs);
235/**
236 * fcs_fabric_sm fabric state machine functions
237 */
238
239/**
240 * Fabric state machine events
241 */
242enum bfa_fcs_fabric_event {
243 BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
244 BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
245 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
246 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
247 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
248 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
249 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
250 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
251 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
252 BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
253 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
254 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
255 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
256 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
257 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
258 BFA_FCS_FABRIC_SM_START = 16, /* from driver */
259};
260
261static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
262 enum bfa_fcs_fabric_event event);
263static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
264 enum bfa_fcs_fabric_event event);
265static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
266 enum bfa_fcs_fabric_event event);
267static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
268 enum bfa_fcs_fabric_event event);
269static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
270 enum bfa_fcs_fabric_event event);
271static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
272 enum bfa_fcs_fabric_event event);
273static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
274 enum bfa_fcs_fabric_event event);
275static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
276 enum bfa_fcs_fabric_event event);
277static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
278 enum bfa_fcs_fabric_event event);
279static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
280 enum bfa_fcs_fabric_event event);
281static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
282 enum bfa_fcs_fabric_event event);
283static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
284 enum bfa_fcs_fabric_event event);
285static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
286 enum bfa_fcs_fabric_event event);
287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
288 enum bfa_fcs_fabric_event event);
289/**
290 * Beginning state before fabric creation.
291 */
292static void
293bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
294 enum bfa_fcs_fabric_event event)
295{
296 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
297 bfa_trc(fabric->fcs, event);
298
299 switch (event) {
300 case BFA_FCS_FABRIC_SM_CREATE:
301 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
302 bfa_fcs_fabric_init(fabric);
303 bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
304 break;
305
306 case BFA_FCS_FABRIC_SM_LINK_UP:
307 case BFA_FCS_FABRIC_SM_LINK_DOWN:
308 break;
309
310 default:
311 bfa_sm_fault(fabric->fcs, event);
312 }
313}
314
315/**
316 * Beginning state before fabric creation.
317 */
318static void
319bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
320 enum bfa_fcs_fabric_event event)
321{
322 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
323 bfa_trc(fabric->fcs, event);
324
325 switch (event) {
326 case BFA_FCS_FABRIC_SM_START:
327 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
328 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
329 bfa_fcs_fabric_login(fabric);
330 } else
331 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
332 break;
333
334 case BFA_FCS_FABRIC_SM_LINK_UP:
335 case BFA_FCS_FABRIC_SM_LINK_DOWN:
336 break;
337
338 case BFA_FCS_FABRIC_SM_DELETE:
339 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
340 bfa_fcs_modexit_comp(fabric->fcs);
341 break;
342
343 default:
344 bfa_sm_fault(fabric->fcs, event);
345 }
346}
347
348/**
349 * Link is down, awaiting LINK UP event from port. This is also the
350 * first state at fabric creation.
351 */
352static void
353bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
354 enum bfa_fcs_fabric_event event)
355{
356 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
357 bfa_trc(fabric->fcs, event);
358
359 switch (event) {
360 case BFA_FCS_FABRIC_SM_LINK_UP:
361 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
362 bfa_fcs_fabric_login(fabric);
363 break;
364
365 case BFA_FCS_FABRIC_SM_RETRY_OP:
366 break;
367
368 case BFA_FCS_FABRIC_SM_DELETE:
369 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
370 bfa_fcs_fabric_delete(fabric);
371 break;
372
373 default:
374 bfa_sm_fault(fabric->fcs, event);
375 }
376}
377
378/**
379 * FLOGI is in progress, awaiting FLOGI reply.
380 */
381static void
382bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
383 enum bfa_fcs_fabric_event event)
384{
385 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
386 bfa_trc(fabric->fcs, event);
387
388 switch (event) {
389 case BFA_FCS_FABRIC_SM_CONT_OP:
390
391 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
392 fabric->bb_credit);
393 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
394
395 if (fabric->auth_reqd && fabric->is_auth) {
396 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
397 bfa_trc(fabric->fcs, event);
398 } else {
399 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
400 bfa_fcs_fabric_notify_online(fabric);
401 }
402 break;
403
404 case BFA_FCS_FABRIC_SM_RETRY_OP:
405 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
406 bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
407 bfa_fcs_fabric_delay, fabric,
408 BFA_FCS_FABRIC_RETRY_DELAY);
409 break;
410
411 case BFA_FCS_FABRIC_SM_LOOPBACK:
412 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
413 bfa_lps_discard(fabric->lps);
414 bfa_fcs_fabric_set_opertype(fabric);
415 break;
416
417 case BFA_FCS_FABRIC_SM_NO_FABRIC:
418 fabric->fab_type = BFA_FCS_FABRIC_N2N;
419 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
420 fabric->bb_credit);
421 bfa_fcs_fabric_notify_online(fabric);
422 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
423 break;
424
425 case BFA_FCS_FABRIC_SM_LINK_DOWN:
426 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
427 bfa_lps_discard(fabric->lps);
428 break;
429
430 case BFA_FCS_FABRIC_SM_DELETE:
431 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
432 bfa_lps_discard(fabric->lps);
433 bfa_fcs_fabric_delete(fabric);
434 break;
435
436 default:
437 bfa_sm_fault(fabric->fcs, event);
438 }
439}
440
441
442static void
443bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
444 enum bfa_fcs_fabric_event event)
445{
446 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
447 bfa_trc(fabric->fcs, event);
448
449 switch (event) {
450 case BFA_FCS_FABRIC_SM_DELAYED:
451 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
452 bfa_fcs_fabric_login(fabric);
453 break;
454
455 case BFA_FCS_FABRIC_SM_LINK_DOWN:
456 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
457 bfa_timer_stop(&fabric->delay_timer);
458 break;
459
460 case BFA_FCS_FABRIC_SM_DELETE:
461 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
462 bfa_timer_stop(&fabric->delay_timer);
463 bfa_fcs_fabric_delete(fabric);
464 break;
465
466 default:
467 bfa_sm_fault(fabric->fcs, event);
468 }
469}
470
471/**
472 * Authentication is in progress, awaiting authentication results.
473 */
474static void
475bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
476 enum bfa_fcs_fabric_event event)
477{
478 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
479 bfa_trc(fabric->fcs, event);
480
481 switch (event) {
482 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
483 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
484 bfa_lps_discard(fabric->lps);
485 break;
486
487 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
488 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
489 bfa_fcs_fabric_notify_online(fabric);
490 break;
491
492 case BFA_FCS_FABRIC_SM_PERF_EVFP:
493 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
494 break;
495
496 case BFA_FCS_FABRIC_SM_LINK_DOWN:
497 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
498 bfa_lps_discard(fabric->lps);
499 break;
500
501 case BFA_FCS_FABRIC_SM_DELETE:
502 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
503 bfa_fcs_fabric_delete(fabric);
504 break;
505
506 default:
507 bfa_sm_fault(fabric->fcs, event);
508 }
509}
510
511/**
512 * Authentication failed
513 */
514static void
515bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
516 enum bfa_fcs_fabric_event event)
517{
518 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
519 bfa_trc(fabric->fcs, event);
520
521 switch (event) {
522 case BFA_FCS_FABRIC_SM_LINK_DOWN:
523 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
524 bfa_fcs_fabric_notify_offline(fabric);
525 break;
526
527 case BFA_FCS_FABRIC_SM_DELETE:
528 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
529 bfa_fcs_fabric_delete(fabric);
530 break;
531
532 default:
533 bfa_sm_fault(fabric->fcs, event);
534 }
535}
536
537/**
538 * Port is in loopback mode.
539 */
540static void
541bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
542 enum bfa_fcs_fabric_event event)
543{
544 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
545 bfa_trc(fabric->fcs, event);
546
547 switch (event) {
548 case BFA_FCS_FABRIC_SM_LINK_DOWN:
549 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
550 bfa_fcs_fabric_notify_offline(fabric);
551 break;
552
553 case BFA_FCS_FABRIC_SM_DELETE:
554 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
555 bfa_fcs_fabric_delete(fabric);
556 break;
557
558 default:
559 bfa_sm_fault(fabric->fcs, event);
560 }
561}
562
563/**
564 * There is no attached fabric - private loop or NPort-to-NPort topology.
565 */
566static void
567bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
568 enum bfa_fcs_fabric_event event)
569{
570 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
571 bfa_trc(fabric->fcs, event);
572
573 switch (event) {
574 case BFA_FCS_FABRIC_SM_LINK_DOWN:
575 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
576 bfa_lps_discard(fabric->lps);
577 bfa_fcs_fabric_notify_offline(fabric);
578 break;
579
580 case BFA_FCS_FABRIC_SM_DELETE:
581 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
582 bfa_fcs_fabric_delete(fabric);
583 break;
584
585 case BFA_FCS_FABRIC_SM_NO_FABRIC:
586 bfa_trc(fabric->fcs, fabric->bb_credit);
587 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
588 fabric->bb_credit);
589 break;
590
591 default:
592 bfa_sm_fault(fabric->fcs, event);
593 }
594}
595
596/**
597 * Fabric is online - normal operating state.
598 */
599static void
600bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
601 enum bfa_fcs_fabric_event event)
602{
603 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
604 bfa_trc(fabric->fcs, event);
605
606 switch (event) {
607 case BFA_FCS_FABRIC_SM_LINK_DOWN:
608 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
609 bfa_lps_discard(fabric->lps);
610 bfa_fcs_fabric_notify_offline(fabric);
611 break;
612
613 case BFA_FCS_FABRIC_SM_DELETE:
614 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
615 bfa_fcs_fabric_delete(fabric);
616 break;
617
618 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
619 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
620 bfa_lps_discard(fabric->lps);
621 break;
622
623 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
624 break;
625
626 default:
627 bfa_sm_fault(fabric->fcs, event);
628 }
629}
630
631/**
632 * Exchanging virtual fabric parameters.
633 */
634static void
635bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
636 enum bfa_fcs_fabric_event event)
637{
638 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
639 bfa_trc(fabric->fcs, event);
640
641 switch (event) {
642 case BFA_FCS_FABRIC_SM_CONT_OP:
643 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
644 break;
197 645
646 case BFA_FCS_FABRIC_SM_ISOLATE:
647 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
648 break;
649
650 default:
651 bfa_sm_fault(fabric->fcs, event);
652 }
653}
654
655/**
656 * EVFP exchange complete and VFT tagging is enabled.
657 */
658static void
659bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
660 enum bfa_fcs_fabric_event event)
661{
662 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
663 bfa_trc(fabric->fcs, event);
664}
665
666/**
667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
668 */
669static void
670bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
671 enum bfa_fcs_fabric_event event)
672{
673 struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
674 char pwwn_ptr[BFA_STRING_32];
675
676 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
677 bfa_trc(fabric->fcs, event);
678 wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
679
680 BFA_LOG(KERN_INFO, bfad, log_level,
681 "Port is isolated due to VF_ID mismatch. "
682 "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
683 pwwn_ptr, fabric->fcs->port_vfid,
684 fabric->event_arg.swp_vfid);
685}
686
687/**
688 * Fabric is being deleted, awaiting vport delete completions.
689 */
690static void
691bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
692 enum bfa_fcs_fabric_event event)
693{
694 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
695 bfa_trc(fabric->fcs, event);
696
697 switch (event) {
698 case BFA_FCS_FABRIC_SM_DELCOMP:
699 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
700 bfa_fcs_modexit_comp(fabric->fcs);
701 break;
702
703 case BFA_FCS_FABRIC_SM_LINK_UP:
704 break;
705
706 case BFA_FCS_FABRIC_SM_LINK_DOWN:
707 bfa_fcs_fabric_notify_offline(fabric);
708 break;
709
710 default:
711 bfa_sm_fault(fabric->fcs, event);
712 }
713}
714
715
716
717/**
718 * fcs_fabric_private fabric private functions
719 */
720
721static void
722bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
723{
724 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
725
726 port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
727 port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
729}
730
731/**
732 * Port Symbolic Name Creation for base port.
733 */
198void 734void
199bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod) 735bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
736{
737 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
738 char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
739 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
740
741 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
742
743 /* Model name/number */
744 strncpy((char *)&port_cfg->sym_name, model,
745 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
746 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
747 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
748
749 /* Driver Version */
750 strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
751 BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
752 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
753 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
754
755 /* Host machine name */
756 strncat((char *)&port_cfg->sym_name,
757 (char *)driver_info->host_machine_name,
758 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
759 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
760 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
761
762 /*
763 * Host OS Info :
764 * If OS Patch Info is not there, do not truncate any bytes from the
765 * OS name string and instead copy the entire OS info string (64 bytes).
766 */
767 if (driver_info->host_os_patch[0] == '\0') {
768 strncat((char *)&port_cfg->sym_name,
769 (char *)driver_info->host_os_name,
770 BFA_FCS_OS_STR_LEN);
771 strncat((char *)&port_cfg->sym_name,
772 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
773 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
774 } else {
775 strncat((char *)&port_cfg->sym_name,
776 (char *)driver_info->host_os_name,
777 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
778 strncat((char *)&port_cfg->sym_name,
779 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
780 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
781
782 /* Append host OS Patch Info */
783 strncat((char *)&port_cfg->sym_name,
784 (char *)driver_info->host_os_patch,
785 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
786 }
787
788 /* null terminate */
789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
790}
791
792/**
793 * bfa lps login completion callback
794 */
795void
796bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
797{
798 struct bfa_fcs_fabric_s *fabric = uarg;
799
800 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
801 bfa_trc(fabric->fcs, status);
802
803 switch (status) {
804 case BFA_STATUS_OK:
805 fabric->stats.flogi_accepts++;
806 break;
807
808 case BFA_STATUS_INVALID_MAC:
809 /* Only for CNA */
810 fabric->stats.flogi_acc_err++;
811 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
812
813 return;
814
815 case BFA_STATUS_EPROTOCOL:
816 switch (bfa_lps_get_extstatus(fabric->lps)) {
817 case BFA_EPROTO_BAD_ACCEPT:
818 fabric->stats.flogi_acc_err++;
819 break;
820
821 case BFA_EPROTO_UNKNOWN_RSP:
822 fabric->stats.flogi_unknown_rsp++;
823 break;
824
825 default:
826 break;
827 }
828 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
829
830 return;
831
832 case BFA_STATUS_FABRIC_RJT:
833 fabric->stats.flogi_rejects++;
834 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
835 return;
836
837 default:
838 fabric->stats.flogi_rsp_err++;
839 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
840 return;
841 }
842
843 fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
844 bfa_trc(fabric->fcs, fabric->bb_credit);
845
846 if (!bfa_lps_is_brcd_fabric(fabric->lps))
847 fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps);
848
849 /*
850 * Check port type. It should be 1 = F-port.
851 */
852 if (bfa_lps_is_fport(fabric->lps)) {
853 fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
854 fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
855 fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
856 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
857 } else {
858 /*
859 * Nport-2-Nport direct attached
860 */
861 fabric->bport.port_topo.pn2n.rem_port_wwn =
862 bfa_lps_get_peer_pwwn(fabric->lps);
863 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
864 }
865
866 bfa_trc(fabric->fcs, fabric->bport.pid);
867 bfa_trc(fabric->fcs, fabric->is_npiv);
868 bfa_trc(fabric->fcs, fabric->is_auth);
869}
870/**
871 * Allocate and send FLOGI.
872 */
873static void
874bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
875{
876 struct bfa_s *bfa = fabric->fcs->bfa;
877 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
878 u8 alpa = 0;
879
880 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
881 alpa = bfa_fcport_get_myalpa(bfa);
882
883 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
884 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
885
886 fabric->stats.flogi_sent++;
887}
888
889static void
890bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
891{
892 struct bfa_fcs_vport_s *vport;
893 struct list_head *qe, *qen;
894
895 bfa_trc(fabric->fcs, fabric->fabric_name);
896
897 bfa_fcs_fabric_set_opertype(fabric);
898 fabric->stats.fabric_onlines++;
899
900 /**
901 * notify online event to base and then virtual ports
902 */
903 bfa_fcs_lport_online(&fabric->bport);
904
905 list_for_each_safe(qe, qen, &fabric->vport_q) {
906 vport = (struct bfa_fcs_vport_s *) qe;
907 bfa_fcs_vport_online(vport);
908 }
909}
910
911static void
912bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
913{
914 struct bfa_fcs_vport_s *vport;
915 struct list_head *qe, *qen;
916
917 bfa_trc(fabric->fcs, fabric->fabric_name);
918 fabric->stats.fabric_offlines++;
919
920 /**
921 * notify offline event first to vports and then base port.
922 */
923 list_for_each_safe(qe, qen, &fabric->vport_q) {
924 vport = (struct bfa_fcs_vport_s *) qe;
925 bfa_fcs_vport_offline(vport);
926 }
927
928 bfa_fcs_lport_offline(&fabric->bport);
929
930 fabric->fabric_name = 0;
931 fabric->fabric_ip_addr[0] = 0;
932}
933
934static void
935bfa_fcs_fabric_delay(void *cbarg)
200{ 936{
201 fcs->logm = logmod; 937 struct bfa_fcs_fabric_s *fabric = cbarg;
938
939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
202} 940}
203 941
942/**
943 * Delete all vports and wait for vport delete completions.
944 */
945static void
946bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
947{
948 struct bfa_fcs_vport_s *vport;
949 struct list_head *qe, *qen;
204 950
951 list_for_each_safe(qe, qen, &fabric->vport_q) {
952 vport = (struct bfa_fcs_vport_s *) qe;
953 bfa_fcs_vport_fcs_delete(vport);
954 }
955
956 bfa_fcs_lport_delete(&fabric->bport);
957 bfa_wc_wait(&fabric->wc);
958}
959
960static void
961bfa_fcs_fabric_delete_comp(void *cbarg)
962{
963 struct bfa_fcs_fabric_s *fabric = cbarg;
964
965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
966}
967
968/**
969 * fcs_fabric_public fabric public functions
970 */
971
972/**
973 * Attach time initialization.
974 */
205void 975void
206bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen) 976bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
207{ 977{
208 fcs->aen = aen; 978 struct bfa_fcs_fabric_s *fabric;
979
980 fabric = &fcs->fabric;
981 bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
982
983 /**
984 * Initialize base fabric.
985 */
986 fabric->fcs = fcs;
987 INIT_LIST_HEAD(&fabric->vport_q);
988 INIT_LIST_HEAD(&fabric->vf_q);
989 fabric->lps = bfa_lps_alloc(fcs->bfa);
990 bfa_assert(fabric->lps);
991
992 /**
993 * Initialize fabric delete completion handler. Fabric deletion is
994 * complete when the last vport delete is complete.
995 */
996 bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
997 bfa_wc_up(&fabric->wc); /* For the base port */
998
999 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
1000 bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
209} 1001}
210 1002
211void 1003void
212bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) 1004bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
213{ 1005{
214 bfa_wc_down(&fcs->wc); 1006 bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
1007 bfa_trc(fcs, 0);
215} 1008}
216 1009
1010/**
1011 * Module cleanup
1012 */
1013void
1014bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1015{
1016 struct bfa_fcs_fabric_s *fabric;
1017
1018 bfa_trc(fcs, 0);
1019
1020 /**
1021 * Cleanup base fabric.
1022 */
1023 fabric = &fcs->fabric;
1024 bfa_lps_delete(fabric->lps);
1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
1026}
217 1027
1028/**
1029 * Fabric module start -- kick starts FCS actions
1030 */
1031void
1032bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
1033{
1034 struct bfa_fcs_fabric_s *fabric;
1035
1036 bfa_trc(fcs, 0);
1037 fabric = &fcs->fabric;
1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
1039}
1040
1041/**
1042 * Suspend fabric activity as part of driver suspend.
1043 */
1044void
1045bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
1046{
1047}
1048
1049bfa_boolean_t
1050bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
1051{
1052 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
1053}
1054
1055bfa_boolean_t
1056bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
1057{
1058 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
1059}
1060
1061enum bfa_port_type
1062bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
1063{
1064 return fabric->oper_type;
1065}
1066
1067/**
1068 * Link up notification from BFA physical port module.
1069 */
1070void
1071bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
1072{
1073 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
1075}
1076
1077/**
1078 * Link down notification from BFA physical port module.
1079 */
1080void
1081bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
1082{
1083 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
1085}
1086
1087/**
1088 * A child vport is being created in the fabric.
1089 *
1090 * Call from vport module at vport creation. A list of base port and vports
1091 * belonging to a fabric is maintained to propagate link events.
1092 *
1093 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1094 * param[in] vport - Vport being created.
1095 *
1096 * @return None (always succeeds)
1097 */
1098void
1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1100 struct bfa_fcs_vport_s *vport)
1101{
1102 /**
1103 * - add vport to fabric's vport_q
1104 */
1105 bfa_trc(fabric->fcs, fabric->vf_id);
1106
1107 list_add_tail(&vport->qe, &fabric->vport_q);
1108 fabric->num_vports++;
1109 bfa_wc_up(&fabric->wc);
1110}
1111
1112/**
1113 * A child vport is being deleted from fabric.
1114 *
1115 * Vport is being deleted.
1116 */
1117void
1118bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
1119 struct bfa_fcs_vport_s *vport)
1120{
1121 list_del(&vport->qe);
1122 fabric->num_vports--;
1123 bfa_wc_down(&fabric->wc);
1124}
1125
1126/**
1127 * Base port is deleted.
1128 */
1129void
1130bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
1131{
1132 bfa_wc_down(&fabric->wc);
1133}
1134
1135
1136/**
1137 * Check if fabric is online.
1138 *
1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1140 *
1141 * @return TRUE/FALSE
1142 */
1143int
1144bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
1145{
1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
1147}
1148
1149/**
1150 * brief
1151 *
1152 */
1153bfa_status_t
1154bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
1155 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
1156{
1157 bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
1158 return BFA_STATUS_OK;
1159}
1160
1161/**
1162 * Lookup for a vport withing a fabric given its pwwn
1163 */
1164struct bfa_fcs_vport_s *
1165bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1166{
1167 struct bfa_fcs_vport_s *vport;
1168 struct list_head *qe;
1169
1170 list_for_each(qe, &fabric->vport_q) {
1171 vport = (struct bfa_fcs_vport_s *) qe;
1172 if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
1173 return vport;
1174 }
1175
1176 return NULL;
1177}
1178
1179/**
1180 * In a given fabric, return the number of lports.
1181 *
1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1183 *
1184 * @return : 1 or more.
1185 */
1186u16
1187bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1188{
1189 return fabric->num_vports;
1190}
1191
1192/*
1193 * Get OUI of the attached switch.
1194 *
1195 * Note : Use of this function should be avoided as much as possible.
1196 * This function should be used only if there is any requirement
1197* to check for FOS version below 6.3.
1198 * To check if the attached fabric is a brocade fabric, use
1199 * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
1200 * or above only.
1201 */
1202
1203u16
1204bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1205{
1206 wwn_t fab_nwwn;
1207 u8 *tmp;
1208 u16 oui;
1209
1210 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
1211
1212 tmp = (u8 *)&fab_nwwn;
1213 oui = (tmp[3] << 8) | tmp[4];
1214
1215 return oui;
1216}
1217/**
1218 * Unsolicited frame receive handling.
1219 */
1220void
1221bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1222 u16 len)
1223{
1224 u32 pid = fchs->d_id;
1225 struct bfa_fcs_vport_s *vport;
1226 struct list_head *qe;
1227 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1228 struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
1229
1230 bfa_trc(fabric->fcs, len);
1231 bfa_trc(fabric->fcs, pid);
1232
1233 /**
1234 * Look for our own FLOGI frames being looped back. This means an
1235 * external loopback cable is in place. Our own FLOGI frames are
1236 * sometimes looped back when switch port gets temporarily bypassed.
1237 */
1238 if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
1239 (els_cmd->els_code == FC_ELS_FLOGI) &&
1240 (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
1241 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
1242 return;
1243 }
1244
1245 /**
1246 * FLOGI/EVFP exchanges should be consumed by base fabric.
1247 */
1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
1249 bfa_trc(fabric->fcs, pid);
1250 bfa_fcs_fabric_process_uf(fabric, fchs, len);
1251 return;
1252 }
1253
1254 if (fabric->bport.pid == pid) {
1255 /**
1256 * All authentication frames should be routed to auth
1257 */
1258 bfa_trc(fabric->fcs, els_cmd->els_code);
1259 if (els_cmd->els_code == FC_ELS_AUTH) {
1260 bfa_trc(fabric->fcs, els_cmd->els_code);
1261 return;
1262 }
1263
1264 bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
1265 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1266 return;
1267 }
1268
1269 /**
1270 * look for a matching local port ID
1271 */
1272 list_for_each(qe, &fabric->vport_q) {
1273 vport = (struct bfa_fcs_vport_s *) qe;
1274 if (vport->lport.pid == pid) {
1275 bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
1276 return;
1277 }
1278 }
1279 bfa_trc(fabric->fcs, els_cmd->els_code);
1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1281}
1282
1283/**
1284 * Unsolicited frames to be processed by fabric.
1285 */
1286static void
1287bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1288 u16 len)
1289{
1290 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1291
1292 bfa_trc(fabric->fcs, els_cmd->els_code);
1293
1294 switch (els_cmd->els_code) {
1295 case FC_ELS_FLOGI:
1296 bfa_fcs_fabric_process_flogi(fabric, fchs, len);
1297 break;
1298
1299 default:
1300 /*
1301 * need to generate a LS_RJT
1302 */
1303 break;
1304 }
1305}
1306
1307/**
1308 * Process incoming FLOGI
1309 */
1310static void
1311bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
1312 struct fchs_s *fchs, u16 len)
1313{
1314 struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
1315 struct bfa_fcs_lport_s *bport = &fabric->bport;
1316
1317 bfa_trc(fabric->fcs, fchs->s_id);
1318
1319 fabric->stats.flogi_rcvd++;
1320 /*
1321 * Check port type. It should be 0 = n-port.
1322 */
1323 if (flogi->csp.port_type) {
1324 /*
1325 * @todo: may need to send a LS_RJT
1326 */
1327 bfa_trc(fabric->fcs, flogi->port_name);
1328 fabric->stats.flogi_rejected++;
1329 return;
1330 }
1331
1332 fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
1333 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
1334 bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
1335
1336 /*
1337 * Send a Flogi Acc
1338 */
1339 bfa_fcs_fabric_send_flogi_acc(fabric);
1340 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
1341}
1342
1343static void
1344bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1345{
1346 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
1347 struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
1348 struct bfa_s *bfa = fabric->fcs->bfa;
1349 struct bfa_fcxp_s *fcxp;
1350 u16 reqlen;
1351 struct fchs_s fchs;
1352
1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
1354 /**
1355 * Do not expect this failure -- expect remote node to retry
1356 */
1357 if (!fcxp)
1358 return;
1359
1360 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1361 bfa_os_hton3b(FC_FABRIC_PORT),
1362 n2n_port->reply_oxid, pcfg->pwwn,
1363 pcfg->nwwn,
1364 bfa_fcport_get_maxfrsize(bfa),
1365 bfa_fcport_get_rx_bbcredit(bfa));
1366
1367 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
1368 BFA_FALSE, FC_CLASS_3,
1369 reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
1370 FC_MAX_PDUSZ, 0);
1371}
1372
1373/**
1374 * Flogi Acc completion callback.
1375 */
1376static void
1377bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1378 bfa_status_t status, u32 rsp_len,
1379 u32 resid_len, struct fchs_s *rspfchs)
1380{
1381 struct bfa_fcs_fabric_s *fabric = cbarg;
1382
1383 bfa_trc(fabric->fcs, status);
1384}
1385
1386/*
1387 *
1388 * @param[in] fabric - fabric
1389 * @param[in] wwn_t - new fabric name
1390 *
1391 * @return - none
1392 */
1393void
1394bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1395 wwn_t fabric_name)
1396{
1397 struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
1398 char pwwn_ptr[BFA_STRING_32];
1399 char fwwn_ptr[BFA_STRING_32];
1400
1401 bfa_trc(fabric->fcs, fabric_name);
1402
1403 if (fabric->fabric_name == 0) {
1404 /*
1405 * With BRCD switches, we don't get Fabric Name in FLOGI.
1406 * Don't generate a fabric name change event in this case.
1407 */
1408 fabric->fabric_name = fabric_name;
1409 } else {
1410 fabric->fabric_name = fabric_name;
1411 wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
1412 wwn2str(fwwn_ptr,
1413 bfa_fcs_lport_get_fabric_name(&fabric->bport));
1414 BFA_LOG(KERN_WARNING, bfad, log_level,
1415 "Base port WWN = %s Fabric WWN = %s\n",
1416 pwwn_ptr, fwwn_ptr);
1417 }
1418}
1419
1420/**
1421 * fcs_vf_api virtual fabrics API
1422 */
1423
1424/**
1425 * Enable VF mode.
1426 *
1427 * @param[in] fcs fcs module instance
1428 * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
1429 * to use standard default vf_id of 1.
1430 *
1431 * @retval BFA_STATUS_OK vf mode is enabled
1432 * @retval BFA_STATUS_BUSY Port is active. Port must be disabled
1433 * before VF mode can be enabled.
1434 */
1435bfa_status_t
1436bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
1437{
1438 return BFA_STATUS_OK;
1439}
1440
1441/**
1442 * Disable VF mode.
1443 *
1444 * @param[in] fcs fcs module instance
1445 *
1446 * @retval BFA_STATUS_OK vf mode is disabled
1447 * @retval BFA_STATUS_BUSY VFs are present and being used. All
1448 * VFs must be deleted before disabling
1449 * VF mode.
1450 */
1451bfa_status_t
1452bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
1453{
1454 return BFA_STATUS_OK;
1455}
1456
1457/**
1458 * Create a new VF instance.
1459 *
1460 * A new VF is created using the given VF configuration. A VF is identified
1461 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
1462 * a VF is created, VF is automatically started after link initialization
1463 * and EVFP exchange is completed.
1464 *
1465 * param[in] vf - FCS vf data structure. Memory is
1466 * allocated by caller (driver)
1467 * param[in] fcs - FCS module
1468 * param[in] vf_cfg - VF configuration
1469 * param[in] vf_drv - Opaque handle back to the driver's
1470 * virtual vf structure
1471 *
1472 * retval BFA_STATUS_OK VF creation is successful
1473 * retval BFA_STATUS_FAILED VF creation failed
1474 * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
1475 */
1476bfa_status_t
1477bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
1478 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
1479{
1480 bfa_trc(fcs, vf_id);
1481 return BFA_STATUS_OK;
1482}
1483
1484/**
1485 * Use this function to delete a BFA VF object. VF object should
1486 * be stopped before this function call.
1487 *
1488 * param[in] vf - pointer to bfa_vf_t.
1489 *
1490 * retval BFA_STATUS_OK On vf deletion success
1491 * retval BFA_STATUS_BUSY VF is not in a stopped state
1492 * retval BFA_STATUS_INPROGRESS VF deletion in in progress
1493 */
1494bfa_status_t
1495bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
1496{
1497 bfa_trc(vf->fcs, vf->vf_id);
1498 return BFA_STATUS_OK;
1499}
1500
1501
1502/**
1503 * Returns attributes of the given VF.
1504 *
1505 * param[in] vf pointer to bfa_vf_t.
1506 * param[out] vf_attr vf attributes returned
1507 *
1508 * return None
1509 */
1510void
1511bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
1512{
1513 bfa_trc(vf->fcs, vf->vf_id);
1514}
1515
1516/**
1517 * Return statistics associated with the given vf.
1518 *
1519 * param[in] vf pointer to bfa_vf_t.
1520 * param[out] vf_stats vf statistics returned
1521 *
1522 * @return None
1523 */
1524void
1525bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
1526{
1527 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
1528}
1529
1530/**
1531 * clear statistics associated with the given vf.
1532 *
1533 * param[in] vf pointer to bfa_vf_t.
1534 *
1535 * @return None
1536 */
1537void
1538bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
1539{
1540 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
1541}
1542
1543/**
1544 * Returns FCS vf structure for a given vf_id.
1545 *
1546 * param[in] vf_id - VF_ID
1547 *
1548 * return
1549 * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
1550 */
1551bfa_fcs_vf_t *
1552bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
1553{
1554 bfa_trc(fcs, vf_id);
1555 if (vf_id == FC_VF_ID_NULL)
1556 return &fcs->fabric;
1557
1558 return NULL;
1559}
1560
1561/**
1562 * Return the list of VFs configured.
1563 *
1564 * param[in] fcs fcs module instance
1565 * param[out] vf_ids returned list of vf_ids
1566 * param[in,out] nvfs in:size of vf_ids array,
1567 * out:total elements present,
1568 * actual elements returned is limited by the size
1569 *
1570 * return Driver VF structure
1571 */
1572void
1573bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
1574{
1575 bfa_trc(fcs, *nvfs);
1576}
1577
1578/**
1579 * Return the list of all VFs visible from fabric.
1580 *
1581 * param[in] fcs fcs module instance
1582 * param[out] vf_ids returned list of vf_ids
1583 * param[in,out] nvfs in:size of vf_ids array,
1584 * out:total elements present,
1585 * actual elements returned is limited by the size
1586 *
1587 * return Driver VF structure
1588 */
1589void
1590bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
1591{
1592 bfa_trc(fcs, *nvfs);
1593}
1594
1595/**
1596 * Return the list of local logical ports present in the given VF.
1597 *
1598 * param[in] vf vf for which logical ports are returned
1599 * param[out] lpwwn returned logical port wwn list
1600 * param[in,out] nlports in:size of lpwwn list;
1601 * out:total elements present,
1602 * actual elements returned is limited by the size
1603 */
1604void
1605bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
1606{
1607 struct list_head *qe;
1608 struct bfa_fcs_vport_s *vport;
1609 int i;
1610 struct bfa_fcs_s *fcs;
1611
1612 if (vf == NULL || lpwwn == NULL || *nlports == 0)
1613 return;
1614
1615 fcs = vf->fcs;
1616
1617 bfa_trc(fcs, vf->vf_id);
1618 bfa_trc(fcs, (u32) *nlports);
1619
1620 i = 0;
1621 lpwwn[i++] = vf->bport.port_cfg.pwwn;
1622
1623 list_for_each(qe, &vf->vport_q) {
1624 if (i >= *nlports)
1625 break;
1626
1627 vport = (struct bfa_fcs_vport_s *) qe;
1628 lpwwn[i++] = vport->lport.port_cfg.pwwn;
1629 }
1630
1631 bfa_trc(fcs, i);
1632 *nlports = i;
1633}
1634
1635/**
1636 * BFA FCS PPORT ( physical port)
1637 */
1638static void
1639bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
1640{
1641 struct bfa_fcs_s *fcs = cbarg;
1642
1643 bfa_trc(fcs, event);
1644
1645 switch (event) {
1646 case BFA_PORT_LINKUP:
1647 bfa_fcs_fabric_link_up(&fcs->fabric);
1648 break;
1649
1650 case BFA_PORT_LINKDOWN:
1651 bfa_fcs_fabric_link_down(&fcs->fabric);
1652 break;
1653
1654 default:
1655 bfa_assert(0);
1656 }
1657}
1658
1659void
1660bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
1661{
1662 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
1663}
1664
1665/**
1666 * BFA FCS UF ( Unsolicited Frames)
1667 */
1668
1669/**
1670 * BFA callback for unsolicited frame receive handler.
1671 *
1672 * @param[in] cbarg callback arg for receive handler
1673 * @param[in] uf unsolicited frame descriptor
1674 *
1675 * @return None
1676 */
1677static void
1678bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1679{
1680 struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
1681 struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
1682 u16 len = bfa_uf_get_frmlen(uf);
1683 struct fc_vft_s *vft;
1684 struct bfa_fcs_fabric_s *fabric;
1685
1686 /**
1687 * check for VFT header
1688 */
1689 if (fchs->routing == FC_RTG_EXT_HDR &&
1690 fchs->cat_info == FC_CAT_VFT_HDR) {
1691 bfa_stats(fcs, uf.tagged);
1692 vft = bfa_uf_get_frmbuf(uf);
1693 if (fcs->port_vfid == vft->vf_id)
1694 fabric = &fcs->fabric;
1695 else
1696 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
1697
1698 /**
1699 * drop frame if vfid is unknown
1700 */
1701 if (!fabric) {
1702 bfa_assert(0);
1703 bfa_stats(fcs, uf.vfid_unknown);
1704 bfa_uf_free(uf);
1705 return;
1706 }
1707
1708 /**
1709 * skip vft header
1710 */
1711 fchs = (struct fchs_s *) (vft + 1);
1712 len -= sizeof(struct fc_vft_s);
1713
1714 bfa_trc(fcs, vft->vf_id);
1715 } else {
1716 bfa_stats(fcs, uf.untagged);
1717 fabric = &fcs->fabric;
1718 }
1719
1720 bfa_trc(fcs, ((u32 *) fchs)[0]);
1721 bfa_trc(fcs, ((u32 *) fchs)[1]);
1722 bfa_trc(fcs, ((u32 *) fchs)[2]);
1723 bfa_trc(fcs, ((u32 *) fchs)[3]);
1724 bfa_trc(fcs, ((u32 *) fchs)[4]);
1725 bfa_trc(fcs, ((u32 *) fchs)[5]);
1726 bfa_trc(fcs, len);
1727
1728 bfa_fcs_fabric_uf_recv(fabric, fchs, len);
1729 bfa_uf_free(uf);
1730}
1731
1732void
1733bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
1734{
1735 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
1736}
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
new file mode 100644
index 000000000000..d75045df1e7e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -0,0 +1,779 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_H__
19#define __BFA_FCS_H__
20
21#include "bfa_cs.h"
22#include "bfa_defs.h"
23#include "bfa_defs_fcs.h"
24#include "bfa_modules.h"
25#include "bfa_fc.h"
26
27#define BFA_FCS_OS_STR_LEN 64
28
29/*
30 * !!! Only append to the enums defined here to avoid any versioning
31 * !!! needed between trace utility and driver version
32 */
33enum {
34 BFA_TRC_FCS_FCS = 1,
35 BFA_TRC_FCS_PORT = 2,
36 BFA_TRC_FCS_RPORT = 3,
37 BFA_TRC_FCS_FCPIM = 4,
38};
39
40
41struct bfa_fcs_s;
42
43#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
44void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
45
46#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
47#define N2N_LOCAL_PID 0x010000
48#define N2N_REMOTE_PID 0x020000
49#define BFA_FCS_RETRY_TIMEOUT 2000
50#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
51
52
53
54struct bfa_fcs_lport_ns_s {
55 bfa_sm_t sm; /* state machine */
56 struct bfa_timer_s timer;
57 struct bfa_fcs_lport_s *port; /* parent port */
58 struct bfa_fcxp_s *fcxp;
59 struct bfa_fcxp_wqe_s fcxp_wqe;
60};
61
62
63struct bfa_fcs_lport_scn_s {
64 bfa_sm_t sm; /* state machine */
65 struct bfa_timer_s timer;
66 struct bfa_fcs_lport_s *port; /* parent port */
67 struct bfa_fcxp_s *fcxp;
68 struct bfa_fcxp_wqe_s fcxp_wqe;
69};
70
71
72struct bfa_fcs_lport_fdmi_s {
73 bfa_sm_t sm; /* state machine */
74 struct bfa_timer_s timer;
75 struct bfa_fcs_lport_ms_s *ms; /* parent ms */
76 struct bfa_fcxp_s *fcxp;
77 struct bfa_fcxp_wqe_s fcxp_wqe;
78 u8 retry_cnt; /* retry count */
79 u8 rsvd[3];
80};
81
82
83struct bfa_fcs_lport_ms_s {
84 bfa_sm_t sm; /* state machine */
85 struct bfa_timer_s timer;
86 struct bfa_fcs_lport_s *port; /* parent port */
87 struct bfa_fcxp_s *fcxp;
88 struct bfa_fcxp_wqe_s fcxp_wqe;
89 struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */
90 u8 retry_cnt; /* retry count */
91 u8 rsvd[3];
92};
93
94
95struct bfa_fcs_lport_fab_s {
96 struct bfa_fcs_lport_ns_s ns; /* NS component of port */
97 struct bfa_fcs_lport_scn_s scn; /* scn component of port */
98 struct bfa_fcs_lport_ms_s ms; /* MS component of port */
99};
100
101#define MAX_ALPA_COUNT 127
102
103struct bfa_fcs_lport_loop_s {
104 u8 num_alpa; /* Num of ALPA entries in the map */
105 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional
106 *Map */
107 struct bfa_fcs_lport_s *port; /* parent port */
108};
109
110struct bfa_fcs_lport_n2n_s {
111 u32 rsvd;
112 u16 reply_oxid; /* ox_id from the req flogi to be
113 *used in flogi acc */
114 wwn_t rem_port_wwn; /* Attached port's wwn */
115};
116
117
118union bfa_fcs_lport_topo_u {
119 struct bfa_fcs_lport_fab_s pfab;
120 struct bfa_fcs_lport_loop_s ploop;
121 struct bfa_fcs_lport_n2n_s pn2n;
122};
123
124
125struct bfa_fcs_lport_s {
126 struct list_head qe; /* used by port/vport */
127 bfa_sm_t sm; /* state machine */
128 struct bfa_fcs_fabric_s *fabric; /* parent fabric */
129 struct bfa_lport_cfg_s port_cfg; /* port configuration */
130 struct bfa_timer_s link_timer; /* timer for link offline */
131 u32 pid:24; /* FC address */
132 u8 lp_tag; /* lport tag */
133 u16 num_rports; /* Num of r-ports */
134 struct list_head rport_q; /* queue of discovered r-ports */
135 struct bfa_fcs_s *fcs; /* FCS instance */
136 union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */
137 struct bfad_port_s *bfad_port; /* driver peer instance */
138 struct bfa_fcs_vport_s *vport; /* NULL for base ports */
139 struct bfa_fcxp_s *fcxp;
140 struct bfa_fcxp_wqe_s fcxp_wqe;
141 struct bfa_lport_stats_s stats;
142 struct bfa_wc_s wc; /* waiting counter for events */
143};
144#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
145#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
146#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
147#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
148#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
149#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
150 (port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
151
152/*
153 * forward declaration
154 */
155struct bfad_vf_s;
156
/* Detected fabric topology type. */
enum bfa_fcs_fabric_type {
	BFA_FCS_FABRIC_UNKNOWN = 0,
	BFA_FCS_FABRIC_SWITCHED = 1,	/* attached to a switch */
	BFA_FCS_FABRIC_N2N = 2,		/* point-to-point */
};
162
163
164struct bfa_fcs_fabric_s {
165 struct list_head qe; /* queue element */
166 bfa_sm_t sm; /* state machine */
167 struct bfa_fcs_s *fcs; /* FCS instance */
168 struct bfa_fcs_lport_s bport; /* base logical port */
169 enum bfa_fcs_fabric_type fab_type; /* fabric type */
170 enum bfa_port_type oper_type; /* current link topology */
171 u8 is_vf; /* is virtual fabric? */
172 u8 is_npiv; /* is NPIV supported ? */
173 u8 is_auth; /* is Security/Auth supported ? */
174 u16 bb_credit; /* BB credit from fabric */
175 u16 vf_id; /* virtual fabric ID */
176 u16 num_vports; /* num vports */
177 u16 rsvd;
178 struct list_head vport_q; /* queue of virtual ports */
179 struct list_head vf_q; /* queue of virtual fabrics */
180 struct bfad_vf_s *vf_drv; /* driver vf structure */
181 struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
182 wwn_t fabric_name; /* attached fabric name */
183 bfa_boolean_t auth_reqd; /* authentication required */
184 struct bfa_timer_s delay_timer; /* delay timer */
185 union {
186 u16 swp_vfid;/* switch port VF id */
187 } event_arg;
188 struct bfa_wc_s wc; /* wait counter for delete */
189 struct bfa_vf_stats_s stats; /* fabric/vf stats */
190 struct bfa_lps_s *lps; /* lport login services */
191 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
192 /* attached fabric's ip addr */
193};
194
195#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
196#define bfa_fcs_fabric_is_switched(__f) \
197 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
198
199/**
200 * The design calls for a single implementation of base fabric and vf.
201 */
202#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
203
204struct bfa_vf_event_s {
205 u32 undefined;
206};
207
208struct bfa_fcs_s;
209struct bfa_fcs_fabric_s;
210
211/*
212 * @todo : need to move to a global config file.
213 * Maximum Rports supported per port (physical/logical).
214 */
215#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
216
217#define bfa_fcs_lport_t struct bfa_fcs_lport_s
218
219/**
220 * Symbolic Name related defines
221 * Total bytes 255.
222 * Physical Port's symbolic name 128 bytes.
223 * For Vports, Vport's symbolic name is appended to the Physical port's
224 * Symbolic Name.
225 *
226 * Physical Port's symbolic name Format : (Total 128 bytes)
227 * Adapter Model number/name : 12 bytes
228 * Driver Version : 10 bytes
229 * Host Machine Name : 30 bytes
230 * Host OS Info : 48 bytes
231 * Host OS PATCH Info : 16 bytes
232 * ( remaining 12 bytes reserved to be used for separator)
233 */
234#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
235
236#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12
237#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
238#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
239#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
240#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
241
242/**
243 * Get FC port ID for a logical port.
244 */
245#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
246#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
247#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
248#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
249#define bfa_fcs_lport_is_initiator(_lport) \
250 ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
251#define bfa_fcs_lport_get_nrports(_lport) \
252 ((_lport) ? (_lport)->num_rports : 0)
253
254static inline struct bfad_port_s *
255bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
256{
257 return port->bfad_port;
258}
259
260#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type)
261#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
262#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
263 ((_lport)->fabric->fabric_ip_addr)
264
265/**
266 * bfa fcs port public functions
267 */
268
269bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
270struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
271void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
272 wwn_t rport_wwns[], int *nrports);
273
274wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
275 int index, int nrports, bfa_boolean_t bwwn);
276
277struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
278 u16 vf_id, wwn_t lpwwn);
279
280void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
281 struct bfa_lport_info_s *port_info);
282void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
283 struct bfa_lport_attr_s *port_attr);
284void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
285 struct bfa_lport_stats_s *port_stats);
286void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
287enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
288 struct bfa_fcs_lport_s *port);
289
290/* MS FCS routines */
291void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
292void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
293void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
294void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
295
296/* FDMI FCS routines */
297void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
298void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
299void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
300void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
301 u16 len);
302void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
303 u16 vf_id, struct bfa_fcs_vport_s *vport);
304void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
305 struct bfa_lport_cfg_s *port_cfg);
306void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
307void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
308void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
309struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
310 struct bfa_fcs_lport_s *port, u32 pid);
311struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
312 struct bfa_fcs_lport_s *port, wwn_t pwwn);
313struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
314 struct bfa_fcs_lport_s *port, wwn_t nwwn);
315void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
316 struct bfa_fcs_rport_s *rport);
317void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
318 struct bfa_fcs_rport_s *rport);
319void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
320void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
321void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
322void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
323void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
324void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
325void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
326void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
327void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
328void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
329 struct fchs_s *rx_frame, u32 len);
330
331struct bfa_fcs_vport_s {
332 struct list_head qe; /* queue elem */
333 bfa_sm_t sm; /* state machine */
334 bfa_fcs_lport_t lport; /* logical port */
335 struct bfa_timer_s timer;
336 struct bfad_vport_s *vport_drv; /* Driver private */
337 struct bfa_vport_stats_s vport_stats; /* vport statistics */
338 struct bfa_lps_s *lps; /* Lport login service*/
339 int fdisc_retries;
340};
341
342#define bfa_fcs_vport_get_port(vport) \
343 ((struct bfa_fcs_lport_s *)(&vport->port))
344
345/**
346 * bfa fcs vport public functions
347 */
348bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
349 struct bfa_fcs_s *fcs, u16 vf_id,
350 struct bfa_lport_cfg_s *port_cfg,
351 struct bfad_vport_s *vport_drv);
352bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
353 struct bfa_fcs_s *fcs, u16 vf_id,
354 struct bfa_lport_cfg_s *port_cfg,
355 struct bfad_vport_s *vport_drv);
356bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
357bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
358bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
359bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
360void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
361 struct bfa_vport_attr_s *vport_attr);
362void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
363 struct bfa_vport_stats_s *vport_stats);
364void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
365struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
366 u16 vf_id, wwn_t vpwwn);
367void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
368void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
369void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
370void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
371void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
372
373#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
374#define BFA_FCS_RPORT_MAX_RETRIES (5)
375
376/*
377 * forward declarations
378 */
379struct bfad_rport_s;
380
381struct bfa_fcs_itnim_s;
382struct bfa_fcs_tin_s;
383struct bfa_fcs_iprp_s;
384
385/* Rport Features (RPF) */
386struct bfa_fcs_rpf_s {
387 bfa_sm_t sm; /* state machine */
388 struct bfa_fcs_rport_s *rport; /* parent rport */
389 struct bfa_timer_s timer; /* general purpose timer */
390 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
391 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
392 int rpsc_retries; /* max RPSC retry attempts */
393 enum bfa_port_speed rpsc_speed;
394 /* Current Speed from RPSC. O if RPSC fails */
395 enum bfa_port_speed assigned_speed;
396 /**
397 * Speed assigned by the user. will be used if RPSC is
398 * not supported by the rport.
399 */
400};
401
402struct bfa_fcs_rport_s {
403 struct list_head qe; /* used by port/vport */
404 struct bfa_fcs_lport_s *port; /* parent FCS port */
405 struct bfa_fcs_s *fcs; /* fcs instance */
406 struct bfad_rport_s *rp_drv; /* driver peer instance */
407 u32 pid; /* port ID of rport */
408 u16 maxfrsize; /* maximum frame size */
409 u16 reply_oxid; /* OX_ID of inbound requests */
410 enum fc_cos fc_cos; /* FC classes of service supp */
411 bfa_boolean_t cisc; /* CISC capable device */
412 bfa_boolean_t prlo; /* processing prlo or LOGO */
413 wwn_t pwwn; /* port wwn of rport */
414 wwn_t nwwn; /* node wwn of rport */
415 struct bfa_rport_symname_s psym_name; /* port symbolic name */
416 bfa_sm_t sm; /* state machine */
417 struct bfa_timer_s timer; /* general purpose timer */
418 struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
419 struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
420 struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
421 struct bfa_rport_s *bfa_rport; /* BFA Rport */
422 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
423 int plogi_retries; /* max plogi retry attempts */
424 int ns_retries; /* max NS query retry attempts */
425 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
426 struct bfa_rport_stats_s stats; /* rport stats */
427 enum bfa_rport_function scsi_function; /* Initiator/Target */
428 struct bfa_fcs_rpf_s rpf; /* Rport features module */
429};
430
431static inline struct bfa_rport_s *
432bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
433{
434 return rport->bfa_rport;
435}
436
437/**
438 * bfa fcs rport API functions
439 */
440bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
441 struct bfa_fcs_rport_s *rport,
442 struct bfad_rport_s *rport_drv);
443bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
444void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
445 struct bfa_rport_attr_s *attr);
446void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
447 struct bfa_rport_stats_s *stats);
448void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
449struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
450 wwn_t rpwwn);
451struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
452 struct bfa_fcs_lport_s *port, wwn_t rnwwn);
453void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
454
455void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
456 enum bfa_port_speed speed);
457void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
458 struct fchs_s *fchs, u16 len);
459void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
460
461struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
462 u32 pid);
463void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
464void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
465void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
466void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
467 struct fc_logi_s *plogi_rsp);
468void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
469 struct fchs_s *rx_fchs,
470 struct fc_logi_s *plogi);
471void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
472 struct fc_logi_s *plogi);
473void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
474void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
475
476void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
477void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
478void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
479int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
480struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
481 struct bfa_fcs_lport_s *port, wwn_t wwn);
482void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
483void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
484void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
485
486/*
487 * forward declarations
488 */
489struct bfad_itnim_s;
490
491struct bfa_fcs_itnim_s {
492 bfa_sm_t sm; /* state machine */
493 struct bfa_fcs_rport_s *rport; /* parent remote rport */
494 struct bfad_itnim_s *itnim_drv; /* driver peer instance */
495 struct bfa_fcs_s *fcs; /* fcs instance */
496 struct bfa_timer_s timer; /* timer functions */
497 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
498 u32 prli_retries; /* max prli retry attempts */
499 bfa_boolean_t seq_rec; /* seq recovery support */
500 bfa_boolean_t rec_support; /* REC supported */
501 bfa_boolean_t conf_comp; /* FCP_CONF support */
502 bfa_boolean_t task_retry_id; /* task retry id supp */
503 struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
504 struct bfa_fcxp_s *fcxp; /* FCXP in use */
505 struct bfa_itnim_stats_s stats; /* itn statistics */
506};
507#define bfa_fcs_fcxp_alloc(__fcs) \
508 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
509
510#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
511 bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
512 NULL, 0, 0, NULL, NULL, NULL, NULL)
513
514static inline struct bfad_port_s *
515bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
516{
517 return itnim->rport->port->bfad_port;
518}
519
520
521static inline struct bfa_fcs_lport_s *
522bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
523{
524 return itnim->rport->port;
525}
526
527
528static inline wwn_t
529bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
530{
531 return itnim->rport->nwwn;
532}
533
534
535static inline wwn_t
536bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
537{
538 return itnim->rport->pwwn;
539}
540
541
542static inline u32
543bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
544{
545 return itnim->rport->pid;
546}
547
548
549static inline u32
550bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
551{
552 return itnim->rport->maxfrsize;
553}
554
555
556static inline enum fc_cos
557bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
558{
559 return itnim->rport->fc_cos;
560}
561
562
563static inline struct bfad_itnim_s *
564bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
565{
566 return itnim->itnim_drv;
567}
568
569
570static inline struct bfa_itnim_s *
571bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
572{
573 return itnim->bfa_itnim;
574}
575
576/**
577 * bfa fcs FCP Initiator mode API functions
578 */
579void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
580 struct bfa_itnim_attr_s *attr);
581void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
582 struct bfa_itnim_stats_s *stats);
583struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
584 wwn_t rpwwn);
585bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
586 struct bfa_itnim_attr_s *attr);
587bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
588 struct bfa_itnim_stats_s *stats);
589bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
590 wwn_t rpwwn);
591struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
592void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
593void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
594void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
595bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
596void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
597void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
598 struct fchs_s *fchs, u16 len);
599
600#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
601 FDMI_TRANS_SPEED_2G | \
602 FDMI_TRANS_SPEED_4G | \
603 FDMI_TRANS_SPEED_8G)
604
605/*
606 * HBA Attribute Block : BFA internal representation. Note : Some variable
607 * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
608 * on this the size has been reduced to 16 bytes from the standard's 64 bytes.
609 */
610struct bfa_fcs_fdmi_hba_attr_s {
611 wwn_t node_name;
612 u8 manufacturer[64];
613 u8 serial_num[64];
614 u8 model[16];
615 u8 model_desc[256];
616 u8 hw_version[8];
617 u8 driver_version[8];
618 u8 option_rom_ver[BFA_VERSION_LEN];
619 u8 fw_version[8];
620 u8 os_name[256];
621 u32 max_ct_pyld;
622};
623
624/*
625 * Port Attribute Block
626 */
627struct bfa_fcs_fdmi_port_attr_s {
628 u8 supp_fc4_types[32]; /* supported FC4 types */
629 u32 supp_speed; /* supported speed */
630 u32 curr_speed; /* current Speed */
631 u32 max_frm_size; /* max frame size */
632 u8 os_device_name[256]; /* OS device Name */
633 u8 host_name[256]; /* host name */
634};
635
636struct bfa_fcs_stats_s {
637 struct {
638 u32 untagged; /* untagged receive frames */
639 u32 tagged; /* tagged receive frames */
640 u32 vfid_unknown; /* VF id is unknown */
641 } uf;
642};
643
644struct bfa_fcs_driver_info_s {
645 u8 version[BFA_VERSION_LEN]; /* Driver Version */
646 u8 host_machine_name[BFA_FCS_OS_STR_LEN];
647 u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
648 u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
649 u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
650};
651
652struct bfa_fcs_s {
653 struct bfa_s *bfa; /* corresponding BFA bfa instance */
654 struct bfad_s *bfad; /* corresponding BDA driver instance */
655 struct bfa_trc_mod_s *trcmod; /* tracing module */
656 bfa_boolean_t vf_enabled; /* VF mode is enabled */
657 bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
658 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
659 u16 port_vfid; /* port default VF ID */
660 struct bfa_fcs_driver_info_s driver_info;
661 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
662 struct bfa_fcs_stats_s stats; /* FCS statistics */
663 struct bfa_wc_s wc; /* waiting counter */
664};
665
666/*
667 * bfa fcs API functions
668 */
669void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
670 struct bfad_s *bfad,
671 bfa_boolean_t min_cfg);
672void bfa_fcs_init(struct bfa_fcs_s *fcs);
673void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
674 struct bfa_fcs_driver_info_s *driver_info);
675void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
676void bfa_fcs_exit(struct bfa_fcs_s *fcs);
677void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
678void bfa_fcs_start(struct bfa_fcs_s *fcs);
679
680/**
681 * bfa fcs vf public functions
682 */
683bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
684bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
685bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
686 u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
687 struct bfad_vf_s *vf_drv);
688bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
689void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
690void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
691void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
692void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
693 struct bfa_vf_stats_s *vf_stats);
694void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
695void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
696bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
697u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
698
699/*
700 * fabric protected interface functions
701 */
702void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
703void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
704void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
705void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
706void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
707void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
708void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
709 struct bfa_fcs_vport_s *vport);
710void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
711 struct bfa_fcs_vport_s *vport);
712int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
713struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
714 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
715void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
716void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
717 struct fchs_s *fchs, u16 len);
718bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
719bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
720enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
721void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
722void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
723bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
724 struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
725 struct bfad_vf_s *vf_drv);
726void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
727 wwn_t fabric_name);
728u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
729void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
730void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
731
732/**
733 * BFA FCS callback interfaces
734 */
735
736/**
737 * fcb Main fcs callbacks
738 */
739
740struct bfad_port_s;
741struct bfad_vf_s;
742struct bfad_vport_s;
743struct bfad_rport_s;
744
745/**
746 * lport callbacks
747 */
748struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
749 struct bfa_fcs_lport_s *port,
750 enum bfa_lport_role roles,
751 struct bfad_vf_s *vf_drv,
752 struct bfad_vport_s *vp_drv);
753void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
754 struct bfad_vf_s *vf_drv,
755 struct bfad_vport_s *vp_drv);
756
757/**
758 * vport callbacks
759 */
760void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
761
762/**
763 * rport callbacks
764 */
765bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
766 struct bfa_fcs_rport_s **rport,
767 struct bfad_rport_s **rport_drv);
768
769/**
770 * itnim callbacks
771 */
772void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
773 struct bfad_itnim_s **itnim_drv);
774void bfa_fcb_itnim_free(struct bfad_s *bfad,
775 struct bfad_itnim_s *itnim_drv);
776void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
777void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
778
779#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 6b8976ad22fa..569dfefab70d 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,36 +19,24 @@
19 * fcpim.c - FCP initiator mode i-t nexus state machine 19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */ 20 */
21 21
22#include <bfa.h> 22#include "bfa_fcs.h"
23#include <bfa_svc.h> 23#include "bfa_fcbuild.h"
24#include "fcs_fcpim.h" 24#include "bfad_drv.h"
25#include "fcs_rport.h" 25#include "bfad_im.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30#include <fcs/bfa_fcs_fcpim.h>
31#include <fcb/bfa_fcb_fcpim.h>
32#include <aen/bfa_aen_itnim.h>
33 26
34BFA_TRC_FILE(FCS, FCPIM); 27BFA_TRC_FILE(FCS, FCPIM);
35 28
36/* 29/*
37 * forward declarations 30 * forward declarations
38 */ 31 */
39static void bfa_fcs_itnim_timeout(void *arg); 32static void bfa_fcs_itnim_timeout(void *arg);
40static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); 33static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
41static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, 34static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced); 35 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_itnim_prli_response(void *fcsarg, 36static void bfa_fcs_itnim_prli_response(void *fcsarg,
44 struct bfa_fcxp_s *fcxp, 37 struct bfa_fcxp_s *fcxp, void *cbarg,
45 void *cbarg, 38 bfa_status_t req_status, u32 rsp_len,
46 bfa_status_t req_status, 39 u32 resid_len, struct fchs_s *rsp_fchs);
47 u32 rsp_len,
48 u32 resid_len,
49 struct fchs_s *rsp_fchs);
50static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
51 enum bfa_itnim_aen_event event);
52 40
53/** 41/**
54 * fcs_itnim_sm FCS itnim state machine events 42 * fcs_itnim_sm FCS itnim state machine events
@@ -61,28 +49,28 @@ enum bfa_fcs_itnim_event {
61 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */ 49 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
62 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */ 50 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
63 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */ 51 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
64 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */ 52 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
65 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */ 53 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
66 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ 54 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
67 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ 55 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
68 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ 56 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
69}; 57};
70 58
71static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 59static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
72 enum bfa_fcs_itnim_event event); 60 enum bfa_fcs_itnim_event event);
73static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, 61static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
74 enum bfa_fcs_itnim_event event); 62 enum bfa_fcs_itnim_event event);
75static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, 63static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
76 enum bfa_fcs_itnim_event event); 64 enum bfa_fcs_itnim_event event);
77static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, 65static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
78 enum bfa_fcs_itnim_event event); 66 enum bfa_fcs_itnim_event event);
79static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, 67static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
80 enum bfa_fcs_itnim_event event); 68 enum bfa_fcs_itnim_event event);
81static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, 69static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
82 enum bfa_fcs_itnim_event event); 70 enum bfa_fcs_itnim_event event);
83static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, 71static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
84 enum bfa_fcs_itnim_event event); 72 enum bfa_fcs_itnim_event event);
85static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, 73static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
86 enum bfa_fcs_itnim_event event); 74 enum bfa_fcs_itnim_event event);
87 75
88static struct bfa_sm_table_s itnim_sm_table[] = { 76static struct bfa_sm_table_s itnim_sm_table[] = {
@@ -102,7 +90,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
102 90
103static void 91static void
104bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 92bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
105 enum bfa_fcs_itnim_event event) 93 enum bfa_fcs_itnim_event event)
106{ 94{
107 bfa_trc(itnim->fcs, itnim->rport->pwwn); 95 bfa_trc(itnim->fcs, itnim->rport->pwwn);
108 bfa_trc(itnim->fcs, event); 96 bfa_trc(itnim->fcs, event);
@@ -134,7 +122,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
134 122
135static void 123static void
136bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, 124bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
137 enum bfa_fcs_itnim_event event) 125 enum bfa_fcs_itnim_event event)
138{ 126{
139 bfa_trc(itnim->fcs, itnim->rport->pwwn); 127 bfa_trc(itnim->fcs, itnim->rport->pwwn);
140 bfa_trc(itnim->fcs, event); 128 bfa_trc(itnim->fcs, event);
@@ -168,7 +156,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
168 156
169static void 157static void
170bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, 158bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
171 enum bfa_fcs_itnim_event event) 159 enum bfa_fcs_itnim_event event)
172{ 160{
173 bfa_trc(itnim->fcs, itnim->rport->pwwn); 161 bfa_trc(itnim->fcs, itnim->rport->pwwn);
174 bfa_trc(itnim->fcs, event); 162 bfa_trc(itnim->fcs, event);
@@ -233,6 +221,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
233 } 221 }
234 break; 222 break;
235 223
224
236 case BFA_FCS_ITNIM_SM_OFFLINE: 225 case BFA_FCS_ITNIM_SM_OFFLINE:
237 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 226 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
238 bfa_timer_stop(&itnim->timer); 227 bfa_timer_stop(&itnim->timer);
@@ -259,6 +248,10 @@ static void
259bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, 248bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
260 enum bfa_fcs_itnim_event event) 249 enum bfa_fcs_itnim_event event)
261{ 250{
251 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
252 char lpwwn_buf[BFA_STRING_32];
253 char rpwwn_buf[BFA_STRING_32];
254
262 bfa_trc(itnim->fcs, itnim->rport->pwwn); 255 bfa_trc(itnim->fcs, itnim->rport->pwwn);
263 bfa_trc(itnim->fcs, event); 256 bfa_trc(itnim->fcs, event);
264 257
@@ -266,7 +259,11 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
266 case BFA_FCS_ITNIM_SM_HCB_ONLINE: 259 case BFA_FCS_ITNIM_SM_HCB_ONLINE:
267 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); 260 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
268 bfa_fcb_itnim_online(itnim->itnim_drv); 261 bfa_fcb_itnim_online(itnim->itnim_drv);
269 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); 262 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
263 wwn2str(rpwwn_buf, itnim->rport->pwwn);
264 BFA_LOG(KERN_INFO, bfad, log_level,
265 "Target (WWN = %s) is online for initiator (WWN = %s)\n",
266 rpwwn_buf, lpwwn_buf);
270 break; 267 break;
271 268
272 case BFA_FCS_ITNIM_SM_OFFLINE: 269 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -287,8 +284,12 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
287 284
288static void 285static void
289bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, 286bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
290 enum bfa_fcs_itnim_event event) 287 enum bfa_fcs_itnim_event event)
291{ 288{
289 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
290 char lpwwn_buf[BFA_STRING_32];
291 char rpwwn_buf[BFA_STRING_32];
292
292 bfa_trc(itnim->fcs, itnim->rport->pwwn); 293 bfa_trc(itnim->fcs, itnim->rport->pwwn);
293 bfa_trc(itnim->fcs, event); 294 bfa_trc(itnim->fcs, event);
294 295
@@ -297,10 +298,16 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
297 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); 298 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
298 bfa_fcb_itnim_offline(itnim->itnim_drv); 299 bfa_fcb_itnim_offline(itnim->itnim_drv);
299 bfa_itnim_offline(itnim->bfa_itnim); 300 bfa_itnim_offline(itnim->bfa_itnim);
300 if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) 301 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
301 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); 302 wwn2str(rpwwn_buf, itnim->rport->pwwn);
303 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
304 BFA_LOG(KERN_ERR, bfad, log_level,
305 "Target (WWN = %s) connectivity lost for "
306 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
302 else 307 else
303 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); 308 BFA_LOG(KERN_INFO, bfad, log_level,
309 "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
310 rpwwn_buf, lpwwn_buf);
304 break; 311 break;
305 312
306 case BFA_FCS_ITNIM_SM_DELETE: 313 case BFA_FCS_ITNIM_SM_DELETE:
@@ -343,7 +350,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
343 */ 350 */
344static void 351static void
345bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, 352bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
346 enum bfa_fcs_itnim_event event) 353 enum bfa_fcs_itnim_event event)
347{ 354{
348 bfa_trc(itnim->fcs, itnim->rport->pwwn); 355 bfa_trc(itnim->fcs, itnim->rport->pwwn);
349 bfa_trc(itnim->fcs, event); 356 bfa_trc(itnim->fcs, event);
@@ -369,71 +376,34 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
369 } 376 }
370} 377}
371 378
372
373
374/**
375 * itnim_private FCS ITNIM private interfaces
376 */
377
378static void
379bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
380 enum bfa_itnim_aen_event event)
381{
382 struct bfa_fcs_rport_s *rport = itnim->rport;
383 union bfa_aen_data_u aen_data;
384 struct bfa_log_mod_s *logmod = rport->fcs->logm;
385 wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
386 wwn_t rpwwn = rport->pwwn;
387 char lpwwn_ptr[BFA_STRING_32];
388 char rpwwn_ptr[BFA_STRING_32];
389
390 /*
391 * Don't post events for well known addresses
392 */
393 if (BFA_FCS_PID_IS_WKA(rport->pid))
394 return;
395
396 wwn2str(lpwwn_ptr, lpwwn);
397 wwn2str(rpwwn_ptr, rpwwn);
398
399 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
400 rpwwn_ptr, lpwwn_ptr);
401
402 aen_data.itnim.vf_id = rport->port->fabric->vf_id;
403 aen_data.itnim.ppwwn =
404 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs));
405 aen_data.itnim.lpwwn = lpwwn;
406 aen_data.itnim.rpwwn = rpwwn;
407}
408
409static void 379static void
410bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) 380bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
411{ 381{
412 struct bfa_fcs_itnim_s *itnim = itnim_cbarg; 382 struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
413 struct bfa_fcs_rport_s *rport = itnim->rport; 383 struct bfa_fcs_rport_s *rport = itnim->rport;
414 struct bfa_fcs_port_s *port = rport->port; 384 struct bfa_fcs_lport_s *port = rport->port;
415 struct fchs_s fchs; 385 struct fchs_s fchs;
416 struct bfa_fcxp_s *fcxp; 386 struct bfa_fcxp_s *fcxp;
417 int len; 387 int len;
418 388
419 bfa_trc(itnim->fcs, itnim->rport->pwwn); 389 bfa_trc(itnim->fcs, itnim->rport->pwwn);
420 390
421 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 391 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
422 if (!fcxp) { 392 if (!fcxp) {
423 itnim->stats.fcxp_alloc_wait++; 393 itnim->stats.fcxp_alloc_wait++;
424 bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, 394 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
425 bfa_fcs_itnim_send_prli, itnim); 395 bfa_fcs_itnim_send_prli, itnim);
426 return; 396 return;
427 } 397 }
428 itnim->fcxp = fcxp; 398 itnim->fcxp = fcxp;
429 399
430 len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, 400 len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
431 bfa_fcs_port_get_fcid(port), 0); 401 itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
432 402
433 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, 403 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
434 BFA_FALSE, FC_CLASS_3, len, &fchs, 404 BFA_FALSE, FC_CLASS_3, len, &fchs,
435 bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, 405 bfa_fcs_itnim_prli_response, (void *)itnim,
436 FC_ELS_TOV); 406 FC_MAX_PDUSZ, FC_ELS_TOV);
437 407
438 itnim->stats.prli_sent++; 408 itnim->stats.prli_sent++;
439 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); 409 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
@@ -444,10 +414,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
444 bfa_status_t req_status, u32 rsp_len, 414 bfa_status_t req_status, u32 rsp_len,
445 u32 resid_len, struct fchs_s *rsp_fchs) 415 u32 resid_len, struct fchs_s *rsp_fchs)
446{ 416{
447 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; 417 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
448 struct fc_els_cmd_s *els_cmd; 418 struct fc_els_cmd_s *els_cmd;
449 struct fc_prli_s *prli_resp; 419 struct fc_prli_s *prli_resp;
450 struct fc_ls_rjt_s *ls_rjt; 420 struct fc_ls_rjt_s *ls_rjt;
451 struct fc_prli_params_s *sparams; 421 struct fc_prli_params_s *sparams;
452 422
453 bfa_trc(itnim->fcs, req_status); 423 bfa_trc(itnim->fcs, req_status);
@@ -475,7 +445,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
475 if (prli_resp->parampage.servparams.initiator) { 445 if (prli_resp->parampage.servparams.initiator) {
476 bfa_trc(itnim->fcs, prli_resp->parampage.type); 446 bfa_trc(itnim->fcs, prli_resp->parampage.type);
477 itnim->rport->scsi_function = 447 itnim->rport->scsi_function =
478 BFA_RPORT_INITIATOR; 448 BFA_RPORT_INITIATOR;
479 itnim->stats.prli_rsp_acc++; 449 itnim->stats.prli_rsp_acc++;
480 bfa_sm_send_event(itnim, 450 bfa_sm_send_event(itnim,
481 BFA_FCS_ITNIM_SM_RSP_OK); 451 BFA_FCS_ITNIM_SM_RSP_OK);
@@ -488,10 +458,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
488 itnim->rport->scsi_function = BFA_RPORT_TARGET; 458 itnim->rport->scsi_function = BFA_RPORT_TARGET;
489 459
490 sparams = &prli_resp->parampage.servparams; 460 sparams = &prli_resp->parampage.servparams;
491 itnim->seq_rec = sparams->retry; 461 itnim->seq_rec = sparams->retry;
492 itnim->rec_support = sparams->rec_support; 462 itnim->rec_support = sparams->rec_support;
493 itnim->task_retry_id = sparams->task_retry_id; 463 itnim->task_retry_id = sparams->task_retry_id;
494 itnim->conf_comp = sparams->confirm; 464 itnim->conf_comp = sparams->confirm;
495 465
496 itnim->stats.prli_rsp_acc++; 466 itnim->stats.prli_rsp_acc++;
497 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); 467 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
@@ -509,7 +479,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
509static void 479static void
510bfa_fcs_itnim_timeout(void *arg) 480bfa_fcs_itnim_timeout(void *arg)
511{ 481{
512 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg; 482 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
513 483
514 itnim->stats.timeout++; 484 itnim->stats.timeout++;
515 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); 485 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
@@ -529,16 +499,16 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
529 */ 499 */
530 500
531/** 501/**
532 * Called by rport when a new rport is created. 502 * Called by rport when a new rport is created.
533 * 503 *
534 * @param[in] rport - remote port. 504 * @param[in] rport - remote port.
535 */ 505 */
536struct bfa_fcs_itnim_s * 506struct bfa_fcs_itnim_s *
537bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) 507bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
538{ 508{
539 struct bfa_fcs_port_s *port = rport->port; 509 struct bfa_fcs_lport_s *port = rport->port;
540 struct bfa_fcs_itnim_s *itnim; 510 struct bfa_fcs_itnim_s *itnim;
541 struct bfad_itnim_s *itnim_drv; 511 struct bfad_itnim_s *itnim_drv;
542 struct bfa_itnim_s *bfa_itnim; 512 struct bfa_itnim_s *bfa_itnim;
543 513
544 /* 514 /*
@@ -560,7 +530,8 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
560 /* 530 /*
561 * call BFA to create the itnim 531 * call BFA to create the itnim
562 */ 532 */
563 bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim); 533 bfa_itnim =
534 bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
564 535
565 if (bfa_itnim == NULL) { 536 if (bfa_itnim == NULL) {
566 bfa_trc(port->fcs, rport->pwwn); 537 bfa_trc(port->fcs, rport->pwwn);
@@ -569,10 +540,10 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
569 return NULL; 540 return NULL;
570 } 541 }
571 542
572 itnim->bfa_itnim = bfa_itnim; 543 itnim->bfa_itnim = bfa_itnim;
573 itnim->seq_rec = BFA_FALSE; 544 itnim->seq_rec = BFA_FALSE;
574 itnim->rec_support = BFA_FALSE; 545 itnim->rec_support = BFA_FALSE;
575 itnim->conf_comp = BFA_FALSE; 546 itnim->conf_comp = BFA_FALSE;
576 itnim->task_retry_id = BFA_FALSE; 547 itnim->task_retry_id = BFA_FALSE;
577 548
578 /* 549 /*
@@ -584,7 +555,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
584} 555}
585 556
586/** 557/**
587 * Called by rport to delete the instance of FCPIM. 558 * Called by rport to delete the instance of FCPIM.
588 * 559 *
589 * @param[in] rport - remote port. 560 * @param[in] rport - remote port.
590 */ 561 */
@@ -607,8 +578,8 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
607 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); 578 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
608 } else { 579 } else {
609 /* 580 /*
610 * For well known addresses, we set the itnim to initiator 581 * For well known addresses, we set the itnim to initiator
611 * state 582 * state
612 */ 583 */
613 itnim->stats.initiator++; 584 itnim->stats.initiator++;
614 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 585 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
@@ -651,7 +622,6 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
651 622
652 default: 623 default:
653 return BFA_STATUS_NO_FCPIM_NEXUS; 624 return BFA_STATUS_NO_FCPIM_NEXUS;
654
655 } 625 }
656} 626}
657 627
@@ -661,7 +631,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
661void 631void
662bfa_cb_itnim_online(void *cbarg) 632bfa_cb_itnim_online(void *cbarg)
663{ 633{
664 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; 634 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
665 635
666 bfa_trc(itnim->fcs, itnim->rport->pwwn); 636 bfa_trc(itnim->fcs, itnim->rport->pwwn);
667 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
@@ -673,7 +643,7 @@ bfa_cb_itnim_online(void *cbarg)
673void 643void
674bfa_cb_itnim_offline(void *cb_arg) 644bfa_cb_itnim_offline(void *cb_arg)
675{ 645{
676 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 646 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
677 647
678 bfa_trc(itnim->fcs, itnim->rport->pwwn); 648 bfa_trc(itnim->fcs, itnim->rport->pwwn);
679 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
@@ -686,7 +656,7 @@ bfa_cb_itnim_offline(void *cb_arg)
686void 656void
687bfa_cb_itnim_tov_begin(void *cb_arg) 657bfa_cb_itnim_tov_begin(void *cb_arg)
688{ 658{
689 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 659 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
690 660
691 bfa_trc(itnim->fcs, itnim->rport->pwwn); 661 bfa_trc(itnim->fcs, itnim->rport->pwwn);
692} 662}
@@ -697,14 +667,15 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
697void 667void
698bfa_cb_itnim_tov(void *cb_arg) 668bfa_cb_itnim_tov(void *cb_arg)
699{ 669{
700 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 670 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
671 struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
701 672
702 bfa_trc(itnim->fcs, itnim->rport->pwwn); 673 bfa_trc(itnim->fcs, itnim->rport->pwwn);
703 bfa_fcb_itnim_tov(itnim->itnim_drv); 674 itnim_drv->state = ITNIM_STATE_TIMEOUT;
704} 675}
705 676
706/** 677/**
707 * BFA notification to FCS/driver for second level error recovery. 678 * BFA notification to FCS/driver for second level error recovery.
708 * 679 *
709 * Atleast one I/O request has timedout and target is unresponsive to 680 * Atleast one I/O request has timedout and target is unresponsive to
710 * repeated abort requests. Second level error recovery should be initiated 681 * repeated abort requests. Second level error recovery should be initiated
@@ -713,7 +684,7 @@ bfa_cb_itnim_tov(void *cb_arg)
713void 684void
714bfa_cb_itnim_sler(void *cb_arg) 685bfa_cb_itnim_sler(void *cb_arg)
715{ 686{
716 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 687 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
717 688
718 itnim->stats.sler++; 689 itnim->stats.sler++;
719 bfa_trc(itnim->fcs, itnim->rport->pwwn); 690 bfa_trc(itnim->fcs, itnim->rport->pwwn);
@@ -721,7 +692,7 @@ bfa_cb_itnim_sler(void *cb_arg)
721} 692}
722 693
723struct bfa_fcs_itnim_s * 694struct bfa_fcs_itnim_s *
724bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) 695bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
725{ 696{
726 struct bfa_fcs_rport_s *rport; 697 struct bfa_fcs_rport_s *rport;
727 rport = bfa_fcs_rport_lookup(port, rpwwn); 698 rport = bfa_fcs_rport_lookup(port, rpwwn);
@@ -734,7 +705,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
734} 705}
735 706
736bfa_status_t 707bfa_status_t
737bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, 708bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
738 struct bfa_itnim_attr_s *attr) 709 struct bfa_itnim_attr_s *attr)
739{ 710{
740 struct bfa_fcs_itnim_s *itnim = NULL; 711 struct bfa_fcs_itnim_s *itnim = NULL;
@@ -744,18 +715,16 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
744 if (itnim == NULL) 715 if (itnim == NULL)
745 return BFA_STATUS_NO_FCPIM_NEXUS; 716 return BFA_STATUS_NO_FCPIM_NEXUS;
746 717
747 attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); 718 attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
748 attr->retry = itnim->seq_rec; 719 attr->retry = itnim->seq_rec;
749 attr->rec_support = itnim->rec_support; 720 attr->rec_support = itnim->rec_support;
750 attr->conf_comp = itnim->conf_comp; 721 attr->conf_comp = itnim->conf_comp;
751 attr->task_retry_id = itnim->task_retry_id; 722 attr->task_retry_id = itnim->task_retry_id;
752 bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));
753
754 return BFA_STATUS_OK; 723 return BFA_STATUS_OK;
755} 724}
756 725
757bfa_status_t 726bfa_status_t
758bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, 727bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
759 struct bfa_itnim_stats_s *stats) 728 struct bfa_itnim_stats_s *stats)
760{ 729{
761 struct bfa_fcs_itnim_s *itnim = NULL; 730 struct bfa_fcs_itnim_s *itnim = NULL;
@@ -773,7 +742,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
773} 742}
774 743
775bfa_status_t 744bfa_status_t
776bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn) 745bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
777{ 746{
778 struct bfa_fcs_itnim_s *itnim = NULL; 747 struct bfa_fcs_itnim_s *itnim = NULL;
779 748
@@ -789,10 +758,10 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
789} 758}
790 759
791void 760void
792bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, 761bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
793 u16 len) 762 struct fchs_s *fchs, u16 len)
794{ 763{
795 struct fc_els_cmd_s *els_cmd; 764 struct fc_els_cmd_s *els_cmd;
796 765
797 bfa_trc(itnim->fcs, fchs->type); 766 bfa_trc(itnim->fcs, fchs->type);
798 767
@@ -812,13 +781,3 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
812 bfa_assert(0); 781 bfa_assert(0);
813 } 782 }
814} 783}
815
816void
817bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim)
818{
819}
820
821void
822bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
823{
824}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 35df20e68a52..b522bf30247a 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -16,23 +16,13 @@
16 */ 16 */
17 17
18/** 18/**
19 * bfa_fcs_port.c BFA FCS port 19 * bfa_fcs_lport.c BFA FCS port
20 */ 20 */
21 21
22#include <fcs/bfa_fcs.h> 22#include "bfa_fcs.h"
23#include <fcs/bfa_fcs_lport.h> 23#include "bfa_fcbuild.h"
24#include <fcs/bfa_fcs_rport.h> 24#include "bfa_fc.h"
25#include <fcb/bfa_fcb_port.h> 25#include "bfad_drv.h"
26#include <bfa_svc.h>
27#include <log/bfa_log_fcs.h>
28#include "fcs.h"
29#include "fcs_lport.h"
30#include "fcs_vport.h"
31#include "fcs_rport.h"
32#include "fcs_fcxp.h"
33#include "fcs_trcmod.h"
34#include "lport_priv.h"
35#include <aen/bfa_aen_lport.h>
36 26
37BFA_TRC_FILE(FCS, PORT); 27BFA_TRC_FILE(FCS, PORT);
38 28
@@ -40,49 +30,53 @@ BFA_TRC_FILE(FCS, PORT);
40 * Forward declarations 30 * Forward declarations
41 */ 31 */
42 32
43static void bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port, 33static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
44 enum bfa_lport_aen_event event); 34 struct fchs_s *rx_fchs, u8 reason_code,
45static void bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, 35 u8 reason_code_expl);
46 struct fchs_s *rx_fchs, u8 reason_code, 36static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
47 u8 reason_code_expl); 37 struct fchs_s *rx_fchs, struct fc_logi_s *plogi);
48static void bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, 38static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port);
49 struct fchs_s *rx_fchs, 39static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port);
50 struct fc_logi_s *plogi); 40static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port);
51static void bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port); 41static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port);
52static void bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port); 42static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port);
53static void bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port); 43static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port);
54static void bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port); 44static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port,
55static void bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port);
56static void bfa_fcs_port_deleted(struct bfa_fcs_port_s *port);
57static void bfa_fcs_port_echo(struct bfa_fcs_port_s *port,
58 struct fchs_s *rx_fchs, 45 struct fchs_s *rx_fchs,
59 struct fc_echo_s *echo, u16 len); 46 struct fc_echo_s *echo, u16 len);
60static void bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, 47static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port,
61 struct fchs_s *rx_fchs, 48 struct fchs_s *rx_fchs,
62 struct fc_rnid_cmd_s *rnid, u16 len); 49 struct fc_rnid_cmd_s *rnid, u16 len);
63static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, 50static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
64 struct fc_rnid_general_topology_data_s *gen_topo_data); 51 struct fc_rnid_general_topology_data_s *gen_topo_data);
65 52
53static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port);
54static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port);
55static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port);
56
57static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
58static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
59static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
60
66static struct { 61static struct {
67 void (*init) (struct bfa_fcs_port_s *port); 62 void (*init) (struct bfa_fcs_lport_s *port);
68 void (*online) (struct bfa_fcs_port_s *port); 63 void (*online) (struct bfa_fcs_lport_s *port);
69 void (*offline) (struct bfa_fcs_port_s *port); 64 void (*offline) (struct bfa_fcs_lport_s *port);
70} __port_action[] = { 65} __port_action[] = {
71 { 66 {
72 bfa_fcs_port_unknown_init, bfa_fcs_port_unknown_online, 67 bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
73 bfa_fcs_port_unknown_offline}, { 68 bfa_fcs_lport_unknown_offline}, {
74 bfa_fcs_port_fab_init, bfa_fcs_port_fab_online, 69 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
75 bfa_fcs_port_fab_offline}, { 70 bfa_fcs_lport_fab_offline}, {
76 bfa_fcs_port_loop_init, bfa_fcs_port_loop_online, 71 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
77 bfa_fcs_port_loop_offline}, { 72 bfa_fcs_lport_n2n_offline},
78bfa_fcs_port_n2n_init, bfa_fcs_port_n2n_online, 73 };
79 bfa_fcs_port_n2n_offline},};
80 74
81/** 75/**
82 * fcs_port_sm FCS logical port state machine 76 * fcs_port_sm FCS logical port state machine
83 */ 77 */
84 78
85enum bfa_fcs_port_event { 79enum bfa_fcs_lport_event {
86 BFA_FCS_PORT_SM_CREATE = 1, 80 BFA_FCS_PORT_SM_CREATE = 1,
87 BFA_FCS_PORT_SM_ONLINE = 2, 81 BFA_FCS_PORT_SM_ONLINE = 2,
88 BFA_FCS_PORT_SM_OFFLINE = 3, 82 BFA_FCS_PORT_SM_OFFLINE = 3,
@@ -90,27 +84,28 @@ enum bfa_fcs_port_event {
90 BFA_FCS_PORT_SM_DELRPORT = 5, 84 BFA_FCS_PORT_SM_DELRPORT = 5,
91}; 85};
92 86
93static void bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, 87static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
94 enum bfa_fcs_port_event event); 88 enum bfa_fcs_lport_event event);
95static void bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, 89static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
96 enum bfa_fcs_port_event event); 90 enum bfa_fcs_lport_event event);
97static void bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, 91static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port,
98 enum bfa_fcs_port_event event); 92 enum bfa_fcs_lport_event event);
99static void bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, 93static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
100 enum bfa_fcs_port_event event); 94 enum bfa_fcs_lport_event event);
101static void bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, 95static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
102 enum bfa_fcs_port_event event); 96 enum bfa_fcs_lport_event event);
103 97
104static void 98static void
105bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, 99bfa_fcs_lport_sm_uninit(
106 enum bfa_fcs_port_event event) 100 struct bfa_fcs_lport_s *port,
101 enum bfa_fcs_lport_event event)
107{ 102{
108 bfa_trc(port->fcs, port->port_cfg.pwwn); 103 bfa_trc(port->fcs, port->port_cfg.pwwn);
109 bfa_trc(port->fcs, event); 104 bfa_trc(port->fcs, event);
110 105
111 switch (event) { 106 switch (event) {
112 case BFA_FCS_PORT_SM_CREATE: 107 case BFA_FCS_PORT_SM_CREATE:
113 bfa_sm_set_state(port, bfa_fcs_port_sm_init); 108 bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
114 break; 109 break;
115 110
116 default: 111 default:
@@ -119,20 +114,21 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
119} 114}
120 115
121static void 116static void
122bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event) 117bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
118 enum bfa_fcs_lport_event event)
123{ 119{
124 bfa_trc(port->fcs, port->port_cfg.pwwn); 120 bfa_trc(port->fcs, port->port_cfg.pwwn);
125 bfa_trc(port->fcs, event); 121 bfa_trc(port->fcs, event);
126 122
127 switch (event) { 123 switch (event) {
128 case BFA_FCS_PORT_SM_ONLINE: 124 case BFA_FCS_PORT_SM_ONLINE:
129 bfa_sm_set_state(port, bfa_fcs_port_sm_online); 125 bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
130 bfa_fcs_port_online_actions(port); 126 bfa_fcs_lport_online_actions(port);
131 break; 127 break;
132 128
133 case BFA_FCS_PORT_SM_DELETE: 129 case BFA_FCS_PORT_SM_DELETE:
134 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 130 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
135 bfa_fcs_port_deleted(port); 131 bfa_fcs_lport_deleted(port);
136 break; 132 break;
137 133
138 case BFA_FCS_PORT_SM_OFFLINE: 134 case BFA_FCS_PORT_SM_OFFLINE:
@@ -144,19 +140,20 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
144} 140}
145 141
146static void 142static void
147bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, 143bfa_fcs_lport_sm_online(
148 enum bfa_fcs_port_event event) 144 struct bfa_fcs_lport_s *port,
145 enum bfa_fcs_lport_event event)
149{ 146{
150 struct bfa_fcs_rport_s *rport; 147 struct bfa_fcs_rport_s *rport;
151 struct list_head *qe, *qen; 148 struct list_head *qe, *qen;
152 149
153 bfa_trc(port->fcs, port->port_cfg.pwwn); 150 bfa_trc(port->fcs, port->port_cfg.pwwn);
154 bfa_trc(port->fcs, event); 151 bfa_trc(port->fcs, event);
155 152
156 switch (event) { 153 switch (event) {
157 case BFA_FCS_PORT_SM_OFFLINE: 154 case BFA_FCS_PORT_SM_OFFLINE:
158 bfa_sm_set_state(port, bfa_fcs_port_sm_offline); 155 bfa_sm_set_state(port, bfa_fcs_lport_sm_offline);
159 bfa_fcs_port_offline_actions(port); 156 bfa_fcs_lport_offline_actions(port);
160 break; 157 break;
161 158
162 case BFA_FCS_PORT_SM_DELETE: 159 case BFA_FCS_PORT_SM_DELETE:
@@ -164,12 +161,12 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
164 __port_action[port->fabric->fab_type].offline(port); 161 __port_action[port->fabric->fab_type].offline(port);
165 162
166 if (port->num_rports == 0) { 163 if (port->num_rports == 0) {
167 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 164 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
168 bfa_fcs_port_deleted(port); 165 bfa_fcs_lport_deleted(port);
169 } else { 166 } else {
170 bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); 167 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
171 list_for_each_safe(qe, qen, &port->rport_q) { 168 list_for_each_safe(qe, qen, &port->rport_q) {
172 rport = (struct bfa_fcs_rport_s *)qe; 169 rport = (struct bfa_fcs_rport_s *) qe;
173 bfa_fcs_rport_delete(rport); 170 bfa_fcs_rport_delete(rport);
174 } 171 }
175 } 172 }
@@ -184,29 +181,30 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
184} 181}
185 182
186static void 183static void
187bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, 184bfa_fcs_lport_sm_offline(
188 enum bfa_fcs_port_event event) 185 struct bfa_fcs_lport_s *port,
186 enum bfa_fcs_lport_event event)
189{ 187{
190 struct bfa_fcs_rport_s *rport; 188 struct bfa_fcs_rport_s *rport;
191 struct list_head *qe, *qen; 189 struct list_head *qe, *qen;
192 190
193 bfa_trc(port->fcs, port->port_cfg.pwwn); 191 bfa_trc(port->fcs, port->port_cfg.pwwn);
194 bfa_trc(port->fcs, event); 192 bfa_trc(port->fcs, event);
195 193
196 switch (event) { 194 switch (event) {
197 case BFA_FCS_PORT_SM_ONLINE: 195 case BFA_FCS_PORT_SM_ONLINE:
198 bfa_sm_set_state(port, bfa_fcs_port_sm_online); 196 bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
199 bfa_fcs_port_online_actions(port); 197 bfa_fcs_lport_online_actions(port);
200 break; 198 break;
201 199
202 case BFA_FCS_PORT_SM_DELETE: 200 case BFA_FCS_PORT_SM_DELETE:
203 if (port->num_rports == 0) { 201 if (port->num_rports == 0) {
204 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 202 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
205 bfa_fcs_port_deleted(port); 203 bfa_fcs_lport_deleted(port);
206 } else { 204 } else {
207 bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); 205 bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
208 list_for_each_safe(qe, qen, &port->rport_q) { 206 list_for_each_safe(qe, qen, &port->rport_q) {
209 rport = (struct bfa_fcs_rport_s *)qe; 207 rport = (struct bfa_fcs_rport_s *) qe;
210 bfa_fcs_rport_delete(rport); 208 bfa_fcs_rport_delete(rport);
211 } 209 }
212 } 210 }
@@ -222,8 +220,9 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
222} 220}
223 221
224static void 222static void
225bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, 223bfa_fcs_lport_sm_deleting(
226 enum bfa_fcs_port_event event) 224 struct bfa_fcs_lport_s *port,
225 enum bfa_fcs_lport_event event)
227{ 226{
228 bfa_trc(port->fcs, port->port_cfg.pwwn); 227 bfa_trc(port->fcs, port->port_cfg.pwwn);
229 bfa_trc(port->fcs, event); 228 bfa_trc(port->fcs, event);
@@ -231,8 +230,8 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
231 switch (event) { 230 switch (event) {
232 case BFA_FCS_PORT_SM_DELRPORT: 231 case BFA_FCS_PORT_SM_DELRPORT:
233 if (port->num_rports == 0) { 232 if (port->num_rports == 0) {
234 bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); 233 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
235 bfa_fcs_port_deleted(port); 234 bfa_fcs_lport_deleted(port);
236 } 235 }
237 break; 236 break;
238 237
@@ -241,74 +240,44 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
241 } 240 }
242} 241}
243 242
244
245
246/** 243/**
247 * fcs_port_pvt 244 * fcs_port_pvt
248 */ 245 */
249 246
250/**
251 * Send AEN notification
252 */
253static void
254bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
255 enum bfa_lport_aen_event event)
256{
257 union bfa_aen_data_u aen_data;
258 struct bfa_log_mod_s *logmod = port->fcs->logm;
259 enum bfa_port_role role = port->port_cfg.roles;
260 wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
261 char lpwwn_ptr[BFA_STRING_32];
262 char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
263 { "Initiator", "Target", "IPFC" };
264
265 wwn2str(lpwwn_ptr, lpwwn);
266
267 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
268
269 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
270 role_str[role/2]);
271
272 aen_data.lport.vf_id = port->fabric->vf_id;
273 aen_data.lport.roles = role;
274 aen_data.lport.ppwwn =
275 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
276 aen_data.lport.lpwwn = lpwwn;
277}
278
279/* 247/*
280 * Send a LS reject 248 * Send a LS reject
281 */ 249 */
282static void 250static void
283bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 251bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
284 u8 reason_code, u8 reason_code_expl) 252 u8 reason_code, u8 reason_code_expl)
285{ 253{
286 struct fchs_s fchs; 254 struct fchs_s fchs;
287 struct bfa_fcxp_s *fcxp; 255 struct bfa_fcxp_s *fcxp;
288 struct bfa_rport_s *bfa_rport = NULL; 256 struct bfa_rport_s *bfa_rport = NULL;
289 int len; 257 int len;
290 258
259 bfa_trc(port->fcs, rx_fchs->d_id);
291 bfa_trc(port->fcs, rx_fchs->s_id); 260 bfa_trc(port->fcs, rx_fchs->s_id);
292 261
293 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 262 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
294 if (!fcxp) 263 if (!fcxp)
295 return; 264 return;
296 265
297 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 266 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
298 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 267 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
299 reason_code, reason_code_expl); 268 rx_fchs->ox_id, reason_code, reason_code_expl);
300 269
301 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 270 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
302 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 271 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
303 FC_MAX_PDUSZ, 0); 272 FC_MAX_PDUSZ, 0);
304} 273}
305 274
306/** 275/**
307 * Process incoming plogi from a remote port. 276 * Process incoming plogi from a remote port.
308 */ 277 */
309static void 278static void
310bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 279bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
311 struct fc_logi_s *plogi) 280 struct fchs_s *rx_fchs, struct fc_logi_s *plogi)
312{ 281{
313 struct bfa_fcs_rport_s *rport; 282 struct bfa_fcs_rport_s *rport;
314 283
@@ -328,46 +297,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
328 /* 297 /*
329 * send a LS reject 298 * send a LS reject
330 */ 299 */
331 bfa_fcs_port_send_ls_rjt(port, rx_fchs, 300 bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
332 FC_LS_RJT_RSN_PROTOCOL_ERROR, 301 FC_LS_RJT_RSN_PROTOCOL_ERROR,
333 FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); 302 FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
334 return; 303 return;
335 } 304 }
336 305
337 /** 306 /**
338* Direct Attach P2P mode : verify address assigned by the r-port. 307 * Direct Attach P2P mode : verify address assigned by the r-port.
339 */ 308 */
340 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
341 && 310 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
342 (memcmp 311 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
343 ((void *)&bfa_fcs_port_get_pwwn(port), (void *)&plogi->port_name,
344 sizeof(wwn_t)) < 0)) {
345 if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) { 312 if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
346 /* 313 /* Address assigned to us cannot be a WKA */
347 * Address assigned to us cannot be a WKA 314 bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
348 */
349 bfa_fcs_port_send_ls_rjt(port, rx_fchs,
350 FC_LS_RJT_RSN_PROTOCOL_ERROR, 315 FC_LS_RJT_RSN_PROTOCOL_ERROR,
351 FC_LS_RJT_EXP_INVALID_NPORT_ID); 316 FC_LS_RJT_EXP_INVALID_NPORT_ID);
352 return; 317 return;
353 } 318 }
354 port->pid = rx_fchs->d_id; 319 port->pid = rx_fchs->d_id;
355 } 320 }
356 321
357 /** 322 /**
358 * First, check if we know the device by pwwn. 323 * First, check if we know the device by pwwn.
359 */ 324 */
360 rport = bfa_fcs_port_get_rport_by_pwwn(port, plogi->port_name); 325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
361 if (rport) { 326 if (rport) {
362 /** 327 /**
363 * Direct Attach P2P mode: handle address assigned by the rport. 328 * Direct Attach P2P mode : handle address assigned by r-port.
364 */ 329 */
365 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
366 && 331 (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
367 (memcmp 332 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
368 ((void *)&bfa_fcs_port_get_pwwn(port), 333 port->pid = rx_fchs->d_id;
369 (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
370 port->pid = rx_fchs->d_id;
371 rport->pid = rx_fchs->s_id; 334 rport->pid = rx_fchs->s_id;
372 } 335 }
373 bfa_fcs_rport_plogi(rport, rx_fchs, plogi); 336 bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -377,7 +340,7 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
377 /** 340 /**
378 * Next, lookup rport by PID. 341 * Next, lookup rport by PID.
379 */ 342 */
380 rport = bfa_fcs_port_get_rport_by_pid(port, rx_fchs->s_id); 343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
381 if (!rport) { 344 if (!rport) {
382 /** 345 /**
383 * Inbound PLOGI from a new device. 346 * Inbound PLOGI from a new device.
@@ -416,39 +379,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
416 * Since it does not require a login, it is processed here. 379 * Since it does not require a login, it is processed here.
417 */ 380 */
418static void 381static void
419bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 382bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
420 struct fc_echo_s *echo, u16 rx_len) 383 struct fc_echo_s *echo, u16 rx_len)
421{ 384{
422 struct fchs_s fchs; 385 struct fchs_s fchs;
423 struct bfa_fcxp_s *fcxp; 386 struct bfa_fcxp_s *fcxp;
424 struct bfa_rport_s *bfa_rport = NULL; 387 struct bfa_rport_s *bfa_rport = NULL;
425 int len, pyld_len; 388 int len, pyld_len;
426 389
427 bfa_trc(port->fcs, rx_fchs->s_id); 390 bfa_trc(port->fcs, rx_fchs->s_id);
428 bfa_trc(port->fcs, rx_fchs->d_id); 391 bfa_trc(port->fcs, rx_fchs->d_id);
429 bfa_trc(port->fcs, rx_len);
430 392
431 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 393 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
432 if (!fcxp) 394 if (!fcxp)
433 return; 395 return;
434 396
435 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 397 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
436 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id); 398 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
399 rx_fchs->ox_id);
437 400
438 /* 401 /*
439 * Copy the payload (if any) from the echo frame 402 * Copy the payload (if any) from the echo frame
440 */ 403 */
441 pyld_len = rx_len - sizeof(struct fchs_s); 404 pyld_len = rx_len - sizeof(struct fchs_s);
405 bfa_trc(port->fcs, rx_len);
442 bfa_trc(port->fcs, pyld_len); 406 bfa_trc(port->fcs, pyld_len);
443 407
444 if (pyld_len > len) 408 if (pyld_len > len)
445 memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) + 409 memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
446 sizeof(struct fc_echo_s), (echo + 1), 410 sizeof(struct fc_echo_s), (echo + 1),
447 (pyld_len - sizeof(struct fc_echo_s))); 411 (pyld_len - sizeof(struct fc_echo_s)));
448 412
449 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 413 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
450 BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, 414 BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
451 FC_MAX_PDUSZ, 0); 415 FC_MAX_PDUSZ, 0);
452} 416}
453 417
454/* 418/*
@@ -456,16 +420,16 @@ bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
456 * Since it does not require a login, it is processed here. 420 * Since it does not require a login, it is processed here.
457 */ 421 */
458static void 422static void
459bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, 423bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
460 struct fc_rnid_cmd_s *rnid, u16 rx_len) 424 struct fc_rnid_cmd_s *rnid, u16 rx_len)
461{ 425{
462 struct fc_rnid_common_id_data_s common_id_data; 426 struct fc_rnid_common_id_data_s common_id_data;
463 struct fc_rnid_general_topology_data_s gen_topo_data; 427 struct fc_rnid_general_topology_data_s gen_topo_data;
464 struct fchs_s fchs; 428 struct fchs_s fchs;
465 struct bfa_fcxp_s *fcxp; 429 struct bfa_fcxp_s *fcxp;
466 struct bfa_rport_s *bfa_rport = NULL; 430 struct bfa_rport_s *bfa_rport = NULL;
467 u16 len; 431 u16 len;
468 u32 data_format; 432 u32 data_format;
469 433
470 bfa_trc(port->fcs, rx_fchs->s_id); 434 bfa_trc(port->fcs, rx_fchs->s_id);
471 bfa_trc(port->fcs, rx_fchs->d_id); 435 bfa_trc(port->fcs, rx_fchs->d_id);
@@ -495,28 +459,26 @@ bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
495 /* 459 /*
496 * Copy the Node Id Info 460 * Copy the Node Id Info
497 */ 461 */
498 common_id_data.port_name = bfa_fcs_port_get_pwwn(port); 462 common_id_data.port_name = bfa_fcs_lport_get_pwwn(port);
499 common_id_data.node_name = bfa_fcs_port_get_nwwn(port); 463 common_id_data.node_name = bfa_fcs_lport_get_nwwn(port);
500 464
501 len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 465 len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
502 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 466 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
503 data_format, &common_id_data, &gen_topo_data); 467 rx_fchs->ox_id, data_format, &common_id_data,
468 &gen_topo_data);
504 469
505 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, 470 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
506 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 471 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
507 FC_MAX_PDUSZ, 0); 472 FC_MAX_PDUSZ, 0);
508
509 return;
510} 473}
511 474
512/* 475/*
513 * Fill out General Topolpgy Discovery Data for RNID ELS. 476 * Fill out General Topolpgy Discovery Data for RNID ELS.
514 */ 477 */
515static void 478static void
516bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, 479bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
517 struct fc_rnid_general_topology_data_s *gen_topo_data) 480 struct fc_rnid_general_topology_data_s *gen_topo_data)
518{ 481{
519
520 bfa_os_memset(gen_topo_data, 0, 482 bfa_os_memset(gen_topo_data, 0,
521 sizeof(struct fc_rnid_general_topology_data_s)); 483 sizeof(struct fc_rnid_general_topology_data_s));
522 484
@@ -526,76 +488,111 @@ bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
526} 488}
527 489
528static void 490static void
529bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port) 491bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
530{ 492{
493 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
494 char lpwwn_buf[BFA_STRING_32];
495
531 bfa_trc(port->fcs, port->fabric->oper_type); 496 bfa_trc(port->fcs, port->fabric->oper_type);
532 497
533 __port_action[port->fabric->fab_type].init(port); 498 __port_action[port->fabric->fab_type].init(port);
534 __port_action[port->fabric->fab_type].online(port); 499 __port_action[port->fabric->fab_type].online(port);
535 500
536 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_ONLINE); 501 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
537 bfa_fcb_port_online(port->fcs->bfad, port->port_cfg.roles, 502 BFA_LOG(KERN_INFO, bfad, log_level,
538 port->fabric->vf_drv, (port->vport == NULL) ? 503 "Logical port online: WWN = %s Role = %s\n",
539 NULL : port->vport->vport_drv); 504 lpwwn_buf, "Initiator");
505
506 bfad->bfad_flags |= BFAD_PORT_ONLINE;
540} 507}
541 508
542static void 509static void
543bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port) 510bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
544{ 511{
545 struct list_head *qe, *qen; 512 struct list_head *qe, *qen;
546 struct bfa_fcs_rport_s *rport; 513 struct bfa_fcs_rport_s *rport;
514 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
515 char lpwwn_buf[BFA_STRING_32];
547 516
548 bfa_trc(port->fcs, port->fabric->oper_type); 517 bfa_trc(port->fcs, port->fabric->oper_type);
549 518
550 __port_action[port->fabric->fab_type].offline(port); 519 __port_action[port->fabric->fab_type].offline(port);
551 520
521 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
552 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) 522 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
553 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); 523 BFA_LOG(KERN_ERR, bfad, log_level,
524 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
525 lpwwn_buf, "Initiator");
554 else 526 else
555 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); 527 BFA_LOG(KERN_INFO, bfad, log_level,
556 bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, 528 "Logical port taken offline: WWN = %s Role = %s\n",
557 port->fabric->vf_drv, 529 lpwwn_buf, "Initiator");
558 (port->vport == NULL) ? NULL : port->vport->vport_drv);
559 530
560 list_for_each_safe(qe, qen, &port->rport_q) { 531 list_for_each_safe(qe, qen, &port->rport_q) {
561 rport = (struct bfa_fcs_rport_s *)qe; 532 rport = (struct bfa_fcs_rport_s *) qe;
562 bfa_fcs_rport_offline(rport); 533 bfa_fcs_rport_offline(rport);
563 } 534 }
564} 535}
565 536
566static void 537static void
567bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port) 538bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
568{ 539{
569 bfa_assert(0); 540 bfa_assert(0);
570} 541}
571 542
572static void 543static void
573bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port) 544bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
574{ 545{
575 bfa_assert(0); 546 bfa_assert(0);
576} 547}
577 548
578static void 549static void
579bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port) 550bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
580{ 551{
581 bfa_assert(0); 552 bfa_assert(0);
582} 553}
583 554
584static void 555static void
585bfa_fcs_port_deleted(struct bfa_fcs_port_s *port) 556bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
586{ 557{
587 bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DELETE); 558 struct fchs_s fchs;
559 struct bfa_fcxp_s *fcxp;
560 int len;
588 561
589 /* 562 bfa_trc(port->fcs, rx_fchs->d_id);
590 * Base port will be deleted by the OS driver 563 bfa_trc(port->fcs, rx_fchs->s_id);
591 */ 564
565 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
566 if (!fcxp)
567 return;
568
569 len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
570 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
571 rx_fchs->ox_id, 0);
572
573 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
574 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
575 FC_MAX_PDUSZ, 0);
576}
577static void
578bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
579{
580 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
581 char lpwwn_buf[BFA_STRING_32];
582
583 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
584 BFA_LOG(KERN_INFO, bfad, log_level,
585 "Logical port deleted: WWN = %s Role = %s\n",
586 lpwwn_buf, "Initiator");
587
588 /* Base port will be deleted by the OS driver */
592 if (port->vport) { 589 if (port->vport) {
593 bfa_fcb_port_delete(port->fcs->bfad, port->port_cfg.roles, 590 bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
594 port->fabric->vf_drv, 591 port->fabric->vf_drv,
595 port->vport ? port->vport->vport_drv : NULL); 592 port->vport ? port->vport->vport_drv : NULL);
596 bfa_fcs_vport_delete_comp(port->vport); 593 bfa_fcs_vport_delete_comp(port->vport);
597 } else { 594 } else {
598 bfa_fcs_fabric_port_delete_comp(port->fabric); 595 bfa_fcs_fabric_port_delete_comp(port->fabric);
599 } 596 }
600} 597}
601 598
@@ -608,7 +605,7 @@ bfa_fcs_port_deleted(struct bfa_fcs_port_s *port)
608 * Module initialization 605 * Module initialization
609 */ 606 */
610void 607void
611bfa_fcs_port_modinit(struct bfa_fcs_s *fcs) 608bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
612{ 609{
613 610
614} 611}
@@ -617,25 +614,25 @@ bfa_fcs_port_modinit(struct bfa_fcs_s *fcs)
617 * Module cleanup 614 * Module cleanup
618 */ 615 */
619void 616void
620bfa_fcs_port_modexit(struct bfa_fcs_s *fcs) 617bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
621{ 618{
622 bfa_fcs_modexit_comp(fcs); 619 bfa_fcs_modexit_comp(fcs);
623} 620}
624 621
625/** 622/**
626 * Unsolicited frame receive handling. 623 * Unsolicited frame receive handling.
627 */ 624 */
628void 625void
629bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, 626bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
630 u16 len) 627 struct fchs_s *fchs, u16 len)
631{ 628{
632 u32 pid = fchs->s_id; 629 u32 pid = fchs->s_id;
633 struct bfa_fcs_rport_s *rport = NULL; 630 struct bfa_fcs_rport_s *rport = NULL;
634 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); 631 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
635 632
636 bfa_stats(lport, uf_recvs); 633 bfa_stats(lport, uf_recvs);
637 634
638 if (!bfa_fcs_port_is_online(lport)) { 635 if (!bfa_fcs_lport_is_online(lport)) {
639 bfa_stats(lport, uf_recv_drops); 636 bfa_stats(lport, uf_recv_drops);
640 return; 637 return;
641 } 638 }
@@ -648,7 +645,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
648 */ 645 */
649 if ((fchs->type == FC_TYPE_ELS) && 646 if ((fchs->type == FC_TYPE_ELS) &&
650 (els_cmd->els_code == FC_ELS_PLOGI)) { 647 (els_cmd->els_code == FC_ELS_PLOGI)) {
651 bfa_fcs_port_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); 648 bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
652 return; 649 return;
653 } 650 }
654 651
@@ -656,8 +653,8 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
656 * Handle ECHO separately. 653 * Handle ECHO separately.
657 */ 654 */
658 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) { 655 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
659 bfa_fcs_port_echo(lport, fchs, 656 bfa_fcs_lport_echo(lport, fchs,
660 (struct fc_echo_s *) els_cmd, len); 657 (struct fc_echo_s *)els_cmd, len);
661 return; 658 return;
662 } 659 }
663 660
@@ -665,15 +662,21 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
665 * Handle RNID separately. 662 * Handle RNID separately.
666 */ 663 */
667 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) { 664 if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
668 bfa_fcs_port_rnid(lport, fchs, 665 bfa_fcs_lport_rnid(lport, fchs,
669 (struct fc_rnid_cmd_s *) els_cmd, len); 666 (struct fc_rnid_cmd_s *) els_cmd, len);
670 return; 667 return;
671 } 668 }
672 669
670 if (fchs->type == FC_TYPE_BLS) {
671 if ((fchs->routing == FC_RTG_BASIC_LINK) &&
672 (fchs->cat_info == FC_CAT_ABTS))
673 bfa_fcs_lport_abts_acc(lport, fchs);
674 return;
675 }
673 /** 676 /**
674 * look for a matching remote port ID 677 * look for a matching remote port ID
675 */ 678 */
676 rport = bfa_fcs_port_get_rport_by_pid(lport, pid); 679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
677 if (rport) { 680 if (rport) {
678 bfa_trc(rport->fcs, fchs->s_id); 681 bfa_trc(rport->fcs, fchs->s_id);
679 bfa_trc(rport->fcs, fchs->d_id); 682 bfa_trc(rport->fcs, fchs->d_id);
@@ -694,7 +697,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
694 697
695 bfa_trc(lport->fcs, els_cmd->els_code); 698 bfa_trc(lport->fcs, els_cmd->els_code);
696 if (els_cmd->els_code == FC_ELS_RSCN) { 699 if (els_cmd->els_code == FC_ELS_RSCN) {
697 bfa_fcs_port_scn_process_rscn(lport, fchs, len); 700 bfa_fcs_lport_scn_process_rscn(lport, fchs, len);
698 return; 701 return;
699 } 702 }
700 703
@@ -702,7 +705,6 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
702 /** 705 /**
703 * @todo Handle LOGO frames received. 706 * @todo Handle LOGO frames received.
704 */ 707 */
705 bfa_trc(lport->fcs, els_cmd->els_code);
706 return; 708 return;
707 } 709 }
708 710
@@ -710,14 +712,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
710 /** 712 /**
711 * @todo Handle PRLI frames received. 713 * @todo Handle PRLI frames received.
712 */ 714 */
713 bfa_trc(lport->fcs, els_cmd->els_code);
714 return; 715 return;
715 } 716 }
716 717
717 /** 718 /**
718 * Unhandled ELS frames. Send a LS_RJT. 719 * Unhandled ELS frames. Send a LS_RJT.
719 */ 720 */
720 bfa_fcs_port_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, 721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
721 FC_LS_RJT_EXP_NO_ADDL_INFO); 722 FC_LS_RJT_EXP_NO_ADDL_INFO);
722 723
723} 724}
@@ -726,13 +727,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
726 * PID based Lookup for a R-Port in the Port R-Port Queue 727 * PID based Lookup for a R-Port in the Port R-Port Queue
727 */ 728 */
728struct bfa_fcs_rport_s * 729struct bfa_fcs_rport_s *
729bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid) 730bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
730{ 731{
731 struct bfa_fcs_rport_s *rport; 732 struct bfa_fcs_rport_s *rport;
732 struct list_head *qe; 733 struct list_head *qe;
733 734
734 list_for_each(qe, &port->rport_q) { 735 list_for_each(qe, &port->rport_q) {
735 rport = (struct bfa_fcs_rport_s *)qe; 736 rport = (struct bfa_fcs_rport_s *) qe;
736 if (rport->pid == pid) 737 if (rport->pid == pid)
737 return rport; 738 return rport;
738 } 739 }
@@ -745,13 +746,13 @@ bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid)
745 * PWWN based Lookup for a R-Port in the Port R-Port Queue 746 * PWWN based Lookup for a R-Port in the Port R-Port Queue
746 */ 747 */
747struct bfa_fcs_rport_s * 748struct bfa_fcs_rport_s *
748bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) 749bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
749{ 750{
750 struct bfa_fcs_rport_s *rport; 751 struct bfa_fcs_rport_s *rport;
751 struct list_head *qe; 752 struct list_head *qe;
752 753
753 list_for_each(qe, &port->rport_q) { 754 list_for_each(qe, &port->rport_q) {
754 rport = (struct bfa_fcs_rport_s *)qe; 755 rport = (struct bfa_fcs_rport_s *) qe;
755 if (wwn_is_equal(rport->pwwn, pwwn)) 756 if (wwn_is_equal(rport->pwwn, pwwn))
756 return rport; 757 return rport;
757 } 758 }
@@ -764,13 +765,13 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
764 * NWWN based Lookup for a R-Port in the Port R-Port Queue 765 * NWWN based Lookup for a R-Port in the Port R-Port Queue
765 */ 766 */
766struct bfa_fcs_rport_s * 767struct bfa_fcs_rport_s *
767bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) 768bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
768{ 769{
769 struct bfa_fcs_rport_s *rport; 770 struct bfa_fcs_rport_s *rport;
770 struct list_head *qe; 771 struct list_head *qe;
771 772
772 list_for_each(qe, &port->rport_q) { 773 list_for_each(qe, &port->rport_q) {
773 rport = (struct bfa_fcs_rport_s *)qe; 774 rport = (struct bfa_fcs_rport_s *) qe;
774 if (wwn_is_equal(rport->nwwn, nwwn)) 775 if (wwn_is_equal(rport->nwwn, nwwn))
775 return rport; 776 return rport;
776 } 777 }
@@ -783,8 +784,9 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
783 * Called by rport module when new rports are discovered. 784 * Called by rport module when new rports are discovered.
784 */ 785 */
785void 786void
786bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port, 787bfa_fcs_lport_add_rport(
787 struct bfa_fcs_rport_s *rport) 788 struct bfa_fcs_lport_s *port,
789 struct bfa_fcs_rport_s *rport)
788{ 790{
789 list_add_tail(&rport->qe, &port->rport_q); 791 list_add_tail(&rport->qe, &port->rport_q);
790 port->num_rports++; 792 port->num_rports++;
@@ -794,8 +796,9 @@ bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
794 * Called by rport module to when rports are deleted. 796 * Called by rport module to when rports are deleted.
795 */ 797 */
796void 798void
797bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port, 799bfa_fcs_lport_del_rport(
798 struct bfa_fcs_rport_s *rport) 800 struct bfa_fcs_lport_s *port,
801 struct bfa_fcs_rport_s *rport)
799{ 802{
800 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport)); 803 bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
801 list_del(&rport->qe); 804 list_del(&rport->qe);
@@ -809,7 +812,7 @@ bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
809 * Called by vport for virtual ports when FDISC is complete. 812 * Called by vport for virtual ports when FDISC is complete.
810 */ 813 */
811void 814void
812bfa_fcs_port_online(struct bfa_fcs_port_s *port) 815bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
813{ 816{
814 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
815} 818}
@@ -819,7 +822,7 @@ bfa_fcs_port_online(struct bfa_fcs_port_s *port)
819 * Called by vport for virtual ports when virtual port becomes offline. 822 * Called by vport for virtual ports when virtual port becomes offline.
820 */ 823 */
821void 824void
822bfa_fcs_port_offline(struct bfa_fcs_port_s *port) 825bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
823{ 826{
824 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
825} 828}
@@ -831,40 +834,32 @@ bfa_fcs_port_offline(struct bfa_fcs_port_s *port)
831 * bfa_fcs_vport_delete_comp() for vports on completion. 834 * bfa_fcs_vport_delete_comp() for vports on completion.
832 */ 835 */
833void 836void
834bfa_fcs_port_delete(struct bfa_fcs_port_s *port) 837bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
835{ 838{
836 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
837} 840}
838 841
839/** 842/**
840 * Called by fabric in private loop topology to process LIP event.
841 */
842void
843bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
844{
845}
846
847/**
848 * Return TRUE if port is online, else return FALSE 843 * Return TRUE if port is online, else return FALSE
849 */ 844 */
850bfa_boolean_t 845bfa_boolean_t
851bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) 846bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
852{ 847{
853 return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online); 848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
854} 849}
855 850
856/** 851/**
857 * Attach time initialization of logical ports. 852 * Attach time initialization of logical ports.
858 */ 853 */
859void 854void
860bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 855bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
861 uint16_t vf_id, struct bfa_fcs_vport_s *vport) 856 u16 vf_id, struct bfa_fcs_vport_s *vport)
862{ 857{
863 lport->fcs = fcs; 858 lport->fcs = fcs;
864 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 859 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
865 lport->vport = vport; 860 lport->vport = vport;
866 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 861 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
867 bfa_lps_get_tag(lport->fabric->lps); 862 bfa_lps_get_tag(lport->fabric->lps);
868 863
869 INIT_LIST_HEAD(&lport->rport_q); 864 INIT_LIST_HEAD(&lport->rport_q);
870 lport->num_rports = 0; 865 lport->num_rports = 0;
@@ -876,21 +871,26 @@ bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
876 */ 871 */
877 872
878void 873void
879bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, 874bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
880 struct bfa_port_cfg_s *port_cfg) 875 struct bfa_lport_cfg_s *port_cfg)
881{ 876{
882 struct bfa_fcs_vport_s *vport = lport->vport; 877 struct bfa_fcs_vport_s *vport = lport->vport;
878 struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
879 char lpwwn_buf[BFA_STRING_32];
883 880
884 bfa_os_assign(lport->port_cfg, *port_cfg); 881 bfa_os_assign(lport->port_cfg, *port_cfg);
885 882
886 lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport, 883 lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
887 lport->port_cfg.roles, 884 lport->port_cfg.roles,
888 lport->fabric->vf_drv, 885 lport->fabric->vf_drv,
889 vport ? vport->vport_drv : NULL); 886 vport ? vport->vport_drv : NULL);
890 887
891 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); 888 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
889 BFA_LOG(KERN_INFO, bfad, log_level,
890 "New logical port created: WWN = %s Role = %s\n",
891 lpwwn_buf, "Initiator");
892 892
893 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); 893 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
895} 895}
896 896
@@ -899,10 +899,11 @@ bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
899 */ 899 */
900 900
901void 901void
902bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, 902bfa_fcs_lport_get_attr(
903 struct bfa_port_attr_s *port_attr) 903 struct bfa_fcs_lport_s *port,
904 struct bfa_lport_attr_s *port_attr)
904{ 905{
905 if (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)) 906 if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
906 port_attr->pid = port->pid; 907 port_attr->pid = port->pid;
907 else 908 else
908 port_attr->pid = 0; 909 port_attr->pid = 0;
@@ -913,25 +914,4895 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
913 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 914 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
914 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 915 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
915 port_attr->authfail = 916 port_attr->authfail =
916 bfa_fcs_fabric_is_auth_failed(port->fabric); 917 bfa_fcs_fabric_is_auth_failed(port->fabric);
917 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); 918 port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port);
918 memcpy(port_attr->fabric_ip_addr, 919 memcpy(port_attr->fabric_ip_addr,
919 bfa_fcs_port_get_fabric_ipaddr(port), 920 bfa_fcs_lport_get_fabric_ipaddr(port),
920 BFA_FCS_FABRIC_IPADDR_SZ); 921 BFA_FCS_FABRIC_IPADDR_SZ);
921 922
922 if (port->vport != NULL) { 923 if (port->vport != NULL) {
923 port_attr->port_type = BFA_PPORT_TYPE_VPORT; 924 port_attr->port_type = BFA_PORT_TYPE_VPORT;
924 port_attr->fpma_mac = 925 port_attr->fpma_mac =
925 bfa_lps_get_lp_mac(port->vport->lps); 926 bfa_lps_get_lp_mac(port->vport->lps);
926 } else 927 } else {
927 port_attr->fpma_mac = 928 port_attr->fpma_mac =
928 bfa_lps_get_lp_mac(port->fabric->lps); 929 bfa_lps_get_lp_mac(port->fabric->lps);
930 }
931 } else {
932 port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
933 port_attr->state = BFA_LPORT_UNINIT;
934 }
935}
936
937/**
938 * bfa_fcs_lport_fab port fab functions
939 */
940
941/**
942 * Called by port to initialize fabric services of the base port.
943 */
944static void
945bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
946{
947 bfa_fcs_lport_ns_init(port);
948 bfa_fcs_lport_scn_init(port);
949 bfa_fcs_lport_ms_init(port);
950}
951
952/**
953 * Called by port to notify transition to online state.
954 */
955static void
956bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
957{
958 bfa_fcs_lport_ns_online(port);
959 bfa_fcs_lport_scn_online(port);
960}
961
962/**
963 * Called by port to notify transition to offline state.
964 */
965static void
966bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
967{
968 bfa_fcs_lport_ns_offline(port);
969 bfa_fcs_lport_scn_offline(port);
970 bfa_fcs_lport_ms_offline(port);
971}
972
973/**
974 * bfa_fcs_lport_n2n functions
975 */
976
977/**
978 * Called by fcs/port to initialize N2N topology.
979 */
980static void
981bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
982{
983}
984
985/**
986 * Called by fcs/port to notify transition to online state.
987 */
988static void
989bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
990{
991 struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
992 struct bfa_lport_cfg_s *pcfg = &port->port_cfg;
993 struct bfa_fcs_rport_s *rport;
994
995 bfa_trc(port->fcs, pcfg->pwwn);
996
997 /*
998 * If our PWWN is > than that of the r-port, we have to initiate PLOGI
999 * and assign an Address. if not, we need to wait for its PLOGI.
1000 *
1001 * If our PWWN is < than that of the remote port, it will send a PLOGI
1002 * with the PIDs assigned. The rport state machine take care of this
1003 * incoming PLOGI.
1004 */
1005 if (memcmp
1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
1007 sizeof(wwn_t)) > 0) {
1008 port->pid = N2N_LOCAL_PID;
1009 /**
1010 * First, check if we know the device by pwwn.
1011 */
1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port,
1013 n2n_port->rem_port_wwn);
1014 if (rport) {
1015 bfa_trc(port->fcs, rport->pid);
1016 bfa_trc(port->fcs, rport->pwwn);
1017 rport->pid = N2N_REMOTE_PID;
1018 bfa_fcs_rport_online(rport);
1019 return;
1020 }
1021
1022 /*
1023 * In n2n there can be only one rport. Delete the old one
1024 * whose pid should be zero, because it is offline.
1025 */
1026 if (port->num_rports > 0) {
1027 rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
1028 bfa_assert(rport != NULL);
1029 if (rport) {
1030 bfa_trc(port->fcs, rport->pwwn);
1031 bfa_fcs_rport_delete(rport);
1032 }
1033 }
1034 bfa_fcs_rport_create(port, N2N_REMOTE_PID);
1035 }
1036}
1037
1038/**
1039 * Called by fcs/port to notify transition to offline state.
1040 */
1041static void
1042bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
1043{
1044 struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
1045
1046 bfa_trc(port->fcs, port->pid);
1047 port->pid = 0;
1048 n2n_port->rem_port_wwn = 0;
1049 n2n_port->reply_oxid = 0;
1050}
1051
1052#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
1053
1054/*
1055 * forward declarations
1056 */
1057static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg,
1058 struct bfa_fcxp_s *fcxp_alloced);
1059static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg,
1060 struct bfa_fcxp_s *fcxp_alloced);
1061static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg,
1062 struct bfa_fcxp_s *fcxp_alloced);
1063static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg,
1064 struct bfa_fcxp_s *fcxp,
1065 void *cbarg,
1066 bfa_status_t req_status,
1067 u32 rsp_len,
1068 u32 resid_len,
1069 struct fchs_s *rsp_fchs);
1070static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg,
1071 struct bfa_fcxp_s *fcxp,
1072 void *cbarg,
1073 bfa_status_t req_status,
1074 u32 rsp_len,
1075 u32 resid_len,
1076 struct fchs_s *rsp_fchs);
1077static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
1078 struct bfa_fcxp_s *fcxp,
1079 void *cbarg,
1080 bfa_status_t req_status,
1081 u32 rsp_len,
1082 u32 resid_len,
1083 struct fchs_s *rsp_fchs);
1084static void bfa_fcs_lport_fdmi_timeout(void *arg);
1085static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1086 u8 *pyld);
1087static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1088 u8 *pyld);
1089static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
1090 u8 *pyld);
1091static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *
1092 fdmi, u8 *pyld);
1093static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
1095static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1096 struct bfa_fcs_fdmi_port_attr_s *port_attr);
1097/**
1098 * fcs_fdmi_sm FCS FDMI state machine
1099 */
1100
1101/**
1102 * FDMI State Machine events
1103 */
1104enum port_fdmi_event {
1105 FDMISM_EVENT_PORT_ONLINE = 1,
1106 FDMISM_EVENT_PORT_OFFLINE = 2,
1107 FDMISM_EVENT_RSP_OK = 4,
1108 FDMISM_EVENT_RSP_ERROR = 5,
1109 FDMISM_EVENT_TIMEOUT = 6,
1110 FDMISM_EVENT_RHBA_SENT = 7,
1111 FDMISM_EVENT_RPRT_SENT = 8,
1112 FDMISM_EVENT_RPA_SENT = 9,
1113};
1114
1115static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
1116 enum port_fdmi_event event);
1117static void bfa_fcs_lport_fdmi_sm_sending_rhba(
1118 struct bfa_fcs_lport_fdmi_s *fdmi,
1119 enum port_fdmi_event event);
1120static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1121 enum port_fdmi_event event);
1122static void bfa_fcs_lport_fdmi_sm_rhba_retry(
1123 struct bfa_fcs_lport_fdmi_s *fdmi,
1124 enum port_fdmi_event event);
1125static void bfa_fcs_lport_fdmi_sm_sending_rprt(
1126 struct bfa_fcs_lport_fdmi_s *fdmi,
1127 enum port_fdmi_event event);
1128static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
1129 enum port_fdmi_event event);
1130static void bfa_fcs_lport_fdmi_sm_rprt_retry(
1131 struct bfa_fcs_lport_fdmi_s *fdmi,
1132 enum port_fdmi_event event);
1133static void bfa_fcs_lport_fdmi_sm_sending_rpa(
1134 struct bfa_fcs_lport_fdmi_s *fdmi,
1135 enum port_fdmi_event event);
1136static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
1137 enum port_fdmi_event event);
1138static void bfa_fcs_lport_fdmi_sm_rpa_retry(
1139 struct bfa_fcs_lport_fdmi_s *fdmi,
1140 enum port_fdmi_event event);
1141static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1142 enum port_fdmi_event event);
1143static void bfa_fcs_lport_fdmi_sm_disabled(
1144 struct bfa_fcs_lport_fdmi_s *fdmi,
1145 enum port_fdmi_event event);
1146/**
1147 * Start in offline state - awaiting MS to send start.
1148 */
1149static void
1150bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
1151 enum port_fdmi_event event)
1152{
1153 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1154
1155 bfa_trc(port->fcs, port->port_cfg.pwwn);
1156 bfa_trc(port->fcs, event);
1157
1158 fdmi->retry_cnt = 0;
1159
1160 switch (event) {
1161 case FDMISM_EVENT_PORT_ONLINE:
1162 if (port->vport) {
1163 /*
1164 * For Vports, register a new port.
1165 */
1166 bfa_sm_set_state(fdmi,
1167 bfa_fcs_lport_fdmi_sm_sending_rprt);
1168 bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
1169 } else {
1170 /*
1171 * For a base port, we should first register the HBA
1172 * atribute. The HBA attribute also contains the base
1173 * port registration.
1174 */
1175 bfa_sm_set_state(fdmi,
1176 bfa_fcs_lport_fdmi_sm_sending_rhba);
1177 bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
1178 }
1179 break;
1180
1181 case FDMISM_EVENT_PORT_OFFLINE:
1182 break;
1183
1184 default:
1185 bfa_sm_fault(port->fcs, event);
1186 }
1187}
1188
1189static void
1190bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1191 enum port_fdmi_event event)
1192{
1193 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1194
1195 bfa_trc(port->fcs, port->port_cfg.pwwn);
1196 bfa_trc(port->fcs, event);
1197
1198 switch (event) {
1199 case FDMISM_EVENT_RHBA_SENT:
1200 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba);
1201 break;
1202
1203 case FDMISM_EVENT_PORT_OFFLINE:
1204 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1205 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1206 &fdmi->fcxp_wqe);
1207 break;
1208
1209 default:
1210 bfa_sm_fault(port->fcs, event);
1211 }
1212}
1213
1214static void
1215bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
1216 enum port_fdmi_event event)
1217{
1218 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1219
1220 bfa_trc(port->fcs, port->port_cfg.pwwn);
1221 bfa_trc(port->fcs, event);
1222
1223 switch (event) {
1224 case FDMISM_EVENT_RSP_ERROR:
1225 /*
1226 * if max retries have not been reached, start timer for a
1227 * delayed retry
1228 */
1229 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
1230 bfa_sm_set_state(fdmi,
1231 bfa_fcs_lport_fdmi_sm_rhba_retry);
1232 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
1233 &fdmi->timer,
1234 bfa_fcs_lport_fdmi_timeout, fdmi,
1235 BFA_FCS_RETRY_TIMEOUT);
1236 } else {
1237 /*
1238 * set state to offline
1239 */
1240 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1241 }
1242 break;
1243
1244 case FDMISM_EVENT_RSP_OK:
1245 /*
1246 * Initiate Register Port Attributes
1247 */
1248 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
1249 fdmi->retry_cnt = 0;
1250 bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
1251 break;
1252
1253 case FDMISM_EVENT_PORT_OFFLINE:
1254 bfa_fcxp_discard(fdmi->fcxp);
1255 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1256 break;
1257
1258 default:
1259 bfa_sm_fault(port->fcs, event);
1260 }
1261}
1262
1263static void
1264bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1265 enum port_fdmi_event event)
1266{
1267 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1268
1269 bfa_trc(port->fcs, port->port_cfg.pwwn);
1270 bfa_trc(port->fcs, event);
1271
1272 switch (event) {
1273 case FDMISM_EVENT_TIMEOUT:
1274 /*
1275 * Retry Timer Expired. Re-send
1276 */
1277 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba);
1278 bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
1279 break;
1280
1281 case FDMISM_EVENT_PORT_OFFLINE:
1282 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1283 bfa_timer_stop(&fdmi->timer);
1284 break;
1285
1286 default:
1287 bfa_sm_fault(port->fcs, event);
1288 }
1289}
1290
1291/*
1292* RPRT : Register Port
1293 */
1294static void
1295bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
1296 enum port_fdmi_event event)
1297{
1298 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1299
1300 bfa_trc(port->fcs, port->port_cfg.pwwn);
1301 bfa_trc(port->fcs, event);
1302
1303 switch (event) {
1304 case FDMISM_EVENT_RPRT_SENT:
1305 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt);
1306 break;
1307
1308 case FDMISM_EVENT_PORT_OFFLINE:
1309 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1310 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1311 &fdmi->fcxp_wqe);
1312 break;
1313
1314 default:
1315 bfa_sm_fault(port->fcs, event);
1316 }
1317}
1318
/*
 * RPRT sent; awaiting the management server's response.  On error the
 * request is retried up to BFA_FCS_FDMI_CMD_MAX_RETRIES times via a
 * delayed timer before the state machine gives up and goes offline.
 */
static void
bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_lport_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry (note: retry_cnt is incremented here even
		 * when the limit check fails)
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi,
					bfa_fcs_lport_fdmi_sm_rprt_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer,
					bfa_fcs_lport_fdmi_timeout, fdmi,
					BFA_FCS_RETRY_TIMEOUT);

		} else {
			/*
			 * retries exhausted: set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
			fdmi->retry_cnt = 0;
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		/* RPRT accepted: clear the retry count and go online */
		fdmi->retry_cnt = 0;
		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* drop the outstanding exchange before going offline */
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
1365
1366static void
1367bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1368 enum port_fdmi_event event)
1369{
1370 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1371
1372 bfa_trc(port->fcs, port->port_cfg.pwwn);
1373 bfa_trc(port->fcs, event);
1374
1375 switch (event) {
1376 case FDMISM_EVENT_TIMEOUT:
1377 /*
1378 * Retry Timer Expired. Re-send
1379 */
1380 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt);
1381 bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
1382 break;
1383
1384 case FDMISM_EVENT_PORT_OFFLINE:
1385 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1386 bfa_timer_stop(&fdmi->timer);
1387 break;
1388
1389 default:
1390 bfa_sm_fault(port->fcs, event);
1391 }
1392}
1393
1394/*
1395 * Register Port Attributes
1396 */
1397static void
1398bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
1399 enum port_fdmi_event event)
1400{
1401 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1402
1403 bfa_trc(port->fcs, port->port_cfg.pwwn);
1404 bfa_trc(port->fcs, event);
1405
1406 switch (event) {
1407 case FDMISM_EVENT_RPA_SENT:
1408 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa);
1409 break;
1410
1411 case FDMISM_EVENT_PORT_OFFLINE:
1412 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1413 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
1414 &fdmi->fcxp_wqe);
1415 break;
1416
1417 default:
1418 bfa_sm_fault(port->fcs, event);
1419 }
1420}
1421
/*
 * RPA sent; awaiting the management server's response.  Mirrors the
 * RPRT response handling: bounded delayed retries on error, online on
 * success, exchange discarded on port offline.
 */
static void
bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
			enum port_fdmi_event event)
{
	struct bfa_fcs_lport_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer,
					bfa_fcs_lport_fdmi_timeout, fdmi,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			/*
			 * retries exhausted: set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
			fdmi->retry_cnt = 0;
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		/* RPA accepted: registration is complete */
		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
		fdmi->retry_cnt = 0;
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		/* drop the outstanding exchange before going offline */
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
1466
1467static void
1468bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
1469 enum port_fdmi_event event)
1470{
1471 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1472
1473 bfa_trc(port->fcs, port->port_cfg.pwwn);
1474 bfa_trc(port->fcs, event);
1475
1476 switch (event) {
1477 case FDMISM_EVENT_TIMEOUT:
1478 /*
1479 * Retry Timer Expired. Re-send
1480 */
1481 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
1482 bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
1483 break;
1484
1485 case FDMISM_EVENT_PORT_OFFLINE:
1486 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1487 bfa_timer_stop(&fdmi->timer);
1488 break;
1489
1490 default:
1491 bfa_sm_fault(port->fcs, event);
1492 }
1493}
1494
1495static void
1496bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1497 enum port_fdmi_event event)
1498{
1499 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1500
1501 bfa_trc(port->fcs, port->port_cfg.pwwn);
1502 bfa_trc(port->fcs, event);
1503
1504 switch (event) {
1505 case FDMISM_EVENT_PORT_OFFLINE:
1506 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
1507 break;
1508
1509 default:
1510 bfa_sm_fault(port->fcs, event);
1511 }
1512}
1513/**
1514 * FDMI is disabled state.
1515 */
1516static void
1517bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
1518 enum port_fdmi_event event)
1519{
1520 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1521
1522 bfa_trc(port->fcs, port->port_cfg.pwwn);
1523 bfa_trc(port->fcs, event);
1524
1525 /* No op State. It can only be enabled at Driver Init. */
1526}
1527
1528/**
1529* RHBA : Register HBA Attributes.
1530 */
1531static void
1532bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1533{
1534 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
1535 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1536 struct fchs_s fchs;
1537 int len, attr_len;
1538 struct bfa_fcxp_s *fcxp;
1539 u8 *pyld;
1540
1541 bfa_trc(port->fcs, port->port_cfg.pwwn);
1542
1543 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1544 if (!fcxp) {
1545 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1546 bfa_fcs_lport_fdmi_send_rhba, fdmi);
1547 return;
1548 }
1549 fdmi->fcxp = fcxp;
1550
1551 pyld = bfa_fcxp_get_reqbuf(fcxp);
1552 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1553
1554 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1555 FDMI_RHBA);
1556
1557 attr_len =
1558 bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
1559 (u8 *) ((struct ct_hdr_s *) pyld
1560 + 1));
1561
1562 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1563 FC_CLASS_3, (len + attr_len), &fchs,
1564 bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi,
1565 FC_MAX_PDUSZ, FC_FCCT_TOV);
1566
1567 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
1568}
1569
1570static u16
1571bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1572{
1573 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1574 struct bfa_fcs_fdmi_hba_attr_s hba_attr;
1575 struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
1576 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
1577 struct fdmi_attr_s *attr;
1578 u8 *curr_ptr;
1579 u16 len, count;
1580
1581 /*
1582 * get hba attributes
1583 */
1584 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
1585
1586 rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
1587 rhba->port_list.num_ports = bfa_os_htonl(1);
1588 rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
1589
1590 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
1591
1592 count = 0;
1593 len += sizeof(rhba->hba_attr_blk.attr_count);
1594
1595 /*
1596 * fill out the invididual entries of the HBA attrib Block
1597 */
1598 curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
1599
1600 /*
1601 * Node Name
1602 */
1603 attr = (struct fdmi_attr_s *) curr_ptr;
1604 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
1605 attr->len = sizeof(wwn_t);
1606 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1608 len += attr->len;
1609 count++;
1610 attr->len =
1611 bfa_os_htons(attr->len + sizeof(attr->type) +
1612 sizeof(attr->len));
1613
1614 /*
1615 * Manufacturer
1616 */
1617 attr = (struct fdmi_attr_s *) curr_ptr;
1618 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
1619 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
1620 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
1621 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1622 *fields need
1623 *to be 4 byte
1624 *aligned */
1625 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1626 len += attr->len;
1627 count++;
1628 attr->len =
1629 bfa_os_htons(attr->len + sizeof(attr->type) +
1630 sizeof(attr->len));
1631
1632 /*
1633 * Serial Number
1634 */
1635 attr = (struct fdmi_attr_s *) curr_ptr;
1636 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
1637 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
1638 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
1639 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1640 *fields need
1641 *to be 4 byte
1642 *aligned */
1643 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1644 len += attr->len;
1645 count++;
1646 attr->len =
1647 bfa_os_htons(attr->len + sizeof(attr->type) +
1648 sizeof(attr->len));
1649
1650 /*
1651 * Model
1652 */
1653 attr = (struct fdmi_attr_s *) curr_ptr;
1654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
1655 attr->len = (u16) strlen(fcs_hba_attr->model);
1656 memcpy(attr->value, fcs_hba_attr->model, attr->len);
1657 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1658 *fields need
1659 *to be 4 byte
1660 *aligned */
1661 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1662 len += attr->len;
1663 count++;
1664 attr->len =
1665 bfa_os_htons(attr->len + sizeof(attr->type) +
1666 sizeof(attr->len));
1667
1668 /*
1669 * Model Desc
1670 */
1671 attr = (struct fdmi_attr_s *) curr_ptr;
1672 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
1673 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
1674 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
1675 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1676 *fields need
1677 *to be 4 byte
1678 *aligned */
1679 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1680 len += attr->len;
1681 count++;
1682 attr->len =
1683 bfa_os_htons(attr->len + sizeof(attr->type) +
1684 sizeof(attr->len));
1685
1686 /*
1687 * H/W Version
1688 */
1689 if (fcs_hba_attr->hw_version[0] != '\0') {
1690 attr = (struct fdmi_attr_s *) curr_ptr;
1691 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
1694 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1695 *fields need
1696 *to be 4 byte
1697 *aligned */
1698 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1699 len += attr->len;
1700 count++;
1701 attr->len =
1702 bfa_os_htons(attr->len + sizeof(attr->type) +
1703 sizeof(attr->len));
1704 }
1705
1706 /*
1707 * Driver Version
1708 */
1709 attr = (struct fdmi_attr_s *) curr_ptr;
1710 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1713 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1714 *fields need
1715 *to be 4 byte
1716 *aligned */
1717 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1718 len += attr->len;;
1719 count++;
1720 attr->len =
1721 bfa_os_htons(attr->len + sizeof(attr->type) +
1722 sizeof(attr->len));
1723
1724 /*
1725 * Option Rom Version
1726 */
1727 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
1728 attr = (struct fdmi_attr_s *) curr_ptr;
1729 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
1732 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1733 *fields need
1734 *to be 4 byte
1735 *aligned */
1736 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1737 len += attr->len;
1738 count++;
1739 attr->len =
1740 bfa_os_htons(attr->len + sizeof(attr->type) +
1741 sizeof(attr->len));
1742 }
1743
1744 /*
1745 * f/w Version = driver version
1746 */
1747 attr = (struct fdmi_attr_s *) curr_ptr;
1748 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1750 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1751 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1752 *fields need
1753 *to be 4 byte
1754 *aligned */
1755 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1756 len += attr->len;
1757 count++;
1758 attr->len =
1759 bfa_os_htons(attr->len + sizeof(attr->type) +
1760 sizeof(attr->len));
1761
1762 /*
1763 * OS Name
1764 */
1765 if (fcs_hba_attr->os_name[0] != '\0') {
1766 attr = (struct fdmi_attr_s *) curr_ptr;
1767 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
1768 attr->len = (u16) strlen(fcs_hba_attr->os_name);
1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
1770 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1771 *fields need
1772 *to be 4 byte
1773 *aligned */
1774 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1775 len += attr->len;
1776 count++;
1777 attr->len =
1778 bfa_os_htons(attr->len + sizeof(attr->type) +
1779 sizeof(attr->len));
1780 }
1781
1782 /*
1783 * MAX_CT_PAYLOAD
1784 */
1785 attr = (struct fdmi_attr_s *) curr_ptr;
1786 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
1787 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
1789 len += attr->len;
1790 count++;
1791 attr->len =
1792 bfa_os_htons(attr->len + sizeof(attr->type) +
1793 sizeof(attr->len));
1794
1795 /*
1796 * Update size of payload
1797 */
1798 len += ((sizeof(attr->type) +
1799 sizeof(attr->len)) * count);
1800
1801 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
1802 return len;
1803}
1804
1805static void
1806bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1807 void *cbarg, bfa_status_t req_status,
1808 u32 rsp_len, u32 resid_len,
1809 struct fchs_s *rsp_fchs)
1810{
1811 struct bfa_fcs_lport_fdmi_s *fdmi =
1812 (struct bfa_fcs_lport_fdmi_s *) cbarg;
1813 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1814 struct ct_hdr_s *cthdr = NULL;
1815
1816 bfa_trc(port->fcs, port->port_cfg.pwwn);
1817
1818 /*
1819 * Sanity Checks
1820 */
1821 if (req_status != BFA_STATUS_OK) {
1822 bfa_trc(port->fcs, req_status);
1823 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1824 return;
1825 }
1826
1827 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1828 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1829
1830 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1831 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
1832 return;
1833 }
1834
1835 bfa_trc(port->fcs, cthdr->reason_code);
1836 bfa_trc(port->fcs, cthdr->exp_code);
1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1838}
1839
1840/**
1841* RPRT : Register Port
1842 */
1843static void
1844bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1845{
1846 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
1847 struct bfa_fcs_lport_s *port = fdmi->ms->port;
1848 struct fchs_s fchs;
1849 u16 len, attr_len;
1850 struct bfa_fcxp_s *fcxp;
1851 u8 *pyld;
1852
1853 bfa_trc(port->fcs, port->port_cfg.pwwn);
1854
1855 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1856 if (!fcxp) {
1857 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1858 bfa_fcs_lport_fdmi_send_rprt, fdmi);
1859 return;
1860 }
1861 fdmi->fcxp = fcxp;
1862
1863 pyld = bfa_fcxp_get_reqbuf(fcxp);
1864 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1865
1866 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1867 FDMI_RPRT);
1868
1869 attr_len =
1870 bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi,
1871 (u8 *) ((struct ct_hdr_s *) pyld
1872 + 1));
1873
1874 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1875 FC_CLASS_3, len + attr_len, &fchs,
1876 bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi,
1877 FC_MAX_PDUSZ, FC_FCCT_TOV);
1878
1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
1880}
1881
1882/**
1883 * This routine builds Port Attribute Block that used in RPA, RPRT commands.
1884 */
1885static u16
1886bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1887 u8 *pyld)
1888{
1889 struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
1890 struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
1891 struct fdmi_attr_s *attr;
1892 u8 *curr_ptr;
1893 u16 len;
1894 u8 count = 0;
1895
1896 /*
1897 * get port attributes
1898 */
1899 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
1900
1901 len = sizeof(port_attrib->attr_count);
1902
1903 /*
1904 * fill out the invididual entries
1905 */
1906 curr_ptr = (u8 *) &port_attrib->port_attr;
1907
1908 /*
1909 * FC4 Types
1910 */
1911 attr = (struct fdmi_attr_s *) curr_ptr;
1912 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
1913 attr->len = sizeof(fcs_port_attr.supp_fc4_types);
1914 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
1915 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1916 len += attr->len;
1917 ++count;
1918 attr->len =
1919 bfa_os_htons(attr->len + sizeof(attr->type) +
1920 sizeof(attr->len));
1921
1922 /*
1923 * Supported Speed
1924 */
1925 attr = (struct fdmi_attr_s *) curr_ptr;
1926 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
1927 attr->len = sizeof(fcs_port_attr.supp_speed);
1928 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
1929 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1930 len += attr->len;
1931 ++count;
1932 attr->len =
1933 bfa_os_htons(attr->len + sizeof(attr->type) +
1934 sizeof(attr->len));
1935
1936 /*
1937 * current Port Speed
1938 */
1939 attr = (struct fdmi_attr_s *) curr_ptr;
1940 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
1941 attr->len = sizeof(fcs_port_attr.curr_speed);
1942 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1944 len += attr->len;
1945 ++count;
1946 attr->len =
1947 bfa_os_htons(attr->len + sizeof(attr->type) +
1948 sizeof(attr->len));
1949
1950 /*
1951 * max frame size
1952 */
1953 attr = (struct fdmi_attr_s *) curr_ptr;
1954 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
1955 attr->len = sizeof(fcs_port_attr.max_frm_size);
1956 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
1957 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1958 len += attr->len;
1959 ++count;
1960 attr->len =
1961 bfa_os_htons(attr->len + sizeof(attr->type) +
1962 sizeof(attr->len));
1963
1964 /*
1965 * OS Device Name
1966 */
1967 if (fcs_port_attr.os_device_name[0] != '\0') {
1968 attr = (struct fdmi_attr_s *) curr_ptr;
1969 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name);
1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
1972 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1973 *fields need
1974 *to be 4 byte
1975 *aligned */
1976 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1977 len += attr->len;
1978 ++count;
1979 attr->len =
1980 bfa_os_htons(attr->len + sizeof(attr->type) +
1981 sizeof(attr->len));
1982 }
1983 /*
1984 * Host Name
1985 */
1986 if (fcs_port_attr.host_name[0] != '\0') {
1987 attr = (struct fdmi_attr_s *) curr_ptr;
1988 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
1989 attr->len = (u16) strlen(fcs_port_attr.host_name);
1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len);
1991 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
1992 *fields need
1993 *to be 4 byte
1994 *aligned */
1995 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1996 len += attr->len;
1997 ++count;
1998 attr->len =
1999 bfa_os_htons(attr->len + sizeof(attr->type) +
2000 sizeof(attr->len));
2001 }
2002
2003 /*
2004 * Update size of payload
2005 */
2006 port_attrib->attr_count = bfa_os_htonl(count);
2007 len += ((sizeof(attr->type) +
2008 sizeof(attr->len)) * count);
2009 return len;
2010}
2011
2012static u16
2013bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
2014{
2015 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2016 struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
2017 u16 len;
2018
2019 rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs));
2020 rprt->port_name = bfa_fcs_lport_get_pwwn(port);
2021
2022 len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
2023 (u8 *) &rprt->port_attr_blk);
2024
2025 len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
2026
2027 return len;
2028}
2029
2030static void
2031bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2032 void *cbarg, bfa_status_t req_status,
2033 u32 rsp_len, u32 resid_len,
2034 struct fchs_s *rsp_fchs)
2035{
2036 struct bfa_fcs_lport_fdmi_s *fdmi =
2037 (struct bfa_fcs_lport_fdmi_s *) cbarg;
2038 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2039 struct ct_hdr_s *cthdr = NULL;
2040
2041 bfa_trc(port->fcs, port->port_cfg.pwwn);
2042
2043 /*
2044 * Sanity Checks
2045 */
2046 if (req_status != BFA_STATUS_OK) {
2047 bfa_trc(port->fcs, req_status);
2048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2049 return;
2050 }
2051
2052 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2053 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
2054
2055 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2056 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
2057 return;
2058 }
2059
2060 bfa_trc(port->fcs, cthdr->reason_code);
2061 bfa_trc(port->fcs, cthdr->exp_code);
2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2063}
2064
2065/**
2066* RPA : Register Port Attributes.
2067 */
2068static void
2069bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2070{
2071 struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
2072 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2073 struct fchs_s fchs;
2074 u16 len, attr_len;
2075 struct bfa_fcxp_s *fcxp;
2076 u8 *pyld;
2077
2078 bfa_trc(port->fcs, port->port_cfg.pwwn);
2079
2080 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
2081 if (!fcxp) {
2082 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
2083 bfa_fcs_lport_fdmi_send_rpa, fdmi);
2084 return;
2085 }
2086 fdmi->fcxp = fcxp;
2087
2088 pyld = bfa_fcxp_get_reqbuf(fcxp);
2089 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
2090
2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
2092 FDMI_RPA);
2093
2094 attr_len =
2095 bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
2096 (u8 *) ((struct ct_hdr_s *) pyld
2097 + 1));
2098
2099 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2100 FC_CLASS_3, len + attr_len, &fchs,
2101 bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi,
2102 FC_MAX_PDUSZ, FC_FCCT_TOV);
2103
2104 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
2105}
2106
2107static u16
2108bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
2109{
2110 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2111 struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
2112 u16 len;
2113
2114 rpa->port_name = bfa_fcs_lport_get_pwwn(port);
2115
2116 len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
2117 (u8 *) &rpa->port_attr_blk);
2118
2119 len += sizeof(rpa->port_name);
2120
2121 return len;
2122}
2123
2124static void
2125bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2126 void *cbarg, bfa_status_t req_status, u32 rsp_len,
2127 u32 resid_len, struct fchs_s *rsp_fchs)
2128{
2129 struct bfa_fcs_lport_fdmi_s *fdmi =
2130 (struct bfa_fcs_lport_fdmi_s *) cbarg;
2131 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2132 struct ct_hdr_s *cthdr = NULL;
2133
2134 bfa_trc(port->fcs, port->port_cfg.pwwn);
2135
2136 /*
2137 * Sanity Checks
2138 */
2139 if (req_status != BFA_STATUS_OK) {
2140 bfa_trc(port->fcs, req_status);
2141 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2142 return;
2143 }
2144
2145 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2146 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
2147
2148 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2149 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
2150 return;
2151 }
2152
2153 bfa_trc(port->fcs, cthdr->reason_code);
2154 bfa_trc(port->fcs, cthdr->exp_code);
2155 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2156}
2157
2158static void
2159bfa_fcs_lport_fdmi_timeout(void *arg)
2160{
2161 struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg;
2162
2163 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
2164}
2165
/*
 * Collect the HBA attributes reported in the FDMI RHBA request.
 *
 * Adapter identity strings are read from the IOC; driver/OS strings
 * come from the driver_info block registered with the FCS.  @hba_attr
 * is zeroed first, so strings shorter than their fields come out
 * NUL-terminated.
 */
void
bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
			struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
{
	struct bfa_fcs_lport_s *port = fdmi->ms->port;
	struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;

	bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));

	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
					hba_attr->manufacturer);
	bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
					hba_attr->serial_num);
	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
					hba_attr->model);
	/* NOTE(review): model_desc reuses the model string -- confirm no
	 * separate description accessor exists */
	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
					hba_attr->model_desc);
	bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc,
					hba_attr->hw_version);
	bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
					hba_attr->option_rom_ver);
	bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
					hba_attr->fw_version);

	/* NOTE(review): strncpy() leaves the field unterminated when the
	 * source exactly fills it; the memset above only protects shorter
	 * strings -- verify field sizing */
	strncpy(hba_attr->driver_version, (char *)driver_info->version,
		sizeof(hba_attr->driver_version));

	strncpy(hba_attr->os_name, driver_info->host_os_name,
		sizeof(hba_attr->os_name));

	/*
	 * If there is a patch level, append it
	 * to the os name along with a separator
	 */
	if (driver_info->host_os_patch[0] != '\0') {
		/* NOTE(review): strncat()'s bound here is the SOURCE size,
		 * not the space remaining in os_name -- potential overflow
		 * if os_name is near capacity; confirm buffer sizing */
		strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
		strncat(hba_attr->os_name, driver_info->host_os_patch,
			sizeof(driver_info->host_os_patch));
	}

	/* advertised max CT payload, in wire (big-endian) order */
	hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
}
2209
/*
 * Collect the port attributes reported in FDMI RPA/RPRT requests.
 * Speed/frame-size values are stored in wire (big-endian) order; the
 * name strings come from the driver_info block.
 */
void
bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
			struct bfa_fcs_fdmi_port_attr_s *port_attr)
{
	struct bfa_fcs_lport_s *port = fdmi->ms->port;
	struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
	struct bfa_port_attr_s pport_attr;

	bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));

	/*
	 * get pport attributes from hal
	 */
	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);

	/*
	 * get FC4 type Bitmask
	 */
	fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);

	/*
	 * Supported Speeds
	 */
	port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);

	/*
	 * Current Speed
	 */
	port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);

	/*
	 * Max PDU Size.
	 */
	port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);

	/*
	 * OS device Name
	 * NOTE(review): strncpy() leaves the field unterminated when the
	 * source exactly fills it -- verify field sizing
	 */
	strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
		sizeof(port_attr->os_device_name));

	/*
	 * Host name
	 */
	strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
		sizeof(port_attr->host_name));

}
2258
2259
2260void
2261bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
2262{
2263 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2264
2265 fdmi->ms = ms;
2266 if (ms->port->fcs->fdmi_enabled)
2267 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
2268 else
2269 bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled);
2270}
2271
2272void
2273bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms)
2274{
2275 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2276
2277 fdmi->ms = ms;
2278 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
2279}
2280
2281void
2282bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms)
2283{
2284 struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
2285
2286 fdmi->ms = ms;
2287 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
2288}
2289
2290#define BFA_FCS_MS_CMD_MAX_RETRIES 2
2291
2292/*
2293 * forward declarations
2294 */
2295static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg,
2296 struct bfa_fcxp_s *fcxp_alloced);
2297static void bfa_fcs_lport_ms_timeout(void *arg);
2298static void bfa_fcs_lport_ms_plogi_response(void *fcsarg,
2299 struct bfa_fcxp_s *fcxp,
2300 void *cbarg,
2301 bfa_status_t req_status,
2302 u32 rsp_len,
2303 u32 resid_len,
2304 struct fchs_s *rsp_fchs);
2305
2306static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg,
2307 struct bfa_fcxp_s *fcxp_alloced);
2308static void bfa_fcs_lport_ms_gmal_response(void *fcsarg,
2309 struct bfa_fcxp_s *fcxp,
2310 void *cbarg,
2311 bfa_status_t req_status,
2312 u32 rsp_len,
2313 u32 resid_len,
2314 struct fchs_s *rsp_fchs);
2315static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg,
2316 struct bfa_fcxp_s *fcxp_alloced);
2317static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
2318 struct bfa_fcxp_s *fcxp,
2319 void *cbarg,
2320 bfa_status_t req_status,
2321 u32 rsp_len,
2322 u32 resid_len,
2323 struct fchs_s *rsp_fchs);
2324/**
2325 * fcs_ms_sm FCS MS state machine
2326 */
2327
2328/**
2329 * MS State Machine events
2330 */
2331enum port_ms_event {
2332 MSSM_EVENT_PORT_ONLINE = 1,
2333 MSSM_EVENT_PORT_OFFLINE = 2,
2334 MSSM_EVENT_RSP_OK = 3,
2335 MSSM_EVENT_RSP_ERROR = 4,
2336 MSSM_EVENT_TIMEOUT = 5,
2337 MSSM_EVENT_FCXP_SENT = 6,
2338 MSSM_EVENT_PORT_FABRIC_RSCN = 7
2339};
2340
2341static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
2342 enum port_ms_event event);
2343static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
2344 enum port_ms_event event);
2345static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
2346 enum port_ms_event event);
2347static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
2348 enum port_ms_event event);
2349static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
2350 enum port_ms_event event);
2351static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
2352 enum port_ms_event event);
2353static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
2354 enum port_ms_event event);
2355static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
2356 enum port_ms_event event);
2357static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
2358 enum port_ms_event event);
2359static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2360 enum port_ms_event event);
2361static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
2362 enum port_ms_event event);
2363/**
2364 * Start in offline state - awaiting NS to send start.
2365 */
2366static void
2367bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
2368 enum port_ms_event event)
2369{
2370 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2371 bfa_trc(ms->port->fcs, event);
2372
2373 switch (event) {
2374 case MSSM_EVENT_PORT_ONLINE:
2375 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
2376 bfa_fcs_lport_ms_send_plogi(ms, NULL);
2377 break;
2378
2379 case MSSM_EVENT_PORT_OFFLINE:
2380 break;
2381
2382 default:
2383 bfa_sm_fault(ms->port->fcs, event);
2384 }
2385}
2386
2387static void
2388bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
2389 enum port_ms_event event)
2390{
2391 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
2392 bfa_trc(ms->port->fcs, event);
2393
2394 switch (event) {
2395 case MSSM_EVENT_FCXP_SENT:
2396 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi);
2397 break;
2398
2399 case MSSM_EVENT_PORT_OFFLINE:
2400 bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
2401 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
2402 &ms->fcxp_wqe);
2403 break;
2404
2405 default:
2406 bfa_sm_fault(ms->port->fcs, event);
2407 }
2408}
2409
/*
 * PLOGI to the management server is outstanding; handle its response.
 * On success the FDMI sub-module is brought online; a vport stops
 * there, while the base port continues with a GMAL query.
 */
static void
bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
			enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry);
		ms->port->stats.ms_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
			&ms->timer, bfa_fcs_lport_ms_timeout, ms,
			BFA_FCS_RETRY_TIMEOUT);
		break;

	case MSSM_EVENT_RSP_OK:
		/*
		 * since plogi is done, now invoke MS related sub-modules
		 */
		bfa_fcs_lport_fdmi_online(ms);

		/**
		 * if this is a Vport, go to online state.
		 */
		if (ms->port->vport) {
			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
			break;
		}

		/*
		 * For a base port we need to get the
		 * switch's IP address (via GMAL).
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
		bfa_fcs_lport_ms_send_gmal(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* drop the outstanding exchange before going offline */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2460
/*
 * PLOGI failed; delay timer running before the next PLOGI attempt.
 */
static void
bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
		bfa_fcs_lport_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2486
/*
 * Logged in to the Management Server; steady state.
 */
static void
bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
			enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		break;

	case MSSM_EVENT_PORT_FABRIC_RSCN:
		/*
		 * Fabric change notification: re-query the fabric name
		 * (GFN) with a fresh retry budget.
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
		ms->retry_cnt = 0;
		bfa_fcs_lport_ms_send_gfn(ms, NULL);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2509
/*
 * Waiting for the GMAL (Get Management Address List) request frame
 * to be sent.
 */
static void
bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2532
/*
 * GMAL sent; waiting for the response. On repeated failure
 * (BFA_FCS_MS_CMD_MAX_RETRIES exceeded) GMAL is abandoned and the
 * state machine moves on to GFN.
 */
static void
bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
			enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry);
			ms->port->stats.ms_retries++;
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				&ms->timer, bfa_fcs_lport_ms_timeout, ms,
				BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* Out of retries: give up on GMAL, proceed to GFN. */
			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
			bfa_fcs_lport_ms_send_gfn(ms, NULL);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
		bfa_fcs_lport_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2572
/*
 * GMAL failed; delay timer running before the next GMAL attempt.
 */
static void
bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
		bfa_fcs_lport_ms_send_gmal(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
/**
 * ms_pvt GMAL (Get Management Address List) request/response handling
 */
2601
/*
 * Build and send a GMAL CT request to the Management Server.
 *
 * Called directly by the state machine (fcxp_alloced == NULL) or as the
 * FCXP-allocation-wait callback (fcxp_alloced != NULL). If no FCXP is
 * available, queues itself on the wait queue and returns; the state
 * machine stays in gmal_sending until MSSM_EVENT_FCXP_SENT fires.
 */
static void
bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct fchs_s	fchs;
	int		len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available - wait and re-enter when one frees. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				bfa_fcs_lport_ms_send_gmal, ms);
		return;
	}
	ms->fcxp = fcxp;

	/* Request is addressed by the fabric switch's node WWN. */
	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_lport_get_fcid(port),
			     bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			  FC_CLASS_3, len, &fchs,
			  bfa_fcs_lport_ms_gmal_response, (void *)ms,
			  FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
2632
/*
 * GMAL response handler: parse the management address list and save the
 * switch's IP address (first "http://" entry) into the fabric structure.
 * Feeds MSSM_EVENT_RSP_OK / MSSM_EVENT_RSP_ERROR back into the MS state
 * machine.
 */
static void
bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct ct_hdr_s		*cthdr = NULL;
	struct fcgs_gmal_resp_s *gmal_resp;
	struct fcgs_gmal_entry_s *gmal_entry;
	u32		num_entries;
	u8		*rsp_str;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);

		num_entries = bfa_os_ntohl(gmal_resp->ms_len);
		if (num_entries == 0) {
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			return;
		}
		/*
		 * The response could contain multiple entries (SNMP
		 * interface, etc.). The first "http://" entry carries
		 * the switch's IP address; use that one.
		 *
		 * NOTE(review): the walk trusts ms_len from the wire and
		 * does not bound gmal_entry against rsp_len - confirm the
		 * payload buffer is always large enough.
		 */

		gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma;
		while (num_entries > 0) {
			if (strncmp(gmal_entry->prefix,
				CT_GMAL_RESP_PREFIX_HTTP,
				sizeof(gmal_entry->prefix)) == 0) {

				/*
				 * if the IP address is terminating with a '/',
				 * remove it.
				 * Byte 0 consists of the length of the string.
				 */
				rsp_str = &(gmal_entry->prefix[0]);
				if (rsp_str[gmal_entry->len-1] == '/')
					rsp_str[gmal_entry->len-1] = 0;

				/* copy IP Address to fabric */
				strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
					gmal_entry->ip_addr,
					BFA_FCS_FABRIC_IPADDR_SZ);
				break;
			} else {
				--num_entries;
				++gmal_entry;
			}
		}

		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	/* CT reject: log reason and treat as response error (retry path). */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
2711
/*
 * Waiting for the GFN (Get Fabric Name) request frame to be sent.
 */
static void
bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
			enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2734
/*
 * GFN sent; waiting for the response. Unlike GMAL, exhausting the
 * retry budget goes straight to online - the fabric name is optional.
 */
static void
bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
			enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry);
			ms->port->stats.ms_retries++;
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
				&ms->timer, bfa_fcs_lport_ms_timeout, ms,
				BFA_FCS_RETRY_TIMEOUT);
		} else {
			/* Out of retries: give up on GFN and go online. */
			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
2772
/*
 * GFN failed; delay timer running before the next GFN attempt.
 */
static void
bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
				enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
		bfa_fcs_lport_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
/**
 * ms_pvt GFN (Get Fabric Name) request/response handling
 */
2801
/*
 * Build and send a GFN CT request to the Management Server.
 *
 * Called directly by the state machine (fcxp_alloced == NULL) or as the
 * FCXP-allocation-wait callback (fcxp_alloced != NULL).
 */
static void
bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct fchs_s		fchs;
	int			len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available - wait and re-enter when one frees. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
				bfa_fcs_lport_ms_send_gfn, ms);
		return;
	}
	ms->fcxp = fcxp;

	/* Request is addressed by the fabric switch's node WWN. */
	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_lport_get_fcid(port),
			     bfa_lps_get_peer_nwwn(port->fabric->lps));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			  FC_CLASS_3, len, &fchs,
			  bfa_fcs_lport_ms_gfn_response, (void *)ms,
			  FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
2832
/*
 * GFN response handler: on accept, update the cached fabric name if the
 * returned WWN differs from the current one, then drive the MS state
 * machine with RSP_OK / RSP_ERROR.
 */
static void
bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status, u32 rsp_len,
			u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	bfa_fcs_lport_t *port = ms->port;
	struct ct_hdr_s	*cthdr = NULL;
	wwn_t	       *gfn_resp;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		/* Payload following the CT header is the fabric name WWN. */
		gfn_resp = (wwn_t *)(cthdr + 1);
		/* check if it has actually changed */
		if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port),
				gfn_resp, sizeof(wwn_t)) != 0)) {
			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
		}
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	/* CT reject: log reason and treat as response error (retry path). */
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
2873
/**
 * ms_pvt MS PLOGI request/response handling
 */
2877
/*
 * Build and send a PLOGI to the well-known Management Server address
 * (FC_MGMT_SERVER).
 *
 * Called directly by the state machine (fcxp_alloced == NULL) or as the
 * FCXP-allocation-wait callback (fcxp_alloced != NULL).
 */
static void
bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
	struct bfa_fcs_lport_s *port = ms->port;
	struct fchs_s	fchs;
	int	len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No exchange available - account it and wait for one. */
		port->stats.ms_plogi_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
					bfa_fcs_lport_ms_send_plogi, ms);
		return;
	}
	ms->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_MGMT_SERVER),
			     bfa_fcs_lport_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			  FC_CLASS_3, len, &fchs,
			  bfa_fcs_lport_ms_plogi_response, (void *)ms,
			  FC_MAX_PDUSZ, FC_ELS_TOV);

	port->stats.ms_plogi_sent++;
	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
2912
/*
 * MS PLOGI response handler: classify the ELS reply (ACC / LS_RJT /
 * other), bump the matching statistics counter, and drive the MS state
 * machine with RSP_OK / RSP_ERROR.
 */
static void
bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status,
			u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
	struct bfa_fcs_lport_s *port = ms->port;
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		port->stats.ms_plogi_rsp_err++;
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* ACC payload must be at least a full login parameter page. */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ms_plogi_acc_err++;
			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ms_plogi_accepts++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ms_rejects++;
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ms_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
	}
}
2967
/*
 * Retry-timer callback: account the timeout and feed it to the MS
 * state machine.
 */
static void
bfa_fcs_lport_ms_timeout(void *arg)
{
	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg;

	ms->port->stats.ms_timeouts++;
	bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
}
2976
2977
/*
 * Initialize the port's MS instance: bind it to the port, park the
 * state machine in offline, and initialize sub-modules (FDMI).
 */
void
bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);

	/*
	 * Invoke init routines of sub modules.
	 */
	bfa_fcs_lport_fdmi_init(ms);
}
2991
/*
 * Port went offline: notify the MS state machine and the FDMI
 * sub-module.
 */
void
bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
	bfa_fcs_lport_fdmi_offline(ms);
}
3001
/*
 * Port came online: kick the MS state machine (starts the MS PLOGI).
 */
void
bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	ms->port = port;
	bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
}
/*
 * Fabric RSCN received: forward it to the MS state machine so the
 * fabric name is re-queried. Only meaningful while the MS is online,
 * hence the state check.
 */
void
bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
{
	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);

	/* Deliver only in the online state; drop it otherwise. */
	if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online))
		bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
3019
3020/**
3021 * @page ns_sm_info VPORT NS State Machine
3022 *
3023 * @section ns_sm_interactions VPORT NS State Machine Interactions
3024 *
3025 * @section ns_sm VPORT NS State Machine
3026 * img ns_sm.jpg
3027 */
3028
3029/*
3030 * forward declarations
3031 */
3032static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg,
3033 struct bfa_fcxp_s *fcxp_alloced);
3034static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg,
3035 struct bfa_fcxp_s *fcxp_alloced);
3036static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg,
3037 struct bfa_fcxp_s *fcxp_alloced);
3038static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
3039 struct bfa_fcxp_s *fcxp_alloced);
3040static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
3041 struct bfa_fcxp_s *fcxp_alloced);
3042static void bfa_fcs_lport_ns_timeout(void *arg);
3043static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
3044 struct bfa_fcxp_s *fcxp,
3045 void *cbarg,
3046 bfa_status_t req_status,
3047 u32 rsp_len,
3048 u32 resid_len,
3049 struct fchs_s *rsp_fchs);
3050static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg,
3051 struct bfa_fcxp_s *fcxp,
3052 void *cbarg,
3053 bfa_status_t req_status,
3054 u32 rsp_len,
3055 u32 resid_len,
3056 struct fchs_s *rsp_fchs);
3057static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg,
3058 struct bfa_fcxp_s *fcxp,
3059 void *cbarg,
3060 bfa_status_t req_status,
3061 u32 rsp_len,
3062 u32 resid_len,
3063 struct fchs_s *rsp_fchs);
3064static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg,
3065 struct bfa_fcxp_s *fcxp,
3066 void *cbarg,
3067 bfa_status_t req_status,
3068 u32 rsp_len,
3069 u32 resid_len,
3070 struct fchs_s *rsp_fchs);
3071static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
3072 struct bfa_fcxp_s *fcxp,
3073 void *cbarg,
3074 bfa_status_t req_status,
3075 u32 rsp_len,
3076 u32 resid_len,
3077 struct fchs_s *rsp_fchs);
3078static void bfa_fcs_lport_ns_process_gidft_pids(
3079 struct bfa_fcs_lport_s *port,
3080 u32 *pid_buf, u32 n_pids);
3081
3082static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
3083/**
3084 * fcs_ns_sm FCS nameserver interface state machine
3085 */
3086
3087/**
3088 * VPort NS State Machine events
3089 */
3090enum vport_ns_event {
3091 NSSM_EVENT_PORT_ONLINE = 1,
3092 NSSM_EVENT_PORT_OFFLINE = 2,
3093 NSSM_EVENT_PLOGI_SENT = 3,
3094 NSSM_EVENT_RSP_OK = 4,
3095 NSSM_EVENT_RSP_ERROR = 5,
3096 NSSM_EVENT_TIMEOUT = 6,
3097 NSSM_EVENT_NS_QUERY = 7,
3098 NSSM_EVENT_RSPNID_SENT = 8,
3099 NSSM_EVENT_RFTID_SENT = 9,
3100 NSSM_EVENT_RFFID_SENT = 10,
3101 NSSM_EVENT_GIDFT_SENT = 11,
3102};
3103
3104static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
3105 enum vport_ns_event event);
3106static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
3107 enum vport_ns_event event);
3108static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
3109 enum vport_ns_event event);
3110static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
3111 enum vport_ns_event event);
3112static void bfa_fcs_lport_ns_sm_sending_rspn_id(
3113 struct bfa_fcs_lport_ns_s *ns,
3114 enum vport_ns_event event);
3115static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3116 enum vport_ns_event event);
3117static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
3118 enum vport_ns_event event);
3119static void bfa_fcs_lport_ns_sm_sending_rft_id(
3120 struct bfa_fcs_lport_ns_s *ns,
3121 enum vport_ns_event event);
3122static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
3123 enum vport_ns_event event);
3124static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
3125 enum vport_ns_event event);
3126static void bfa_fcs_lport_ns_sm_sending_rff_id(
3127 struct bfa_fcs_lport_ns_s *ns,
3128 enum vport_ns_event event);
3129static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
3130 enum vport_ns_event event);
3131static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
3132 enum vport_ns_event event);
3133static void bfa_fcs_lport_ns_sm_sending_gid_ft(
3134 struct bfa_fcs_lport_ns_s *ns,
3135 enum vport_ns_event event);
3136static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
3137 enum vport_ns_event event);
3138static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3139 enum vport_ns_event event);
3140static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3141 enum vport_ns_event event);
3142/**
3143 * Start in offline state - awaiting linkup
3144 */
static void
bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PORT_ONLINE:
		/* Port came up: start Name Server login with a PLOGI. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
		bfa_fcs_lport_ns_send_plogi(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Already offline - nothing to do. */
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3165
/*
 * Waiting for the NS PLOGI request frame to be sent.
 */
static void
bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PLOGI_SENT:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
					&ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3188
/*
 * NS PLOGI sent; waiting for the Name Server's response. On success
 * the registration sequence starts with RSPN_ID.
 */
static void
bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
			&ns->timer, bfa_fcs_lport_ns_timeout, ns,
			BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		/* Logged in: register symbolic port name next. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
		bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3222
/*
 * NS PLOGI failed; delay timer running before the next attempt.
 */
static void
bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
		bfa_fcs_lport_ns_send_plogi(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3248
/*
 * Waiting for the RSPN_ID (Register Symbolic Port Name) request frame
 * to be sent.
 */
static void
bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSPNID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
					&ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3271
3272static void
3273bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3274 enum vport_ns_event event)
3275{
3276 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3277 bfa_trc(ns->port->fcs, event);
3278
3279 switch (event) {
3280 case NSSM_EVENT_RSP_ERROR:
3281 /*
3282 * Start timer for a delayed retry
3283 */
3284 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry);
3285 ns->port->stats.ns_retries++;
3286 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3287 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3288 BFA_FCS_RETRY_TIMEOUT);
3289 break;
3290
3291 case NSSM_EVENT_RSP_OK:
3292 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
3293 bfa_fcs_lport_ns_send_rft_id(ns, NULL);
3294 break;
3295
3296 case NSSM_EVENT_PORT_OFFLINE:
3297 bfa_fcxp_discard(ns->fcxp);
3298 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3299 break;
3300
3301 default:
3302 bfa_sm_fault(ns->port->fcs, event);
3303 }
3304}
3305
/*
 * RSPN_ID failed; delay timer running before the next attempt.
 */
static void
bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
		bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3331
/*
 * Waiting for the RFT_ID (Register FC-4 Types) request frame to be
 * sent.
 */
static void
bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RFTID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
					&ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3354
/*
 * RFT_ID sent; waiting for the Name Server's response. On success the
 * registration sequence continues with RFF_ID.
 */
static void
bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		/* Now move to register FC4 Features */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
		bfa_fcs_lport_ns_send_rff_id(ns, NULL);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
			&ns->timer, bfa_fcs_lport_ns_timeout, ns,
			BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3389
/*
 * RFT_ID failed; delay timer running before the next attempt.
 */
static void
bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/* Retry timer expired - re-send RFT_ID. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
		bfa_fcs_lport_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3412
/*
 * Waiting for the RFF_ID (Register FC-4 Features) request frame to be
 * sent.
 */
static void
bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RFFID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
					&ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3435
/*
 * RFF_ID sent; waiting for the Name Server's response. On success,
 * either fetch boot targets (min-cfg mode), start rport discovery via
 * GID_FT (initiator mode), or neither - and in all cases kick off the
 * management server state machine.
 */
static void
bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we donot initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_lport_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
			return;
		}

		/*
		 * If the port role is Initiator Mode issue NS query.
		 * If it is Target Mode, skip this and go to online.
		 *
		 * NOTE(review): in the non-initiator (target) path no state
		 * transition happens here, so the SM stays in rff_id rather
		 * than moving to online as the comment says - confirm this
		 * is intended.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns,
				bfa_fcs_lport_ns_sm_sending_gid_ft);
			bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_lport_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
			&ns->timer, bfa_fcs_lport_ns_timeout, ns,
			BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3492
/*
 * RFF_ID failed; delay timer running before the next attempt.
 */
static void
bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/* Retry timer expired - re-send RFF_ID. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
		bfa_fcs_lport_ns_send_rff_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Stop the retry timer before going offline. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
/*
 * Waiting for the GID_FT (Get Port IDs by FC-4 Type) request frame to
 * be sent.
 */
static void
bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_GIDFT_SENT:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Cancel the pending FCXP allocation wait before going down. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
					&ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3537
/*
 * GID_FT sent; waiting for the Name Server's response.
 */
static void
bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
			enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * TBD: for certain reject codes, we don't need to retry
		 */
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
			&ns->timer, bfa_fcs_lport_ns_timeout, ns,
			BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange; no response expected now. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	case NSSM_EVENT_NS_QUERY:
		/* A query is already in flight - ignore the new request. */
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3576
/*
 * GID_FT retry-wait state: a delayed-retry timer is running after a
 * failed GID_FT exchange.
 */
static void
bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/* Retry timer fired: resend GID_FT. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft);
		bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		/* Port went down; cancel the pending retry timer. */
		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
3599
3600static void
3601bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3602 enum vport_ns_event event)
3603{
3604 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3605 bfa_trc(ns->port->fcs, event);
3606
3607 switch (event) {
3608 case NSSM_EVENT_PORT_OFFLINE:
3609 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3610 break;
3611
3612 case NSSM_EVENT_NS_QUERY:
3613 /*
3614 * If the port role is Initiator Mode issue NS query.
3615 * If it is Target Mode, skip this and go to online.
3616 */
3617 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
3618 bfa_sm_set_state(ns,
3619 bfa_fcs_lport_ns_sm_sending_gid_ft);
3620 bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
3621 };
3622 break;
3623
3624 default:
3625 bfa_sm_fault(ns->port->fcs, event);
3626 }
3627}
3628
3629
3630
3631/**
3632 * ns_pvt Nameserver local functions
3633 */
3634
3635static void
3636bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3637{
3638 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3639 struct bfa_fcs_lport_s *port = ns->port;
3640 struct fchs_s fchs;
3641 int len;
3642 struct bfa_fcxp_s *fcxp;
3643
3644 bfa_trc(port->fcs, port->pid);
3645
3646fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3647 if (!fcxp) {
3648 port->stats.ns_plogi_alloc_wait++;
3649 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3650 bfa_fcs_lport_ns_send_plogi, ns);
3651 return;
3652 }
3653 ns->fcxp = fcxp;
3654
3655 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3656 bfa_os_hton3b(FC_NAME_SERVER),
3657 bfa_fcs_lport_get_fcid(port), 0,
3658 port->port_cfg.pwwn, port->port_cfg.nwwn,
3659 bfa_fcport_get_maxfrsize(port->fcs->bfa));
3660
3661 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3662 FC_CLASS_3, len, &fchs,
3663 bfa_fcs_lport_ns_plogi_response, (void *)ns,
3664 FC_MAX_PDUSZ, FC_ELS_TOV);
3665 port->stats.ns_plogi_sent++;
3666
3667 bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
3668}
3669
/*
 * Completion callback for the nameserver PLOGI: classify the ELS
 * response and drive the NS state machine with RSP_OK / RSP_ERROR.
 */
static void
bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status, u32 rsp_len,
		       u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* An accept shorter than a PLOGI payload is malformed. */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
3726
3727/**
3728 * Register the symbolic port name.
3729 */
3730static void
3731bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3732{
3733 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3734 struct bfa_fcs_lport_s *port = ns->port;
3735 struct fchs_s fchs;
3736 int len;
3737 struct bfa_fcxp_s *fcxp;
3738 u8 symbl[256];
3739 u8 *psymbl = &symbl[0];
3740
3741 bfa_os_memset(symbl, 0, sizeof(symbl));
3742
3743 bfa_trc(port->fcs, port->port_cfg.pwwn);
3744
3745 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3746 if (!fcxp) {
3747 port->stats.ns_rspnid_alloc_wait++;
3748 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3749 bfa_fcs_lport_ns_send_rspn_id, ns);
3750 return;
3751 }
3752 ns->fcxp = fcxp;
3753
3754 /*
3755 * for V-Port, form a Port Symbolic Name
3756 */
3757 if (port->vport) {
3758 /**
3759 * For Vports, we append the vport's port symbolic name
3760 * to that of the base port.
3761 */
3762
3763 strncpy((char *)psymbl,
3764 (char *) &
3765 (bfa_fcs_lport_get_psym_name
3766 (bfa_fcs_get_base_port(port->fcs))),
3767 strlen((char *) &
3768 bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
3769 (port->fcs))));
3770
3771 /* Ensure we have a null terminating string. */
3772 ((char *)psymbl)[strlen((char *) &
3773 bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
3774 (port->fcs)))] = 0;
3775 strncat((char *)psymbl,
3776 (char *) &(bfa_fcs_lport_get_psym_name(port)),
3777 strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
3778 } else {
3779 psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
3780 }
3781
3782 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3783 bfa_fcs_lport_get_fcid(port), 0, psymbl);
3784
3785 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3786 FC_CLASS_3, len, &fchs,
3787 bfa_fcs_lport_ns_rspn_id_response, (void *)ns,
3788 FC_MAX_PDUSZ, FC_FCCT_TOV);
3789
3790 port->stats.ns_rspnid_sent++;
3791
3792 bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
3793}
3794
3795static void
3796bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3797 void *cbarg, bfa_status_t req_status,
3798 u32 rsp_len, u32 resid_len,
3799 struct fchs_s *rsp_fchs)
3800{
3801 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
3802 struct bfa_fcs_lport_s *port = ns->port;
3803 struct ct_hdr_s *cthdr = NULL;
3804
3805 bfa_trc(port->fcs, port->port_cfg.pwwn);
3806
3807 /*
3808 * Sanity Checks
3809 */
3810 if (req_status != BFA_STATUS_OK) {
3811 bfa_trc(port->fcs, req_status);
3812 port->stats.ns_rspnid_rsp_err++;
3813 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3814 return;
3815 }
3816
3817 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3818 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
3819
3820 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3821 port->stats.ns_rspnid_accepts++;
3822 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
3823 return;
3824 }
3825
3826 port->stats.ns_rspnid_rejects++;
3827 bfa_trc(port->fcs, cthdr->reason_code);
3828 bfa_trc(port->fcs, cthdr->exp_code);
3829 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3830}
3831
3832/**
3833 * Register FC4-Types
3834 */
3835static void
3836bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3837{
3838 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3839 struct bfa_fcs_lport_s *port = ns->port;
3840 struct fchs_s fchs;
3841 int len;
3842 struct bfa_fcxp_s *fcxp;
3843
3844 bfa_trc(port->fcs, port->port_cfg.pwwn);
3845
3846 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3847 if (!fcxp) {
3848 port->stats.ns_rftid_alloc_wait++;
3849 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3850 bfa_fcs_lport_ns_send_rft_id, ns);
3851 return;
3852 }
3853 ns->fcxp = fcxp;
3854
3855 len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3856 bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles);
3857
3858 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3859 FC_CLASS_3, len, &fchs,
3860 bfa_fcs_lport_ns_rft_id_response, (void *)ns,
3861 FC_MAX_PDUSZ, FC_FCCT_TOV);
3862
3863 port->stats.ns_rftid_sent++;
3864 bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
3865}
3866
3867static void
3868bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3869 void *cbarg, bfa_status_t req_status,
3870 u32 rsp_len, u32 resid_len,
3871 struct fchs_s *rsp_fchs)
3872{
3873 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
3874 struct bfa_fcs_lport_s *port = ns->port;
3875 struct ct_hdr_s *cthdr = NULL;
3876
3877 bfa_trc(port->fcs, port->port_cfg.pwwn);
3878
3879 /*
3880 * Sanity Checks
3881 */
3882 if (req_status != BFA_STATUS_OK) {
3883 bfa_trc(port->fcs, req_status);
3884 port->stats.ns_rftid_rsp_err++;
3885 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3886 return;
3887 }
3888
3889 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3890 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
3891
3892 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3893 port->stats.ns_rftid_accepts++;
3894 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
3895 return;
3896 }
3897
3898 port->stats.ns_rftid_rejects++;
3899 bfa_trc(port->fcs, cthdr->reason_code);
3900 bfa_trc(port->fcs, cthdr->exp_code);
3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3902}
3903
3904/**
3905 * Register FC4-Features : Should be done after RFT_ID
3906 */
3907static void
3908bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3909{
3910 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3911 struct bfa_fcs_lport_s *port = ns->port;
3912 struct fchs_s fchs;
3913 int len;
3914 struct bfa_fcxp_s *fcxp;
3915 u8 fc4_ftrs = 0;
3916
3917 bfa_trc(port->fcs, port->port_cfg.pwwn);
3918
3919 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3920 if (!fcxp) {
3921 port->stats.ns_rffid_alloc_wait++;
3922 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3923 bfa_fcs_lport_ns_send_rff_id, ns);
3924 return;
3925 }
3926 ns->fcxp = fcxp;
3927
3928 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
3929 fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
3930
3931 len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
3932 bfa_fcs_lport_get_fcid(port), 0,
3933 FC_TYPE_FCP, fc4_ftrs);
3934
3935 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3936 FC_CLASS_3, len, &fchs,
3937 bfa_fcs_lport_ns_rff_id_response, (void *)ns,
3938 FC_MAX_PDUSZ, FC_FCCT_TOV);
3939
3940 port->stats.ns_rffid_sent++;
3941 bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
3942}
3943
/*
 * Completion callback for RFF_ID.  A CT reject with reason
 * CT_RSN_NOT_SUPP is treated as success so that the registration
 * sequence proceeds on switches that do not implement RFF_ID.
 */
static void
bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rffid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rffid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rffid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);

	if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
		/* if this command is not supported, we don't retry */
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
	} else
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
3985/**
3986 * Query Fabric for FC4-Types Devices.
3987 *
3988* TBD : Need to use a local (FCS private) response buffer, since the response
3989 * can be larger than 2K.
3990 */
3991static void
3992bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3993{
3994 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
3995 struct bfa_fcs_lport_s *port = ns->port;
3996 struct fchs_s fchs;
3997 int len;
3998 struct bfa_fcxp_s *fcxp;
3999
4000 bfa_trc(port->fcs, port->pid);
4001
4002 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
4003 if (!fcxp) {
4004 port->stats.ns_gidft_alloc_wait++;
4005 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4006 bfa_fcs_lport_ns_send_gid_ft, ns);
4007 return;
4008 }
4009 ns->fcxp = fcxp;
4010
4011 /*
4012 * This query is only initiated for FCP initiator mode.
4013 */
4014 len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4015 ns->port->pid, FC_TYPE_FCP);
4016
4017 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
4018 FC_CLASS_3, len, &fchs,
4019 bfa_fcs_lport_ns_gid_ft_response, (void *)ns,
4020 bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV);
4021
4022 port->stats.ns_gidft_sent++;
4023
4024 bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
4025}
4026
/*
 * Completion callback for GID_FT: on accept, walk the returned PID
 * list and create/notify rports; on reject, decide between "no FC4
 * devices registered" (treated as success) and a retryable error.
 */
static void
bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
	struct bfa_fcs_lport_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;
	u32 n_pids;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_gidft_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	if (resid_len != 0) {
		/*
		 * TBD : we will need to allocate a larger buffer & retry the
		 * command
		 *
		 * NOTE(review): this path returns without posting any event
		 * to the NS state machine, which appears to leave it parked
		 * in the gid_ft state -- confirm whether that is intended.
		 */
		bfa_trc(port->fcs, rsp_len);
		bfa_trc(port->fcs, resid_len);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	switch (cthdr->cmd_rsp_code) {

	case CT_RSP_ACCEPT:

		port->stats.ns_gidft_accepts++;
		/* PID entries follow directly after the CT header. */
		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
		bfa_trc(port->fcs, n_pids);
		bfa_fcs_lport_ns_process_gidft_pids(port,
						   (u32 *) (cthdr + 1),
						   n_pids);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case CT_RSP_REJECT:

		/*
		 * Check the reason code  & explanation.
		 * There may not have been any FC4 devices in the fabric
		 */
		port->stats.ns_gidft_rejects++;
		bfa_trc(port->fcs, cthdr->reason_code);
		bfa_trc(port->fcs, cthdr->exp_code);

		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {

			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		} else {
			/*
			 * for all other errors, retry
			 */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		}
		break;

	default:
		port->stats.ns_gidft_unknown_rsp++;
		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
4104
4105/**
4106 * This routine will be called by bfa_timer on timer timeouts.
4107 *
4108 * param[in] port - pointer to bfa_fcs_lport_t.
4109 *
4110 * return
4111 * void
4112 *
4113 * Special Considerations:
4114 *
4115 * note
4116 */
4117static void
4118bfa_fcs_lport_ns_timeout(void *arg)
4119{
4120 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg;
4121
4122 ns->port->stats.ns_timeouts++;
4123 bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
4124}
4125
4126/*
4127 * Process the PID list in GID_FT response
4128 */
4129static void
4130bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
4131 u32 n_pids)
4132{
4133 struct fcgs_gidft_resp_s *gidft_entry;
4134 struct bfa_fcs_rport_s *rport;
4135 u32 ii;
4136
4137 for (ii = 0; ii < n_pids; ii++) {
4138 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
4139
4140 if (gidft_entry->pid == port->pid)
4141 continue;
4142
4143 /*
4144 * Check if this rport already exists
4145 */
4146 rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
4147 if (rport == NULL) {
4148 /*
4149 * this is a new device. create rport
4150 */
4151 rport = bfa_fcs_rport_create(port, gidft_entry->pid);
4152 } else {
4153 /*
4154 * this rport already exists
4155 */
4156 bfa_fcs_rport_scn(rport);
4157 }
4158
4159 bfa_trc(port->fcs, gidft_entry->pid);
4160
4161 /*
4162 * if the last entry bit is set, bail out.
4163 */
4164 if (gidft_entry->last)
4165 return;
4166 }
4167}
4168
4169/**
4170 * fcs_ns_public FCS nameserver public interfaces
4171 */
4172
4173/*
4174 * Functions called by port/fab.
4175 * These will send relevant Events to the ns state machine.
4176 */
4177void
4178bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port)
4179{
4180 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4181
4182 ns->port = port;
4183 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
4184}
4185
4186void
4187bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port)
4188{
4189 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4190
4191 ns->port = port;
4192 bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
4193}
4194
4195void
4196bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port)
4197{
4198 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4199
4200 ns->port = port;
4201 bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
4202}
4203
4204void
4205bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
4206{
4207 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4208
4209 bfa_trc(port->fcs, port->pid);
4210 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
4211}
4212
4213void
4214bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4215{
4216
4217 struct bfa_fcs_rport_s *rport;
4218 u8 nwwns;
4219 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
4220 int ii;
4221
4222 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
4223
4224 for (ii = 0 ; ii < nwwns; ++ii) {
4225 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
4226 bfa_assert(rport);
4227 }
4228}
4229
4230/**
4231 * FCS SCN
4232 */
4233
4234#define FC_QOS_RSCN_EVENT 0x0c
4235#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
4236
4237/*
4238 * forward declarations
4239 */
4240static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg,
4241 struct bfa_fcxp_s *fcxp_alloced);
4242static void bfa_fcs_lport_scn_scr_response(void *fcsarg,
4243 struct bfa_fcxp_s *fcxp,
4244 void *cbarg,
4245 bfa_status_t req_status,
4246 u32 rsp_len,
4247 u32 resid_len,
4248 struct fchs_s *rsp_fchs);
4249static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4250 struct fchs_s *rx_fchs);
4251static void bfa_fcs_lport_scn_timeout(void *arg);
4252
4253/**
4254 * fcs_scm_sm FCS SCN state machine
4255 */
4256
4257/**
4258 * VPort SCN State Machine events
4259 */
4260enum port_scn_event {
4261 SCNSM_EVENT_PORT_ONLINE = 1,
4262 SCNSM_EVENT_PORT_OFFLINE = 2,
4263 SCNSM_EVENT_RSP_OK = 3,
4264 SCNSM_EVENT_RSP_ERROR = 4,
4265 SCNSM_EVENT_TIMEOUT = 5,
4266 SCNSM_EVENT_SCR_SENT = 6,
4267};
4268
4269static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
4270 enum port_scn_event event);
4271static void bfa_fcs_lport_scn_sm_sending_scr(
4272 struct bfa_fcs_lport_scn_s *scn,
4273 enum port_scn_event event);
4274static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
4275 enum port_scn_event event);
4276static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
4277 enum port_scn_event event);
4278static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4279 enum port_scn_event event);
4280
4281/**
4282 * Starting state - awaiting link up.
4283 */
4284static void
4285bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
4286 enum port_scn_event event)
4287{
4288 switch (event) {
4289 case SCNSM_EVENT_PORT_ONLINE:
4290 bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
4291 bfa_fcs_lport_scn_send_scr(scn, NULL);
4292 break;
4293
4294 case SCNSM_EVENT_PORT_OFFLINE:
4295 break;
4296
4297 default:
4298 bfa_sm_fault(scn->port->fcs, event);
4299 }
4300}
4301
/*
 * Waiting for an fcxp to become available so the SCR can be sent.
 */
static void
bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_SCR_SENT:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Cancel the fcxp allocation wait before going offline. */
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4320
/*
 * SCR has been sent; waiting for the fabric's response.
 */
static void
bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
			enum port_scn_event event)
{
	struct bfa_fcs_lport_s *port = scn->port;

	switch (event) {
	case SCNSM_EVENT_RSP_OK:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online);
		break;

	case SCNSM_EVENT_RSP_ERROR:
		/* Arm a delayed retry of the SCR. */
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry);
		bfa_timer_start(port->fcs->bfa, &scn->timer,
				    bfa_fcs_lport_scn_timeout, scn,
				    BFA_FCS_RETRY_TIMEOUT);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Drop the outstanding exchange on the way offline. */
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_fcxp_discard(scn->fcxp);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
4348
/*
 * SCR retry-wait state: a delayed-retry timer is running after a
 * failed SCR exchange.
 */
static void
bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_TIMEOUT:
		/* Retry timer fired: resend the SCR. */
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
		bfa_fcs_lport_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		/* Port went down; cancel the pending retry timer. */
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		bfa_timer_stop(&scn->timer);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4368
/*
 * Online state: SCR registration is complete; only a port-offline
 * transition is expected.
 */
static void
bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
			enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}
4382
4383
4384
4385/**
4386 * fcs_scn_private FCS SCN private functions
4387 */
4388
4389/**
4390 * This routine will be called to send a SCR command.
4391 */
4392static void
4393bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4394{
4395 struct bfa_fcs_lport_scn_s *scn = scn_cbarg;
4396 struct bfa_fcs_lport_s *port = scn->port;
4397 struct fchs_s fchs;
4398 int len;
4399 struct bfa_fcxp_s *fcxp;
4400
4401 bfa_trc(port->fcs, port->pid);
4402 bfa_trc(port->fcs, port->port_cfg.pwwn);
4403
4404 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
4405 if (!fcxp) {
4406 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
4407 bfa_fcs_lport_scn_send_scr, scn);
4408 return;
4409 }
4410 scn->fcxp = fcxp;
4411
4412 /* Handle VU registrations for Base port only */
4413 if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
4414 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4415 bfa_lps_is_brcd_fabric(port->fabric->lps),
4416 port->pid, 0);
4417 } else {
4418 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4419 BFA_FALSE,
4420 port->pid, 0);
4421 }
4422
4423 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
4424 FC_CLASS_3, len, &fchs,
4425 bfa_fcs_lport_scn_scr_response,
4426 (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
4427
4428 bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
4429}
4430
4431static void
4432bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4433 void *cbarg, bfa_status_t req_status, u32 rsp_len,
4434 u32 resid_len, struct fchs_s *rsp_fchs)
4435{
4436 struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg;
4437 struct bfa_fcs_lport_s *port = scn->port;
4438 struct fc_els_cmd_s *els_cmd;
4439 struct fc_ls_rjt_s *ls_rjt;
4440
4441 bfa_trc(port->fcs, port->port_cfg.pwwn);
4442
4443 /*
4444 * Sanity Checks
4445 */
4446 if (req_status != BFA_STATUS_OK) {
4447 bfa_trc(port->fcs, req_status);
4448 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4449 return;
4450 }
4451
4452 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
4453
4454 switch (els_cmd->els_code) {
4455
4456 case FC_ELS_ACC:
4457 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
4458 break;
4459
4460 case FC_ELS_LS_RJT:
4461
4462 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
4463
4464 bfa_trc(port->fcs, ls_rjt->reason_code);
4465 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
4466
4467 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4468 break;
4469
4470 default:
4471 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
4472 }
4473}
4474
4475/*
4476 * Send a LS Accept
4477 */
4478static void
4479bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4480 struct fchs_s *rx_fchs)
4481{
4482 struct fchs_s fchs;
4483 struct bfa_fcxp_s *fcxp;
4484 struct bfa_rport_s *bfa_rport = NULL;
4485 int len;
4486
4487 bfa_trc(port->fcs, rx_fchs->s_id);
4488
4489 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
4490 if (!fcxp)
4491 return;
4492
4493 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4494 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
4495 rx_fchs->ox_id);
4496
4497 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
4498 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
4499 FC_MAX_PDUSZ, 0);
4500}
4501
4502/**
4503 * This routine will be called by bfa_timer on timer timeouts.
4504 *
4505 * param[in] vport - pointer to bfa_fcs_lport_t.
4506 * param[out] vport_status - pointer to return vport status in
4507 *
4508 * return
4509 * void
4510 *
4511 * Special Considerations:
4512 *
4513 * note
4514 */
4515static void
4516bfa_fcs_lport_scn_timeout(void *arg)
4517{
4518 struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg;
4519
4520 bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
4521}
4522
4523
4524
4525/**
4526 * fcs_scn_public FCS state change notification public interfaces
4527 */
4528
4529/*
4530 * Functions called by port/fab
4531 */
4532void
4533bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port)
4534{
4535 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4536
4537 scn->port = port;
4538 bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
4539}
4540
4541void
4542bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
4543{
4544 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4545
4546 scn->port = port;
4547 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
4548}
4549
4550void
4551bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port)
4552{
4553 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
4554
4555 scn->port = port;
4556 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
4557}
4558
/*
 * Handle a port-address-format RSCN for a single remote PID.
 */
static void
bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
{
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, rpid);

	/**
	 * If this is an unknown device, then it just came online.
	 * Otherwise let rport handle the RSCN event.
	 */
	rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
	if (rport == NULL) {
		/*
		 * If min cfg mode is enabled, we donot need to
		 * discover any new rports.
		 */
		if (!__fcs_min_cfg(port->fcs))
			rport = bfa_fcs_rport_create(port, rpid);
	} else
		bfa_fcs_rport_scn(rport);
}
4581
4582/**
4583 * rscn format based PID comparison
4584 */
4585#define __fc_pid_match(__c0, __c1, __fmt) \
4586 (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \
4587 (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \
4588 ((__c0)[0] == (__c1)[0])) || \
4589 (((__fmt) == FC_RSCN_FORMAT_AREA) && \
4590 ((__c0)[0] == (__c1)[0]) && \
4591 ((__c0)[1] == (__c1)[1])))
4592
4593static void
4594bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port,
4595 enum fc_rscn_format format,
4596 u32 rscn_pid)
4597{
4598 struct bfa_fcs_rport_s *rport;
4599 struct list_head *qe, *qe_next;
4600 u8 *c0, *c1;
4601
4602 bfa_trc(port->fcs, format);
4603 bfa_trc(port->fcs, rscn_pid);
4604
4605 c0 = (u8 *) &rscn_pid;
4606
4607 list_for_each_safe(qe, qe_next, &port->rport_q) {
4608 rport = (struct bfa_fcs_rport_s *) qe;
4609 c1 = (u8 *) &rport->pid;
4610 if (__fc_pid_match(c0, c1, format))
4611 bfa_fcs_rport_scn(rport);
4612 }
4613}
4614
4615
/*
 * Process a received RSCN payload: acknowledge it with an LS accept,
 * then dispatch each (deduplicated) event entry by its address format.
 * Any area/domain/fabric-scoped entry triggers a fresh NS query at the
 * end so that newly appeared devices are discovered.
 */
void
bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
			struct fchs_s *fchs, u32 len)
{
	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
	int num_entries;
	u32 rscn_pid;
	bfa_boolean_t nsquery = BFA_FALSE, found;
	int i = 0, j;

	/* Payload length covers a 4-byte header plus the event array. */
	num_entries =
		(bfa_os_ntohs(rscn->payldlen) -
		 sizeof(u32)) / sizeof(rscn->event[0]);

	bfa_trc(port->fcs, num_entries);

	port->stats.num_rscn++;

	bfa_fcs_lport_scn_send_ls_acc(port, fchs);

	for (i = 0; i < num_entries; i++) {
		rscn_pid = rscn->event[i].portid;

		bfa_trc(port->fcs, rscn->event[i].format);
		bfa_trc(port->fcs, rscn_pid);

		/* check for duplicate entries in the list */
		found = BFA_FALSE;
		for (j = 0; j < i; j++) {
			if (rscn->event[j].portid == rscn_pid) {
				found = BFA_TRUE;
				break;
			}
		}

		/* if found in down the list, pid has been already processed */
		if (found) {
			bfa_trc(port->fcs, rscn_pid);
			continue;
		}

		switch (rscn->event[i].format) {
		case FC_RSCN_FORMAT_PORTID:
			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
				/*
				 * Ignore this event.
				 * f/w would have processed it
				 */
				bfa_trc(port->fcs, rscn_pid);
			} else {
				port->stats.num_portid_rscn++;
				bfa_fcs_lport_scn_portid_rscn(port, rscn_pid);
			}
		break;

		case FC_RSCN_FORMAT_FABRIC:
			if (rscn->event[i].qualifier ==
					FC_FABRIC_NAME_RSCN_EVENT) {
				bfa_fcs_lport_ms_fabric_rscn(port);
				break;
			}
			/* !!!!!!!!! Fall Through !!!!!!!!!!!!! */

		case FC_RSCN_FORMAT_AREA:
		case FC_RSCN_FORMAT_DOMAIN:
			nsquery = BFA_TRUE;
			bfa_fcs_lport_scn_multiport_rscn(port,
							rscn->event[i].format,
							rscn_pid);
			break;


		default:
			bfa_assert(0);
			nsquery = BFA_TRUE;
		}
	}

	/**
	 * If any of area, domain or fabric RSCN is received, do a fresh discovery
	 * to find new devices.
	 */
	if (nsquery)
		bfa_fcs_lport_ns_query(port);
}
4701
4702/**
4703 * BFA FCS port
4704 */
4705/**
4706 * fcs_port_api BFA FCS port API
4707 */
4708struct bfa_fcs_lport_s *
4709bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
4710{
4711 return &fcs->fabric.bport;
4712}
4713
/*
 * Look up an rport on the port, either by port WWN (bwwn true, match
 * against 'wwn') or by position (bwwn false, the 'index'-th non-
 * well-known rport).  Rports with well-known addresses (PID above
 * 0xFFF000) are skipped.  Returns the matching rport's pwwn, or 0 for
 * a NULL port / zero nrports.
 *
 * NOTE(review): if the loop terminates without a match, the pwwn of
 * the last rport examined is returned rather than 0 -- confirm callers
 * tolerate this.
 */
wwn_t
bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
		int nrports, bfa_boolean_t bwwn)
{
	struct list_head	*qh, *qe;
	struct bfa_fcs_rport_s *rport = NULL;
	int	i;
	struct bfa_fcs_s	*fcs;

	if (port == NULL || nrports == 0)
		return (wwn_t) 0;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) nrports);

	i = 0;
	qh = &port->rport_q;
	qe = bfa_q_first(qh);

	while ((qe != qh) && (i < nrports)) {
		rport = (struct bfa_fcs_rport_s *) qe;
		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
			/* Skip well-known-address rports entirely. */
			qe = bfa_q_next(qe);
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, i);
			continue;
		}

		if (bwwn) {
			if (!memcmp(&wwn, &rport->pwwn, 8))
				break;
		} else {
			if (i == index)
				break;
		}

		i++;
		qe = bfa_q_next(qe);
	}

	bfa_trc(fcs, i);
	if (rport)
		return rport->pwwn;
	else
		return (wwn_t) 0;
}
4761
4762void
4763bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
4764 wwn_t rport_wwns[], int *nrports)
4765{
4766 struct list_head *qh, *qe;
4767 struct bfa_fcs_rport_s *rport = NULL;
4768 int i;
4769 struct bfa_fcs_s *fcs;
4770
4771 if (port == NULL || rport_wwns == NULL || *nrports == 0)
4772 return;
4773
4774 fcs = port->fcs;
4775 bfa_trc(fcs, (u32) *nrports);
4776
4777 i = 0;
4778 qh = &port->rport_q;
4779 qe = bfa_q_first(qh);
4780
4781 while ((qe != qh) && (i < *nrports)) {
4782 rport = (struct bfa_fcs_rport_s *) qe;
4783 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
4784 qe = bfa_q_next(qe);
4785 bfa_trc(fcs, (u32) rport->pwwn);
4786 bfa_trc(fcs, rport->pid);
4787 bfa_trc(fcs, i);
4788 continue;
4789 }
4790
4791 rport_wwns[i] = rport->pwwn;
4792
4793 i++;
4794 qe = bfa_q_next(qe);
4795 }
4796
4797 bfa_trc(fcs, i);
4798 *nrports = i;
4799}
4800
4801/*
4802 * Iterate's through all the rport's in the given port to
4803 * determine the maximum operating speed.
4804 *
4805 * !!!! To be used in TRL Functionality only !!!!
4806 */
4807bfa_port_speed_t
4808bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
4809{
4810 struct list_head *qh, *qe;
4811 struct bfa_fcs_rport_s *rport = NULL;
4812 struct bfa_fcs_s *fcs;
4813 bfa_port_speed_t max_speed = 0;
4814 struct bfa_port_attr_s port_attr;
4815 bfa_port_speed_t port_speed, rport_speed;
4816 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
4817
4818
4819 if (port == NULL)
4820 return 0;
4821
4822 fcs = port->fcs;
4823
4824 /* Get Physical port's current speed */
4825 bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
4826 port_speed = port_attr.speed;
4827 bfa_trc(fcs, port_speed);
4828
4829 qh = &port->rport_q;
4830 qe = bfa_q_first(qh);
4831
4832 while (qe != qh) {
4833 rport = (struct bfa_fcs_rport_s *) qe;
4834 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
4835 (bfa_fcs_rport_get_state(rport) ==
4836 BFA_RPORT_OFFLINE)) {
4837 qe = bfa_q_next(qe);
4838 continue;
4839 }
4840
4841 rport_speed = rport->rpf.rpsc_speed;
4842 if ((trl_enabled) && (rport_speed ==
4843 BFA_PORT_SPEED_UNKNOWN)) {
4844 /* Use default ratelim speed setting */
4845 rport_speed =
4846 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
4847 }
929 4848
4849 if ((rport_speed == BFA_PORT_SPEED_8GBPS) ||
4850 (rport_speed > port_speed)) {
4851 max_speed = rport_speed;
4852 break;
4853 } else if (rport_speed > max_speed) {
4854 max_speed = rport_speed;
4855 }
4856
4857 qe = bfa_q_next(qe);
4858 }
4859
4860 bfa_trc(fcs, max_speed);
4861 return max_speed;
4862}
4863
4864struct bfa_fcs_lport_s *
4865bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
4866{
4867 struct bfa_fcs_vport_s *vport;
4868 bfa_fcs_vf_t *vf;
4869
4870 bfa_assert(fcs != NULL);
4871
4872 vf = bfa_fcs_vf_lookup(fcs, vf_id);
4873 if (vf == NULL) {
4874 bfa_trc(fcs, vf_id);
4875 return NULL;
4876 }
4877
4878 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
4879 return &vf->bport;
4880
4881 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
4882 if (vport)
4883 return &vport->lport;
4884
4885 return NULL;
4886}
4887
4888/*
4889 * API corresponding to NPIV_VPORT_GETINFO.
4890 */
4891void
4892bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
4893 struct bfa_lport_info_s *port_info)
4894{
4895
4896 bfa_trc(port->fcs, port->fabric->fabric_name);
4897
4898 if (port->vport == NULL) {
4899 /*
4900 * This is a Physical port
4901 */
4902 port_info->port_type = BFA_LPORT_TYPE_PHYSICAL;
4903
4904 /*
4905 * @todo : need to fix the state & reason
4906 */
4907 port_info->port_state = 0;
4908 port_info->offline_reason = 0;
4909
4910 port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
4911 port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
4912
4913 port_info->max_vports_supp =
4914 bfa_lps_get_max_vport(port->fcs->bfa);
4915 port_info->num_vports_inuse =
4916 bfa_fcs_fabric_vport_count(port->fabric);
4917 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
4918 port_info->num_rports_inuse = port->num_rports;
930 } else { 4919 } else {
931 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; 4920 /*
932 port_attr->state = BFA_PORT_UNINIT; 4921 * This is a virtual port
4922 */
4923 port_info->port_type = BFA_LPORT_TYPE_VIRTUAL;
4924
4925 /*
4926 * @todo : need to fix the state & reason
4927 */
4928 port_info->port_state = 0;
4929 port_info->offline_reason = 0;
4930
4931 port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
4932 port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
4933 }
4934}
4935
/**
 * Copy the lport's statistics into caller-provided storage (struct copy).
 */
void
bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
	struct bfa_lport_stats_s *port_stats)
{
	*port_stats = fcs_port->stats;
}
4942
/**
 * Reset all lport statistics counters to zero.
 */
void
bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
{
	bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
}
4948
4949/**
4950 * FCS virtual port state machine
4951 */
4952
4953#define __vport_fcs(__vp) ((__vp)->lport.fcs)
4954#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn)
4955#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn)
4956#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa)
4957#define __vport_fcid(__vp) ((__vp)->lport.pid)
4958#define __vport_fabric(__vp) ((__vp)->lport.fabric)
4959#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id)
4960
4961#define BFA_FCS_VPORT_MAX_RETRIES 5
4962/*
4963 * Forward declarations
4964 */
4965static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
4966static void bfa_fcs_vport_timeout(void *vport_arg);
4967static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
4968static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
4969
4970/**
4971 * fcs_vport_sm FCS virtual port state machine
4972 */
4973
4974/**
4975 * VPort State Machine events
4976 */
4977enum bfa_fcs_vport_event {
4978 BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
4979 BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
4980 BFA_FCS_VPORT_SM_START = 3, /* vport start request */
4981 BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
4982 BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
4983 BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
4984 BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
4985 BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
4986 BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
4987 BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
4988 BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
4989 BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
4990 BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
4991};
4992
4993static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
4994 enum bfa_fcs_vport_event event);
4995static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
4996 enum bfa_fcs_vport_event event);
4997static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
4998 enum bfa_fcs_vport_event event);
4999static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5000 enum bfa_fcs_vport_event event);
5001static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5002 enum bfa_fcs_vport_event event);
5003static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5004 enum bfa_fcs_vport_event event);
5005static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
5006 enum bfa_fcs_vport_event event);
5007static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5008 enum bfa_fcs_vport_event event);
5009static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5010 enum bfa_fcs_vport_event event);
5011static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
5012 enum bfa_fcs_vport_event event);
5013
5014static struct bfa_sm_table_s vport_sm_table[] = {
5015 {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
5016 {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
5017 {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
5018 {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
5019 {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
5020 {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
5021 {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
5022 {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
5023 {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
5025};
5026
5027/**
5028 * Beginning state.
5029 */
static void
bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_CREATE:
		/* register the new vport with its fabric */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
		bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5047
5048/**
5049 * Created state - a start event is required to start up the state machine.
5050 */
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_START:
		/* FDISC only if fabric is up and supports NPIV */
		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
			bfa_fcs_vport_do_fdisc(vport);
		} else {
			/**
			 * Fabric is offline or not NPIV capable, stay in
			 * offline state.
			 */
			vport->vport_stats.fab_no_npiv++;
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		}
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
	case BFA_FCS_VPORT_SM_OFFLINE:
		/**
		 * Ignore ONLINE/OFFLINE events from fabric
		 * till vport is started.
		 */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5091
5092/**
5093 * Offline state - awaiting ONLINE event from fabric SM.
5094 */
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
		/* fabric came up: restart FDISC with a fresh retry count */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->fdisc_retries = 0;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * This can happen if the vport couldn't be initialzied
		 * due the fact that the npiv was not enabled on the switch.
		 * In that case we will put the vport in offline state.
		 * However, the link can go down and cause the this event to
		 * be sent when we are already offline. Ignore it.
		 */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5128
5129
5130/**
5131 * FDISC is sent and awaiting reply from fabric.
5132 */
static void
bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* abandon the outstanding FDISC before deleting */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_lps_discard(vport->lps);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		break;

	case BFA_FCS_VPORT_SM_RSP_OK:
		/* FDISC accepted: bring the lport online */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
		bfa_fcs_lport_online(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* retryable failure: back off, then retry via timer */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
		bfa_timer_start(__vport_bfa(vport), &vport->timer,
				    bfa_fcs_vport_timeout, vport,
				    BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_VPORT_SM_RSP_FAILED:
		/* non-retryable failure: park in offline state */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		break;

	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5176
5177/**
5178 * FDISC attempt failed - a timer is active to retry FDISC.
5179 */
static void
bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* stop the pending retry timer before tearing down */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_timer_stop(&vport->timer);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_timer_stop(&vport->timer);
		break;

	case BFA_FCS_VPORT_SM_TIMEOUT:
		/* retry timer fired: send another FDISC */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->vport_stats.fdisc_retries++;
		vport->fdisc_retries++;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5210
5211/**
5212 * Vport is online (FDISC is complete).
5213 */
static void
bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* fabric went down: discard login and take lport offline */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		bfa_fcs_lport_offline(&vport->lport);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5237
5238/**
5239 * Vport is being deleted - awaiting lport delete completion to send
5240 * LOGO to fabric.
5241 */
static void
bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		/* delete already in progress; ignore duplicate request */
		break;

	case BFA_FCS_VPORT_SM_DELCOMP:
		/* lport cleanup done: LOGO out of the fabric */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
		bfa_fcs_vport_do_logo(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* fabric went offline: no LOGO needed, just clean up */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5266
5267/**
5268 * Error State.
5269 * This state will be set when the Vport Creation fails due
5270 * to errors like Dup WWN. In this state only operation allowed
5271 * is a Vport Delete.
5272 */
static void
bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	default:
		/* all other events are ignored (traced, not faulted) */
		bfa_trc(__vport_fcs(vport), event);
	}
}
5290
5291/**
5292 * Lport cleanup is in progress since vport is being deleted. Fabric is
5293 * offline, so no LOGO is needed to complete vport deletion.
5294 */
static void
bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELCOMP:
		/* lport cleanup finished: free the vport resources */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* delete already underway; ignore */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5315
5316/**
5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
5318 * is done.
5319 */
static void
bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
	enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_lps_discard(vport->lps);
		/*
		 * !!! fall through !!!
		 */

	case BFA_FCS_VPORT_SM_RSP_OK:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* LOGO completed (or abandoned): free the vport */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* delete already in progress; ignore */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
5347
5348
5349
5350/**
5351 * fcs_vport_private FCS virtual port private functions
5352 */
5353/**
5354 * This routine will be called to send a FDISC command.
5355 */
static void
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
{
	/* issue FDISC through the lps with the vport's pwwn/nwwn */
	bfa_lps_fdisc(vport->lps, vport,
		bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
		__vport_pwwn(vport), __vport_nwwn(vport));
	vport->vport_stats.fdisc_sent++;
}
5364
5365static void
5366bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5367{
5368 u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
5369 u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
5370
5371 bfa_trc(__vport_fcs(vport), lsrjt_rsn);
5372 bfa_trc(__vport_fcs(vport), lsrjt_expl);
5373
5374 /* For certain reason codes, we don't want to retry. */
5375 switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
5376 case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
5377 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5378 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5379 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5380 else
5381 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
5382 break;
5383
5384 case FC_LS_RJT_EXP_INSUFF_RES:
5385 /*
5386 * This means max logins per port/switch setting on the
5387 * switch was exceeded.
5388 */
5389 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5390 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5391 else
5392 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
5393 break;
5394
5395 default:
5396 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
933 } 5397 }
5398}
5399
5400/**
5401 * Called to send a logout to the fabric. Used when a V-Port is
5402 * deleted/stopped.
5403 */
static void
bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));

	/* send fabric LOGO for this vport's FDISC login */
	vport->vport_stats.logo_sent++;
	bfa_lps_fdisclogo(vport->lps);
}
5412
5413
5414/**
5415 * This routine will be called by bfa_timer on timer timeouts.
5416 *
5417 * param[in] vport - pointer to bfa_fcs_vport_t.
5418 * param[out] vport_status - pointer to return vport status in
5419 *
5420 * return
5421 * void
5422 *
5423 * Special Considerations:
5424 *
5425 * note
5426 */
static void
bfa_fcs_vport_timeout(void *vport_arg)
{
	struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg;

	/* FDISC retry timer expired: kick the state machine */
	vport->vport_stats.fdisc_timeouts++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
}
5435
static void
bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
{
	struct bfad_vport_s *vport_drv =
		(struct bfad_vport_s *)vport->vport_drv;

	/* detach from the fabric's vport list */
	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);

	/* wake up any driver thread waiting for delete completion */
	if (vport_drv->comp_del)
		complete(vport_drv->comp_del);

	/* release the lps allocated in bfa_fcs_vport_create() */
	bfa_lps_delete(vport->lps);
}
5449
5451
5452/**
5453 * fcs_vport_public FCS virtual port public interfaces
5454 */
5455
5456/**
5457 * Online notification from fabric SM.
5458 */
void
bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
{
	/* fabric is up: forward as an ONLINE event to the vport SM */
	vport->vport_stats.fab_online++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
5465
5466/**
5467 * Offline notification from fabric SM.
5468 */
void
bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
{
	/* fabric went down: forward as an OFFLINE event to the vport SM */
	vport->vport_stats.fab_offline++;
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}
5475
5476/**
5477 * Cleanup notification from fabric SM on link timer expiry.
5478 */
void
bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
	/* only a statistics counter is updated; no SM event is sent */
	vport->vport_stats.fab_cleanup++;
}
5484/**
5485 * delete notification from fabric SM. To be invoked from within FCS.
5486 */
5487void
5488bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
5489{
5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
935} 5491}
936 5492
5493/**
5494 * Delete completion callback from associated lport
5495 */
void
bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
{
	/* lport delete finished: advance the vport delete sequence */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
}
5502
5503
5504/**
5505 * fcs_vport_api Virtual port API
5506 */
5507
5508/**
5509 * Use this function to instantiate a new FCS vport object. This
5510 * function will not trigger any HW initialization process (which will be
5511 * done in vport_start() call)
5512 *
5513 * param[in] vport - pointer to bfa_fcs_vport_t. This space
5514 * needs to be allocated by the driver.
5515 * param[in] fcs - FCS instance
5516 * param[in] vport_cfg - vport configuration
5517 * param[in] vf_id - VF_ID if vport is created within a VF.
5518 * FC_VF_ID_NULL to specify base fabric.
5519 * param[in] vport_drv - Opaque handle back to the driver's vport
5520 * structure
5521 *
5522 * retval BFA_STATUS_OK - on success.
5523 * retval BFA_STATUS_FAILED - on failure.
5524 */
bfa_status_t
bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
		struct bfad_vport_s *vport_drv)
{
	if (vport_cfg->pwwn == 0)
		return BFA_STATUS_INVALID_WWN;

	/* vport pwwn must not clash with the base port's pwwn */
	if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
		return BFA_STATUS_VPORT_WWN_BP;

	/* reject duplicate pwwn within this VF */
	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
		return BFA_STATUS_VPORT_EXISTS;

	/* enforce the hardware vport limit */
	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
			bfa_lps_get_max_vport(fcs->bfa))
		return BFA_STATUS_VPORT_MAX;

	vport->lps = bfa_lps_alloc(fcs->bfa);
	if (!vport->lps)
		return BFA_STATUS_VPORT_MAX;

	vport->vport_drv = vport_drv;
	vport_cfg->preboot_vp = BFA_FALSE;

	/* initialize the SM and hand the CREATE event to it */
	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
	bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
	bfa_fcs_lport_init(&vport->lport, vport_cfg);
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);

	return BFA_STATUS_OK;
}
5557
5558/**
5559 * Use this function to instantiate a new FCS PBC vport object. This
5560 * function will not trigger any HW initialization process (which will be
5561 * done in vport_start() call)
5562 *
5563 * param[in] vport - pointer to bfa_fcs_vport_t. This space
5564 * needs to be allocated by the driver.
5565 * param[in] fcs - FCS instance
5566 * param[in] vport_cfg - vport configuration
5567 * param[in] vf_id - VF_ID if vport is created within a VF.
5568 * FC_VF_ID_NULL to specify base fabric.
5569 * param[in] vport_drv - Opaque handle back to the driver's vport
5570 * structure
5571 *
5572 * retval BFA_STATUS_OK - on success.
5573 * retval BFA_STATUS_FAILED - on failure.
5574 */
bfa_status_t
bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
		struct bfad_vport_s *vport_drv)
{
	bfa_status_t rc;

	rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
	/* mark as a pre-boot vport so bfa_fcs_vport_delete() refuses it.
	 * NOTE(review): this is set even when rc != BFA_STATUS_OK —
	 * presumably harmless on a failed create, but worth confirming. */
	vport->lport.port_cfg.preboot_vp = BFA_TRUE;

	return rc;
}
5587
5588/**
5589 * Use this function to findout if this is a pbc vport or not.
5590 *
5591 * @param[in] vport - pointer to bfa_fcs_vport_t.
5592 *
5593 * @returns None
5594 */
5595bfa_boolean_t
5596bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
5597{
5598
5599 if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
5600 return BFA_TRUE;
5601 else
5602 return BFA_FALSE;
5603
5604}
5605
5606/**
5607 * Use this function initialize the vport.
5608 *
5609 * @param[in] vport - pointer to bfa_fcs_vport_t.
5610 *
5611 * @returns None
5612 */
bfa_status_t
bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
{
	/* always succeeds; the SM decides whether FDISC can be sent */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);

	return BFA_STATUS_OK;
}
5620
5621/**
5622 * Use this function quiese the vport object. This function will return
5623 * immediately, when the vport is actually stopped, the
5624 * bfa_drv_vport_stop_cb() will be called.
5625 *
5626 * param[in] vport - pointer to bfa_fcs_vport_t.
5627 *
5628 * return None
5629 */
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
	/* STOP is documented as unsupported in the SM event list;
	 * the SM will fault on it in every state. */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);

	return BFA_STATUS_OK;
}
5637
5638/**
5639 * Use this function to delete a vport object. Fabric object should
5640 * be stopped before this function call.
5641 *
5642 * !!!!!!! Donot invoke this from within FCS !!!!!!!
5643 *
5644 * param[in] vport - pointer to bfa_fcs_vport_t.
5645 *
5646 * return None
5647 */
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{

	/* pre-boot vports may not be deleted through this external API */
	if (vport->lport.port_cfg.preboot_vp)
		return BFA_STATUS_PBC;

	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);

	return BFA_STATUS_OK;
}
5659
5660/**
5661 * Use this function to get vport's current status info.
5662 *
5663 * param[in] vport pointer to bfa_fcs_vport_t.
5664 * param[out] attr pointer to return vport attributes
5665 *
5666 * return None
5667 */
void
bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
	struct bfa_vport_attr_s *attr)
{
	if (vport == NULL || attr == NULL)
		return;

	bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));

	/* fill lport attributes, then map the SM handler to a state enum */
	bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
}
5680
5681/**
5682 * Use this function to get vport's statistics.
5683 *
5684 * param[in] vport pointer to bfa_fcs_vport_t.
5685 * param[out] stats pointer to return vport statistics in
5686 *
5687 * return None
5688 */
void
bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
	struct bfa_vport_stats_s *stats)
{
	/* struct copy into caller-provided storage */
	*stats = vport->vport_stats;
}
5695
5696/**
5697 * Use this function to clear vport's statistics.
5698 *
5699 * param[in] vport pointer to bfa_fcs_vport_t.
5700 *
5701 * return None
5702 */
void
bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
{
	/* reset all vport statistics counters to zero */
	bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
}
5708
5709/**
5710 * Lookup a virtual port. Excludes base port from lookup.
5711 */
5712struct bfa_fcs_vport_s *
5713bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
5714{
5715 struct bfa_fcs_vport_s *vport;
5716 struct bfa_fcs_fabric_s *fabric;
5717
5718 bfa_trc(fcs, vf_id);
5719 bfa_trc(fcs, vpwwn);
5720
5721 fabric = bfa_fcs_vf_lookup(fcs, vf_id);
5722 if (!fabric) {
5723 bfa_trc(fcs, vf_id);
5724 return NULL;
5725 }
5726
5727 vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
5728 return vport;
5729}
5730
5731/**
5732 * FDISC Response
5733 */
void
bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_vport_s *vport = uarg;

	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), status);

	switch (status) {
	case BFA_STATUS_OK:
		/*
		 * Initialiaze the V-Port fields
		 */
		/* the fabric assigned our FC_ID in the FDISC accept */
		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
		vport->vport_stats.fdisc_accepts++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
		break;

	case BFA_STATUS_INVALID_MAC:
		/* Only for CNA */
		vport->vport_stats.fdisc_acc_bad++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);

		break;

	case BFA_STATUS_EPROTOCOL:
		/* count the specific protocol error, then retry */
		switch (bfa_lps_get_extstatus(vport->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			vport->vport_stats.fdisc_acc_bad++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			vport->vport_stats.fdisc_unknown_rsp++;
			break;

		default:
			break;
		}

		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* LS_RJT: decide retry vs. terminal event from the expl code */
		vport->vport_stats.fdisc_rejects++;
		bfa_fcs_vport_fdisc_rejected(vport);
		break;

	default:
		vport->vport_stats.fdisc_rsp_err++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
5786
5787/**
5788 * LOGO response
5789 */
void
bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
{
	struct bfa_fcs_vport_s *vport = uarg;
	/* LOGO done: RSP_OK in the logo state frees the vport */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
5796
5797/**
5798 * Received clear virtual link
5799 */
void
bfa_cb_lps_cvl_event(void *bfad, void *uarg)
{
	struct bfa_fcs_vport_s *vport = uarg;

	/* Send an Offline followed by an ONLINE */
	/* forces a fresh FDISC after the clear-virtual-link */
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
deleted file mode 100644
index 3c27788cd527..000000000000
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_pport.c BFA FCS PPORT ( physical port)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs_trcmod.h"
26#include "fcs.h"
27#include "fcs_fabric.h"
28#include "fcs_port.h"
29
30BFA_TRC_FILE(FCS, PPORT);
31
32static void
33bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
34{
35 struct bfa_fcs_s *fcs = cbarg;
36
37 bfa_trc(fcs, event);
38
39 switch (event) {
40 case BFA_PPORT_LINKUP:
41 bfa_fcs_fabric_link_up(&fcs->fabric);
42 break;
43
44 case BFA_PPORT_LINKDOWN:
45 bfa_fcs_fabric_link_down(&fcs->fabric);
46 break;
47
48 case BFA_PPORT_TRUNK_LINKDOWN:
49 bfa_assert(0);
50 break;
51
52 default:
53 bfa_assert(0);
54 }
55}
56
57void
58bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
59{
60 bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
61}
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 9b4c2c9a644b..635f0cd88714 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,151 +19,133 @@
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
22#include <linux/slab.h> 22#include "bfa_fcs.h"
23#include <bfa.h> 23#include "bfa_fcbuild.h"
24#include <bfa_svc.h> 24#include "bfad_drv.h"
25#include "fcbuild.h"
26#include "fcs_vport.h"
27#include "fcs_lport.h"
28#include "fcs_rport.h"
29#include "fcs_fcpim.h"
30#include "fcs_fcptm.h"
31#include "fcs_trcmod.h"
32#include "fcs_fcxp.h"
33#include "fcs.h"
34#include <fcb/bfa_fcb_rport.h>
35#include <aen/bfa_aen_rport.h>
36 25
37BFA_TRC_FILE(FCS, RPORT); 26BFA_TRC_FILE(FCS, RPORT);
38 27
39/* In millisecs */ 28static u32
40static u32 bfa_fcs_rport_del_timeout = 29bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
41 BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; 30 /* In millisecs */
42
43/* 31/*
44 * forward declarations 32 * forward declarations
45 */ 33 */
46static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, 34static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
47 wwn_t pwwn, u32 rpid); 35 struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
48static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); 36static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
49static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); 37static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
50static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport); 38static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
51static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport); 39static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
52static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, 40static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
53 struct fc_logi_s *plogi); 41 struct fc_logi_s *plogi);
54static void bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport); 42static void bfa_fcs_rport_timeout(void *arg);
55static void bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport); 43static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
56static void bfa_fcs_rport_timeout(void *arg);
57static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
58 struct bfa_fcxp_s *fcxp_alloced); 44 struct bfa_fcxp_s *fcxp_alloced);
59static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, 45static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
60 struct bfa_fcxp_s *fcxp_alloced); 46 struct bfa_fcxp_s *fcxp_alloced);
61static void bfa_fcs_rport_plogi_response(void *fcsarg, 47static void bfa_fcs_rport_plogi_response(void *fcsarg,
62 struct bfa_fcxp_s *fcxp, 48 struct bfa_fcxp_s *fcxp, void *cbarg,
63 void *cbarg, 49 bfa_status_t req_status, u32 rsp_len,
64 bfa_status_t req_status, 50 u32 resid_len, struct fchs_s *rsp_fchs);
65 u32 rsp_len, 51static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
66 u32 resid_len,
67 struct fchs_s *rsp_fchs);
68static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
69 struct bfa_fcxp_s *fcxp_alloced); 52 struct bfa_fcxp_s *fcxp_alloced);
70static void bfa_fcs_rport_adisc_response(void *fcsarg, 53static void bfa_fcs_rport_adisc_response(void *fcsarg,
71 struct bfa_fcxp_s *fcxp, 54 struct bfa_fcxp_s *fcxp, void *cbarg,
72 void *cbarg, 55 bfa_status_t req_status, u32 rsp_len,
73 bfa_status_t req_status, 56 u32 resid_len, struct fchs_s *rsp_fchs);
74 u32 rsp_len, 57static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg,
75 u32 resid_len,
76 struct fchs_s *rsp_fchs);
77static void bfa_fcs_rport_send_gidpn(void *rport_cbarg,
78 struct bfa_fcxp_s *fcxp_alloced); 58 struct bfa_fcxp_s *fcxp_alloced);
79static void bfa_fcs_rport_gidpn_response(void *fcsarg, 59static void bfa_fcs_rport_gidpn_response(void *fcsarg,
80 struct bfa_fcxp_s *fcxp, 60 struct bfa_fcxp_s *fcxp, void *cbarg,
81 void *cbarg, 61 bfa_status_t req_status, u32 rsp_len,
82 bfa_status_t req_status, 62 u32 resid_len, struct fchs_s *rsp_fchs);
83 u32 rsp_len, 63static void bfa_fcs_rport_gpnid_response(void *fcsarg,
84 u32 resid_len, 64 struct bfa_fcxp_s *fcxp, void *cbarg,
85 struct fchs_s *rsp_fchs); 65 bfa_status_t req_status, u32 rsp_len,
86static void bfa_fcs_rport_send_logo(void *rport_cbarg, 66 u32 resid_len, struct fchs_s *rsp_fchs);
67static void bfa_fcs_rport_send_logo(void *rport_cbarg,
87 struct bfa_fcxp_s *fcxp_alloced); 68 struct bfa_fcxp_s *fcxp_alloced);
88static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); 69static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
89static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, 70static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
90 struct fchs_s *rx_fchs, u16 len); 71 struct fchs_s *rx_fchs, u16 len);
91static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, 72static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
92 struct fchs_s *rx_fchs, u8 reason_code, 73 struct fchs_s *rx_fchs, u8 reason_code,
93 u8 reason_code_expl); 74 u8 reason_code_expl);
94static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
95 struct fchs_s *rx_fchs, u16 len); 76 struct fchs_s *rx_fchs, u16 len);
96static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
97/** 78/**
98 * fcs_rport_sm FCS rport state machine events 79 * fcs_rport_sm FCS rport state machine events
99 */ 80 */
100 81
101enum rport_event { 82enum rport_event {
102 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ 83 RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
103 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ 84 RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
104 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ 85 RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
105 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ 86 RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
106 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ 87 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
107 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ 88 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
108 RPSM_EVENT_DELETE = 7, /* RPORT delete request */ 89 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
109 RPSM_EVENT_SCN = 8, /* state change notification */ 90 RPSM_EVENT_SCN = 8, /* state change notification */
110 RPSM_EVENT_ACCEPTED = 9,/* Good response from remote device */ 91 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
111 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ 92 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
112 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ 93 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
113 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ 94 RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
114 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ 95 RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
115 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ 96 RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
116 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ 97 RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
117 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ 98 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
118 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ 99 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
100 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continously */
119}; 101};
120 102
121static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, 103static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
122 enum rport_event event); 104 enum rport_event event);
123static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, 105static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
124 enum rport_event event); 106 enum rport_event event);
125static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, 107static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
126 enum rport_event event); 108 enum rport_event event);
127static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, 109static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
128 enum rport_event event); 110 enum rport_event event);
129static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, 111static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
130 enum rport_event event);
131static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
132 enum rport_event event);
133static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
134 enum rport_event event); 112 enum rport_event event);
135static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, 113static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
114 enum rport_event event);
115static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
116 enum rport_event event);
117static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
136 enum rport_event event); 118 enum rport_event event);
137static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, 119static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
138 enum rport_event event); 120 enum rport_event event);
139static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 121static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
140 enum rport_event event); 122 enum rport_event event);
141static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, 123static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport,
142 enum rport_event event); 124 enum rport_event event);
143static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 125static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
144 enum rport_event event); 126 enum rport_event event);
145static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, 127static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
146 enum rport_event event); 128 enum rport_event event);
147static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, 129static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
148 enum rport_event event); 130 enum rport_event event);
149static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, 131static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
150 enum rport_event event); 132 enum rport_event event);
151static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, 133static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
152 enum rport_event event); 134 enum rport_event event);
153static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, 135static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
154 enum rport_event event); 136 enum rport_event event);
155static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, 137static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
156 enum rport_event event); 138 enum rport_event event);
157static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, 139static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
158 enum rport_event event); 140 enum rport_event event);
159static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, 141static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
142 enum rport_event event);
143static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
144 enum rport_event event);
145static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
146 enum rport_event event);
147static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
160 enum rport_event event); 148 enum rport_event event);
161static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
162 enum rport_event event);
163static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
164 enum rport_event event);
165static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
166 enum rport_event event);
167 149
168static struct bfa_sm_table_s rport_sm_table[] = { 150static struct bfa_sm_table_s rport_sm_table[] = {
169 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, 151 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@@ -191,7 +173,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
191}; 173};
192 174
193/** 175/**
194 * Beginning state. 176 * Beginning state.
195 */ 177 */
196static void 178static void
197bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) 179bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -221,20 +203,19 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
221 case RPSM_EVENT_ADDRESS_DISC: 203 case RPSM_EVENT_ADDRESS_DISC:
222 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 204 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
223 rport->ns_retries = 0; 205 rport->ns_retries = 0;
224 bfa_fcs_rport_send_gidpn(rport, NULL); 206 bfa_fcs_rport_send_nsdisc(rport, NULL);
225 break; 207 break;
226
227 default: 208 default:
228 bfa_sm_fault(rport->fcs, event); 209 bfa_sm_fault(rport->fcs, event);
229 } 210 }
230} 211}
231 212
232/** 213/**
233 * PLOGI is being sent. 214 * PLOGI is being sent.
234 */ 215 */
235static void 216static void
236bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, 217bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
237 enum rport_event event) 218 enum rport_event event)
238{ 219{
239 bfa_trc(rport->fcs, rport->pwwn); 220 bfa_trc(rport->fcs, rport->pwwn);
240 bfa_trc(rport->fcs, rport->pid); 221 bfa_trc(rport->fcs, rport->pid);
@@ -258,10 +239,12 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
258 break; 239 break;
259 240
260 case RPSM_EVENT_ADDRESS_CHANGE: 241 case RPSM_EVENT_ADDRESS_CHANGE:
242 case RPSM_EVENT_SCN:
243 /* query the NS */
261 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 244 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
262 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 245 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
263 rport->ns_retries = 0; 246 rport->ns_retries = 0;
264 bfa_fcs_rport_send_gidpn(rport, NULL); 247 bfa_fcs_rport_send_nsdisc(rport, NULL);
265 break; 248 break;
266 249
267 case RPSM_EVENT_LOGO_IMP: 250 case RPSM_EVENT_LOGO_IMP:
@@ -273,8 +256,6 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
273 bfa_fcs_rport_del_timeout); 256 bfa_fcs_rport_del_timeout);
274 break; 257 break;
275 258
276 case RPSM_EVENT_SCN:
277 break;
278 259
279 default: 260 default:
280 bfa_sm_fault(rport->fcs, event); 261 bfa_sm_fault(rport->fcs, event);
@@ -282,11 +263,11 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
282} 263}
283 264
284/** 265/**
285 * PLOGI is being sent. 266 * PLOGI is being sent.
286 */ 267 */
287static void 268static void
288bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, 269bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
289 enum rport_event event) 270 enum rport_event event)
290{ 271{
291 bfa_trc(rport->fcs, rport->pwwn); 272 bfa_trc(rport->fcs, rport->pwwn);
292 bfa_trc(rport->fcs, rport->pid); 273 bfa_trc(rport->fcs, rport->pid);
@@ -304,6 +285,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
304 bfa_fcs_rport_free(rport); 285 bfa_fcs_rport_free(rport);
305 break; 286 break;
306 287
288 case RPSM_EVENT_PLOGI_RCVD:
307 case RPSM_EVENT_SCN: 289 case RPSM_EVENT_SCN:
308 /** 290 /**
309 * Ignore, SCN is possibly online notification. 291 * Ignore, SCN is possibly online notification.
@@ -314,7 +296,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
314 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 296 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
315 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 297 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
316 rport->ns_retries = 0; 298 rport->ns_retries = 0;
317 bfa_fcs_rport_send_gidpn(rport, NULL); 299 bfa_fcs_rport_send_nsdisc(rport, NULL);
318 break; 300 break;
319 301
320 case RPSM_EVENT_LOGO_IMP: 302 case RPSM_EVENT_LOGO_IMP:
@@ -338,7 +320,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
338} 320}
339 321
340/** 322/**
341 * PLOGI is sent. 323 * PLOGI is sent.
342 */ 324 */
343static void 325static void
344bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, 326bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
@@ -349,24 +331,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
349 bfa_trc(rport->fcs, event); 331 bfa_trc(rport->fcs, event);
350 332
351 switch (event) { 333 switch (event) {
352 case RPSM_EVENT_SCN:
353 bfa_timer_stop(&rport->timer);
354 /*
355 * !! fall through !!
356 */
357
358 case RPSM_EVENT_TIMEOUT: 334 case RPSM_EVENT_TIMEOUT:
359 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { 335 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
360 rport->plogi_retries++; 336 bfa_fcs_rport_send_plogi(rport, NULL);
361 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
362 bfa_fcs_rport_send_plogi(rport, NULL);
363 } else {
364 rport->pid = 0;
365 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
366 bfa_timer_start(rport->fcs->bfa, &rport->timer,
367 bfa_fcs_rport_timeout, rport,
368 bfa_fcs_rport_del_timeout);
369 }
370 break; 337 break;
371 338
372 case RPSM_EVENT_DELETE: 339 case RPSM_EVENT_DELETE:
@@ -386,10 +353,11 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
386 break; 353 break;
387 354
388 case RPSM_EVENT_ADDRESS_CHANGE: 355 case RPSM_EVENT_ADDRESS_CHANGE:
356 case RPSM_EVENT_SCN:
389 bfa_timer_stop(&rport->timer); 357 bfa_timer_stop(&rport->timer);
390 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 358 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
391 rport->ns_retries = 0; 359 rport->ns_retries = 0;
392 bfa_fcs_rport_send_gidpn(rport, NULL); 360 bfa_fcs_rport_send_nsdisc(rport, NULL);
393 break; 361 break;
394 362
395 case RPSM_EVENT_LOGO_IMP: 363 case RPSM_EVENT_LOGO_IMP:
@@ -413,7 +381,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
413} 381}
414 382
415/** 383/**
416 * PLOGI is sent. 384 * PLOGI is sent.
417 */ 385 */
418static void 386static void
419bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) 387bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -443,10 +411,28 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
443 * !! fall through !! 411 * !! fall through !!
444 */ 412 */
445 case RPSM_EVENT_FAILED: 413 case RPSM_EVENT_FAILED:
414 if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
415 rport->plogi_retries++;
416 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
417 bfa_timer_start(rport->fcs->bfa, &rport->timer,
418 bfa_fcs_rport_timeout, rport,
419 BFA_FCS_RETRY_TIMEOUT);
420 } else {
421 bfa_stats(rport->port, rport_del_max_plogi_retry);
422 rport->pid = 0;
423 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
424 bfa_timer_start(rport->fcs->bfa, &rport->timer,
425 bfa_fcs_rport_timeout, rport,
426 bfa_fcs_rport_del_timeout);
427 }
428 break;
429
430 case RPSM_EVENT_PLOGI_RETRY:
431 rport->plogi_retries = 0;
446 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); 432 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
447 bfa_timer_start(rport->fcs->bfa, &rport->timer, 433 bfa_timer_start(rport->fcs->bfa, &rport->timer,
448 bfa_fcs_rport_timeout, rport, 434 bfa_fcs_rport_timeout, rport,
449 BFA_FCS_RETRY_TIMEOUT); 435 (FC_RA_TOV * 1000));
450 break; 436 break;
451 437
452 case RPSM_EVENT_LOGO_IMP: 438 case RPSM_EVENT_LOGO_IMP:
@@ -459,10 +445,11 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
459 break; 445 break;
460 446
461 case RPSM_EVENT_ADDRESS_CHANGE: 447 case RPSM_EVENT_ADDRESS_CHANGE:
448 case RPSM_EVENT_SCN:
462 bfa_fcxp_discard(rport->fcxp); 449 bfa_fcxp_discard(rport->fcxp);
463 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 450 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
464 rport->ns_retries = 0; 451 rport->ns_retries = 0;
465 bfa_fcs_rport_send_gidpn(rport, NULL); 452 bfa_fcs_rport_send_nsdisc(rport, NULL);
466 break; 453 break;
467 454
468 case RPSM_EVENT_PLOGI_RCVD: 455 case RPSM_EVENT_PLOGI_RCVD:
@@ -471,12 +458,6 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
471 bfa_fcs_rport_send_plogiacc(rport, NULL); 458 bfa_fcs_rport_send_plogiacc(rport, NULL);
472 break; 459 break;
473 460
474 case RPSM_EVENT_SCN:
475 /**
476 * Ignore SCN - wait for PLOGI response.
477 */
478 break;
479
480 case RPSM_EVENT_DELETE: 461 case RPSM_EVENT_DELETE:
481 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 462 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
482 bfa_fcxp_discard(rport->fcxp); 463 bfa_fcxp_discard(rport->fcxp);
@@ -495,8 +476,8 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
495} 476}
496 477
497/** 478/**
498 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
499 * are offline. 480 * are offline.
500 */ 481 */
501static void 482static void
502bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, 483bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
@@ -551,7 +532,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
551} 532}
552 533
553/** 534/**
554 * Rport is ONLINE. FC-4s active. 535 * Rport is ONLINE. FC-4s active.
555 */ 536 */
556static void 537static void
557bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) 538bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -562,18 +543,11 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
562 543
563 switch (event) { 544 switch (event) {
564 case RPSM_EVENT_SCN: 545 case RPSM_EVENT_SCN:
565 /**
566 * Pause FC-4 activity till rport is authenticated.
567 * In switched fabrics, check presence of device in nameserver
568 * first.
569 */
570 bfa_fcs_rport_fc4_pause(rport);
571
572 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 546 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
573 bfa_sm_set_state(rport, 547 bfa_sm_set_state(rport,
574 bfa_fcs_rport_sm_nsquery_sending); 548 bfa_fcs_rport_sm_nsquery_sending);
575 rport->ns_retries = 0; 549 rport->ns_retries = 0;
576 bfa_fcs_rport_send_gidpn(rport, NULL); 550 bfa_fcs_rport_send_nsdisc(rport, NULL);
577 } else { 551 } else {
578 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 552 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
579 bfa_fcs_rport_send_adisc(rport, NULL); 553 bfa_fcs_rport_send_adisc(rport, NULL);
@@ -607,12 +581,12 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
607} 581}
608 582
609/** 583/**
610 * An SCN event is received in ONLINE state. NS query is being sent 584 * An SCN event is received in ONLINE state. NS query is being sent
611 * prior to ADISC authentication with rport. FC-4s are paused. 585 * prior to ADISC authentication with rport. FC-4s are paused.
612 */ 586 */
613static void 587static void
614bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, 588bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
615 enum rport_event event) 589 enum rport_event event)
616{ 590{
617 bfa_trc(rport->fcs, rport->pwwn); 591 bfa_trc(rport->fcs, rport->pwwn);
618 bfa_trc(rport->fcs, rport->pid); 592 bfa_trc(rport->fcs, rport->pid);
@@ -665,8 +639,8 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
665} 639}
666 640
667/** 641/**
668 * An SCN event is received in ONLINE state. NS query is sent to rport. 642 * An SCN event is received in ONLINE state. NS query is sent to rport.
669 * FC-4s are paused. 643 * FC-4s are paused.
670 */ 644 */
671static void 645static void
672bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) 646bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -686,7 +660,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
686 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { 660 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
687 bfa_sm_set_state(rport, 661 bfa_sm_set_state(rport,
688 bfa_fcs_rport_sm_nsquery_sending); 662 bfa_fcs_rport_sm_nsquery_sending);
689 bfa_fcs_rport_send_gidpn(rport, NULL); 663 bfa_fcs_rport_send_nsdisc(rport, NULL);
690 } else { 664 } else {
691 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 665 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
692 bfa_fcs_rport_offline_action(rport); 666 bfa_fcs_rport_offline_action(rport);
@@ -724,12 +698,12 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
724} 698}
725 699
726/** 700/**
727 * An SCN event is received in ONLINE state. ADISC is being sent for 701 * An SCN event is received in ONLINE state. ADISC is being sent for
728 * authenticating with rport. FC-4s are paused. 702 * authenticating with rport. FC-4s are paused.
729 */ 703 */
730static void 704static void
731bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 705bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
732 enum rport_event event) 706 enum rport_event event)
733{ 707{
734 bfa_trc(rport->fcs, rport->pwwn); 708 bfa_trc(rport->fcs, rport->pwwn);
735 bfa_trc(rport->fcs, rport->pid); 709 bfa_trc(rport->fcs, rport->pid);
@@ -775,8 +749,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
775} 749}
776 750
777/** 751/**
778 * An SCN event is received in ONLINE state. ADISC is to rport. 752 * An SCN event is received in ONLINE state. ADISC is to rport.
779 * FC-4s are paused. 753 * FC-4s are paused.
780 */ 754 */
781static void 755static void
782bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) 756bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -788,7 +762,6 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
788 switch (event) { 762 switch (event) {
789 case RPSM_EVENT_ACCEPTED: 763 case RPSM_EVENT_ACCEPTED:
790 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); 764 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
791 bfa_fcs_rport_fc4_resume(rport);
792 break; 765 break;
793 766
794 case RPSM_EVENT_PLOGI_RCVD: 767 case RPSM_EVENT_PLOGI_RCVD:
@@ -838,7 +811,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
838} 811}
839 812
840/** 813/**
841 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
842 */ 815 */
843static void 816static void
844bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 817bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
@@ -869,12 +842,12 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
869} 842}
870 843
871/** 844/**
872 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
873 * callback. 846 * callback.
874 */ 847 */
875static void 848static void
876bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, 849bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
877 enum rport_event event) 850 enum rport_event event)
878{ 851{
879 bfa_trc(rport->fcs, rport->pwwn); 852 bfa_trc(rport->fcs, rport->pwwn);
880 bfa_trc(rport->fcs, rport->pid); 853 bfa_trc(rport->fcs, rport->pid);
@@ -892,7 +865,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
892} 865}
893 866
894/** 867/**
895 * Rport is going offline. Awaiting FC-4 offline completion callback. 868 * Rport is going offline. Awaiting FC-4 offline completion callback.
896 */ 869 */
897static void 870static void
898bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, 871bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
@@ -929,12 +902,12 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
929} 902}
930 903
931/** 904/**
932 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
933 * callback. 906 * callback.
934 */ 907 */
935static void 908static void
936bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, 909bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
937 enum rport_event event) 910 enum rport_event event)
938{ 911{
939 bfa_trc(rport->fcs, rport->pwwn); 912 bfa_trc(rport->fcs, rport->pwwn);
940 bfa_trc(rport->fcs, rport->pid); 913 bfa_trc(rport->fcs, rport->pid);
@@ -943,12 +916,12 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
943 switch (event) { 916 switch (event) {
944 case RPSM_EVENT_HCB_OFFLINE: 917 case RPSM_EVENT_HCB_OFFLINE:
945 case RPSM_EVENT_ADDRESS_CHANGE: 918 case RPSM_EVENT_ADDRESS_CHANGE:
946 if (bfa_fcs_port_is_online(rport->port)) { 919 if (bfa_fcs_lport_is_online(rport->port)) {
947 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 920 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
948 bfa_sm_set_state(rport, 921 bfa_sm_set_state(rport,
949 bfa_fcs_rport_sm_nsdisc_sending); 922 bfa_fcs_rport_sm_nsdisc_sending);
950 rport->ns_retries = 0; 923 rport->ns_retries = 0;
951 bfa_fcs_rport_send_gidpn(rport, NULL); 924 bfa_fcs_rport_send_nsdisc(rport, NULL);
952 } else { 925 } else {
953 bfa_sm_set_state(rport, 926 bfa_sm_set_state(rport,
954 bfa_fcs_rport_sm_plogi_sending); 927 bfa_fcs_rport_sm_plogi_sending);
@@ -983,8 +956,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
983} 956}
984 957
985/** 958/**
986 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
987 * callback to send LOGO accept. 960 * callback to send LOGO accept.
988 */ 961 */
989static void 962static void
990bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, 963bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
@@ -1001,21 +974,21 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1001 bfa_fcs_rport_send_prlo_acc(rport); 974 bfa_fcs_rport_send_prlo_acc(rport);
1002 if (rport->pid && (rport->prlo == BFA_FALSE)) 975 if (rport->pid && (rport->prlo == BFA_FALSE))
1003 bfa_fcs_rport_send_logo_acc(rport); 976 bfa_fcs_rport_send_logo_acc(rport);
1004
1005 /* 977 /*
1006 * If the lport is online and if the rport is not a well known 978 * If the lport is online and if the rport is not a well
1007 * address port, we try to re-discover the r-port. 979 * known address port,
980 * we try to re-discover the r-port.
1008 */ 981 */
1009 if (bfa_fcs_port_is_online(rport->port) 982 if (bfa_fcs_lport_is_online(rport->port) &&
1010 && (!BFA_FCS_PID_IS_WKA(rport->pid))) { 983 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
1011 bfa_sm_set_state(rport, 984 bfa_sm_set_state(rport,
1012 bfa_fcs_rport_sm_nsdisc_sending); 985 bfa_fcs_rport_sm_nsdisc_sending);
1013 rport->ns_retries = 0; 986 rport->ns_retries = 0;
1014 bfa_fcs_rport_send_gidpn(rport, NULL); 987 bfa_fcs_rport_send_nsdisc(rport, NULL);
1015 } else { 988 } else {
1016 /* 989 /*
1017 * if it is not a well known address, reset the pid to 990 * if it is not a well known address, reset the
1018 * 991 * pid to 0.
1019 */ 992 */
1020 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 993 if (!BFA_FCS_PID_IS_WKA(rport->pid))
1021 rport->pid = 0; 994 rport->pid = 0;
@@ -1047,12 +1020,13 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1047} 1020}
1048 1021
1049/** 1022/**
1050 * Rport is being deleted. FC-4s are offline. Awaiting BFA rport offline 1023 * Rport is being deleted. FC-4s are offline.
1051 * callback to send LOGO. 1024 * Awaiting BFA rport offline
1025 * callback to send LOGO.
1052 */ 1026 */
1053static void 1027static void
1054bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, 1028bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1055 enum rport_event event) 1029 enum rport_event event)
1056{ 1030{
1057 bfa_trc(rport->fcs, rport->pwwn); 1031 bfa_trc(rport->fcs, rport->pwwn);
1058 bfa_trc(rport->fcs, rport->pid); 1032 bfa_trc(rport->fcs, rport->pid);
@@ -1075,11 +1049,11 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1075} 1049}
1076 1050
1077/** 1051/**
1078 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
1079 */ 1053 */
1080static void 1054static void
1081bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, 1055bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1082 enum rport_event event) 1056 enum rport_event event)
1083{ 1057{
1084 bfa_trc(rport->fcs, rport->pwwn); 1058 bfa_trc(rport->fcs, rport->pwwn);
1085 bfa_trc(rport->fcs, rport->pid); 1059 bfa_trc(rport->fcs, rport->pid);
@@ -1087,9 +1061,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1087 1061
1088 switch (event) { 1062 switch (event) {
1089 case RPSM_EVENT_FCXP_SENT: 1063 case RPSM_EVENT_FCXP_SENT:
1090 /* 1064 /* Once LOGO is sent, we donot wait for the response */
1091 * Once LOGO is sent, we donot wait for the response
1092 */
1093 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 1065 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1094 bfa_fcs_rport_free(rport); 1066 bfa_fcs_rport_free(rport);
1095 break; 1067 break;
@@ -1111,8 +1083,8 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1111} 1083}
1112 1084
1113/** 1085/**
1114 * Rport is offline. FC-4s are offline. BFA rport is offline. 1086 * Rport is offline. FC-4s are offline. BFA rport is offline.
1115 * Timer active to delete stale rport. 1087 * Timer active to delete stale rport.
1116 */ 1088 */
1117static void 1089static void
1118bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) 1090bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
@@ -1132,7 +1104,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1132 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1104 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1133 bfa_timer_stop(&rport->timer); 1105 bfa_timer_stop(&rport->timer);
1134 rport->ns_retries = 0; 1106 rport->ns_retries = 0;
1135 bfa_fcs_rport_send_gidpn(rport, NULL); 1107 bfa_fcs_rport_send_nsdisc(rport, NULL);
1136 break; 1108 break;
1137 1109
1138 case RPSM_EVENT_DELETE: 1110 case RPSM_EVENT_DELETE:
@@ -1171,11 +1143,11 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1171} 1143}
1172 1144
1173/** 1145/**
1174 * Rport address has changed. Nameserver discovery request is being sent. 1146 * Rport address has changed. Nameserver discovery request is being sent.
1175 */ 1147 */
1176static void 1148static void
1177bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, 1149bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1178 enum rport_event event) 1150 enum rport_event event)
1179{ 1151{
1180 bfa_trc(rport->fcs, rport->pwwn); 1152 bfa_trc(rport->fcs, rport->pwwn);
1181 bfa_trc(rport->fcs, rport->pid); 1153 bfa_trc(rport->fcs, rport->pid);
@@ -1205,7 +1177,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1205 break; 1177 break;
1206 1178
1207 case RPSM_EVENT_ADDRESS_CHANGE: 1179 case RPSM_EVENT_ADDRESS_CHANGE:
1208 rport->ns_retries = 0; /* reset the retry count */ 1180 rport->ns_retries = 0; /* reset the retry count */
1209 break; 1181 break;
1210 1182
1211 case RPSM_EVENT_LOGO_IMP: 1183 case RPSM_EVENT_LOGO_IMP:
@@ -1228,11 +1200,11 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1228} 1200}
1229 1201
1230/** 1202/**
1231 * Nameserver discovery failed. Waiting for timeout to retry. 1203 * Nameserver discovery failed. Waiting for timeout to retry.
1232 */ 1204 */
1233static void 1205static void
1234bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, 1206bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1235 enum rport_event event) 1207 enum rport_event event)
1236{ 1208{
1237 bfa_trc(rport->fcs, rport->pwwn); 1209 bfa_trc(rport->fcs, rport->pwwn);
1238 bfa_trc(rport->fcs, rport->pid); 1210 bfa_trc(rport->fcs, rport->pid);
@@ -1241,7 +1213,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1241 switch (event) { 1213 switch (event) {
1242 case RPSM_EVENT_TIMEOUT: 1214 case RPSM_EVENT_TIMEOUT:
1243 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1215 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1244 bfa_fcs_rport_send_gidpn(rport, NULL); 1216 bfa_fcs_rport_send_nsdisc(rport, NULL);
1245 break; 1217 break;
1246 1218
1247 case RPSM_EVENT_SCN: 1219 case RPSM_EVENT_SCN:
@@ -1249,7 +1221,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1249 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1221 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1250 bfa_timer_stop(&rport->timer); 1222 bfa_timer_stop(&rport->timer);
1251 rport->ns_retries = 0; 1223 rport->ns_retries = 0;
1252 bfa_fcs_rport_send_gidpn(rport, NULL); 1224 bfa_fcs_rport_send_nsdisc(rport, NULL);
1253 break; 1225 break;
1254 1226
1255 case RPSM_EVENT_DELETE: 1227 case RPSM_EVENT_DELETE:
@@ -1276,7 +1248,6 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1276 case RPSM_EVENT_LOGO_RCVD: 1248 case RPSM_EVENT_LOGO_RCVD:
1277 bfa_fcs_rport_send_logo_acc(rport); 1249 bfa_fcs_rport_send_logo_acc(rport);
1278 break; 1250 break;
1279
1280 case RPSM_EVENT_PRLO_RCVD: 1251 case RPSM_EVENT_PRLO_RCVD:
1281 bfa_fcs_rport_send_prlo_acc(rport); 1252 bfa_fcs_rport_send_prlo_acc(rport);
1282 break; 1253 break;
@@ -1293,7 +1264,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1293} 1264}
1294 1265
1295/** 1266/**
1296 * Rport address has changed. Nameserver discovery request is sent. 1267 * Rport address has changed. Nameserver discovery request is sent.
1297 */ 1268 */
1298static void 1269static void
1299bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, 1270bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
@@ -1311,9 +1282,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1311 bfa_fcs_rport_send_plogi(rport, NULL); 1282 bfa_fcs_rport_send_plogi(rport, NULL);
1312 } else { 1283 } else {
1313 bfa_sm_set_state(rport, 1284 bfa_sm_set_state(rport,
1314 bfa_fcs_rport_sm_nsdisc_sending); 1285 bfa_fcs_rport_sm_nsdisc_sending);
1315 rport->ns_retries = 0; 1286 rport->ns_retries = 0;
1316 bfa_fcs_rport_send_gidpn(rport, NULL); 1287 bfa_fcs_rport_send_nsdisc(rport, NULL);
1317 } 1288 }
1318 break; 1289 break;
1319 1290
@@ -1321,8 +1292,8 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1321 rport->ns_retries++; 1292 rport->ns_retries++;
1322 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { 1293 if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
1323 bfa_sm_set_state(rport, 1294 bfa_sm_set_state(rport,
1324 bfa_fcs_rport_sm_nsdisc_sending); 1295 bfa_fcs_rport_sm_nsdisc_sending);
1325 bfa_fcs_rport_send_gidpn(rport, NULL); 1296 bfa_fcs_rport_send_nsdisc(rport, NULL);
1326 } else { 1297 } else {
1327 rport->pid = 0; 1298 rport->pid = 0;
1328 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 1299 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
@@ -1353,10 +1324,10 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1353 bfa_fcs_rport_del_timeout); 1324 bfa_fcs_rport_del_timeout);
1354 break; 1325 break;
1355 1326
1327
1356 case RPSM_EVENT_PRLO_RCVD: 1328 case RPSM_EVENT_PRLO_RCVD:
1357 bfa_fcs_rport_send_prlo_acc(rport); 1329 bfa_fcs_rport_send_prlo_acc(rport);
1358 break; 1330 break;
1359
1360 case RPSM_EVENT_SCN: 1331 case RPSM_EVENT_SCN:
1361 /** 1332 /**
1362 * ignore, wait for NS query response 1333 * ignore, wait for NS query response
@@ -1391,29 +1362,29 @@ static void
1391bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1362bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1392{ 1363{
1393 struct bfa_fcs_rport_s *rport = rport_cbarg; 1364 struct bfa_fcs_rport_s *rport = rport_cbarg;
1394 struct bfa_fcs_port_s *port = rport->port; 1365 struct bfa_fcs_lport_s *port = rport->port;
1395 struct fchs_s fchs; 1366 struct fchs_s fchs;
1396 int len; 1367 int len;
1397 struct bfa_fcxp_s *fcxp; 1368 struct bfa_fcxp_s *fcxp;
1398 1369
1399 bfa_trc(rport->fcs, rport->pwwn); 1370 bfa_trc(rport->fcs, rport->pwwn);
1400 1371
1401 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1372 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1402 if (!fcxp) { 1373 if (!fcxp) {
1403 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1374 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1404 bfa_fcs_rport_send_plogi, rport); 1375 bfa_fcs_rport_send_plogi, rport);
1405 return; 1376 return;
1406 } 1377 }
1407 rport->fcxp = fcxp; 1378 rport->fcxp = fcxp;
1408 1379
1409 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1380 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1410 bfa_fcs_port_get_fcid(port), 0, 1381 bfa_fcs_lport_get_fcid(port), 0,
1411 port->port_cfg.pwwn, port->port_cfg.nwwn, 1382 port->port_cfg.pwwn, port->port_cfg.nwwn,
1412 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1383 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1413 1384
1414 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1385 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1415 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1386 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
1416 (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); 1387 (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1417 1388
1418 rport->stats.plogis++; 1389 rport->stats.plogis++;
1419 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1390 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1421,14 +1392,14 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1421 1392
1422static void 1393static void
1423bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1394bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1424 bfa_status_t req_status, u32 rsp_len, 1395 bfa_status_t req_status, u32 rsp_len,
1425 u32 resid_len, struct fchs_s *rsp_fchs) 1396 u32 resid_len, struct fchs_s *rsp_fchs)
1426{ 1397{
1427 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1398 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1428 struct fc_logi_s *plogi_rsp; 1399 struct fc_logi_s *plogi_rsp;
1429 struct fc_ls_rjt_s *ls_rjt; 1400 struct fc_ls_rjt_s *ls_rjt;
1430 struct bfa_fcs_rport_s *twin; 1401 struct bfa_fcs_rport_s *twin;
1431 struct list_head *qe; 1402 struct list_head *qe;
1432 1403
1433 bfa_trc(rport->fcs, rport->pwwn); 1404 bfa_trc(rport->fcs, rport->pwwn);
1434 1405
@@ -1453,6 +1424,13 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1453 bfa_trc(rport->fcs, ls_rjt->reason_code); 1424 bfa_trc(rport->fcs, ls_rjt->reason_code);
1454 bfa_trc(rport->fcs, ls_rjt->reason_code_expl); 1425 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
1455 1426
1427 if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) &&
1428 (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) {
1429 rport->stats.rjt_insuff_res++;
1430 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY);
1431 return;
1432 }
1433
1456 rport->stats.plogi_rejects++; 1434 rport->stats.plogi_rejects++;
1457 bfa_sm_send_event(rport, RPSM_EVENT_FAILED); 1435 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1458 return; 1436 return;
@@ -1463,22 +1441,22 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1463 * device with a new FC port address. 1441 * device with a new FC port address.
1464 */ 1442 */
1465 list_for_each(qe, &rport->port->rport_q) { 1443 list_for_each(qe, &rport->port->rport_q) {
1466 twin = (struct bfa_fcs_rport_s *)qe; 1444 twin = (struct bfa_fcs_rport_s *) qe;
1467 if (twin == rport) 1445 if (twin == rport)
1468 continue; 1446 continue;
1469 if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { 1447 if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
1470 bfa_trc(rport->fcs, twin->pid); 1448 bfa_trc(rport->fcs, twin->pid);
1471 bfa_trc(rport->fcs, rport->pid); 1449 bfa_trc(rport->fcs, rport->pid);
1472 1450
1473 /* 1451 /* Update plogi stats in twin */
1474 * Update plogi stats in twin 1452 twin->stats.plogis += rport->stats.plogis;
1475 */ 1453 twin->stats.plogi_rejects +=
1476 twin->stats.plogis += rport->stats.plogis; 1454 rport->stats.plogi_rejects;
1477 twin->stats.plogi_rejects += rport->stats.plogi_rejects; 1455 twin->stats.plogi_timeouts +=
1478 twin->stats.plogi_timeouts += 1456 rport->stats.plogi_timeouts;
1479 rport->stats.plogi_timeouts; 1457 twin->stats.plogi_failed +=
1480 twin->stats.plogi_failed += rport->stats.plogi_failed; 1458 rport->stats.plogi_failed;
1481 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; 1459 twin->stats.plogi_rcvd += rport->stats.plogi_rcvd;
1482 twin->stats.plogi_accs++; 1460 twin->stats.plogi_accs++;
1483 1461
1484 bfa_fcs_rport_delete(rport); 1462 bfa_fcs_rport_delete(rport);
@@ -1502,9 +1480,9 @@ static void
1502bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1480bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1503{ 1481{
1504 struct bfa_fcs_rport_s *rport = rport_cbarg; 1482 struct bfa_fcs_rport_s *rport = rport_cbarg;
1505 struct bfa_fcs_port_s *port = rport->port; 1483 struct bfa_fcs_lport_s *port = rport->port;
1506 struct fchs_s fchs; 1484 struct fchs_s fchs;
1507 int len; 1485 int len;
1508 struct bfa_fcxp_s *fcxp; 1486 struct bfa_fcxp_s *fcxp;
1509 1487
1510 bfa_trc(rport->fcs, rport->pwwn); 1488 bfa_trc(rport->fcs, rport->pwwn);
@@ -1512,19 +1490,20 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1512 1490
1513 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1491 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1514 if (!fcxp) { 1492 if (!fcxp) {
1515 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1493 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1516 bfa_fcs_rport_send_plogiacc, rport); 1494 bfa_fcs_rport_send_plogiacc, rport);
1517 return; 1495 return;
1518 } 1496 }
1519 rport->fcxp = fcxp; 1497 rport->fcxp = fcxp;
1520 1498
1521 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1499 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1522 bfa_fcs_port_get_fcid(port), rport->reply_oxid, 1500 rport->pid, bfa_fcs_lport_get_fcid(port),
1523 port->port_cfg.pwwn, port->port_cfg.nwwn, 1501 rport->reply_oxid, port->port_cfg.pwwn,
1502 port->port_cfg.nwwn,
1524 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1503 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1525 1504
1526 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1505 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1527 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1506 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1528 1507
1529 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1508 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1530} 1509}
@@ -1533,28 +1512,28 @@ static void
1533bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1512bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1534{ 1513{
1535 struct bfa_fcs_rport_s *rport = rport_cbarg; 1514 struct bfa_fcs_rport_s *rport = rport_cbarg;
1536 struct bfa_fcs_port_s *port = rport->port; 1515 struct bfa_fcs_lport_s *port = rport->port;
1537 struct fchs_s fchs; 1516 struct fchs_s fchs;
1538 int len; 1517 int len;
1539 struct bfa_fcxp_s *fcxp; 1518 struct bfa_fcxp_s *fcxp;
1540 1519
1541 bfa_trc(rport->fcs, rport->pwwn); 1520 bfa_trc(rport->fcs, rport->pwwn);
1542 1521
1543 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1522 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1544 if (!fcxp) { 1523 if (!fcxp) {
1545 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1524 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1546 bfa_fcs_rport_send_adisc, rport); 1525 bfa_fcs_rport_send_adisc, rport);
1547 return; 1526 return;
1548 } 1527 }
1549 rport->fcxp = fcxp; 1528 rport->fcxp = fcxp;
1550 1529
1551 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1530 len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1552 bfa_fcs_port_get_fcid(port), 0, 1531 bfa_fcs_lport_get_fcid(port), 0,
1553 port->port_cfg.pwwn, port->port_cfg.nwwn); 1532 port->port_cfg.pwwn, port->port_cfg.nwwn);
1554 1533
1555 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1534 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1556 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, 1535 FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
1557 rport, FC_MAX_PDUSZ, FC_ELS_TOV); 1536 rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1558 1537
1559 rport->stats.adisc_sent++; 1538 rport->stats.adisc_sent++;
1560 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1539 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
@@ -1562,12 +1541,12 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1562 1541
1563static void 1542static void
1564bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1543bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1565 bfa_status_t req_status, u32 rsp_len, 1544 bfa_status_t req_status, u32 rsp_len,
1566 u32 resid_len, struct fchs_s *rsp_fchs) 1545 u32 resid_len, struct fchs_s *rsp_fchs)
1567{ 1546{
1568 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1547 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1569 void *pld = bfa_fcxp_get_rspbuf(fcxp); 1548 void *pld = bfa_fcxp_get_rspbuf(fcxp);
1570 struct fc_ls_rjt_s *ls_rjt; 1549 struct fc_ls_rjt_s *ls_rjt;
1571 1550
1572 if (req_status != BFA_STATUS_OK) { 1551 if (req_status != BFA_STATUS_OK) {
1573 bfa_trc(rport->fcs, req_status); 1552 bfa_trc(rport->fcs, req_status);
@@ -1577,7 +1556,7 @@ bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1577 } 1556 }
1578 1557
1579 if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, 1558 if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
1580 rport->nwwn) == FC_PARSE_OK) { 1559 rport->nwwn) == FC_PARSE_OK) {
1581 rport->stats.adisc_accs++; 1560 rport->stats.adisc_accs++;
1582 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); 1561 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1583 return; 1562 return;
@@ -1592,44 +1571,52 @@ bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1592} 1571}
1593 1572
1594static void 1573static void
1595bfa_fcs_rport_send_gidpn(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1574bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1596{ 1575{
1597 struct bfa_fcs_rport_s *rport = rport_cbarg; 1576 struct bfa_fcs_rport_s *rport = rport_cbarg;
1598 struct bfa_fcs_port_s *port = rport->port; 1577 struct bfa_fcs_lport_s *port = rport->port;
1599 struct fchs_s fchs; 1578 struct fchs_s fchs;
1600 struct bfa_fcxp_s *fcxp; 1579 struct bfa_fcxp_s *fcxp;
1601 int len; 1580 int len;
1581 bfa_cb_fcxp_send_t cbfn;
1602 1582
1603 bfa_trc(rport->fcs, rport->pid); 1583 bfa_trc(rport->fcs, rport->pid);
1604 1584
1605 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1585 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1606 if (!fcxp) { 1586 if (!fcxp) {
1607 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1587 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1608 bfa_fcs_rport_send_gidpn, rport); 1588 bfa_fcs_rport_send_nsdisc, rport);
1609 return; 1589 return;
1610 } 1590 }
1611 rport->fcxp = fcxp; 1591 rport->fcxp = fcxp;
1612 1592
1613 len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1593 if (rport->pwwn) {
1614 bfa_fcs_port_get_fcid(port), 0, rport->pwwn); 1594 len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1595 bfa_fcs_lport_get_fcid(port), 0, rport->pwwn);
1596 cbfn = bfa_fcs_rport_gidpn_response;
1597 } else {
1598 len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1599 bfa_fcs_lport_get_fcid(port), 0, rport->pid);
1600 cbfn = bfa_fcs_rport_gpnid_response;
1601 }
1615 1602
1616 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1603 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1617 FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response, 1604 FC_CLASS_3, len, &fchs, cbfn,
1618 (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); 1605 (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);
1619 1606
1620 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1607 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1621} 1608}
1622 1609
1623static void 1610static void
1624bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, 1611bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1625 bfa_status_t req_status, u32 rsp_len, 1612 bfa_status_t req_status, u32 rsp_len,
1626 u32 resid_len, struct fchs_s *rsp_fchs) 1613 u32 resid_len, struct fchs_s *rsp_fchs)
1627{ 1614{
1628 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 1615 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1629 struct bfa_fcs_rport_s *twin; 1616 struct ct_hdr_s *cthdr;
1630 struct list_head *qe;
1631 struct ct_hdr_s *cthdr;
1632 struct fcgs_gidpn_resp_s *gidpn_rsp; 1617 struct fcgs_gidpn_resp_s *gidpn_rsp;
1618 struct bfa_fcs_rport_s *twin;
1619 struct list_head *qe;
1633 1620
1634 bfa_trc(rport->fcs, rport->pwwn); 1621 bfa_trc(rport->fcs, rport->pwwn);
1635 1622
@@ -1637,25 +1624,21 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1637 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1624 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1638 1625
1639 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1626 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1640 /* 1627 /* Check if the pid is the same as before. */
1641 * Check if the pid is the same as before.
1642 */
1643 gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); 1628 gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);
1644 1629
1645 if (gidpn_rsp->dap == rport->pid) { 1630 if (gidpn_rsp->dap == rport->pid) {
1646 /* 1631 /* Device is online */
1647 * Device is online
1648 */
1649 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); 1632 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1650 } else { 1633 } else {
1651 /* 1634 /*
1652 * Device's PID has changed. We need to cleanup and 1635 * Device's PID has changed. We need to cleanup
1653 * re-login. If there is another device with the the 1636 * and re-login. If there is another device with
1654 * newly discovered pid, send an scn notice so that its 1637 * the the newly discovered pid, send an scn notice
1655 * new pid can be discovered. 1638 * so that its new pid can be discovered.
1656 */ 1639 */
1657 list_for_each(qe, &rport->port->rport_q) { 1640 list_for_each(qe, &rport->port->rport_q) {
1658 twin = (struct bfa_fcs_rport_s *)qe; 1641 twin = (struct bfa_fcs_rport_s *) qe;
1659 if (twin == rport) 1642 if (twin == rport)
1660 continue; 1643 continue;
1661 if (gidpn_rsp->dap == twin->pid) { 1644 if (gidpn_rsp->dap == twin->pid) {
@@ -1664,7 +1647,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1664 1647
1665 twin->pid = 0; 1648 twin->pid = 0;
1666 bfa_sm_send_event(twin, 1649 bfa_sm_send_event(twin,
1667 RPSM_EVENT_ADDRESS_CHANGE); 1650 RPSM_EVENT_ADDRESS_CHANGE);
1668 } 1651 }
1669 } 1652 }
1670 rport->pid = gidpn_rsp->dap; 1653 rport->pid = gidpn_rsp->dap;
@@ -1697,17 +1680,59 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1697 } 1680 }
1698} 1681}
1699 1682
1683static void
1684bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1685 bfa_status_t req_status, u32 rsp_len,
1686 u32 resid_len, struct fchs_s *rsp_fchs)
1687{
1688 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
1689 struct ct_hdr_s *cthdr;
1690
1691 bfa_trc(rport->fcs, rport->pwwn);
1692
1693 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1694 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1695
1696 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1697 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
1698 return;
1699 }
1700
1701 /*
1702 * Reject Response
1703 */
1704 switch (cthdr->reason_code) {
1705 case CT_RSN_LOGICAL_BUSY:
1706 /*
1707 * Need to retry
1708 */
1709 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
1710 break;
1711
1712 case CT_RSN_UNABLE_TO_PERF:
1713 /*
1714 * device doesn't exist : Start timer to cleanup this later.
1715 */
1716 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1717 break;
1718
1719 default:
1720 bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
1721 break;
1722 }
1723}
1724
1700/** 1725/**
1701 * Called to send a logout to the rport. 1726 * Called to send a logout to the rport.
1702 */ 1727 */
1703static void 1728static void
1704bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) 1729bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1705{ 1730{
1706 struct bfa_fcs_rport_s *rport = rport_cbarg; 1731 struct bfa_fcs_rport_s *rport = rport_cbarg;
1707 struct bfa_fcs_port_s *port; 1732 struct bfa_fcs_lport_s *port;
1708 struct fchs_s fchs; 1733 struct fchs_s fchs;
1709 struct bfa_fcxp_s *fcxp; 1734 struct bfa_fcxp_s *fcxp;
1710 u16 len; 1735 u16 len;
1711 1736
1712 bfa_trc(rport->fcs, rport->pid); 1737 bfa_trc(rport->fcs, rport->pid);
1713 1738
@@ -1715,19 +1740,19 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1715 1740
1716 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1741 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1717 if (!fcxp) { 1742 if (!fcxp) {
1718 bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1743 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1719 bfa_fcs_rport_send_logo, rport); 1744 bfa_fcs_rport_send_logo, rport);
1720 return; 1745 return;
1721 } 1746 }
1722 rport->fcxp = fcxp; 1747 rport->fcxp = fcxp;
1723 1748
1724 len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1749 len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1725 bfa_fcs_port_get_fcid(port), 0, 1750 bfa_fcs_lport_get_fcid(port), 0,
1726 bfa_fcs_port_get_pwwn(port)); 1751 bfa_fcs_lport_get_pwwn(port));
1727 1752
1728 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1753 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1729 FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ, 1754 FC_CLASS_3, len, &fchs, NULL,
1730 FC_ELS_TOV); 1755 rport, FC_MAX_PDUSZ, FC_ELS_TOV);
1731 1756
1732 rport->stats.logos++; 1757 rport->stats.logos++;
1733 bfa_fcxp_discard(rport->fcxp); 1758 bfa_fcxp_discard(rport->fcxp);
@@ -1735,16 +1760,16 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1735} 1760}
1736 1761
1737/** 1762/**
1738 * Send ACC for a LOGO received. 1763 * Send ACC for a LOGO received.
1739 */ 1764 */
1740static void 1765static void
1741bfa_fcs_rport_send_logo_acc(void *rport_cbarg) 1766bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1742{ 1767{
1743 struct bfa_fcs_rport_s *rport = rport_cbarg; 1768 struct bfa_fcs_rport_s *rport = rport_cbarg;
1744 struct bfa_fcs_port_s *port; 1769 struct bfa_fcs_lport_s *port;
1745 struct fchs_s fchs; 1770 struct fchs_s fchs;
1746 struct bfa_fcxp_s *fcxp; 1771 struct bfa_fcxp_s *fcxp;
1747 u16 len; 1772 u16 len;
1748 1773
1749 bfa_trc(rport->fcs, rport->pid); 1774 bfa_trc(rport->fcs, rport->pid);
1750 1775
@@ -1755,32 +1780,35 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1755 return; 1780 return;
1756 1781
1757 rport->stats.logo_rcvd++; 1782 rport->stats.logo_rcvd++;
1758 len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1783 len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1759 bfa_fcs_port_get_fcid(port), rport->reply_oxid); 1784 rport->pid, bfa_fcs_lport_get_fcid(port),
1785 rport->reply_oxid);
1760 1786
1761 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1787 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1762 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1763} 1789}
1764 1790
1765/** 1791/**
1766 * This routine will be called by bfa_timer on timer timeouts. 1792 * brief
1793 * This routine will be called by bfa_timer on timer timeouts.
1767 * 1794 *
1768 * param[in] rport - pointer to bfa_fcs_port_ns_t. 1795 * param[in] rport - pointer to bfa_fcs_lport_ns_t.
1769 * param[out] rport_status - pointer to return vport status in 1796 * param[out] rport_status - pointer to return vport status in
1770 * 1797 *
1771 * return 1798 * return
1772 * void 1799 * void
1773 * 1800 *
1774* Special Considerations: 1801 * Special Considerations:
1775 * 1802 *
1776 * note 1803 * note
1777 */ 1804 */
1778static void 1805static void
1779bfa_fcs_rport_timeout(void *arg) 1806bfa_fcs_rport_timeout(void *arg)
1780{ 1807{
1781 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)arg; 1808 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg;
1782 1809
1783 rport->stats.plogi_timeouts++; 1810 rport->stats.plogi_timeouts++;
1811 bfa_stats(rport->port, rport_plogi_timeouts);
1784 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); 1812 bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
1785} 1813}
1786 1814
@@ -1789,50 +1817,45 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
1789 struct fchs_s *rx_fchs, u16 len) 1817 struct fchs_s *rx_fchs, u16 len)
1790{ 1818{
1791 struct bfa_fcxp_s *fcxp; 1819 struct bfa_fcxp_s *fcxp;
1792 struct fchs_s fchs; 1820 struct fchs_s fchs;
1793 struct bfa_fcs_port_s *port = rport->port; 1821 struct bfa_fcs_lport_s *port = rport->port;
1794 struct fc_prli_s *prli; 1822 struct fc_prli_s *prli;
1795 1823
1796 bfa_trc(port->fcs, rx_fchs->s_id); 1824 bfa_trc(port->fcs, rx_fchs->s_id);
1797 bfa_trc(port->fcs, rx_fchs->d_id); 1825 bfa_trc(port->fcs, rx_fchs->d_id);
1798 1826
1799 rport->stats.prli_rcvd++; 1827 rport->stats.prli_rcvd++;
1800 1828
1801 if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
1802 /*
1803 * Target Mode : Let the fcptm handle it
1804 */
1805 bfa_fcs_tin_rx_prli(rport->tin, rx_fchs, len);
1806 return;
1807 }
1808
1809 /* 1829 /*
1810 * We are either in Initiator or ipfc Mode 1830 * We are in Initiator Mode
1811 */ 1831 */
1812 prli = (struct fc_prli_s *) (rx_fchs + 1); 1832 prli = (struct fc_prli_s *) (rx_fchs + 1);
1813 1833
1814 if (prli->parampage.servparams.initiator) { 1834 if (prli->parampage.servparams.target) {
1815 bfa_trc(rport->fcs, prli->parampage.type);
1816 rport->scsi_function = BFA_RPORT_INITIATOR;
1817 bfa_fcs_itnim_is_initiator(rport->itnim);
1818 } else {
1819 /* 1835 /*
1820 * @todo: PRLI from a target ? 1836 * PRLI from a target ?
1837 * Send the Acc.
1838 * PRLI sent by us will be used to transition the IT nexus,
1839 * once the response is received from the target.
1821 */ 1840 */
1822 bfa_trc(port->fcs, rx_fchs->s_id); 1841 bfa_trc(port->fcs, rx_fchs->s_id);
1823 rport->scsi_function = BFA_RPORT_TARGET; 1842 rport->scsi_function = BFA_RPORT_TARGET;
1843 } else {
1844 bfa_trc(rport->fcs, prli->parampage.type);
1845 rport->scsi_function = BFA_RPORT_INITIATOR;
1846 bfa_fcs_itnim_is_initiator(rport->itnim);
1824 } 1847 }
1825 1848
1826 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 1849 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
1827 if (!fcxp) 1850 if (!fcxp)
1828 return; 1851 return;
1829 1852
1830 len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 1853 len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1831 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 1854 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1832 port->port_cfg.roles); 1855 rx_fchs->ox_id, port->port_cfg.roles);
1833 1856
1834 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1857 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1835 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1858 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1836} 1859}
1837 1860
1838static void 1861static void
@@ -1840,10 +1863,10 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1840 struct fchs_s *rx_fchs, u16 len) 1863 struct fchs_s *rx_fchs, u16 len)
1841{ 1864{
1842 struct bfa_fcxp_s *fcxp; 1865 struct bfa_fcxp_s *fcxp;
1843 struct fchs_s fchs; 1866 struct fchs_s fchs;
1844 struct bfa_fcs_port_s *port = rport->port; 1867 struct bfa_fcs_lport_s *port = rport->port;
1845 struct fc_rpsc_speed_info_s speeds; 1868 struct fc_rpsc_speed_info_s speeds;
1846 struct bfa_pport_attr_s pport_attr; 1869 struct bfa_port_attr_s pport_attr;
1847 1870
1848 bfa_trc(port->fcs, rx_fchs->s_id); 1871 bfa_trc(port->fcs, rx_fchs->s_id);
1849 bfa_trc(port->fcs, rx_fchs->d_id); 1872 bfa_trc(port->fcs, rx_fchs->d_id);
@@ -1864,12 +1887,12 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1864 if (!fcxp) 1887 if (!fcxp)
1865 return; 1888 return;
1866 1889
1867 len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 1890 len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1868 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 1891 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1869 &speeds); 1892 rx_fchs->ox_id, &speeds);
1870 1893
1871 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1894 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1872 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1895 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1873} 1896}
1874 1897
1875static void 1898static void
@@ -1877,28 +1900,20 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
1877 struct fchs_s *rx_fchs, u16 len) 1900 struct fchs_s *rx_fchs, u16 len)
1878{ 1901{
1879 struct bfa_fcxp_s *fcxp; 1902 struct bfa_fcxp_s *fcxp;
1880 struct fchs_s fchs; 1903 struct fchs_s fchs;
1881 struct bfa_fcs_port_s *port = rport->port; 1904 struct bfa_fcs_lport_s *port = rport->port;
1882 struct fc_adisc_s *adisc; 1905 struct fc_adisc_s *adisc;
1883 1906
1884 bfa_trc(port->fcs, rx_fchs->s_id); 1907 bfa_trc(port->fcs, rx_fchs->s_id);
1885 bfa_trc(port->fcs, rx_fchs->d_id); 1908 bfa_trc(port->fcs, rx_fchs->d_id);
1886 1909
1887 rport->stats.adisc_rcvd++; 1910 rport->stats.adisc_rcvd++;
1888 1911
1889 if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) {
1890 /*
1891 * @todo : Target Mode handling
1892 */
1893 bfa_trc(port->fcs, rx_fchs->d_id);
1894 bfa_assert(0);
1895 return;
1896 }
1897
1898 adisc = (struct fc_adisc_s *) (rx_fchs + 1); 1912 adisc = (struct fc_adisc_s *) (rx_fchs + 1);
1899 1913
1900 /* 1914 /*
1901 * Accept if the itnim for this rport is online. Else reject the ADISC 1915 * Accept if the itnim for this rport is online.
1916 * Else reject the ADISC.
1902 */ 1917 */
1903 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { 1918 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
1904 1919
@@ -1907,27 +1922,25 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
1907 return; 1922 return;
1908 1923
1909 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1924 len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1910 rx_fchs->s_id, 1925 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
1911 bfa_fcs_port_get_fcid(port), 1926 rx_fchs->ox_id, port->port_cfg.pwwn,
1912 rx_fchs->ox_id, port->port_cfg.pwwn, 1927 port->port_cfg.nwwn);
1913 port->port_cfg.nwwn);
1914 1928
1915 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, 1929 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
1916 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, 1930 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
1917 FC_MAX_PDUSZ, 0); 1931 FC_MAX_PDUSZ, 0);
1918 } else { 1932 } else {
1919 rport->stats.adisc_rejected++; 1933 rport->stats.adisc_rejected++;
1920 bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, 1934 bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
1921 FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, 1935 FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
1922 FC_LS_RJT_EXP_LOGIN_REQUIRED); 1936 FC_LS_RJT_EXP_LOGIN_REQUIRED);
1923 } 1937 }
1924
1925} 1938}
1926 1939
1927static void 1940static void
1928bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) 1941bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1929{ 1942{
1930 struct bfa_fcs_port_s *port = rport->port; 1943 struct bfa_fcs_lport_s *port = rport->port;
1931 struct bfa_rport_info_s rport_info; 1944 struct bfa_rport_info_s rport_info;
1932 1945
1933 rport_info.pid = rport->pid; 1946 rport_info.pid = rport->pid;
@@ -1941,38 +1954,18 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1941 bfa_rport_online(rport->bfa_rport, &rport_info); 1954 bfa_rport_online(rport->bfa_rport, &rport_info);
1942} 1955}
1943 1956
1944static void
1945bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport)
1946{
1947 if (bfa_fcs_port_is_initiator(rport->port))
1948 bfa_fcs_itnim_pause(rport->itnim);
1949
1950 if (bfa_fcs_port_is_target(rport->port))
1951 bfa_fcs_tin_pause(rport->tin);
1952}
1953
1954static void
1955bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport)
1956{
1957 if (bfa_fcs_port_is_initiator(rport->port))
1958 bfa_fcs_itnim_resume(rport->itnim);
1959
1960 if (bfa_fcs_port_is_target(rport->port))
1961 bfa_fcs_tin_resume(rport->tin);
1962}
1963
1964static struct bfa_fcs_rport_s * 1957static struct bfa_fcs_rport_s *
1965bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid) 1958bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1966{ 1959{
1967 struct bfa_fcs_s *fcs = port->fcs; 1960 struct bfa_fcs_s *fcs = port->fcs;
1968 struct bfa_fcs_rport_s *rport; 1961 struct bfa_fcs_rport_s *rport;
1969 struct bfad_rport_s *rport_drv; 1962 struct bfad_rport_s *rport_drv;
1970 1963
1971 /** 1964 /**
1972 * allocate rport 1965 * allocate rport
1973 */ 1966 */
1974 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
1975 != BFA_STATUS_OK) { 1968 != BFA_STATUS_OK) {
1976 bfa_trc(fcs, rpid); 1969 bfa_trc(fcs, rpid);
1977 return NULL; 1970 return NULL;
1978 } 1971 }
@@ -1999,10 +1992,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
1999 /** 1992 /**
2000 * allocate FC-4s 1993 * allocate FC-4s
2001 */ 1994 */
2002 bfa_assert(bfa_fcs_port_is_initiator(port) ^ 1995 bfa_assert(bfa_fcs_lport_is_initiator(port));
2003 bfa_fcs_port_is_target(port));
2004 1996
2005 if (bfa_fcs_port_is_initiator(port)) { 1997 if (bfa_fcs_lport_is_initiator(port)) {
2006 rport->itnim = bfa_fcs_itnim_create(rport); 1998 rport->itnim = bfa_fcs_itnim_create(rport);
2007 if (!rport->itnim) { 1999 if (!rport->itnim) {
2008 bfa_trc(fcs, rpid); 2000 bfa_trc(fcs, rpid);
@@ -2012,23 +2004,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
2012 } 2004 }
2013 } 2005 }
2014 2006
2015 if (bfa_fcs_port_is_target(port)) { 2007 bfa_fcs_lport_add_rport(port, rport);
2016 rport->tin = bfa_fcs_tin_create(rport);
2017 if (!rport->tin) {
2018 bfa_trc(fcs, rpid);
2019 bfa_rport_delete(rport->bfa_rport);
2020 kfree(rport_drv);
2021 return NULL;
2022 }
2023 }
2024
2025 bfa_fcs_port_add_rport(port, rport);
2026 2008
2027 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 2009 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
2028 2010
2029 /* 2011 /* Initialize the Rport Features(RPF) Sub Module */
2030 * Initialize the Rport Features(RPF) Sub Module
2031 */
2032 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2012 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2033 bfa_fcs_rpf_init(rport); 2013 bfa_fcs_rpf_init(rport);
2034 2014
@@ -2039,121 +2019,78 @@ bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid)
2039static void 2019static void
2040bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) 2020bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2041{ 2021{
2042 struct bfa_fcs_port_s *port = rport->port; 2022 struct bfa_fcs_lport_s *port = rport->port;
2043 2023
2044 /** 2024 /**
2045 * - delete FC-4s 2025 * - delete FC-4s
2046 * - delete BFA rport 2026 * - delete BFA rport
2047 * - remove from queue of rports 2027 * - remove from queue of rports
2048 */ 2028 */
2049 if (bfa_fcs_port_is_initiator(port)) 2029 if (bfa_fcs_lport_is_initiator(port)) {
2050 bfa_fcs_itnim_delete(rport->itnim); 2030 bfa_fcs_itnim_delete(rport->itnim);
2051 2031 if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
2052 if (bfa_fcs_port_is_target(port)) 2032 bfa_fcs_rpf_rport_offline(rport);
2053 bfa_fcs_tin_delete(rport->tin); 2033 }
2054 2034
2055 bfa_rport_delete(rport->bfa_rport); 2035 bfa_rport_delete(rport->bfa_rport);
2056 bfa_fcs_port_del_rport(port, rport); 2036 bfa_fcs_lport_del_rport(port, rport);
2057 kfree(rport->rp_drv); 2037 kfree(rport->rp_drv);
2058} 2038}
2059 2039
2060static void 2040static void
2061bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2062 enum bfa_rport_aen_event event,
2063 struct bfa_rport_aen_data_s *data)
2064{
2065 union bfa_aen_data_u aen_data;
2066 struct bfa_log_mod_s *logmod = rport->fcs->logm;
2067 wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
2068 wwn_t rpwwn = rport->pwwn;
2069 char lpwwn_ptr[BFA_STRING_32];
2070 char rpwwn_ptr[BFA_STRING_32];
2071 char *prio_str[] = { "unknown", "high", "medium", "low" };
2072
2073 wwn2str(lpwwn_ptr, lpwwn);
2074 wwn2str(rpwwn_ptr, rpwwn);
2075
2076 switch (event) {
2077 case BFA_RPORT_AEN_ONLINE:
2078 case BFA_RPORT_AEN_OFFLINE:
2079 case BFA_RPORT_AEN_DISCONNECT:
2080 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
2081 rpwwn_ptr, lpwwn_ptr);
2082 break;
2083 case BFA_RPORT_AEN_QOS_PRIO:
2084 aen_data.rport.priv.qos = data->priv.qos;
2085 bfa_log(logmod, BFA_AEN_RPORT_QOS_PRIO,
2086 prio_str[aen_data.rport.priv.qos.qos_priority],
2087 rpwwn_ptr, lpwwn_ptr);
2088 break;
2089 case BFA_RPORT_AEN_QOS_FLOWID:
2090 aen_data.rport.priv.qos = data->priv.qos;
2091 bfa_log(logmod, BFA_AEN_RPORT_QOS_FLOWID,
2092 aen_data.rport.priv.qos.qos_flow_id, rpwwn_ptr,
2093 lpwwn_ptr);
2094 break;
2095 default:
2096 break;
2097 }
2098
2099 aen_data.rport.vf_id = rport->port->fabric->vf_id;
2100 aen_data.rport.ppwwn =
2101 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(rport->fcs));
2102 aen_data.rport.lpwwn = lpwwn;
2103 aen_data.rport.rpwwn = rpwwn;
2104}
2105
2106static void
2107bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2041bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2108{ 2042{
2109 struct bfa_fcs_port_s *port = rport->port; 2043 struct bfa_fcs_lport_s *port = rport->port;
2044 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2045 char lpwwn_buf[BFA_STRING_32];
2046 char rpwwn_buf[BFA_STRING_32];
2110 2047
2111 rport->stats.onlines++; 2048 rport->stats.onlines++;
2112 2049
2113 if (bfa_fcs_port_is_initiator(port)) { 2050 if (bfa_fcs_lport_is_initiator(port)) {
2114 bfa_fcs_itnim_rport_online(rport->itnim); 2051 bfa_fcs_itnim_rport_online(rport->itnim);
2115 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2052 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2116 bfa_fcs_rpf_rport_online(rport); 2053 bfa_fcs_rpf_rport_online(rport);
2117 }; 2054 };
2118 2055
2119 if (bfa_fcs_port_is_target(port)) 2056 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2120 bfa_fcs_tin_rport_online(rport->tin); 2057 wwn2str(rpwwn_buf, rport->pwwn);
2121
2122 /*
2123 * Don't post events for well known addresses
2124 */
2125 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2058 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2126 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); 2059 BFA_LOG(KERN_INFO, bfad, log_level,
2060 "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
2061 rpwwn_buf, lpwwn_buf);
2127} 2062}
2128 2063
2129static void 2064static void
2130bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) 2065bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2131{ 2066{
2132 struct bfa_fcs_port_s *port = rport->port; 2067 struct bfa_fcs_lport_s *port = rport->port;
2068 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2069 char lpwwn_buf[BFA_STRING_32];
2070 char rpwwn_buf[BFA_STRING_32];
2133 2071
2134 rport->stats.offlines++; 2072 rport->stats.offlines++;
2135 2073
2136 /* 2074 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2137 * Don't post events for well known addresses 2075 wwn2str(rpwwn_buf, rport->pwwn);
2138 */
2139 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2076 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2140 if (bfa_fcs_port_is_online(rport->port) == BFA_TRUE) { 2077 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
2141 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_DISCONNECT, 2078 BFA_LOG(KERN_ERR, bfad, log_level,
2142 NULL); 2079 "Remote port (WWN = %s) connectivity lost for "
2143 } else { 2080 "logical port (WWN = %s)\n",
2144 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_OFFLINE, 2081 rpwwn_buf, lpwwn_buf);
2145 NULL); 2082 else
2146 } 2083 BFA_LOG(KERN_INFO, bfad, log_level,
2084 "Remote port (WWN = %s) offlined by "
2085 "logical port (WWN = %s)\n",
2086 rpwwn_buf, lpwwn_buf);
2147 } 2087 }
2148 2088
2149 if (bfa_fcs_port_is_initiator(port)) { 2089 if (bfa_fcs_lport_is_initiator(port)) {
2150 bfa_fcs_itnim_rport_offline(rport->itnim); 2090 bfa_fcs_itnim_rport_offline(rport->itnim);
2151 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2091 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2152 bfa_fcs_rpf_rport_offline(rport); 2092 bfa_fcs_rpf_rport_offline(rport);
2153 } 2093 }
2154
2155 if (bfa_fcs_port_is_target(port))
2156 bfa_fcs_tin_rport_offline(rport->tin);
2157} 2094}
2158 2095
2159/** 2096/**
@@ -2162,7 +2099,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2162static void 2099static void
2163bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) 2100bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2164{ 2101{
2165 struct bfa_fcs_port_s *port = rport->port; 2102 bfa_fcs_lport_t *port = rport->port;
2166 2103
2167 /** 2104 /**
2168 * - port name 2105 * - port name
@@ -2193,12 +2130,13 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2193 /** 2130 /**
2194 * Direct Attach P2P mode : 2131 * Direct Attach P2P mode :
2195 * This is to handle a bug (233476) in IBM targets in Direct Attach 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach
2196 * Mode. Basically, in FLOGI Accept the target would have erroneously 2133 * Mode. Basically, in FLOGI Accept the target would have
2197 * set the BB Credit to the value used in the FLOGI sent by the HBA. 2134 * erroneously set the BB Credit to the value used in the FLOGI
2198 * It uses the correct value (its own BB credit) in PLOGI. 2135 * sent by the HBA. It uses the correct value (its own BB credit)
2136 * in PLOGI.
2199 */ 2137 */
2200 if ((!bfa_fcs_fabric_is_switched(port->fabric)) 2138 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
2201 && (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { 2139 (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
2202 2140
2203 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2141 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
2204 bfa_trc(port->fcs, port->fabric->bb_credit); 2142 bfa_trc(port->fcs, port->fabric->bb_credit);
@@ -2211,7 +2149,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2211} 2149}
2212 2150
2213/** 2151/**
2214 * Called to handle LOGO received from an existing remote port. 2152 * Called to handle LOGO received from an existing remote port.
2215 */ 2153 */
2216static void 2154static void
2217bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) 2155bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
@@ -2231,8 +2169,8 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2231 */ 2169 */
2232 2170
2233/** 2171/**
2234 * Called by bport/vport to create a remote port instance for a discovered 2172 * Called by bport/vport to create a remote port instance for a discovered
2235 * remote device. 2173 * remote device.
2236 * 2174 *
2237 * @param[in] port - base port or vport 2175 * @param[in] port - base port or vport
2238 * @param[in] rpid - remote port ID 2176 * @param[in] rpid - remote port ID
@@ -2240,7 +2178,7 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2240 * @return None 2178 * @return None
2241 */ 2179 */
2242struct bfa_fcs_rport_s * 2180struct bfa_fcs_rport_s *
2243bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid) 2181bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
2244{ 2182{
2245 struct bfa_fcs_rport_s *rport; 2183 struct bfa_fcs_rport_s *rport;
2246 2184
@@ -2262,10 +2200,9 @@ bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid)
2262 * @return None 2200 * @return None
2263 */ 2201 */
2264struct bfa_fcs_rport_s * 2202struct bfa_fcs_rport_s *
2265bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn) 2203bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2266{ 2204{
2267 struct bfa_fcs_rport_s *rport; 2205 struct bfa_fcs_rport_s *rport;
2268
2269 bfa_trc(port->fcs, rpwwn); 2206 bfa_trc(port->fcs, rpwwn);
2270 rport = bfa_fcs_rport_alloc(port, rpwwn, 0); 2207 rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
2271 if (!rport) 2208 if (!rport)
@@ -2274,7 +2211,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn)
2274 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
2275 return rport; 2212 return rport;
2276} 2213}
2277
2278/** 2214/**
2279 * Called by bport in private loop topology to indicate that a 2215 * Called by bport in private loop topology to indicate that a
2280 * rport has been discovered and plogi has been completed. 2216 * rport has been discovered and plogi has been completed.
@@ -2283,8 +2219,8 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn)
2283 * @param[in] rpid - remote port ID 2219 * @param[in] rpid - remote port ID
2284 */ 2220 */
2285void 2221void
2286bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs, 2222bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2287 struct fc_logi_s *plogi) 2223 struct fc_logi_s *plogi)
2288{ 2224{
2289 struct bfa_fcs_rport_s *rport; 2225 struct bfa_fcs_rport_s *rport;
2290 2226
@@ -2298,12 +2234,12 @@ bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2298} 2234}
2299 2235
2300/** 2236/**
2301 * Called by bport/vport to handle PLOGI received from a new remote port. 2237 * Called by bport/vport to handle PLOGI received from a new remote port.
2302 * If an existing rport does a plogi, it will be handled separately. 2238 * If an existing rport does a plogi, it will be handled separately.
2303 */ 2239 */
2304void 2240void
2305bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs, 2241bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2306 struct fc_logi_s *plogi) 2242 struct fc_logi_s *plogi)
2307{ 2243{
2308 struct bfa_fcs_rport_s *rport; 2244 struct bfa_fcs_rport_s *rport;
2309 2245
@@ -2323,9 +2259,9 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
2323static int 2259static int
2324wwn_compare(wwn_t wwn1, wwn_t wwn2) 2260wwn_compare(wwn_t wwn1, wwn_t wwn2)
2325{ 2261{
2326 u8 *b1 = (u8 *) &wwn1; 2262 u8 *b1 = (u8 *) &wwn1;
2327 u8 *b2 = (u8 *) &wwn2; 2263 u8 *b2 = (u8 *) &wwn2;
2328 int i; 2264 int i;
2329 2265
2330 for (i = 0; i < sizeof(wwn_t); i++) { 2266 for (i = 0; i < sizeof(wwn_t); i++) {
2331 if (b1[i] < b2[i]) 2267 if (b1[i] < b2[i])
@@ -2337,12 +2273,12 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
2337} 2273}
2338 2274
2339/** 2275/**
2340 * Called by bport/vport to handle PLOGI received from an existing 2276 * Called by bport/vport to handle PLOGI received from an existing
2341 * remote port. 2277 * remote port.
2342 */ 2278 */
2343void 2279void
2344bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2345 struct fc_logi_s *plogi) 2281 struct fc_logi_s *plogi)
2346{ 2282{
2347 /** 2283 /**
2348 * @todo Handle P2P and initiator-initiator. 2284 * @todo Handle P2P and initiator-initiator.
@@ -2360,9 +2296,9 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2360 * If the link topology is N2N, 2296 * If the link topology is N2N,
2361 * this Plogi should be accepted. 2297 * this Plogi should be accepted.
2362 */ 2298 */
2363 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) 2299 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
2364 && (bfa_fcs_fabric_is_switched(rport->port->fabric)) 2300 (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
2365 && (!BFA_FCS_PID_IS_WKA(rport->pid))) { 2301 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
2366 bfa_trc(rport->fcs, rport->pid); 2302 bfa_trc(rport->fcs, rport->pid);
2367 return; 2303 return;
2368 } 2304 }
@@ -2374,10 +2310,10 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2374/** 2310/**
2375 * Called by bport/vport to delete a remote port instance. 2311 * Called by bport/vport to delete a remote port instance.
2376 * 2312 *
2377* Rport delete is called under the following conditions: 2313 * Rport delete is called under the following conditions:
2378 * - vport is deleted 2314 * - vport is deleted
2379 * - vf is deleted 2315 * - vf is deleted
2380 * - explicit request from OS to delete rport (vmware) 2316 * - explicit request from OS to delete rport
2381 */ 2317 */
2382void 2318void
2383bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) 2319bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
@@ -2404,20 +2340,18 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2404{ 2340{
2405 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2406} 2342}
2407
2408/** 2343/**
2409 * Called by bport/vport to notify SCN for the remote port 2344 * Called by bport/vport to notify SCN for the remote port
2410 */ 2345 */
2411void 2346void
2412bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) 2347bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2413{ 2348{
2414
2415 rport->stats.rscns++; 2349 rport->stats.rscns++;
2416 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2417} 2351}
2418 2352
2419/** 2353/**
2420 * Called by fcpim to notify that the ITN cleanup is done. 2354 * Called by fcpim to notify that the ITN cleanup is done.
2421 */ 2355 */
2422void 2356void
2423bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) 2357bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
@@ -2426,7 +2360,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2426} 2360}
2427 2361
2428/** 2362/**
2429 * Called by fcptm to notify that the ITN cleanup is done. 2363 * Called by fcptm to notify that the ITN cleanup is done.
2430 */ 2364 */
2431void 2365void
2432bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) 2366bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
@@ -2435,99 +2369,100 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2435} 2369}
2436 2370
2437/** 2371/**
2438 * This routine BFA callback for bfa_rport_online() call. 2372 * brief
2373 * This routine BFA callback for bfa_rport_online() call.
2439 * 2374 *
2440 * param[in] cb_arg - rport struct. 2375 * param[in] cb_arg - rport struct.
2441 * 2376 *
2442 * return 2377 * return
2443 * void 2378 * void
2444 * 2379 *
2445* Special Considerations: 2380 * Special Considerations:
2446 * 2381 *
2447 * note 2382 * note
2448 */ 2383 */
2449void 2384void
2450bfa_cb_rport_online(void *cbarg) 2385bfa_cb_rport_online(void *cbarg)
2451{ 2386{
2452 2387
2453 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2388 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2454 2389
2455 bfa_trc(rport->fcs, rport->pwwn); 2390 bfa_trc(rport->fcs, rport->pwwn);
2456 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
2457} 2392}
2458 2393
2459/** 2394/**
2460 * This routine BFA callback for bfa_rport_offline() call. 2395 * brief
2396 * This routine BFA callback for bfa_rport_offline() call.
2461 * 2397 *
2462 * param[in] rport - 2398 * param[in] rport -
2463 * 2399 *
2464 * return 2400 * return
2465 * void 2401 * void
2466 * 2402 *
2467 * Special Considerations: 2403 * Special Considerations:
2468 * 2404 *
2469 * note 2405 * note
2470 */ 2406 */
2471void 2407void
2472bfa_cb_rport_offline(void *cbarg) 2408bfa_cb_rport_offline(void *cbarg)
2473{ 2409{
2474 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2410 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2475 2411
2476 bfa_trc(rport->fcs, rport->pwwn); 2412 bfa_trc(rport->fcs, rport->pwwn);
2477 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
2478} 2414}
2479 2415
2480/** 2416/**
2481 * This routine is a static BFA callback when there is a QoS flow_id 2417 * brief
2482 * change notification 2418 * This routine is a static BFA callback when there is a QoS flow_id
2419 * change notification
2483 * 2420 *
2484 * @param[in] rport - 2421 * param[in] rport -
2485 * 2422 *
2486 * @return void 2423 * return
2424 * void
2487 * 2425 *
2488 * Special Considerations: 2426 * Special Considerations:
2489 * 2427 *
2490 * @note 2428 * note
2491 */ 2429 */
2492void 2430void
2493bfa_cb_rport_qos_scn_flowid(void *cbarg, 2431bfa_cb_rport_qos_scn_flowid(void *cbarg,
2494 struct bfa_rport_qos_attr_s old_qos_attr, 2432 struct bfa_rport_qos_attr_s old_qos_attr,
2495 struct bfa_rport_qos_attr_s new_qos_attr) 2433 struct bfa_rport_qos_attr_s new_qos_attr)
2496{ 2434{
2497 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2435 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2498 struct bfa_rport_aen_data_s aen_data;
2499 2436
2500 bfa_trc(rport->fcs, rport->pwwn); 2437 bfa_trc(rport->fcs, rport->pwwn);
2501 aen_data.priv.qos = new_qos_attr;
2502 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2503} 2438}
2504 2439
2505/** 2440/**
2506 * This routine is a static BFA callback when there is a QoS priority 2441 * brief
2507 * change notification 2442 * This routine is a static BFA callback when there is a QoS priority
2443 * change notification
2508 * 2444 *
2509 * @param[in] rport - 2445 * param[in] rport -
2510 * 2446 *
2511 * @return void 2447 * return
2448 * void
2512 * 2449 *
2513 * Special Considerations: 2450 * Special Considerations:
2514 * 2451 *
2515 * @note 2452 * note
2516 */ 2453 */
2517void 2454void
2518bfa_cb_rport_qos_scn_prio(void *cbarg, struct bfa_rport_qos_attr_s old_qos_attr, 2455bfa_cb_rport_qos_scn_prio(void *cbarg,
2519 struct bfa_rport_qos_attr_s new_qos_attr) 2456 struct bfa_rport_qos_attr_s old_qos_attr,
2457 struct bfa_rport_qos_attr_s new_qos_attr)
2520{ 2458{
2521 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; 2459 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2522 struct bfa_rport_aen_data_s aen_data;
2523 2460
2524 bfa_trc(rport->fcs, rport->pwwn); 2461 bfa_trc(rport->fcs, rport->pwwn);
2525 aen_data.priv.qos = new_qos_attr;
2526 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2527} 2462}
2528 2463
2529/** 2464/**
2530 * Called to process any unsolicted frames from this remote port 2465 * Called to process any unsolicted frames from this remote port
2531 */ 2466 */
2532void 2467void
2533bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) 2468bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
@@ -2536,14 +2471,14 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2536} 2471}
2537 2472
2538/** 2473/**
2539 * Called to process any unsolicted frames from this remote port 2474 * Called to process any unsolicted frames from this remote port
2540 */ 2475 */
2541void 2476void
2542bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 2477bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
2543 u16 len) 2478 struct fchs_s *fchs, u16 len)
2544{ 2479{
2545 struct bfa_fcs_port_s *port = rport->port; 2480 struct bfa_fcs_lport_s *port = rport->port;
2546 struct fc_els_cmd_s *els_cmd; 2481 struct fc_els_cmd_s *els_cmd;
2547 2482
2548 bfa_trc(rport->fcs, fchs->s_id); 2483 bfa_trc(rport->fcs, fchs->s_id);
2549 bfa_trc(rport->fcs, fchs->d_id); 2484 bfa_trc(rport->fcs, fchs->d_id);
@@ -2558,30 +2493,33 @@ bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2558 2493
2559 switch (els_cmd->els_code) { 2494 switch (els_cmd->els_code) {
2560 case FC_ELS_LOGO: 2495 case FC_ELS_LOGO:
2496 bfa_stats(port, plogi_rcvd);
2561 bfa_fcs_rport_process_logo(rport, fchs); 2497 bfa_fcs_rport_process_logo(rport, fchs);
2562 break; 2498 break;
2563 2499
2564 case FC_ELS_ADISC: 2500 case FC_ELS_ADISC:
2501 bfa_stats(port, adisc_rcvd);
2565 bfa_fcs_rport_process_adisc(rport, fchs, len); 2502 bfa_fcs_rport_process_adisc(rport, fchs, len);
2566 break; 2503 break;
2567 2504
2568 case FC_ELS_PRLO: 2505 case FC_ELS_PRLO:
2569 if (bfa_fcs_port_is_initiator(port)) 2506 bfa_stats(port, prlo_rcvd);
2507 if (bfa_fcs_lport_is_initiator(port))
2570 bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); 2508 bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
2571
2572 if (bfa_fcs_port_is_target(port))
2573 bfa_fcs_fcptm_uf_recv(rport->tin, fchs, len);
2574 break; 2509 break;
2575 2510
2576 case FC_ELS_PRLI: 2511 case FC_ELS_PRLI:
2512 bfa_stats(port, prli_rcvd);
2577 bfa_fcs_rport_process_prli(rport, fchs, len); 2513 bfa_fcs_rport_process_prli(rport, fchs, len);
2578 break; 2514 break;
2579 2515
2580 case FC_ELS_RPSC: 2516 case FC_ELS_RPSC:
2517 bfa_stats(port, rpsc_rcvd);
2581 bfa_fcs_rport_process_rpsc(rport, fchs, len); 2518 bfa_fcs_rport_process_rpsc(rport, fchs, len);
2582 break; 2519 break;
2583 2520
2584 default: 2521 default:
2522 bfa_stats(port, un_handled_els_rcvd);
2585 bfa_fcs_rport_send_ls_rjt(rport, fchs, 2523 bfa_fcs_rport_send_ls_rjt(rport, fchs,
2586 FC_LS_RJT_RSN_CMD_NOT_SUPP, 2524 FC_LS_RJT_RSN_CMD_NOT_SUPP,
2587 FC_LS_RJT_EXP_NO_ADDL_INFO); 2525 FC_LS_RJT_EXP_NO_ADDL_INFO);
@@ -2589,28 +2527,27 @@ bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
2589 } 2527 }
2590} 2528}
2591 2529
2592/* Send best case acc to prlo */ 2530/* send best case acc to prlo */
2593static void 2531static void
2594bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) 2532bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
2595{ 2533{
2596 struct bfa_fcs_port_s *port = rport->port; 2534 struct bfa_fcs_lport_s *port = rport->port;
2597 struct fchs_s fchs; 2535 struct fchs_s fchs;
2598 struct bfa_fcxp_s *fcxp; 2536 struct bfa_fcxp_s *fcxp;
2599 int len; 2537 int len;
2600 2538
2601 bfa_trc(rport->fcs, rport->pid); 2539 bfa_trc(rport->fcs, rport->pid);
2602 2540
2603 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 2541 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
2604 if (!fcxp) 2542 if (!fcxp)
2605 return; 2543 return;
2606
2607 len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 2544 len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2608 rport->pid, bfa_fcs_port_get_fcid(port), 2545 rport->pid, bfa_fcs_lport_get_fcid(port),
2609 rport->reply_oxid, 0); 2546 rport->reply_oxid, 0);
2610 2547
2611 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, 2548 bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
2612 port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, 2549 port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
2613 NULL, NULL, FC_MAX_PDUSZ, 0); 2550 NULL, NULL, FC_MAX_PDUSZ, 0);
2614} 2551}
2615 2552
2616/* 2553/*
@@ -2620,10 +2557,10 @@ static void
2620bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2557bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2621 u8 reason_code, u8 reason_code_expl) 2558 u8 reason_code, u8 reason_code_expl)
2622{ 2559{
2623 struct bfa_fcs_port_s *port = rport->port; 2560 struct bfa_fcs_lport_s *port = rport->port;
2624 struct fchs_s fchs; 2561 struct fchs_s fchs;
2625 struct bfa_fcxp_s *fcxp; 2562 struct bfa_fcxp_s *fcxp;
2626 int len; 2563 int len;
2627 2564
2628 bfa_trc(rport->fcs, rx_fchs->s_id); 2565 bfa_trc(rport->fcs, rx_fchs->s_id);
2629 2566
@@ -2631,12 +2568,13 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2631 if (!fcxp) 2568 if (!fcxp)
2632 return; 2569 return;
2633 2570
2634 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 2571 len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
2635 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, 2572 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
2636 reason_code, reason_code_expl); 2573 rx_fchs->ox_id, reason_code, reason_code_expl);
2637 2574
2638 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2575 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
2639 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 2576 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
2577 FC_MAX_PDUSZ, 0);
2640} 2578}
2641 2579
2642/** 2580/**
@@ -2649,24 +2587,22 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2649} 2587}
2650 2588
2651/** 2589/**
2652 * Called by the Driver to set rport delete/ageout timeout 2590 * brief
2591 * Called by the Driver to set rport delete/ageout timeout
2653 * 2592 *
2654 * param[in] rport timeout value in seconds. 2593 * param[in] rport timeout value in seconds.
2655 * 2594 *
2656 * return None 2595 * return None
2657 */ 2596 */
2658void 2597void
2659bfa_fcs_rport_set_del_timeout(u8 rport_tmo) 2598bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
2660{ 2599{
2661 /* 2600 /* convert to Millisecs */
2662 * convert to Millisecs
2663 */
2664 if (rport_tmo > 0) 2601 if (rport_tmo > 0)
2665 bfa_fcs_rport_del_timeout = rport_tmo * 1000; 2602 bfa_fcs_rport_del_timeout = rport_tmo * 1000;
2666} 2603}
2667
2668void 2604void
2669bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id) 2605bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
2670{ 2606{
2671 bfa_trc(rport->fcs, rport->pid); 2607 bfa_trc(rport->fcs, rport->pid);
2672 2608
@@ -2674,3 +2610,517 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id)
2674 rport->reply_oxid = ox_id; 2610 rport->reply_oxid = ox_id;
2675 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); 2611 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
2676} 2612}
2613
2614
2615
2616/**
2617 * Remote port implementation.
2618 */
2619
2620/**
2621 * fcs_rport_api FCS rport API.
2622 */
2623
2624/**
2625 * Direct API to add a target by port wwn. This interface is used, for
2626 * example, by bios when target pwwn is known from boot lun configuration.
2627 */
2628bfa_status_t
2629bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
2630 struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
2631{
2632 bfa_trc(port->fcs, *pwwn);
2633
2634 return BFA_STATUS_OK;
2635}
2636
2637/**
2638 * Direct API to remove a target and its associated resources. This
2639 * interface is used, for example, by driver to remove target
2640 * ports from the target list for a VM.
2641 */
2642bfa_status_t
2643bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
2644{
2645
2646 struct bfa_fcs_rport_s *rport;
2647
2648 bfa_trc(rport_in->fcs, rport_in->pwwn);
2649
2650 rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
2651 if (rport == NULL) {
2652 /*
2653 * TBD Error handling
2654 */
2655 bfa_trc(rport_in->fcs, rport_in->pid);
2656 return BFA_STATUS_UNKNOWN_RWWN;
2657 }
2658
2659 /*
2660 * TBD if this remote port is online, send a logo
2661 */
2662 return BFA_STATUS_OK;
2663
2664}
2665
2666/**
2667 * Remote device status for display/debug.
2668 */
2669void
2670bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2671 struct bfa_rport_attr_s *rport_attr)
2672{
2673 struct bfa_rport_qos_attr_s qos_attr;
2674 bfa_fcs_lport_t *port = rport->port;
2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
2676
2677 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2678
2679 rport_attr->pid = rport->pid;
2680 rport_attr->pwwn = rport->pwwn;
2681 rport_attr->nwwn = rport->nwwn;
2682 rport_attr->cos_supported = rport->fc_cos;
2683 rport_attr->df_sz = rport->maxfrsize;
2684 rport_attr->state = bfa_fcs_rport_get_state(rport);
2685 rport_attr->fc_cos = rport->fc_cos;
2686 rport_attr->cisc = rport->cisc;
2687 rport_attr->scsi_function = rport->scsi_function;
2688 rport_attr->curr_speed = rport->rpf.rpsc_speed;
2689 rport_attr->assigned_speed = rport->rpf.assigned_speed;
2690
2691 bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
2692 rport_attr->qos_attr = qos_attr;
2693
2694 rport_attr->trl_enforced = BFA_FALSE;
2695 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
2696 if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
2697 /* Use default ratelim speed setting */
2698 rport_speed =
2699 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2700 }
2701
2702 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
2703 rport_attr->trl_enforced = BFA_TRUE;
2704 }
2705}
2706
2707/**
2708 * Per remote device statistics.
2709 */
2710void
2711bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
2712 struct bfa_rport_stats_s *stats)
2713{
2714 *stats = rport->stats;
2715}
2716
2717void
2718bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
2719{
2720 bfa_os_memset((char *)&rport->stats, 0,
2721 sizeof(struct bfa_rport_stats_s));
2722}
2723
2724struct bfa_fcs_rport_s *
2725bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2726{
2727 struct bfa_fcs_rport_s *rport;
2728
2729 rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2730 if (rport == NULL) {
2731 /*
2732 * TBD Error handling
2733 */
2734 }
2735
2736 return rport;
2737}
2738
2739struct bfa_fcs_rport_s *
2740bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
2741{
2742 struct bfa_fcs_rport_s *rport;
2743
2744 rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn);
2745 if (rport == NULL) {
2746 /*
2747 * TBD Error handling
2748 */
2749 }
2750
2751 return rport;
2752}
2753
2754/*
2755 * This API is to set the Rport's speed. Should be used when RPSC is not
2756 * supported by the rport.
2757 */
2758void
2759bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
2760{
2761 rport->rpf.assigned_speed = speed;
2762
2763 /* Set this speed in f/w only if the RPSC speed is not available */
2764 if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
2765 bfa_rport_speed(rport->bfa_rport, speed);
2766}
2767
2768
2769
2770/**
2771 * Remote port features (RPF) implementation.
2772 */
2773
2774#define BFA_FCS_RPF_RETRIES (3)
2775#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
2776
2777static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
2778 struct bfa_fcxp_s *fcxp_alloced);
2779static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
2780 struct bfa_fcxp_s *fcxp,
2781 void *cbarg,
2782 bfa_status_t req_status,
2783 u32 rsp_len,
2784 u32 resid_len,
2785 struct fchs_s *rsp_fchs);
2786
2787static void bfa_fcs_rpf_timeout(void *arg);
2788
2789/**
2790 * fcs_rport_ftrs_sm FCS rport state machine events
2791 */
2792
2793enum rpf_event {
2794 RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
2795 RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
2796 RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
2797 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
2798 RPFSM_EVENT_RPSC_COMP = 5,
2799 RPFSM_EVENT_RPSC_FAIL = 6,
2800 RPFSM_EVENT_RPSC_ERROR = 7,
2801};
2802
2803static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
2804 enum rpf_event event);
2805static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
2806 enum rpf_event event);
2807static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
2808 enum rpf_event event);
2809static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
2810 enum rpf_event event);
2811static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
2812 enum rpf_event event);
2813static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
2814 enum rpf_event event);
2815
2816static void
2817bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2818{
2819 struct bfa_fcs_rport_s *rport = rpf->rport;
2820 struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
2821
2822 bfa_trc(rport->fcs, rport->pwwn);
2823 bfa_trc(rport->fcs, rport->pid);
2824 bfa_trc(rport->fcs, event);
2825
2826 switch (event) {
2827 case RPFSM_EVENT_RPORT_ONLINE:
2828 /* Send RPSC2 to a Brocade fabric only. */
2829 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
2830 ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
2831 (bfa_fcs_fabric_get_switch_oui(fabric) ==
2832 BFA_FCS_BRCD_SWITCH_OUI))) {
2833 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2834 rpf->rpsc_retries = 0;
2835 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2836 }
2837 break;
2838
2839 case RPFSM_EVENT_RPORT_OFFLINE:
2840 break;
2841
2842 default:
2843 bfa_sm_fault(rport->fcs, event);
2844 }
2845}
2846
2847static void
2848bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2849{
2850 struct bfa_fcs_rport_s *rport = rpf->rport;
2851
2852 bfa_trc(rport->fcs, event);
2853
2854 switch (event) {
2855 case RPFSM_EVENT_FCXP_SENT:
2856 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
2857 break;
2858
2859 case RPFSM_EVENT_RPORT_OFFLINE:
2860 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2861 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
2862 rpf->rpsc_retries = 0;
2863 break;
2864
2865 default:
2866 bfa_sm_fault(rport->fcs, event);
2867 }
2868}
2869
2870static void
2871bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2872{
2873 struct bfa_fcs_rport_s *rport = rpf->rport;
2874
2875 bfa_trc(rport->fcs, rport->pid);
2876 bfa_trc(rport->fcs, event);
2877
2878 switch (event) {
2879 case RPFSM_EVENT_RPSC_COMP:
2880 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2881 /* Update speed info in f/w via BFA */
2882 if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN)
2883 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
2884 else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN)
2885 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
2886 break;
2887
2888 case RPFSM_EVENT_RPSC_FAIL:
2889 /* RPSC not supported by rport */
2890 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2891 break;
2892
2893 case RPFSM_EVENT_RPSC_ERROR:
2894 /* need to retry...delayed a bit. */
2895 if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
2896 bfa_timer_start(rport->fcs->bfa, &rpf->timer,
2897 bfa_fcs_rpf_timeout, rpf,
2898 BFA_FCS_RPF_RETRY_TIMEOUT);
2899 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
2900 } else {
2901 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
2902 }
2903 break;
2904
2905 case RPFSM_EVENT_RPORT_OFFLINE:
2906 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2907 bfa_fcxp_discard(rpf->fcxp);
2908 rpf->rpsc_retries = 0;
2909 break;
2910
2911 default:
2912 bfa_sm_fault(rport->fcs, event);
2913 }
2914}
2915
2916static void
2917bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2918{
2919 struct bfa_fcs_rport_s *rport = rpf->rport;
2920
2921 bfa_trc(rport->fcs, rport->pid);
2922 bfa_trc(rport->fcs, event);
2923
2924 switch (event) {
2925 case RPFSM_EVENT_TIMEOUT:
2926 /* re-send the RPSC */
2927 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2928 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2929 break;
2930
2931 case RPFSM_EVENT_RPORT_OFFLINE:
2932 bfa_timer_stop(&rpf->timer);
2933 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2934 rpf->rpsc_retries = 0;
2935 break;
2936
2937 default:
2938 bfa_sm_fault(rport->fcs, event);
2939 }
2940}
2941
2942static void
2943bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2944{
2945 struct bfa_fcs_rport_s *rport = rpf->rport;
2946
2947 bfa_trc(rport->fcs, rport->pwwn);
2948 bfa_trc(rport->fcs, rport->pid);
2949 bfa_trc(rport->fcs, event);
2950
2951 switch (event) {
2952 case RPFSM_EVENT_RPORT_OFFLINE:
2953 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
2954 rpf->rpsc_retries = 0;
2955 break;
2956
2957 default:
2958 bfa_sm_fault(rport->fcs, event);
2959 }
2960}
2961
2962static void
2963bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2964{
2965 struct bfa_fcs_rport_s *rport = rpf->rport;
2966
2967 bfa_trc(rport->fcs, rport->pwwn);
2968 bfa_trc(rport->fcs, rport->pid);
2969 bfa_trc(rport->fcs, event);
2970
2971 switch (event) {
2972 case RPFSM_EVENT_RPORT_ONLINE:
2973 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
2974 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
2975 break;
2976
2977 case RPFSM_EVENT_RPORT_OFFLINE:
2978 break;
2979
2980 default:
2981 bfa_sm_fault(rport->fcs, event);
2982 }
2983}
2984/**
2985 * Called when Rport is created.
2986 */
2987void
2988bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
2989{
2990 struct bfa_fcs_rpf_s *rpf = &rport->rpf;
2991
2992 bfa_trc(rport->fcs, rport->pid);
2993 rpf->rport = rport;
2994
2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
2996}
2997
2998/**
2999 * Called when Rport becomes online
3000 */
3001void
3002bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
3003{
3004 bfa_trc(rport->fcs, rport->pid);
3005
3006 if (__fcs_min_cfg(rport->port->fcs))
3007 return;
3008
3009 if (bfa_fcs_fabric_is_switched(rport->port->fabric))
3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
3011}
3012
3013/**
3014 * Called when Rport becomes offline
3015 */
3016void
3017bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
3018{
3019 bfa_trc(rport->fcs, rport->pid);
3020
3021 if (__fcs_min_cfg(rport->port->fcs))
3022 return;
3023
3024 rport->rpf.rpsc_speed = 0;
3025 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
3026}
3027
3028static void
3029bfa_fcs_rpf_timeout(void *arg)
3030{
3031 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
3032 struct bfa_fcs_rport_s *rport = rpf->rport;
3033
3034 bfa_trc(rport->fcs, rport->pid);
3035 bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
3036}
3037
3038static void
3039bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3040{
3041 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
3042 struct bfa_fcs_rport_s *rport = rpf->rport;
3043 struct bfa_fcs_lport_s *port = rport->port;
3044 struct fchs_s fchs;
3045 int len;
3046 struct bfa_fcxp_s *fcxp;
3047
3048 bfa_trc(rport->fcs, rport->pwwn);
3049
3050 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3051 if (!fcxp) {
3052 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
3053 bfa_fcs_rpf_send_rpsc2, rpf);
3054 return;
3055 }
3056 rpf->fcxp = fcxp;
3057
3058 len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
3059 bfa_fcs_lport_get_fcid(port), &rport->pid, 1);
3060
3061 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3062 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
3063 rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
3064 rport->stats.rpsc_sent++;
3065 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
3066
3067}
3068
3069static void
3070bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
3071 bfa_status_t req_status, u32 rsp_len,
3072 u32 resid_len, struct fchs_s *rsp_fchs)
3073{
3074 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
3075 struct bfa_fcs_rport_s *rport = rpf->rport;
3076 struct fc_ls_rjt_s *ls_rjt;
3077 struct fc_rpsc2_acc_s *rpsc2_acc;
3078 u16 num_ents;
3079
3080 bfa_trc(rport->fcs, req_status);
3081
3082 if (req_status != BFA_STATUS_OK) {
3083 bfa_trc(rport->fcs, req_status);
3084 if (req_status == BFA_STATUS_ETIMER)
3085 rport->stats.rpsc_failed++;
3086 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3087 return;
3088 }
3089
3090 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
3091 if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
3092 rport->stats.rpsc_accs++;
3093 num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
3094 bfa_trc(rport->fcs, num_ents);
3095 if (num_ents > 0) {
3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
3097 bfa_trc(rport->fcs,
3098 bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
3099 bfa_trc(rport->fcs,
3100 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
3101 bfa_trc(rport->fcs,
3102 bfa_os_ntohs(rpsc2_acc->port_info[0].index));
3103 bfa_trc(rport->fcs,
3104 rpsc2_acc->port_info[0].type);
3105
3106 if (rpsc2_acc->port_info[0].speed == 0) {
3107 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3108 return;
3109 }
3110
3111 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
3112 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
3113
3114 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
3115 }
3116 } else {
3117 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
3118 bfa_trc(rport->fcs, ls_rjt->reason_code);
3119 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
3120 rport->stats.rpsc_rejects++;
3121 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
3122 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
3123 else
3124 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
3125 }
3126}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
deleted file mode 100644
index 3d57d48bbae4..000000000000
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames)
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <bfa_svc.h>
24#include <fcs/bfa_fcs_fabric.h>
25#include "fcs.h"
26#include "fcs_trcmod.h"
27#include "fcs_fabric.h"
28#include "fcs_uf.h"
29
30BFA_TRC_FILE(FCS, UF);
31
32/**
33 * BFA callback for unsolicited frame receive handler.
34 *
35 * @param[in] cbarg callback arg for receive handler
36 * @param[in] uf unsolicited frame descriptor
37 *
38 * @return None
39 */
40static void
41bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
42{
43 struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
44 struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
45 u16 len = bfa_uf_get_frmlen(uf);
46 struct fc_vft_s *vft;
47 struct bfa_fcs_fabric_s *fabric;
48
49 /**
50 * check for VFT header
51 */
52 if (fchs->routing == FC_RTG_EXT_HDR &&
53 fchs->cat_info == FC_CAT_VFT_HDR) {
54 bfa_stats(fcs, uf.tagged);
55 vft = bfa_uf_get_frmbuf(uf);
56 if (fcs->port_vfid == vft->vf_id)
57 fabric = &fcs->fabric;
58 else
59 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
60
61 /**
62 * drop frame if vfid is unknown
63 */
64 if (!fabric) {
65 bfa_assert(0);
66 bfa_stats(fcs, uf.vfid_unknown);
67 bfa_uf_free(uf);
68 return;
69 }
70
71 /**
72 * skip vft header
73 */
74 fchs = (struct fchs_s *) (vft + 1);
75 len -= sizeof(struct fc_vft_s);
76
77 bfa_trc(fcs, vft->vf_id);
78 } else {
79 bfa_stats(fcs, uf.untagged);
80 fabric = &fcs->fabric;
81 }
82
83 bfa_trc(fcs, ((u32 *) fchs)[0]);
84 bfa_trc(fcs, ((u32 *) fchs)[1]);
85 bfa_trc(fcs, ((u32 *) fchs)[2]);
86 bfa_trc(fcs, ((u32 *) fchs)[3]);
87 bfa_trc(fcs, ((u32 *) fchs)[4]);
88 bfa_trc(fcs, ((u32 *) fchs)[5]);
89 bfa_trc(fcs, len);
90
91 bfa_fcs_fabric_uf_recv(fabric, fchs, len);
92 bfa_uf_free(uf);
93}
94
95void
96bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
97{
98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
99}
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
deleted file mode 100644
index 8258f88bfee6..000000000000
--- a/drivers/scsi/bfa/bfa_fcxp.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_uf.h>
20#include <cs/bfa_debug.h>
21
22BFA_TRC_FILE(HAL, FCXP);
23BFA_MODULE(fcxp);
24
25/**
26 * forward declarations
27 */
28static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
29static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
30 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
31static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
32 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
33static void bfa_fcxp_qresume(void *cbarg);
34static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
35 struct bfi_fcxp_send_req_s *send_req);
36
37/**
38 * fcxp_pvt BFA FCXP private functions
39 */
40
41static void
42claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
43{
44 u8 *dm_kva = NULL;
45 u64 dm_pa;
46 u32 buf_pool_sz;
47
48 dm_kva = bfa_meminfo_dma_virt(mi);
49 dm_pa = bfa_meminfo_dma_phys(mi);
50
51 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
52
53 /*
54 * Initialize the fcxp req payload list
55 */
56 mod->req_pld_list_kva = dm_kva;
57 mod->req_pld_list_pa = dm_pa;
58 dm_kva += buf_pool_sz;
59 dm_pa += buf_pool_sz;
60 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
61
62 /*
63 * Initialize the fcxp rsp payload list
64 */
65 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
66 mod->rsp_pld_list_kva = dm_kva;
67 mod->rsp_pld_list_pa = dm_pa;
68 dm_kva += buf_pool_sz;
69 dm_pa += buf_pool_sz;
70 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
71
72 bfa_meminfo_dma_virt(mi) = dm_kva;
73 bfa_meminfo_dma_phys(mi) = dm_pa;
74}
75
76static void
77claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
78{
79 u16 i;
80 struct bfa_fcxp_s *fcxp;
81
82 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
83 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
84
85 INIT_LIST_HEAD(&mod->fcxp_free_q);
86 INIT_LIST_HEAD(&mod->fcxp_active_q);
87
88 mod->fcxp_list = fcxp;
89
90 for (i = 0; i < mod->num_fcxps; i++) {
91 fcxp->fcxp_mod = mod;
92 fcxp->fcxp_tag = i;
93
94 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
95 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
96 fcxp->reqq_waiting = BFA_FALSE;
97
98 fcxp = fcxp + 1;
99 }
100
101 bfa_meminfo_kva(mi) = (void *)fcxp;
102}
103
104static void
105bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
106 u32 *dm_len)
107{
108 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
109
110 if (num_fcxp_reqs == 0)
111 return;
112
113 /*
114 * Account for req/rsp payload
115 */
116 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
117 if (cfg->drvcfg.min_cfg)
118 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
119 else
120 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
121
122 /*
123 * Account for fcxp structs
124 */
125 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
126}
127
128static void
129bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
130 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
131{
132 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
133
134 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
135 mod->bfa = bfa;
136 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
137
138 /**
139 * Initialize FCXP request and response payload sizes.
140 */
141 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
142 if (!cfg->drvcfg.min_cfg)
143 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
144
145 INIT_LIST_HEAD(&mod->wait_q);
146
147 claim_fcxp_req_rsp_mem(mod, meminfo);
148 claim_fcxps_mem(mod, meminfo);
149}
150
151static void
152bfa_fcxp_detach(struct bfa_s *bfa)
153{
154}
155
156static void
157bfa_fcxp_start(struct bfa_s *bfa)
158{
159}
160
161static void
162bfa_fcxp_stop(struct bfa_s *bfa)
163{
164}
165
166static void
167bfa_fcxp_iocdisable(struct bfa_s *bfa)
168{
169 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
170 struct bfa_fcxp_s *fcxp;
171 struct list_head *qe, *qen;
172
173 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
174 fcxp = (struct bfa_fcxp_s *) qe;
175 if (fcxp->caller == NULL) {
176 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
177 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
178 bfa_fcxp_free(fcxp);
179 } else {
180 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
181 bfa_cb_queue(bfa, &fcxp->hcb_qe,
182 __bfa_fcxp_send_cbfn, fcxp);
183 }
184 }
185}
186
187static struct bfa_fcxp_s *
188bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
189{
190 struct bfa_fcxp_s *fcxp;
191
192 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
193
194 if (fcxp)
195 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
196
197 return fcxp;
198}
199
200static void
201bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
202{
203 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
204 struct bfa_fcxp_wqe_s *wqe;
205
206 bfa_q_deq(&mod->wait_q, &wqe);
207 if (wqe) {
208 bfa_trc(mod->bfa, fcxp->fcxp_tag);
209 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
210 return;
211 }
212
213 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
214 list_del(&fcxp->qe);
215 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
216}
217
218static void
219bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
220 bfa_status_t req_status, u32 rsp_len,
221 u32 resid_len, struct fchs_s *rsp_fchs)
222{
223 /* discarded fcxp completion */
224}
225
226static void
227__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
228{
229 struct bfa_fcxp_s *fcxp = cbarg;
230
231 if (complete) {
232 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
233 fcxp->rsp_status, fcxp->rsp_len,
234 fcxp->residue_len, &fcxp->rsp_fchs);
235 } else {
236 bfa_fcxp_free(fcxp);
237 }
238}
239
240static void
241hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
242{
243 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
244 struct bfa_fcxp_s *fcxp;
245 u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
246
247 bfa_trc(bfa, fcxp_tag);
248
249 fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
250
251 /**
252 * @todo f/w should not set residue to non-0 when everything
253 * is received.
254 */
255 if (fcxp_rsp->req_status == BFA_STATUS_OK)
256 fcxp_rsp->residue_len = 0;
257 else
258 fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
259
260 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
261
262 bfa_assert(fcxp->send_cbfn != NULL);
263
264 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
265
266 if (fcxp->send_cbfn != NULL) {
267 if (fcxp->caller == NULL) {
268 bfa_trc(mod->bfa, fcxp->fcxp_tag);
269
270 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
271 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
272 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
273 /*
274 * fcxp automatically freed on return from the callback
275 */
276 bfa_fcxp_free(fcxp);
277 } else {
278 bfa_trc(mod->bfa, fcxp->fcxp_tag);
279 fcxp->rsp_status = fcxp_rsp->req_status;
280 fcxp->rsp_len = fcxp_rsp->rsp_len;
281 fcxp->residue_len = fcxp_rsp->residue_len;
282 fcxp->rsp_fchs = fcxp_rsp->fchs;
283
284 bfa_cb_queue(bfa, &fcxp->hcb_qe,
285 __bfa_fcxp_send_cbfn, fcxp);
286 }
287 } else {
288 bfa_trc(bfa, fcxp_tag);
289 }
290}
291
292static void
293hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
294{
295 union bfi_addr_u sga_zero = { {0} };
296
297 sge->sg_len = reqlen;
298 sge->flags = BFI_SGE_DATA_LAST;
299 bfa_dma_addr_set(sge[0].sga, req_pa);
300 bfa_sge_to_be(sge);
301 sge++;
302
303 sge->sga = sga_zero;
304 sge->sg_len = reqlen;
305 sge->flags = BFI_SGE_PGDLEN;
306 bfa_sge_to_be(sge);
307}
308
309static void
310hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
311 struct fchs_s *fchs)
312{
313 /*
314 * TODO: TX ox_id
315 */
316 if (reqlen > 0) {
317 if (fcxp->use_ireqbuf) {
318 u32 pld_w0 =
319 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
320
321 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
322 BFA_PL_EID_TX,
323 reqlen + sizeof(struct fchs_s), fchs, pld_w0);
324 } else {
325 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
326 BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
327 fchs);
328 }
329 } else {
330 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
331 reqlen + sizeof(struct fchs_s), fchs);
332 }
333}
334
335static void
336hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
337 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
338{
339 if (fcxp_rsp->rsp_len > 0) {
340 if (fcxp->use_irspbuf) {
341 u32 pld_w0 =
342 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
343
344 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
345 BFA_PL_EID_RX,
346 (u16) fcxp_rsp->rsp_len,
347 &fcxp_rsp->fchs, pld_w0);
348 } else {
349 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
350 BFA_PL_EID_RX,
351 (u16) fcxp_rsp->rsp_len,
352 &fcxp_rsp->fchs);
353 }
354 } else {
355 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
356 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
357 }
358}
359
360/**
361 * Handler to resume sending fcxp when space in available in cpe queue.
362 */
363static void
364bfa_fcxp_qresume(void *cbarg)
365{
366 struct bfa_fcxp_s *fcxp = cbarg;
367 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
368 struct bfi_fcxp_send_req_s *send_req;
369
370 fcxp->reqq_waiting = BFA_FALSE;
371 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
372 bfa_fcxp_queue(fcxp, send_req);
373}
374
375/**
376 * Queue fcxp send request to foimrware.
377 */
378static void
379bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
380{
381 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
382 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
383 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
384 struct bfa_rport_s *rport = reqi->bfa_rport;
385
386 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
387 bfa_lpuid(bfa));
388
389 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
390 if (rport) {
391 send_req->rport_fw_hndl = rport->fw_handle;
392 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
393 if (send_req->max_frmsz == 0)
394 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
395 } else {
396 send_req->rport_fw_hndl = 0;
397 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
398 }
399
400 send_req->vf_id = bfa_os_htons(reqi->vf_id);
401 send_req->lp_tag = reqi->lp_tag;
402 send_req->class = reqi->class;
403 send_req->rsp_timeout = rspi->rsp_timeout;
404 send_req->cts = reqi->cts;
405 send_req->fchs = reqi->fchs;
406
407 send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
408 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
409
410 /*
411 * setup req sgles
412 */
413 if (fcxp->use_ireqbuf == 1) {
414 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
415 BFA_FCXP_REQ_PLD_PA(fcxp));
416 } else {
417 if (fcxp->nreq_sgles > 0) {
418 bfa_assert(fcxp->nreq_sgles == 1);
419 hal_fcxp_set_local_sges(send_req->req_sge,
420 reqi->req_tot_len,
421 fcxp->req_sga_cbfn(fcxp->caller,
422 0));
423 } else {
424 bfa_assert(reqi->req_tot_len == 0);
425 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
426 }
427 }
428
429 /*
430 * setup rsp sgles
431 */
432 if (fcxp->use_irspbuf == 1) {
433 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
434
435 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
436 BFA_FCXP_RSP_PLD_PA(fcxp));
437
438 } else {
439 if (fcxp->nrsp_sgles > 0) {
440 bfa_assert(fcxp->nrsp_sgles == 1);
441 hal_fcxp_set_local_sges(send_req->rsp_sge,
442 rspi->rsp_maxlen,
443 fcxp->rsp_sga_cbfn(fcxp->caller,
444 0));
445 } else {
446 bfa_assert(rspi->rsp_maxlen == 0);
447 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
448 }
449 }
450
451 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
452
453 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
454
455 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
456 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
457}
458
459
460/**
461 * hal_fcxp_api BFA FCXP API
462 */
463
464/**
465 * Allocate an FCXP instance to send a response or to send a request
466 * that has a response. Request/response buffers are allocated by caller.
467 *
468 * @param[in] bfa BFA bfa instance
469 * @param[in] nreq_sgles Number of SG elements required for request
470 * buffer. 0, if fcxp internal buffers are used.
471 * Use bfa_fcxp_get_reqbuf() to get the
472 * internal req buffer.
473 * @param[in] req_sgles SG elements describing request buffer. Will be
474 * copied in by BFA and hence can be freed on
475 * return from this function.
476 * @param[in] get_req_sga function ptr to be called to get a request SG
477 * Address (given the sge index).
478 * @param[in] get_req_sglen function ptr to be called to get a request SG
479 * len (given the sge index).
480 * @param[in] get_rsp_sga function ptr to be called to get a response SG
481 * Address (given the sge index).
482 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
483 * len (given the sge index).
484 *
485 * @return FCXP instance. NULL on failure.
486 */
487struct bfa_fcxp_s *
488bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
489 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
490 bfa_fcxp_get_sglen_t req_sglen_cbfn,
491 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
492 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
493{
494 struct bfa_fcxp_s *fcxp = NULL;
495 u32 nreq_sgpg, nrsp_sgpg;
496
497 bfa_assert(bfa != NULL);
498
499 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
500 if (fcxp == NULL)
501 return NULL;
502
503 bfa_trc(bfa, fcxp->fcxp_tag);
504
505 fcxp->caller = caller;
506
507 if (nreq_sgles == 0) {
508 fcxp->use_ireqbuf = 1;
509 } else {
510 bfa_assert(req_sga_cbfn != NULL);
511 bfa_assert(req_sglen_cbfn != NULL);
512
513 fcxp->use_ireqbuf = 0;
514 fcxp->req_sga_cbfn = req_sga_cbfn;
515 fcxp->req_sglen_cbfn = req_sglen_cbfn;
516
517 fcxp->nreq_sgles = nreq_sgles;
518
519 /*
520 * alloc required sgpgs
521 */
522 if (nreq_sgles > BFI_SGE_INLINE) {
523 nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
524
525 if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
526 != BFA_STATUS_OK) {
527 /*
528 * TODO
529 */
530 }
531 }
532 }
533
534 if (nrsp_sgles == 0) {
535 fcxp->use_irspbuf = 1;
536 } else {
537 bfa_assert(rsp_sga_cbfn != NULL);
538 bfa_assert(rsp_sglen_cbfn != NULL);
539
540 fcxp->use_irspbuf = 0;
541 fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
542 fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
543
544 fcxp->nrsp_sgles = nrsp_sgles;
545 /*
546 * alloc required sgpgs
547 */
548 if (nrsp_sgles > BFI_SGE_INLINE) {
549 nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
550
551 if (bfa_sgpg_malloc
552 (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
553 != BFA_STATUS_OK) {
554 /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
555 nrsp_sgpg); */
556 /*
557 * TODO
558 */
559 }
560 }
561 }
562
563 return fcxp;
564}
565
566/**
567 * Get the internal request buffer pointer
568 *
569 * @param[in] fcxp BFA fcxp pointer
570 *
571 * @return pointer to the internal request buffer
572 */
573void *
574bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
575{
576 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
577 void *reqbuf;
578
579 bfa_assert(fcxp->use_ireqbuf == 1);
580 reqbuf = ((u8 *)mod->req_pld_list_kva) +
581 fcxp->fcxp_tag * mod->req_pld_sz;
582 return reqbuf;
583}
584
585u32
586bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
587{
588 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
589
590 return mod->req_pld_sz;
591}
592
593/**
594 * Get the internal response buffer pointer
595 *
596 * @param[in] fcxp BFA fcxp pointer
597 *
598 * @return pointer to the internal request buffer
599 */
600void *
601bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
602{
603 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
604 void *rspbuf;
605
606 bfa_assert(fcxp->use_irspbuf == 1);
607
608 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
609 fcxp->fcxp_tag * mod->rsp_pld_sz;
610 return rspbuf;
611}
612
613/**
614 * Free the BFA FCXP
615 *
616 * @param[in] fcxp BFA fcxp pointer
617 *
618 * @return void
619 */
620void
621bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
622{
623 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
624
625 bfa_assert(fcxp != NULL);
626 bfa_trc(mod->bfa, fcxp->fcxp_tag);
627 bfa_fcxp_put(fcxp);
628}
629
630/**
631 * Send a FCXP request
632 *
633 * @param[in] fcxp BFA fcxp pointer
634 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
635 * @param[in] vf_id virtual Fabric ID
636 * @param[in] lp_tag lport tag
637 * @param[in] cts use Continous sequence
638 * @param[in] cos fc Class of Service
639 * @param[in] reqlen request length, does not include FCHS length
640 * @param[in] fchs fc Header Pointer. The header content will be copied
641 * in by BFA.
642 *
643 * @param[in] cbfn call back function to be called on receiving
644 * the response
645 * @param[in] cbarg arg for cbfn
646 * @param[in] rsp_timeout
647 * response timeout
648 *
649 * @return bfa_status_t
650 */
651void
652bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
653 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
654 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
655 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
656{
657 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
658 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
659 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
660 struct bfi_fcxp_send_req_s *send_req;
661
662 bfa_trc(bfa, fcxp->fcxp_tag);
663
664 /**
665 * setup request/response info
666 */
667 reqi->bfa_rport = rport;
668 reqi->vf_id = vf_id;
669 reqi->lp_tag = lp_tag;
670 reqi->class = cos;
671 rspi->rsp_timeout = rsp_timeout;
672 reqi->cts = cts;
673 reqi->fchs = *fchs;
674 reqi->req_tot_len = reqlen;
675 rspi->rsp_maxlen = rsp_maxlen;
676 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
677 fcxp->send_cbarg = cbarg;
678
679 /**
680 * If no room in CPE queue, wait for space in request queue
681 */
682 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
683 if (!send_req) {
684 bfa_trc(bfa, fcxp->fcxp_tag);
685 fcxp->reqq_waiting = BFA_TRUE;
686 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
687 return;
688 }
689
690 bfa_fcxp_queue(fcxp, send_req);
691}
692
693/**
694 * Abort a BFA FCXP
695 *
696 * @param[in] fcxp BFA fcxp pointer
697 *
698 * @return void
699 */
700bfa_status_t
701bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
702{
703 bfa_assert(0);
704 return BFA_STATUS_OK;
705}
706
707void
708bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
709 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
710{
711 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
712
713 bfa_assert(list_empty(&mod->fcxp_free_q));
714
715 wqe->alloc_cbfn = alloc_cbfn;
716 wqe->alloc_cbarg = alloc_cbarg;
717 list_add_tail(&wqe->qe, &mod->wait_q);
718}
719
720void
721bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
722{
723 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
724
725 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
726 list_del(&wqe->qe);
727}
728
729void
730bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
731{
732 /**
733 * If waiting for room in request queue, cancel reqq wait
734 * and free fcxp.
735 */
736 if (fcxp->reqq_waiting) {
737 fcxp->reqq_waiting = BFA_FALSE;
738 bfa_reqq_wcancel(&fcxp->reqq_wqe);
739 bfa_fcxp_free(fcxp);
740 return;
741 }
742
743 fcxp->send_cbfn = bfa_fcxp_null_comp;
744}
745
746
747
748/**
749 * hal_fcxp_public BFA FCXP public functions
750 */
751
752void
753bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
754{
755 switch (msg->mhdr.msg_id) {
756 case BFI_FCXP_I2H_SEND_RSP:
757 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
758 break;
759
760 default:
761 bfa_trc(bfa, msg->mhdr.msg_id);
762 bfa_assert(0);
763 }
764}
765
766u32
767bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
768{
769 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
770
771 return mod->rsp_pld_sz;
772}
773
774
diff --git a/drivers/scsi/bfa/bfa_fcxp_priv.h b/drivers/scsi/bfa/bfa_fcxp_priv.h
deleted file mode 100644
index 4cda49397da0..000000000000
--- a/drivers/scsi/bfa/bfa_fcxp_priv.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCXP_PRIV_H__
19#define __BFA_FCXP_PRIV_H__
20
21#include <cs/bfa_sm.h>
22#include <protocol/fc.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_fcxp.h>
25
26#define BFA_FCXP_MIN (1)
27#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
28#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
29
30struct bfa_fcxp_mod_s {
31 struct bfa_s *bfa; /* backpointer to BFA */
32 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
33 u16 num_fcxps; /* max num FCXP requests */
34 struct list_head fcxp_free_q; /* free FCXPs */
35 struct list_head fcxp_active_q; /* active FCXPs */
36 void *req_pld_list_kva; /* list of FCXP req pld */
37 u64 req_pld_list_pa; /* list of FCXP req pld */
38 void *rsp_pld_list_kva; /* list of FCXP resp pld */
39 u64 rsp_pld_list_pa; /* list of FCXP resp pld */
40 struct list_head wait_q; /* wait queue for free fcxp */
41 u32 req_pld_sz;
42 u32 rsp_pld_sz;
43};
44
45#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
46#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
47
48typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
49 void *cb_arg, bfa_status_t req_status,
50 u32 rsp_len, u32 resid_len,
51 struct fchs_s *rsp_fchs);
52
53/**
54 * Information needed for a FCXP request
55 */
56struct bfa_fcxp_req_info_s {
57 struct bfa_rport_s *bfa_rport; /* Pointer to the bfa rport that was
58 *returned from bfa_rport_create().
59 *This could be left NULL for WKA or for
60 *FCXP interactions before the rport
61 *nexus is established
62 */
63 struct fchs_s fchs; /* request FC header structure */
64 u8 cts; /* continous sequence */
65 u8 class; /* FC class for the request/response */
66 u16 max_frmsz; /* max send frame size */
67 u16 vf_id; /* vsan tag if applicable */
68 u8 lp_tag; /* lport tag */
69 u32 req_tot_len; /* request payload total length */
70};
71
72struct bfa_fcxp_rsp_info_s {
73 struct fchs_s rsp_fchs; /* Response frame's FC header will
74 * be *sent back in this field */
75 u8 rsp_timeout; /* timeout in seconds, 0-no response
76 */
77 u8 rsvd2[3];
78 u32 rsp_maxlen; /* max response length expected */
79};
80
81struct bfa_fcxp_s {
82 struct list_head qe; /* fcxp queue element */
83 bfa_sm_t sm; /* state machine */
84 void *caller; /* driver or fcs */
85 struct bfa_fcxp_mod_s *fcxp_mod;
86 /* back pointer to fcxp mod */
87 u16 fcxp_tag; /* internal tag */
88 struct bfa_fcxp_req_info_s req_info;
89 /* request info */
90 struct bfa_fcxp_rsp_info_s rsp_info;
91 /* response info */
92 u8 use_ireqbuf; /* use internal req buf */
93 u8 use_irspbuf; /* use internal rsp buf */
94 u32 nreq_sgles; /* num request SGLEs */
95 u32 nrsp_sgles; /* num response SGLEs */
96 struct list_head req_sgpg_q; /* SG pages for request buf */
97 struct list_head req_sgpg_wqe; /* wait queue for req SG page */
98 struct list_head rsp_sgpg_q; /* SG pages for response buf */
99 struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
100
101 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
102 /* SG elem addr user function */
103 bfa_fcxp_get_sglen_t req_sglen_cbfn;
104 /* SG elem len user function */
105 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
106 /* SG elem addr user function */
107 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
108 /* SG elem len user function */
109 bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
110 void *send_cbarg; /* callback arg */
111 struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
112 /* req SG elems */
113 struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
114 /* rsp SG elems */
115 u8 rsp_status; /* comp: rsp status */
116 u32 rsp_len; /* comp: actual response len */
117 u32 residue_len; /* comp: residual rsp length */
118 struct fchs_s rsp_fchs; /* comp: response fchs */
119 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
120 struct bfa_reqq_wait_s reqq_wqe;
121 bfa_boolean_t reqq_waiting;
122};
123
124#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
125
126#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
127#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
128
129#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
130 ((_fcxp)->fcxp_mod->req_pld_list_pa + \
131 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
132
133#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
134 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
135 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
136
137void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
138#endif /* __BFA_FCXP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h
deleted file mode 100644
index d33e19e54395..000000000000
--- a/drivers/scsi/bfa/bfa_fwimg_priv.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FWIMG_PRIV_H__
19#define __BFA_FWIMG_PRIV_H__
20
21#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
22#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
23
24/**
25 * BFI FW image type
26 */
27enum {
28 BFI_IMAGE_CB_FC,
29 BFI_IMAGE_CT_FC,
30 BFI_IMAGE_CT_CNA,
31 BFI_IMAGE_MAX,
32};
33
34extern u32 *bfi_image_get_chunk(int type, uint32_t off);
35extern u32 bfi_image_get_size(int type);
36extern u32 bfi_image_ct_fc_size;
37extern u32 bfi_image_ct_cna_size;
38extern u32 bfi_image_cb_fc_size;
39extern u32 *bfi_image_ct_fc;
40extern u32 *bfi_image_ct_cna;
41extern u32 *bfi_image_cb_fc;
42
43
44#endif /* __BFA_FWIMG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index edfd729445cf..c787d3af0886 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,15 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa_priv.h> 18#include "bfa_modules.h"
19#include <bfi/bfi_cbreg.h> 19#include "bfi_cbreg.h"
20 20
21void 21void
22bfa_hwcb_reginit(struct bfa_s *bfa) 22bfa_hwcb_reginit(struct bfa_s *bfa)
23{ 23{
24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
27 27
28 if (fn == 0) { 28 if (fn == 0) {
29 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 29 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index a357fb3066fd..c97ebafec5ea 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,9 +15,8 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa_priv.h> 18#include "bfa_modules.h"
19#include <bfi/bfi_ctreg.h> 19#include "bfi_ctreg.h"
20#include <bfa_ioc.h>
21 20
22BFA_TRC_FILE(HAL, IOCFC_CT); 21BFA_TRC_FILE(HAL, IOCFC_CT);
23 22
@@ -53,7 +52,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
53{ 52{
54 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 53 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
55 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 54 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
56 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 55 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
57 56
58 if (fn == 0) { 57 if (fn == 0) {
59 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 58 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -87,7 +86,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
87void 86void
88bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) 87bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
89{ 88{
90 u32 r32; 89 u32 r32;
91 90
92 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 91 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); 92 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
deleted file mode 100644
index 493678889b24..000000000000
--- a/drivers/scsi/bfa/bfa_intr.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfi/bfi_ctreg.h>
19#include <bfa_port_priv.h>
20#include <bfa_intr_priv.h>
21#include <cs/bfa_debug.h>
22
23BFA_TRC_FILE(HAL, INTR);
24
25static void
26bfa_msix_errint(struct bfa_s *bfa, u32 intr)
27{
28 bfa_ioc_error_isr(&bfa->ioc);
29}
30
31static void
32bfa_msix_lpu(struct bfa_s *bfa)
33{
34 bfa_ioc_mbox_isr(&bfa->ioc);
35}
36
37static void
38bfa_reqq_resume(struct bfa_s *bfa, int qid)
39{
40 struct list_head *waitq, *qe, *qen;
41 struct bfa_reqq_wait_s *wqe;
42
43 waitq = bfa_reqq(bfa, qid);
44 list_for_each_safe(qe, qen, waitq) {
45 /**
46 * Callback only as long as there is room in request queue
47 */
48 if (bfa_reqq_full(bfa, qid))
49 break;
50
51 list_del(qe);
52 wqe = (struct bfa_reqq_wait_s *) qe;
53 wqe->qresume(wqe->cbarg);
54 }
55}
56
57void
58bfa_msix_all(struct bfa_s *bfa, int vec)
59{
60 bfa_intx(bfa);
61}
62
63/**
64 * hal_intr_api
65 */
66bfa_boolean_t
67bfa_intx(struct bfa_s *bfa)
68{
69 u32 intr, qintr;
70 int queue;
71
72 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
73 if (!intr)
74 return BFA_FALSE;
75
76 /**
77 * RME completion queue interrupt
78 */
79 qintr = intr & __HFN_INT_RME_MASK;
80 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
81
82 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
83 if (intr & (__HFN_INT_RME_Q0 << queue))
84 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
85 }
86 intr &= ~qintr;
87 if (!intr)
88 return BFA_TRUE;
89
90 /**
91 * CPE completion queue interrupt
92 */
93 qintr = intr & __HFN_INT_CPE_MASK;
94 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
95
96 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
97 if (intr & (__HFN_INT_CPE_Q0 << queue))
98 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
99 }
100 intr &= ~qintr;
101 if (!intr)
102 return BFA_TRUE;
103
104 bfa_msix_lpu_err(bfa, intr);
105
106 return BFA_TRUE;
107}
108
109void
110bfa_isr_enable(struct bfa_s *bfa)
111{
112 u32 intr_unmask;
113 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
114
115 bfa_trc(bfa, pci_func);
116
117 bfa_msix_install(bfa);
118 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
119 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
120 __HFN_INT_LL_HALT);
121
122 if (pci_func == 0)
123 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
124 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
125 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
126 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
127 __HFN_INT_MBOX_LPU0);
128 else
129 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
130 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
131 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
132 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
133 __HFN_INT_MBOX_LPU1);
134
135 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
136 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
137 bfa->iocfc.intr_mask = ~intr_unmask;
138 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
139}
140
141void
142bfa_isr_disable(struct bfa_s *bfa)
143{
144 bfa_isr_mode_set(bfa, BFA_FALSE);
145 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
146 bfa_msix_uninstall(bfa);
147}
148
149void
150bfa_msix_reqq(struct bfa_s *bfa, int qid)
151{
152 struct list_head *waitq;
153
154 qid &= (BFI_IOC_MAX_CQS - 1);
155
156 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
157
158 /**
159 * Resume any pending requests in the corresponding reqq.
160 */
161 waitq = bfa_reqq(bfa, qid);
162 if (!list_empty(waitq))
163 bfa_reqq_resume(bfa, qid);
164}
165
166void
167bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
168{
169 bfa_trc(bfa, m->mhdr.msg_class);
170 bfa_trc(bfa, m->mhdr.msg_id);
171 bfa_trc(bfa, m->mhdr.mtag.i2htok);
172 bfa_assert(0);
173 bfa_trc_stop(bfa->trcmod);
174}
175
176void
177bfa_msix_rspq(struct bfa_s *bfa, int qid)
178{
179 struct bfi_msg_s *m;
180 u32 pi, ci;
181 struct list_head *waitq;
182
183 bfa_trc_fp(bfa, qid);
184
185 qid &= (BFI_IOC_MAX_CQS - 1);
186
187 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
188
189 ci = bfa_rspq_ci(bfa, qid);
190 pi = bfa_rspq_pi(bfa, qid);
191
192 bfa_trc_fp(bfa, ci);
193 bfa_trc_fp(bfa, pi);
194
195 if (bfa->rme_process) {
196 while (ci != pi) {
197 m = bfa_rspq_elem(bfa, qid, ci);
198 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
199
200 bfa_isrs[m->mhdr.msg_class] (bfa, m);
201
202 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
203 }
204 }
205
206 /**
207 * update CI
208 */
209 bfa_rspq_ci(bfa, qid) = pi;
210 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
211 bfa_os_mmiowb();
212
213 /**
214 * Resume any pending requests in the corresponding reqq.
215 */
216 waitq = bfa_reqq(bfa, qid);
217 if (!list_empty(waitq))
218 bfa_reqq_resume(bfa, qid);
219}
220
221void
222bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
223{
224 u32 intr, curr_value;
225
226 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
227
228 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
229 bfa_msix_lpu(bfa);
230
231 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
232 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
233
234 if (intr) {
235 if (intr & __HFN_INT_LL_HALT) {
236 /**
237 * If LL_HALT bit is set then FW Init Halt LL Port
238 * Register needs to be cleared as well so Interrupt
239 * Status Register will be cleared.
240 */
241 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
242 curr_value &= ~__FW_INIT_HALT_P;
243 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
244 }
245
246 if (intr & __HFN_INT_ERR_PSS) {
247 /**
248 * ERR_PSS bit needs to be cleared as well in case
249 * interrups are shared so driver's interrupt handler is
250 * still called eventhough it is already masked out.
251 */
252 curr_value = bfa_reg_read(
253 bfa->ioc.ioc_regs.pss_err_status_reg);
254 curr_value &= __PSS_ERR_STATUS_SET;
255 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
256 curr_value);
257 }
258
259 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
260 bfa_msix_errint(bfa, intr);
261 }
262}
263
264void
265bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
266{
267 bfa_isrs[mc] = isr_func;
268}
269
270
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
deleted file mode 100644
index 5fc301cf4d1b..000000000000
--- a/drivers/scsi/bfa/bfa_intr_priv.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_INTR_PRIV_H__
19#define __BFA_INTR_PRIV_H__
20
21/**
22 * Message handler
23 */
24typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
25void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
26void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
27
28
29#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
30#define bfa_reqq_ci(__bfa, __reqq) \
31 (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
32
33#define bfa_reqq_full(__bfa, __reqq) \
34 (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
35 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
36 bfa_reqq_ci(__bfa, __reqq))
37
38#define bfa_reqq_next(__bfa, __reqq) \
39 (bfa_reqq_full(__bfa, __reqq) ? NULL : \
40 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
41 + bfa_reqq_pi((__bfa), (__reqq)))))
42
43#define bfa_reqq_produce(__bfa, __reqq) do { \
44 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
45 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
46 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
47 bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
48 (__bfa)->iocfc.req_cq_pi[__reqq]); \
49 bfa_os_mmiowb(); \
50} while (0)
51
52#define bfa_rspq_pi(__bfa, __rspq) \
53 (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
54
55#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
56#define bfa_rspq_elem(__bfa, __rspq, __ci) \
57 (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
58
59#define CQ_INCR(__index, __size) do { \
60 (__index)++; \
61 (__index) &= ((__size) - 1); \
62} while (0)
63
64/**
65 * Queue element to wait for room in request queue. FIFO order is
66 * maintained when fullfilling requests.
67 */
68struct bfa_reqq_wait_s {
69 struct list_head qe;
70 void (*qresume) (void *cbarg);
71 void *cbarg;
72};
73
74/**
75 * Circular queue usage assignments
76 */
77enum {
78 BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
79 BFA_REQQ_FCXP = 0, /* all FCXP messages */
80 BFA_REQQ_LPS = 0, /* all lport service msgs */
81 BFA_REQQ_PORT = 0, /* all port messages */
82 BFA_REQQ_FLASH = 0, /* for flash module */
83 BFA_REQQ_DIAG = 0, /* for diag module */
84 BFA_REQQ_RPORT = 0, /* all port messages */
85 BFA_REQQ_SBOOT = 0, /* all san boot messages */
86 BFA_REQQ_QOS_LO = 1, /* all low priority IO */
87 BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
88 BFA_REQQ_QOS_HI = 3, /* all high priority IO */
89};
90
91static inline void
92bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
93 void *cbarg)
94{
95 wqe->qresume = qresume;
96 wqe->cbarg = cbarg;
97}
98
99#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
100
101/**
102 * static inline void
103 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
104 */
105#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
106 \
107 struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
108 \
109 bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
110 bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
111 \
112 list_add_tail(&(__wqe)->qe, waitq); \
113} while (0)
114
115#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
116
117#endif /* __BFA_INTR_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8e78f20110a5..6795b247791a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,35 +15,33 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_ctreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h> 21#include "bfa_defs_svc.h"
22#include <cs/bfa_debug.h> 22#include "bfad_drv.h"
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <aen/bfa_aen_ioc.h>
26#include <aen/bfa_aen.h>
27#include <log/bfa_log_hal.h>
28#include <defs/bfa_defs_pci.h>
29 23
30BFA_TRC_FILE(CNA, IOC); 24BFA_TRC_FILE(CNA, IOC);
31 25
32/** 26/**
33 * IOC local definitions 27 * IOC local definitions
34 */ 28 */
35#define BFA_IOC_TOV 2000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
36#define BFA_IOC_HWSEM_TOV 500 /* msecs */ 30#define BFA_IOC_HWSEM_TOV 500 /* msecs */
37#define BFA_IOC_HB_TOV 500 /* msecs */ 31#define BFA_IOC_HB_TOV 500 /* msecs */
38#define BFA_IOC_HWINIT_MAX 2 32#define BFA_IOC_HWINIT_MAX 2
39#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 33#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
40#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
41 34
42#define bfa_ioc_timer_start(__ioc) \ 35#define bfa_ioc_timer_start(__ioc) \
43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
44 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) 37 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
45#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) 38#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
46 39
40#define bfa_hb_timer_start(__ioc) \
41 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
42 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
44
47#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) 45#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
48#define BFA_DBG_FWTRC_LEN \ 46#define BFA_DBG_FWTRC_LEN \
49 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ 47 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
@@ -55,100 +53,226 @@ BFA_TRC_FILE(CNA, IOC);
55 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
56 */ 54 */
57 55
58#define bfa_ioc_firmware_lock(__ioc) \ 56#define bfa_ioc_firmware_lock(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 57 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60#define bfa_ioc_firmware_unlock(__ioc) \ 58#define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 59 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 60#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
63#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 61#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
64#define bfa_ioc_notify_hbfail(__ioc) \ 62#define bfa_ioc_notify_hbfail(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 63 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
66#define bfa_ioc_is_optrom(__ioc) \
67 (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
68 64
69bfa_boolean_t bfa_auto_recover = BFA_TRUE; 65#ifdef BFA_IOC_IS_UEFI
66#define bfa_ioc_is_bios_optrom(__ioc) (0)
67#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
68#else
69#define bfa_ioc_is_bios_optrom(__ioc) \
70 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
71#define bfa_ioc_is_uefi(__ioc) (0)
72#endif
73
74#define bfa_ioc_mbox_cmd_pending(__ioc) \
75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
76 bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
77
78bfa_boolean_t bfa_auto_recover = BFA_TRUE;
70 79
71/* 80/*
72 * forward declarations 81 * forward declarations
73 */ 82 */
74static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 83static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
75static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 84static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
76static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 85static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77static void bfa_ioc_timeout(void *ioc); 86static void bfa_ioc_timeout(void *ioc);
78static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); 87static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
79static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); 88static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
80static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); 89static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
81static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 90static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
82static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); 91static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
83static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); 92static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
84static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 93static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
85static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 94static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
86static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 95static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
87static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); 96static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
88static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 97static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
89static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 98static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
99static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
100static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
90 103
91/** 104/**
92 * bfa_ioc_sm 105 * hal_ioc_sm
93 */ 106 */
94 107
95/** 108/**
96 * IOC state machine events 109 * IOC state machine definitions/declarations
97 */ 110 */
98enum ioc_event { 111enum ioc_event {
99 IOC_E_ENABLE = 1, /* IOC enable request */ 112 IOC_E_RESET = 1, /* IOC reset request */
100 IOC_E_DISABLE = 2, /* IOC disable request */ 113 IOC_E_ENABLE = 2, /* IOC enable request */
101 IOC_E_TIMEOUT = 3, /* f/w response timeout */ 114 IOC_E_DISABLE = 3, /* IOC disable request */
102 IOC_E_FWREADY = 4, /* f/w initialization done */ 115 IOC_E_DETACH = 4, /* driver detach cleanup */
103 IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ 116 IOC_E_ENABLED = 5, /* f/w enabled */
104 IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ 117 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
105 IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */ 118 IOC_E_DISABLED = 7, /* f/w disabled */
106 IOC_E_HBFAIL = 8, /* heartbeat failure */ 119 IOC_E_FAILED = 8, /* failure notice by iocpf sm */
107 IOC_E_HWERROR = 9, /* hardware error interrupt */ 120 IOC_E_HBFAIL = 9, /* heartbeat failure */
108 IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ 121 IOC_E_HWERROR = 10, /* hardware error interrupt */
109 IOC_E_DETACH = 11, /* driver detach cleanup */ 122 IOC_E_TIMEOUT = 11, /* timeout */
110}; 123};
111 124
125bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
112bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); 126bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
113bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
114bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
115bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
116bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
117bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); 127bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
118bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); 128bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
119bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); 129bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
120bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); 130bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); 131bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 132bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 133bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
124 134
125static struct bfa_sm_table_s ioc_sm_table[] = { 135static struct bfa_sm_table_s ioc_sm_table[] = {
136 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
126 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
127 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 138 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
128 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
129 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
130 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
131 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
132 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 139 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
133 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 140 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
134 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 141 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
135 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 142 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
136 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 143 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
137 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
138}; 145};
139 146
140/** 147/**
148 * IOCPF state machine definitions/declarations
149 */
150
151#define bfa_iocpf_timer_start(__ioc) \
152 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
153 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
154#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
155
156#define bfa_iocpf_recovery_timer_start(__ioc) \
157 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
158 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
159
160#define bfa_sem_timer_start(__ioc) \
161 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
162 bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
163#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
164
165/*
166 * Forward declareations for iocpf state machine
167 */
168static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
169static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
170static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
171static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
172static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
173static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174static void bfa_iocpf_timeout(void *ioc_arg);
175static void bfa_iocpf_sem_timeout(void *ioc_arg);
176
177/**
178 * IOCPF state machine events
179 */
180enum iocpf_event {
181 IOCPF_E_ENABLE = 1, /* IOCPF enable request */
182 IOCPF_E_DISABLE = 2, /* IOCPF disable request */
183 IOCPF_E_STOP = 3, /* stop on driver detach */
184 IOCPF_E_FWREADY = 4, /* f/w initialization done */
185 IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
186 IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
187 IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
188 IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
189 IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
190 IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
192};
193
194/**
195 * IOCPF states
196 */
197enum bfa_iocpf_state {
198 BFA_IOCPF_RESET = 1, /* IOC is in reset state */
199 BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
200 BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
201 BFA_IOCPF_READY = 4, /* IOCPF is initialized */
202 BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
203 BFA_IOCPF_FAIL = 6, /* IOCPF failed */
204 BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
205 BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
206 BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
207};
208
209bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
210bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
211bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
212bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
213bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
214bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
215bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
216bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
217bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
218bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
219bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
220
221static struct bfa_sm_table_s iocpf_sm_table[] = {
222 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
223 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
224 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
225 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
226 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
227 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
228 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
229 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
230 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
231 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233};
234
235/**
236 * IOC State Machine
237 */
238
239/**
240 * Beginning state. IOC uninit state.
241 */
242
243static void
244bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
245{
246}
247
248/**
249 * IOC is in uninit state.
250 */
251static void
252bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
253{
254 bfa_trc(ioc, event);
255
256 switch (event) {
257 case IOC_E_RESET:
258 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
259 break;
260
261 default:
262 bfa_sm_fault(ioc, event);
263 }
264}
265/**
141 * Reset entry actions -- initialize state machine 266 * Reset entry actions -- initialize state machine
142 */ 267 */
143static void 268static void
144bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) 269bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
145{ 270{
146 ioc->retry_count = 0; 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
147 ioc->auto_recover = bfa_auto_recover;
148} 272}
149 273
150/** 274/**
151 * Beginning state. IOC is in reset state. 275 * IOC is in reset state.
152 */ 276 */
153static void 277static void
154bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) 278bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
@@ -157,7 +281,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
157 281
158 switch (event) { 282 switch (event) {
159 case IOC_E_ENABLE: 283 case IOC_E_ENABLE:
160 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 284 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
161 break; 285 break;
162 286
163 case IOC_E_DISABLE: 287 case IOC_E_DISABLE:
@@ -165,6 +289,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
165 break; 289 break;
166 290
167 case IOC_E_DETACH: 291 case IOC_E_DETACH:
292 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
168 break; 293 break;
169 294
170 default: 295 default:
@@ -172,46 +297,209 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
172 } 297 }
173} 298}
174 299
300
301static void
302bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
303{
304 bfa_iocpf_enable(ioc);
305}
306
175/** 307/**
176 * Semaphore should be acquired for version check. 308 * Host IOC function is being enabled, awaiting response from firmware.
309 * Semaphore is acquired.
177 */ 310 */
178static void 311static void
179bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) 312bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
180{ 313{
181 bfa_ioc_hw_sem_get(ioc); 314 bfa_trc(ioc, event);
315
316 switch (event) {
317 case IOC_E_ENABLED:
318 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 break;
320
321 case IOC_E_FAILED:
322 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
323 break;
324
325 case IOC_E_HWERROR:
326 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
327 bfa_iocpf_initfail(ioc);
328 break;
329
330 case IOC_E_DISABLE:
331 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
332 break;
333
334 case IOC_E_DETACH:
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
336 bfa_iocpf_stop(ioc);
337 break;
338
339 case IOC_E_ENABLE:
340 break;
341
342 default:
343 bfa_sm_fault(ioc, event);
344 }
345}
346
347
348static void
349bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
350{
351 bfa_ioc_timer_start(ioc);
352 bfa_ioc_send_getattr(ioc);
182} 353}
183 354
184/** 355/**
185 * Awaiting h/w semaphore to continue with version check. 356 * IOC configuration in progress. Timer is active.
186 */ 357 */
187static void 358static void
188bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) 359bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
189{ 360{
190 bfa_trc(ioc, event); 361 bfa_trc(ioc, event);
191 362
192 switch (event) { 363 switch (event) {
193 case IOC_E_SEMLOCKED: 364 case IOC_E_FWRSP_GETATTR:
194 if (bfa_ioc_firmware_lock(ioc)) { 365 bfa_ioc_timer_stop(ioc);
195 ioc->retry_count = 0; 366 bfa_ioc_check_attr_wwns(ioc);
196 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 367 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
197 } else { 368 break;
198 bfa_ioc_hw_sem_release(ioc); 369
199 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 370 case IOC_E_FAILED:
200 } 371 bfa_ioc_timer_stop(ioc);
372 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
373 break;
374
375 case IOC_E_HWERROR:
376 bfa_ioc_timer_stop(ioc);
377 /* fall through */
378
379 case IOC_E_TIMEOUT:
380 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
381 bfa_iocpf_getattrfail(ioc);
201 break; 382 break;
202 383
203 case IOC_E_DISABLE: 384 case IOC_E_DISABLE:
204 bfa_ioc_disable_comp(ioc); 385 bfa_ioc_timer_stop(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
387 break;
388
389 case IOC_E_ENABLE:
390 break;
391
392 default:
393 bfa_sm_fault(ioc, event);
394 }
395}
396
397
398static void
399bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
400{
401 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
402
403 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
404 bfa_ioc_hb_monitor(ioc);
405 BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
406}
407
408static void
409bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
410{
411 bfa_trc(ioc, event);
412
413 switch (event) {
414 case IOC_E_ENABLE:
415 break;
416
417 case IOC_E_DISABLE:
418 bfa_ioc_hb_stop(ioc);
419 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
420 break;
421
422 case IOC_E_FAILED:
423 bfa_ioc_hb_stop(ioc);
424 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
425 break;
426
427 case IOC_E_HWERROR:
428 bfa_ioc_hb_stop(ioc);
429 /* !!! fall through !!! */
430
431 case IOC_E_HBFAIL:
432 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
433 bfa_iocpf_fail(ioc);
434 break;
435
436 default:
437 bfa_sm_fault(ioc, event);
438 }
439}
440
441
442static void
443bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
444{
445 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
446 bfa_iocpf_disable(ioc);
447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
448}
449
450/**
451 * IOC is being disabled
452 */
453static void
454bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
455{
456 bfa_trc(ioc, event);
457
458 switch (event) {
459 case IOC_E_DISABLED:
460 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
461 break;
462
463 case IOC_E_HWERROR:
205 /* 464 /*
206 * fall through 465 * No state change. Will move to disabled state
466 * after iocpf sm completes failure processing and
467 * moves to disabled state.
207 */ 468 */
469 bfa_iocpf_fail(ioc);
470 break;
208 471
209 case IOC_E_DETACH: 472 default:
210 bfa_ioc_hw_sem_get_cancel(ioc); 473 bfa_sm_fault(ioc, event);
211 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 474 }
475}
476
477/**
478 * IOC disable completion entry.
479 */
480static void
481bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
482{
483 bfa_ioc_disable_comp(ioc);
484}
485
486static void
487bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
488{
489 bfa_trc(ioc, event);
490
491 switch (event) {
492 case IOC_E_ENABLE:
493 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
494 break;
495
496 case IOC_E_DISABLE:
497 ioc->cbfn->disable_cbfn(ioc->bfa);
212 break; 498 break;
213 499
214 case IOC_E_FWREADY: 500 case IOC_E_DETACH:
501 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
502 bfa_iocpf_stop(ioc);
215 break; 503 break;
216 504
217 default: 505 default:
@@ -219,48 +507,138 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
219 } 507 }
220} 508}
221 509
510
511static void
512bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
513{
514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
515}
516
222/** 517/**
223 * Notify enable completion callback and generate mismatch AEN. 518 * Hardware initialization failed.
224 */ 519 */
225static void 520static void
226bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) 521bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
522{
523 bfa_trc(ioc, event);
524
525 switch (event) {
526 case IOC_E_ENABLED:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
528 break;
529
530 case IOC_E_FAILED:
531 /**
532 * Initialization failure during iocpf init retry.
533 */
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
535 break;
536
537 case IOC_E_DISABLE:
538 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
539 break;
540
541 case IOC_E_DETACH:
542 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
543 bfa_iocpf_stop(ioc);
544 break;
545
546 default:
547 bfa_sm_fault(ioc, event);
548 }
549}
550
551
552static void
553bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
227{ 554{
555 struct list_head *qe;
556 struct bfa_ioc_hbfail_notify_s *notify;
557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558
228 /** 559 /**
229 * Provide enable completion callback and AEN notification only once. 560 * Notify driver and common modules registered for notification.
230 */ 561 */
231 if (ioc->retry_count == 0) { 562 ioc->cbfn->hbfail_cbfn(ioc->bfa);
232 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 563 list_for_each(qe, &ioc->hb_notify_q) {
233 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); 564 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
565 notify->cbfn(notify->cbarg);
234 } 566 }
235 ioc->retry_count++; 567
236 bfa_ioc_timer_start(ioc); 568 BFA_LOG(KERN_CRIT, bfad, log_level,
569 "Heart Beat of IOC has failed\n");
237} 570}
238 571
239/** 572/**
240 * Awaiting firmware version match. 573 * IOC failure.
241 */ 574 */
242static void 575static void
243bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) 576bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
244{ 577{
245 bfa_trc(ioc, event); 578 bfa_trc(ioc, event);
246 579
247 switch (event) { 580 switch (event) {
248 case IOC_E_TIMEOUT: 581
249 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 582 case IOC_E_FAILED:
583 /**
584 * Initialization failure during iocpf recovery.
585 * !!! Fall through !!!
586 */
587 case IOC_E_ENABLE:
588 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589 break;
590
591 case IOC_E_ENABLED:
592 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
250 break; 593 break;
251 594
252 case IOC_E_DISABLE: 595 case IOC_E_DISABLE:
253 bfa_ioc_disable_comp(ioc); 596 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
597 break;
598
599 case IOC_E_HWERROR:
254 /* 600 /*
255 * fall through 601 * HB failure notification, ignore.
256 */ 602 */
603 break;
604 default:
605 bfa_sm_fault(ioc, event);
606 }
607}
257 608
258 case IOC_E_DETACH: 609
259 bfa_ioc_timer_stop(ioc); 610
260 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 611/**
612 * IOCPF State Machine
613 */
614
615
616/**
617 * Reset entry actions -- initialize state machine
618 */
619static void
620bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
621{
622 iocpf->retry_count = 0;
623 iocpf->auto_recover = bfa_auto_recover;
624}
625
626/**
627 * Beginning state. IOC is in reset state.
628 */
629static void
630bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
631{
632 struct bfa_ioc_s *ioc = iocpf->ioc;
633
634 bfa_trc(ioc, event);
635
636 switch (event) {
637 case IOCPF_E_ENABLE:
638 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
261 break; 639 break;
262 640
263 case IOC_E_FWREADY: 641 case IOCPF_E_STOP:
264 break; 642 break;
265 643
266 default: 644 default:
@@ -269,31 +647,44 @@ bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
269} 647}
270 648
271/** 649/**
272 * Request for semaphore. 650 * Semaphore should be acquired for version check.
273 */ 651 */
274static void 652static void
275bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) 653bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
276{ 654{
277 bfa_ioc_hw_sem_get(ioc); 655 bfa_ioc_hw_sem_get(iocpf->ioc);
278} 656}
279 657
280/** 658/**
281 * Awaiting semaphore for h/w initialzation. 659 * Awaiting h/w semaphore to continue with version check.
282 */ 660 */
283static void 661static void
284bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) 662bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
285{ 663{
664 struct bfa_ioc_s *ioc = iocpf->ioc;
665
286 bfa_trc(ioc, event); 666 bfa_trc(ioc, event);
287 667
288 switch (event) { 668 switch (event) {
289 case IOC_E_SEMLOCKED: 669 case IOCPF_E_SEMLOCKED:
290 ioc->retry_count = 0; 670 if (bfa_ioc_firmware_lock(ioc)) {
291 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 671 iocpf->retry_count = 0;
672 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
673 } else {
674 bfa_ioc_hw_sem_release(ioc);
675 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
676 }
292 break; 677 break;
293 678
294 case IOC_E_DISABLE: 679 case IOCPF_E_DISABLE:
295 bfa_ioc_hw_sem_get_cancel(ioc); 680 bfa_ioc_hw_sem_get_cancel(ioc);
296 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 681 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
682 bfa_ioc_pf_disabled(ioc);
683 break;
684
685 case IOCPF_E_STOP:
686 bfa_ioc_hw_sem_get_cancel(ioc);
687 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
297 break; 688 break;
298 689
299 default: 690 default:
@@ -301,51 +692,81 @@ bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
301 } 692 }
302} 693}
303 694
304 695/**
696 * Notify enable completion callback.
697 */
305static void 698static void
306bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) 699bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
307{ 700{
308 bfa_ioc_timer_start(ioc); 701 /*
309 bfa_ioc_reset(ioc, BFA_FALSE); 702 * Call only the first time sm enters fwmismatch state.
703 */
704 if (iocpf->retry_count == 0)
705 bfa_ioc_pf_fwmismatch(iocpf->ioc);
706
707 iocpf->retry_count++;
708 bfa_iocpf_timer_start(iocpf->ioc);
310} 709}
311 710
312/** 711/**
313 * Hardware is being initialized. Interrupts are enabled. 712 * Awaiting firmware version match.
314 * Holding hardware semaphore lock.
315 */ 713 */
316static void 714static void
317bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) 715bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
318{ 716{
717 struct bfa_ioc_s *ioc = iocpf->ioc;
718
319 bfa_trc(ioc, event); 719 bfa_trc(ioc, event);
320 720
321 switch (event) { 721 switch (event) {
322 case IOC_E_FWREADY: 722 case IOCPF_E_TIMEOUT:
323 bfa_ioc_timer_stop(ioc); 723 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
324 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
325 break; 724 break;
326 725
327 case IOC_E_HWERROR: 726 case IOCPF_E_DISABLE:
328 bfa_ioc_timer_stop(ioc); 727 bfa_iocpf_timer_stop(ioc);
329 /* 728 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
330 * fall through 729 bfa_ioc_pf_disabled(ioc);
331 */ 730 break;
332 731
333 case IOC_E_TIMEOUT: 732 case IOCPF_E_STOP:
334 ioc->retry_count++; 733 bfa_iocpf_timer_stop(ioc);
335 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 734 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
336 bfa_ioc_timer_start(ioc); 735 break;
337 bfa_ioc_reset(ioc, BFA_TRUE);
338 break;
339 }
340 736
341 bfa_ioc_hw_sem_release(ioc); 737 default:
342 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 738 bfa_sm_fault(ioc, event);
739 }
740}
741
742/**
743 * Request for semaphore.
744 */
745static void
746bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
747{
748 bfa_ioc_hw_sem_get(iocpf->ioc);
749}
750
751/**
752 * Awaiting semaphore for h/w initialzation.
753 */
754static void
755bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
756{
757 struct bfa_ioc_s *ioc = iocpf->ioc;
758
759 bfa_trc(ioc, event);
760
761 switch (event) {
762 case IOCPF_E_SEMLOCKED:
763 iocpf->retry_count = 0;
764 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
343 break; 765 break;
344 766
345 case IOC_E_DISABLE: 767 case IOCPF_E_DISABLE:
346 bfa_ioc_hw_sem_release(ioc); 768 bfa_ioc_hw_sem_get_cancel(ioc);
347 bfa_ioc_timer_stop(ioc); 769 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
348 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
349 break; 770 break;
350 771
351 default: 772 default:
@@ -355,55 +776,54 @@ bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
355 776
356 777
357static void 778static void
358bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) 779bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
359{ 780{
360 bfa_ioc_timer_start(ioc); 781 bfa_iocpf_timer_start(iocpf->ioc);
361 bfa_ioc_send_enable(ioc); 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
362} 783}
363 784
364/** 785/**
365 * Host IOC function is being enabled, awaiting response from firmware. 786 * Hardware is being initialized. Interrupts are enabled.
366 * Semaphore is acquired. 787 * Holding hardware semaphore lock.
367 */ 788 */
368static void 789static void
369bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) 790bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
370{ 791{
792 struct bfa_ioc_s *ioc = iocpf->ioc;
793
371 bfa_trc(ioc, event); 794 bfa_trc(ioc, event);
372 795
373 switch (event) { 796 switch (event) {
374 case IOC_E_FWRSP_ENABLE: 797 case IOCPF_E_FWREADY:
375 bfa_ioc_timer_stop(ioc); 798 bfa_iocpf_timer_stop(ioc);
376 bfa_ioc_hw_sem_release(ioc); 799 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
377 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
378 break; 800 break;
379 801
380 case IOC_E_HWERROR: 802 case IOCPF_E_INITFAIL:
381 bfa_ioc_timer_stop(ioc); 803 bfa_iocpf_timer_stop(ioc);
382 /* 804 /*
383 * fall through 805 * !!! fall through !!!
384 */ 806 */
385 807
386 case IOC_E_TIMEOUT: 808 case IOCPF_E_TIMEOUT:
387 ioc->retry_count++; 809 iocpf->retry_count++;
388 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 810 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
389 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, 811 bfa_iocpf_timer_start(ioc);
390 BFI_IOC_UNINIT); 812 bfa_ioc_reset(ioc, BFA_TRUE);
391 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
392 break; 813 break;
393 } 814 }
394 815
395 bfa_ioc_hw_sem_release(ioc); 816 bfa_ioc_hw_sem_release(ioc);
396 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 817 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
397 break;
398 818
399 case IOC_E_DISABLE: 819 if (event == IOCPF_E_TIMEOUT)
400 bfa_ioc_timer_stop(ioc); 820 bfa_ioc_pf_failed(ioc);
401 bfa_ioc_hw_sem_release(ioc);
402 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
403 break; 821 break;
404 822
405 case IOC_E_FWREADY: 823 case IOCPF_E_DISABLE:
406 bfa_ioc_send_enable(ioc); 824 bfa_ioc_hw_sem_release(ioc);
825 bfa_iocpf_timer_stop(ioc);
826 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
407 break; 827 break;
408 828
409 default: 829 default:
@@ -413,40 +833,60 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
413 833
414 834
415static void 835static void
416bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) 836bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
417{ 837{
418 bfa_ioc_timer_start(ioc); 838 bfa_iocpf_timer_start(iocpf->ioc);
419 bfa_ioc_send_getattr(ioc); 839 bfa_ioc_send_enable(iocpf->ioc);
420} 840}
421 841
422/** 842/**
423 * IOC configuration in progress. Timer is active. 843 * Host IOC function is being enabled, awaiting response from firmware.
844 * Semaphore is acquired.
424 */ 845 */
425static void 846static void
426bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) 847bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
427{ 848{
849 struct bfa_ioc_s *ioc = iocpf->ioc;
850
428 bfa_trc(ioc, event); 851 bfa_trc(ioc, event);
429 852
430 switch (event) { 853 switch (event) {
431 case IOC_E_FWRSP_GETATTR: 854 case IOCPF_E_FWRSP_ENABLE:
432 bfa_ioc_timer_stop(ioc); 855 bfa_iocpf_timer_stop(ioc);
433 bfa_ioc_check_attr_wwns(ioc); 856 bfa_ioc_hw_sem_release(ioc);
434 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 857 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
435 break; 858 break;
436 859
437 case IOC_E_HWERROR: 860 case IOCPF_E_INITFAIL:
438 bfa_ioc_timer_stop(ioc); 861 bfa_iocpf_timer_stop(ioc);
439 /* 862 /*
440 * fall through 863 * !!! fall through !!!
441 */ 864 */
442 865
443 case IOC_E_TIMEOUT: 866 case IOCPF_E_TIMEOUT:
444 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 867 iocpf->retry_count++;
868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
869 bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
870 BFI_IOC_UNINIT);
871 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
872 break;
873 }
874
875 bfa_ioc_hw_sem_release(ioc);
876 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
877
878 if (event == IOCPF_E_TIMEOUT)
879 bfa_ioc_pf_failed(ioc);
445 break; 880 break;
446 881
447 case IOC_E_DISABLE: 882 case IOCPF_E_DISABLE:
448 bfa_ioc_timer_stop(ioc); 883 bfa_iocpf_timer_stop(ioc);
449 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 884 bfa_ioc_hw_sem_release(ioc);
885 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
886 break;
887
888 case IOCPF_E_FWREADY:
889 bfa_ioc_send_enable(ioc);
450 break; 890 break;
451 891
452 default: 892 default:
@@ -455,41 +895,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
455} 895}
456 896
457 897
898
458static void 899static void
459bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) 900bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
460{ 901{
461 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 902 bfa_ioc_pf_enabled(iocpf->ioc);
462 bfa_ioc_hb_monitor(ioc);
463 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
464} 903}
465 904
466static void 905static void
467bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) 906bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
468{ 907{
908 struct bfa_ioc_s *ioc = iocpf->ioc;
909
469 bfa_trc(ioc, event); 910 bfa_trc(ioc, event);
470 911
471 switch (event) { 912 switch (event) {
472 case IOC_E_ENABLE: 913 case IOCPF_E_DISABLE:
914 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
473 break; 915 break;
474 916
475 case IOC_E_DISABLE: 917 case IOCPF_E_GETATTRFAIL:
476 bfa_ioc_hb_stop(ioc); 918 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
477 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
478 break; 919 break;
479 920
480 case IOC_E_HWERROR: 921 case IOCPF_E_FAIL:
481 case IOC_E_FWREADY: 922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
482 /** 923 break;
483 * Hard error or IOC recovery by other function.
484 * Treat it same as heartbeat failure.
485 */
486 bfa_ioc_hb_stop(ioc);
487 /*
488 * !!! fall through !!!
489 */
490 924
491 case IOC_E_HBFAIL: 925 case IOCPF_E_FWREADY:
492 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 926 if (bfa_ioc_is_operational(ioc))
927 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
928 else
929 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
930
931 bfa_ioc_pf_failed(ioc);
493 break; 932 break;
494 933
495 default: 934 default:
@@ -499,36 +938,41 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
499 938
500 939
501static void 940static void
502bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) 941bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
503{ 942{
504 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); 943 bfa_iocpf_timer_start(iocpf->ioc);
505 bfa_ioc_timer_start(ioc); 944 bfa_ioc_send_disable(iocpf->ioc);
506 bfa_ioc_send_disable(ioc);
507} 945}
508 946
509/** 947/**
510 * IOC is being disabled 948 * IOC is being disabled
511 */ 949 */
512static void 950static void
513bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) 951bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
514{ 952{
953 struct bfa_ioc_s *ioc = iocpf->ioc;
954
515 bfa_trc(ioc, event); 955 bfa_trc(ioc, event);
516 956
517 switch (event) { 957 switch (event) {
518 case IOC_E_FWRSP_DISABLE: 958 case IOCPF_E_FWRSP_DISABLE:
519 bfa_ioc_timer_stop(ioc); 959 case IOCPF_E_FWREADY:
520 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 960 bfa_iocpf_timer_stop(ioc);
961 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
521 break; 962 break;
522 963
523 case IOC_E_HWERROR: 964 case IOCPF_E_FAIL:
524 bfa_ioc_timer_stop(ioc); 965 bfa_iocpf_timer_stop(ioc);
525 /* 966 /*
526 * !!! fall through !!! 967 * !!! fall through !!!
527 */ 968 */
528 969
529 case IOC_E_TIMEOUT: 970 case IOCPF_E_TIMEOUT:
530 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 971 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
531 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 972 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
973 break;
974
975 case IOCPF_E_FWRSP_ENABLE:
532 break; 976 break;
533 977
534 default: 978 default:
@@ -540,31 +984,26 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
540 * IOC disable completion entry. 984 * IOC disable completion entry.
541 */ 985 */
542static void 986static void
543bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) 987bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
544{ 988{
545 bfa_ioc_disable_comp(ioc); 989 bfa_ioc_pf_disabled(iocpf->ioc);
546} 990}
547 991
548static void 992static void
549bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) 993bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
550{ 994{
995 struct bfa_ioc_s *ioc = iocpf->ioc;
996
551 bfa_trc(ioc, event); 997 bfa_trc(ioc, event);
552 998
553 switch (event) { 999 switch (event) {
554 case IOC_E_ENABLE: 1000 case IOCPF_E_ENABLE:
555 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1001 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
556 break; 1002 break;
557 1003
558 case IOC_E_DISABLE: 1004 case IOCPF_E_STOP:
559 ioc->cbfn->disable_cbfn(ioc->bfa);
560 break;
561
562 case IOC_E_FWREADY:
563 break;
564
565 case IOC_E_DETACH:
566 bfa_ioc_firmware_unlock(ioc); 1005 bfa_ioc_firmware_unlock(ioc);
567 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1006 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
568 break; 1007 break;
569 1008
570 default: 1009 default:
@@ -574,34 +1013,35 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
574 1013
575 1014
576static void 1015static void
577bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) 1016bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
578{ 1017{
579 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1018 bfa_iocpf_timer_start(iocpf->ioc);
580 bfa_ioc_timer_start(ioc);
581} 1019}
582 1020
583/** 1021/**
584 * Hardware initialization failed. 1022 * Hardware initialization failed.
585 */ 1023 */
586static void 1024static void
587bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) 1025bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
588{ 1026{
1027 struct bfa_ioc_s *ioc = iocpf->ioc;
1028
589 bfa_trc(ioc, event); 1029 bfa_trc(ioc, event);
590 1030
591 switch (event) { 1031 switch (event) {
592 case IOC_E_DISABLE: 1032 case IOCPF_E_DISABLE:
593 bfa_ioc_timer_stop(ioc); 1033 bfa_iocpf_timer_stop(ioc);
594 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 1034 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
595 break; 1035 break;
596 1036
597 case IOC_E_DETACH: 1037 case IOCPF_E_STOP:
598 bfa_ioc_timer_stop(ioc); 1038 bfa_iocpf_timer_stop(ioc);
599 bfa_ioc_firmware_unlock(ioc); 1039 bfa_ioc_firmware_unlock(ioc);
600 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1040 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
601 break; 1041 break;
602 1042
603 case IOC_E_TIMEOUT: 1043 case IOCPF_E_TIMEOUT:
604 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1044 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
605 break; 1045 break;
606 1046
607 default: 1047 default:
@@ -611,80 +1051,47 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
611 1051
612 1052
613static void 1053static void
614bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) 1054bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
615{ 1055{
616 struct list_head *qe;
617 struct bfa_ioc_hbfail_notify_s *notify;
618
619 /** 1056 /**
620 * Mark IOC as failed in hardware and stop firmware. 1057 * Mark IOC as failed in hardware and stop firmware.
621 */ 1058 */
622 bfa_ioc_lpu_stop(ioc); 1059 bfa_ioc_lpu_stop(iocpf->ioc);
623 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 1060 bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
624 1061
625 /** 1062 /**
626 * Notify other functions on HB failure. 1063 * Notify other functions on HB failure.
627 */ 1064 */
628 bfa_ioc_notify_hbfail(ioc); 1065 bfa_ioc_notify_hbfail(iocpf->ioc);
629
630 /**
631 * Notify driver and common modules registered for notification.
632 */
633 ioc->cbfn->hbfail_cbfn(ioc->bfa);
634 list_for_each(qe, &ioc->hb_notify_q) {
635 notify = (struct bfa_ioc_hbfail_notify_s *)qe;
636 notify->cbfn(notify->cbarg);
637 }
638 1066
639 /** 1067 /**
640 * Flush any queued up mailbox requests. 1068 * Flush any queued up mailbox requests.
641 */ 1069 */
642 bfa_ioc_mbox_hbfail(ioc); 1070 bfa_ioc_mbox_hbfail(iocpf->ioc);
643 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
644 1071
645 /** 1072 if (iocpf->auto_recover)
646 * Trigger auto-recovery after a delay. 1073 bfa_iocpf_recovery_timer_start(iocpf->ioc);
647 */
648 if (ioc->auto_recover) {
649 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
650 bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
651 }
652} 1074}
653 1075
654/** 1076/**
655 * IOC heartbeat failure. 1077 * IOC is in failed state.
656 */ 1078 */
657static void 1079static void
658bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) 1080bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
659{ 1081{
1082 struct bfa_ioc_s *ioc = iocpf->ioc;
1083
660 bfa_trc(ioc, event); 1084 bfa_trc(ioc, event);
661 1085
662 switch (event) { 1086 switch (event) {
663 1087 case IOCPF_E_DISABLE:
664 case IOC_E_ENABLE: 1088 if (iocpf->auto_recover)
665 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1089 bfa_iocpf_timer_stop(ioc);
666 break; 1090 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
667
668 case IOC_E_DISABLE:
669 if (ioc->auto_recover)
670 bfa_ioc_timer_stop(ioc);
671 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
672 break; 1091 break;
673 1092
674 case IOC_E_TIMEOUT: 1093 case IOCPF_E_TIMEOUT:
675 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 1094 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
676 break;
677
678 case IOC_E_FWREADY:
679 /**
680 * Recovery is already initiated by other function.
681 */
682 break;
683
684 case IOC_E_HWERROR:
685 /*
686 * HB failure notification, ignore.
687 */
688 break; 1095 break;
689 1096
690 default: 1097 default:
@@ -695,14 +1102,14 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
695 1102
696 1103
697/** 1104/**
698 * bfa_ioc_pvt BFA IOC private functions 1105 * hal_ioc_pvt BFA IOC private functions
699 */ 1106 */
700 1107
701static void 1108static void
702bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) 1109bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
703{ 1110{
704 struct list_head *qe; 1111 struct list_head *qe;
705 struct bfa_ioc_hbfail_notify_s *notify; 1112 struct bfa_ioc_hbfail_notify_s *notify;
706 1113
707 ioc->cbfn->disable_cbfn(ioc->bfa); 1114 ioc->cbfn->disable_cbfn(ioc->bfa);
708 1115
@@ -710,25 +1117,17 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
710 * Notify common modules registered for notification. 1117 * Notify common modules registered for notification.
711 */ 1118 */
712 list_for_each(qe, &ioc->hb_notify_q) { 1119 list_for_each(qe, &ioc->hb_notify_q) {
713 notify = (struct bfa_ioc_hbfail_notify_s *)qe; 1120 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
714 notify->cbfn(notify->cbarg); 1121 notify->cbfn(notify->cbarg);
715 } 1122 }
716} 1123}
717 1124
718void
719bfa_ioc_sem_timeout(void *ioc_arg)
720{
721 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
722
723 bfa_ioc_hw_sem_get(ioc);
724}
725
726bfa_boolean_t 1125bfa_boolean_t
727bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 1126bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
728{ 1127{
729 u32 r32; 1128 u32 r32;
730 int cnt = 0; 1129 int cnt = 0;
731#define BFA_SEM_SPINCNT 3000 1130#define BFA_SEM_SPINCNT 3000
732 1131
733 r32 = bfa_reg_read(sem_reg); 1132 r32 = bfa_reg_read(sem_reg);
734 1133
@@ -754,7 +1153,7 @@ bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
754static void 1153static void
755bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) 1154bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
756{ 1155{
757 u32 r32; 1156 u32 r32;
758 1157
759 /** 1158 /**
760 * First read to the semaphore register will return 0, subsequent reads 1159 * First read to the semaphore register will return 0, subsequent reads
@@ -762,12 +1161,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
762 */ 1161 */
763 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 1162 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
764 if (r32 == 0) { 1163 if (r32 == 0) {
765 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 1164 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
766 return; 1165 return;
767 } 1166 }
768 1167
769 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 1168 bfa_sem_timer_start(ioc);
770 ioc, BFA_IOC_HWSEM_TOV);
771} 1169}
772 1170
773void 1171void
@@ -779,7 +1177,7 @@ bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
779static void 1177static void
780bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) 1178bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
781{ 1179{
782 bfa_timer_stop(&ioc->sem_timer); 1180 bfa_sem_timer_stop(ioc);
783} 1181}
784 1182
785/** 1183/**
@@ -788,14 +1186,18 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
788static void 1186static void
789bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) 1187bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
790{ 1188{
791 u32 pss_ctl; 1189 u32 pss_ctl;
792 int i; 1190 int i;
793#define PSS_LMEM_INIT_TIME 10000 1191#define PSS_LMEM_INIT_TIME 10000
794 1192
795 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1193 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
796 pss_ctl &= ~__PSS_LMEM_RESET; 1194 pss_ctl &= ~__PSS_LMEM_RESET;
797 pss_ctl |= __PSS_LMEM_INIT_EN; 1195 pss_ctl |= __PSS_LMEM_INIT_EN;
798 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ 1196
1197 /*
1198 * i2c workaround 12.5khz clock
1199 */
1200 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
799 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1201 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
800 1202
801 /** 1203 /**
@@ -821,7 +1223,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
821static void 1223static void
822bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) 1224bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
823{ 1225{
824 u32 pss_ctl; 1226 u32 pss_ctl;
825 1227
826 /** 1228 /**
827 * Take processor out of reset. 1229 * Take processor out of reset.
@@ -835,7 +1237,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
835static void 1237static void
836bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) 1238bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
837{ 1239{
838 u32 pss_ctl; 1240 u32 pss_ctl;
839 1241
840 /** 1242 /**
841 * Put processors in reset. 1243 * Put processors in reset.
@@ -852,10 +1254,10 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
852void 1254void
853bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 1255bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
854{ 1256{
855 u32 pgnum, pgoff; 1257 u32 pgnum, pgoff;
856 u32 loff = 0; 1258 u32 loff = 0;
857 int i; 1259 int i;
858 u32 *fwsig = (u32 *) fwhdr; 1260 u32 *fwsig = (u32 *) fwhdr;
859 1261
860 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1262 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
861 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1263 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -863,7 +1265,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
863 1265
864 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 1266 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
865 i++) { 1267 i++) {
866 fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 1268 fwsig[i] =
1269 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
867 loff += sizeof(u32); 1270 loff += sizeof(u32);
868 } 1271 }
869} 1272}
@@ -875,10 +1278,10 @@ bfa_boolean_t
875bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 1278bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
876{ 1279{
877 struct bfi_ioc_image_hdr_s *drv_fwhdr; 1280 struct bfi_ioc_image_hdr_s *drv_fwhdr;
878 int i; 1281 int i;
879 1282
880 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1283 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
881 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1284 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
882 1285
883 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 1286 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
884 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { 1287 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -897,21 +1300,20 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
897 * Return true if current running version is valid. Firmware signature and 1300 * Return true if current running version is valid. Firmware signature and
898 * execution context (driver/bios) must match. 1301 * execution context (driver/bios) must match.
899 */ 1302 */
900static bfa_boolean_t 1303static bfa_boolean_t
901bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) 1304bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
902{ 1305{
903 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1306 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
904 1307
905 /** 1308 /**
906 * If bios/efi boot (flash based) -- return true 1309 * If bios/efi boot (flash based) -- return true
907 */ 1310 */
908 if (bfa_ioc_is_optrom(ioc)) 1311 if (bfa_ioc_is_bios_optrom(ioc))
909 return BFA_TRUE; 1312 return BFA_TRUE;
910 1313
911 bfa_ioc_fwver_get(ioc, &fwhdr); 1314 bfa_ioc_fwver_get(ioc, &fwhdr);
912 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1315 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
913 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1316 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
914
915 1317
916 if (fwhdr.signature != drv_fwhdr->signature) { 1318 if (fwhdr.signature != drv_fwhdr->signature) {
917 bfa_trc(ioc, fwhdr.signature); 1319 bfa_trc(ioc, fwhdr.signature);
@@ -919,9 +1321,9 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
919 return BFA_FALSE; 1321 return BFA_FALSE;
920 } 1322 }
921 1323
922 if (fwhdr.exec != drv_fwhdr->exec) { 1324 if (bfa_os_swap32(fwhdr.param) != boot_env) {
923 bfa_trc(ioc, fwhdr.exec); 1325 bfa_trc(ioc, fwhdr.param);
924 bfa_trc(ioc, drv_fwhdr->exec); 1326 bfa_trc(ioc, boot_env);
925 return BFA_FALSE; 1327 return BFA_FALSE;
926 } 1328 }
927 1329
@@ -934,7 +1336,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
934static void 1336static void
935bfa_ioc_msgflush(struct bfa_ioc_s *ioc) 1337bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
936{ 1338{
937 u32 r32; 1339 u32 r32;
938 1340
939 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1341 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
940 if (r32) 1342 if (r32)
@@ -946,7 +1348,9 @@ static void
946bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) 1348bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
947{ 1349{
948 enum bfi_ioc_state ioc_fwstate; 1350 enum bfi_ioc_state ioc_fwstate;
949 bfa_boolean_t fwvalid; 1351 bfa_boolean_t fwvalid;
1352 u32 boot_type;
1353 u32 boot_env;
950 1354
951 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1355 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
952 1356
@@ -955,14 +1359,33 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
955 1359
956 bfa_trc(ioc, ioc_fwstate); 1360 bfa_trc(ioc, ioc_fwstate);
957 1361
1362 boot_type = BFI_BOOT_TYPE_NORMAL;
1363 boot_env = BFI_BOOT_LOADER_OS;
1364
1365 /**
1366 * Flash based firmware boot BIOS env.
1367 */
1368 if (bfa_ioc_is_bios_optrom(ioc)) {
1369 boot_type = BFI_BOOT_TYPE_FLASH;
1370 boot_env = BFI_BOOT_LOADER_BIOS;
1371 }
1372
1373 /**
1374 * Flash based firmware boot UEFI env.
1375 */
1376 if (bfa_ioc_is_uefi(ioc)) {
1377 boot_type = BFI_BOOT_TYPE_FLASH;
1378 boot_env = BFI_BOOT_LOADER_UEFI;
1379 }
1380
958 /** 1381 /**
959 * check if firmware is valid 1382 * check if firmware is valid
960 */ 1383 */
961 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1384 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
962 BFA_FALSE : bfa_ioc_fwver_valid(ioc); 1385 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
963 1386
964 if (!fwvalid) { 1387 if (!fwvalid) {
965 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1388 bfa_ioc_boot(ioc, boot_type, boot_env);
966 return; 1389 return;
967 } 1390 }
968 1391
@@ -971,7 +1394,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
971 * just wait for an initialization completion interrupt. 1394 * just wait for an initialization completion interrupt.
972 */ 1395 */
973 if (ioc_fwstate == BFI_IOC_INITING) { 1396 if (ioc_fwstate == BFI_IOC_INITING) {
974 bfa_trc(ioc, ioc_fwstate);
975 ioc->cbfn->reset_cbfn(ioc->bfa); 1397 ioc->cbfn->reset_cbfn(ioc->bfa);
976 return; 1398 return;
977 } 1399 }
@@ -985,8 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
985 * is loaded. 1407 * is loaded.
986 */ 1408 */
987 if (ioc_fwstate == BFI_IOC_DISABLED || 1409 if (ioc_fwstate == BFI_IOC_DISABLED ||
988 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1410 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
989 bfa_trc(ioc, ioc_fwstate);
990 1411
991 /** 1412 /**
992 * When using MSI-X any pending firmware ready event should 1413 * When using MSI-X any pending firmware ready event should
@@ -994,20 +1415,20 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
994 */ 1415 */
995 bfa_ioc_msgflush(ioc); 1416 bfa_ioc_msgflush(ioc);
996 ioc->cbfn->reset_cbfn(ioc->bfa); 1417 ioc->cbfn->reset_cbfn(ioc->bfa);
997 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1418 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
998 return; 1419 return;
999 } 1420 }
1000 1421
1001 /** 1422 /**
1002 * Initialize the h/w for any other states. 1423 * Initialize the h/w for any other states.
1003 */ 1424 */
1004 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1425 bfa_ioc_boot(ioc, boot_type, boot_env);
1005} 1426}
1006 1427
1007static void 1428static void
1008bfa_ioc_timeout(void *ioc_arg) 1429bfa_ioc_timeout(void *ioc_arg)
1009{ 1430{
1010 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 1431 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
1011 1432
1012 bfa_trc(ioc, 0); 1433 bfa_trc(ioc, 0);
1013 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); 1434 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
@@ -1016,8 +1437,8 @@ bfa_ioc_timeout(void *ioc_arg)
1016void 1437void
1017bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) 1438bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1018{ 1439{
1019 u32 *msgp = (u32 *) ioc_msg; 1440 u32 *msgp = (u32 *) ioc_msg;
1020 u32 i; 1441 u32 i;
1021 1442
1022 bfa_trc(ioc, msgp[0]); 1443 bfa_trc(ioc, msgp[0]);
1023 bfa_trc(ioc, len); 1444 bfa_trc(ioc, len);
@@ -1038,17 +1459,20 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1038 * write 1 to mailbox CMD to trigger LPU event 1459 * write 1 to mailbox CMD to trigger LPU event
1039 */ 1460 */
1040 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); 1461 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
1041 (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1462 (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1042} 1463}
1043 1464
1044static void 1465static void
1045bfa_ioc_send_enable(struct bfa_ioc_s *ioc) 1466bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1046{ 1467{
1047 struct bfi_ioc_ctrl_req_s enable_req; 1468 struct bfi_ioc_ctrl_req_s enable_req;
1469 struct bfa_timeval_s tv;
1048 1470
1049 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1471 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1050 bfa_ioc_portid(ioc)); 1472 bfa_ioc_portid(ioc));
1051 enable_req.ioc_class = ioc->ioc_mc; 1473 enable_req.ioc_class = ioc->ioc_mc;
1474 bfa_os_gettimeofday(&tv);
1475 enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
1052 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1476 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1053} 1477}
1054 1478
@@ -1065,7 +1489,7 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1065static void 1489static void
1066bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) 1490bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1067{ 1491{
1068 struct bfi_ioc_getattr_req_s attr_req; 1492 struct bfi_ioc_getattr_req_s attr_req;
1069 1493
1070 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, 1494 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1071 bfa_ioc_portid(ioc)); 1495 bfa_ioc_portid(ioc));
@@ -1077,12 +1501,11 @@ static void
1077bfa_ioc_hb_check(void *cbarg) 1501bfa_ioc_hb_check(void *cbarg)
1078{ 1502{
1079 struct bfa_ioc_s *ioc = cbarg; 1503 struct bfa_ioc_s *ioc = cbarg;
1080 u32 hb_count; 1504 u32 hb_count;
1081 1505
1082 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1506 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1083 if (ioc->hb_count == hb_count) { 1507 if (ioc->hb_count == hb_count) {
1084 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, 1508 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
1085 hb_count);
1086 bfa_ioc_recover(ioc); 1509 bfa_ioc_recover(ioc);
1087 return; 1510 return;
1088 } else { 1511 } else {
@@ -1090,61 +1513,54 @@ bfa_ioc_hb_check(void *cbarg)
1090 } 1513 }
1091 1514
1092 bfa_ioc_mbox_poll(ioc); 1515 bfa_ioc_mbox_poll(ioc);
1093 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, 1516 bfa_hb_timer_start(ioc);
1094 ioc, BFA_IOC_HB_TOV);
1095} 1517}
1096 1518
1097static void 1519static void
1098bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1520bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1099{ 1521{
1100 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1522 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1101 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1523 bfa_hb_timer_start(ioc);
1102 BFA_IOC_HB_TOV);
1103} 1524}
1104 1525
1105static void 1526static void
1106bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) 1527bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1107{ 1528{
1108 bfa_timer_stop(&ioc->ioc_timer); 1529 bfa_hb_timer_stop(ioc);
1109} 1530}
1110 1531
1532
1111/** 1533/**
1112 * Initiate a full firmware download. 1534 * Initiate a full firmware download.
1113 */ 1535 */
1114static void 1536static void
1115bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, 1537bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1116 u32 boot_param) 1538 u32 boot_env)
1117{ 1539{
1118 u32 *fwimg; 1540 u32 *fwimg;
1119 u32 pgnum, pgoff; 1541 u32 pgnum, pgoff;
1120 u32 loff = 0; 1542 u32 loff = 0;
1121 u32 chunkno = 0; 1543 u32 chunkno = 0;
1122 u32 i; 1544 u32 i;
1123 1545
1124 /** 1546 /**
1125 * Initialize LMEM first before code download 1547 * Initialize LMEM first before code download
1126 */ 1548 */
1127 bfa_ioc_lmem_init(ioc); 1549 bfa_ioc_lmem_init(ioc);
1128 1550
1129 /** 1551 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1130 * Flash based firmware boot 1552 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1131 */
1132 bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1133 if (bfa_ioc_is_optrom(ioc))
1134 boot_type = BFI_BOOT_TYPE_FLASH;
1135 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1136
1137 1553
1138 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1554 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1139 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1555 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1140 1556
1141 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1557 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1142 1558
1143 for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { 1559 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1144 1560
1145 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1561 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1146 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1562 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1147 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 1563 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1148 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1564 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1149 } 1565 }
1150 1566
@@ -1162,7 +1578,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1162 loff = PSS_SMEM_PGOFF(loff); 1578 loff = PSS_SMEM_PGOFF(loff);
1163 if (loff == 0) { 1579 if (loff == 0) {
1164 pgnum++; 1580 pgnum++;
1165 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1581 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1582 pgnum);
1166 } 1583 }
1167 } 1584 }
1168 1585
@@ -1171,11 +1588,11 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1171 1588
1172 /* 1589 /*
1173 * Set boot type and boot param at the end. 1590 * Set boot type and boot param at the end.
1174 */ 1591 */
1175 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1592 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1176 bfa_os_swap32(boot_type)); 1593 bfa_os_swap32(boot_type));
1177 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, 1594 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1178 bfa_os_swap32(boot_param)); 1595 bfa_os_swap32(boot_env));
1179} 1596}
1180 1597
1181static void 1598static void
@@ -1190,11 +1607,11 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1190static void 1607static void
1191bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) 1608bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1192{ 1609{
1193 struct bfi_ioc_attr_s *attr = ioc->attr; 1610 struct bfi_ioc_attr_s *attr = ioc->attr;
1194 1611
1195 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1612 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
1196 attr->card_type = bfa_os_ntohl(attr->card_type); 1613 attr->card_type = bfa_os_ntohl(attr->card_type);
1197 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1614 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
1198 1615
1199 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1616 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1200} 1617}
@@ -1205,8 +1622,8 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1205static void 1622static void
1206bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) 1623bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1207{ 1624{
1208 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1625 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1209 int mc; 1626 int mc;
1210 1627
1211 INIT_LIST_HEAD(&mod->cmd_q); 1628 INIT_LIST_HEAD(&mod->cmd_q);
1212 for (mc = 0; mc < BFI_MC_MAX; mc++) { 1629 for (mc = 0; mc < BFI_MC_MAX; mc++) {
@@ -1221,9 +1638,9 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1221static void 1638static void
1222bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) 1639bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1223{ 1640{
1224 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1641 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1225 struct bfa_mbox_cmd_s *cmd; 1642 struct bfa_mbox_cmd_s *cmd;
1226 u32 stat; 1643 u32 stat;
1227 1644
1228 /** 1645 /**
1229 * If no command pending, do nothing 1646 * If no command pending, do nothing
@@ -1251,25 +1668,194 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1251static void 1668static void
1252bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) 1669bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1253{ 1670{
1254 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1671 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1255 struct bfa_mbox_cmd_s *cmd; 1672 struct bfa_mbox_cmd_s *cmd;
1256 1673
1257 while (!list_empty(&mod->cmd_q)) 1674 while (!list_empty(&mod->cmd_q))
1258 bfa_q_deq(&mod->cmd_q, &cmd); 1675 bfa_q_deq(&mod->cmd_q, &cmd);
1259} 1676}
1260 1677
1261/** 1678/**
1262 * bfa_ioc_public 1679 * Read data from SMEM to host through PCI memmap
1680 *
1681 * @param[in] ioc memory for IOC
1682 * @param[in] tbuf app memory to store data from smem
1683 * @param[in] soff smem offset
1684 * @param[in] sz size of smem in bytes
1685 */
1686static bfa_status_t
1687bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1688{
1689 u32 pgnum, loff, r32;
1690 int i, len;
1691 u32 *buf = tbuf;
1692
1693 pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1694 loff = bfa_ioc_smem_pgoff(ioc, soff);
1695 bfa_trc(ioc, pgnum);
1696 bfa_trc(ioc, loff);
1697 bfa_trc(ioc, sz);
1698
1699 /*
1700 * Hold semaphore to serialize pll init and fwtrc.
1701 */
1702 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1703 bfa_trc(ioc, 0);
1704 return BFA_STATUS_FAILED;
1705 }
1706
1707 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1708
1709 len = sz/sizeof(u32);
1710 bfa_trc(ioc, len);
1711 for (i = 0; i < len; i++) {
1712 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1713 buf[i] = bfa_os_ntohl(r32);
1714 loff += sizeof(u32);
1715
1716 /**
1717 * handle page offset wrap around
1718 */
1719 loff = PSS_SMEM_PGOFF(loff);
1720 if (loff == 0) {
1721 pgnum++;
1722 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1723 }
1724 }
1725 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1726 bfa_ioc_smem_pgnum(ioc, 0));
1727 /*
1728 * release semaphore.
1729 */
1730 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1731
1732 bfa_trc(ioc, pgnum);
1733 return BFA_STATUS_OK;
1734}
1735
1736/**
1737 * Clear SMEM data from host through PCI memmap
1738 *
1739 * @param[in] ioc memory for IOC
1740 * @param[in] soff smem offset
1741 * @param[in] sz size of smem in bytes
1742 */
1743static bfa_status_t
1744bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1745{
1746 int i, len;
1747 u32 pgnum, loff;
1748
1749 pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1750 loff = bfa_ioc_smem_pgoff(ioc, soff);
1751 bfa_trc(ioc, pgnum);
1752 bfa_trc(ioc, loff);
1753 bfa_trc(ioc, sz);
1754
1755 /*
1756 * Hold semaphore to serialize pll init and fwtrc.
1757 */
1758 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1759 bfa_trc(ioc, 0);
1760 return BFA_STATUS_FAILED;
1761 }
1762
1763 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1764
1765 len = sz/sizeof(u32); /* len in words */
1766 bfa_trc(ioc, len);
1767 for (i = 0; i < len; i++) {
1768 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1769 loff += sizeof(u32);
1770
1771 /**
1772 * handle page offset wrap around
1773 */
1774 loff = PSS_SMEM_PGOFF(loff);
1775 if (loff == 0) {
1776 pgnum++;
1777 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1778 }
1779 }
1780 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1781 bfa_ioc_smem_pgnum(ioc, 0));
1782
1783 /*
1784 * release semaphore.
1785 */
1786 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1787 bfa_trc(ioc, pgnum);
1788 return BFA_STATUS_OK;
1789}
1790
1791/**
1792 * hal iocpf to ioc interface
1793 */
1794static void
1795bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
1796{
1797 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1798}
1799
1800static void
1801bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
1802{
1803 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1804}
1805
1806static void
1807bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
1808{
1809 bfa_fsm_send_event(ioc, IOC_E_FAILED);
1810}
1811
1812static void
1813bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1814{
1815 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1816 /**
1817 * Provide enable completion callback.
1818 */
1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1820 BFA_LOG(KERN_WARNING, bfad, log_level,
1821 "Running firmware version is incompatible "
1822 "with the driver version\n");
1823}
1824
1825
1826
1827/**
1828 * hal_ioc_public
1263 */ 1829 */
1264 1830
1831bfa_status_t
1832bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1833{
1834
1835 /*
1836 * Hold semaphore so that nobody can access the chip during init.
1837 */
1838 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1839
1840 bfa_ioc_pll_init_asic(ioc);
1841
1842 ioc->pllinit = BFA_TRUE;
1843 /*
1844 * release semaphore.
1845 */
1846 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1847
1848 return BFA_STATUS_OK;
1849}
1850
1265/** 1851/**
1266 * Interface used by diag module to do firmware boot with memory test 1852 * Interface used by diag module to do firmware boot with memory test
1267 * as the entry vector. 1853 * as the entry vector.
1268 */ 1854 */
1269void 1855void
1270bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) 1856bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1271{ 1857{
1272 bfa_os_addr_t rb; 1858 bfa_os_addr_t rb;
1273 1859
1274 bfa_ioc_stats(ioc, ioc_boots); 1860 bfa_ioc_stats(ioc, ioc_boots);
1275 1861
@@ -1280,7 +1866,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1280 * Initialize IOC state of all functions on a chip reset. 1866 * Initialize IOC state of all functions on a chip reset.
1281 */ 1867 */
1282 rb = ioc->pcidev.pci_bar_kva; 1868 rb = ioc->pcidev.pci_bar_kva;
1283 if (boot_param == BFI_BOOT_TYPE_MEMTEST) { 1869 if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1284 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); 1870 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
1285 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); 1871 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
1286 } else { 1872 } else {
@@ -1289,7 +1875,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1289 } 1875 }
1290 1876
1291 bfa_ioc_msgflush(ioc); 1877 bfa_ioc_msgflush(ioc);
1292 bfa_ioc_download_fw(ioc, boot_type, boot_param); 1878 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1293 1879
1294 /** 1880 /**
1295 * Enable interrupts just before starting LPU 1881 * Enable interrupts just before starting LPU
@@ -1308,18 +1894,29 @@ bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1308} 1894}
1309 1895
1310 1896
1897
1311bfa_boolean_t 1898bfa_boolean_t
1312bfa_ioc_is_operational(struct bfa_ioc_s *ioc) 1899bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1313{ 1900{
1314 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 1901 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1315} 1902}
1316 1903
1904bfa_boolean_t
1905bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1906{
1907 u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
1908
1909 return ((r32 != BFI_IOC_UNINIT) &&
1910 (r32 != BFI_IOC_INITING) &&
1911 (r32 != BFI_IOC_MEMTEST));
1912}
1913
1317void 1914void
1318bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) 1915bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1319{ 1916{
1320 u32 *msgp = mbmsg; 1917 u32 *msgp = mbmsg;
1321 u32 r32; 1918 u32 r32;
1322 int i; 1919 int i;
1323 1920
1324 /** 1921 /**
1325 * read the MBOX msg 1922 * read the MBOX msg
@@ -1341,9 +1938,10 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1341void 1938void
1342bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) 1939bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1343{ 1940{
1344 union bfi_ioc_i2h_msg_u *msg; 1941 union bfi_ioc_i2h_msg_u *msg;
1942 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
1345 1943
1346 msg = (union bfi_ioc_i2h_msg_u *)m; 1944 msg = (union bfi_ioc_i2h_msg_u *) m;
1347 1945
1348 bfa_ioc_stats(ioc, ioc_isrs); 1946 bfa_ioc_stats(ioc, ioc_isrs);
1349 1947
@@ -1352,15 +1950,15 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1352 break; 1950 break;
1353 1951
1354 case BFI_IOC_I2H_READY_EVENT: 1952 case BFI_IOC_I2H_READY_EVENT:
1355 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1953 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1356 break; 1954 break;
1357 1955
1358 case BFI_IOC_I2H_ENABLE_REPLY: 1956 case BFI_IOC_I2H_ENABLE_REPLY:
1359 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); 1957 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1360 break; 1958 break;
1361 1959
1362 case BFI_IOC_I2H_DISABLE_REPLY: 1960 case BFI_IOC_I2H_DISABLE_REPLY:
1363 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); 1961 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1364 break; 1962 break;
1365 1963
1366 case BFI_IOC_I2H_GETATTR_REPLY: 1964 case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1378,29 +1976,24 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1378 * 1976 *
1379 * @param[in] ioc memory for IOC 1977 * @param[in] ioc memory for IOC
1380 * @param[in] bfa driver instance structure 1978 * @param[in] bfa driver instance structure
1381 * @param[in] trcmod kernel trace module
1382 * @param[in] aen kernel aen event module
1383 * @param[in] logm kernel logging module
1384 */ 1979 */
1385void 1980void
1386bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, 1981bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1387 struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, 1982 struct bfa_timer_mod_s *timer_mod)
1388 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm) 1983{
1389{ 1984 ioc->bfa = bfa;
1390 ioc->bfa = bfa; 1985 ioc->cbfn = cbfn;
1391 ioc->cbfn = cbfn; 1986 ioc->timer_mod = timer_mod;
1392 ioc->timer_mod = timer_mod; 1987 ioc->fcmode = BFA_FALSE;
1393 ioc->trcmod = trcmod; 1988 ioc->pllinit = BFA_FALSE;
1394 ioc->aen = aen;
1395 ioc->logm = logm;
1396 ioc->fcmode = BFA_FALSE;
1397 ioc->pllinit = BFA_FALSE;
1398 ioc->dbg_fwsave_once = BFA_TRUE; 1989 ioc->dbg_fwsave_once = BFA_TRUE;
1990 ioc->iocpf.ioc = ioc;
1399 1991
1400 bfa_ioc_mbox_attach(ioc); 1992 bfa_ioc_mbox_attach(ioc);
1401 INIT_LIST_HEAD(&ioc->hb_notify_q); 1993 INIT_LIST_HEAD(&ioc->hb_notify_q);
1402 1994
1403 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1995 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1996 bfa_fsm_send_event(ioc, IOC_E_RESET);
1404} 1997}
1405 1998
1406/** 1999/**
@@ -1421,10 +2014,10 @@ void
1421bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 2014bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1422 enum bfi_mclass mc) 2015 enum bfi_mclass mc)
1423{ 2016{
1424 ioc->ioc_mc = mc; 2017 ioc->ioc_mc = mc;
1425 ioc->pcidev = *pcidev; 2018 ioc->pcidev = *pcidev;
1426 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2019 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1427 ioc->cna = ioc->ctdev && !ioc->fcmode; 2020 ioc->cna = ioc->ctdev && !ioc->fcmode;
1428 2021
1429 /** 2022 /**
1430 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2023 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
@@ -1445,14 +2038,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1445 * @param[in] dm_pa physical address of IOC dma memory 2038 * @param[in] dm_pa physical address of IOC dma memory
1446 */ 2039 */
1447void 2040void
1448bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2041bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
1449{ 2042{
1450 /** 2043 /**
1451 * dma memory for firmware attribute 2044 * dma memory for firmware attribute
1452 */ 2045 */
1453 ioc->attr_dma.kva = dm_kva; 2046 ioc->attr_dma.kva = dm_kva;
1454 ioc->attr_dma.pa = dm_pa; 2047 ioc->attr_dma.pa = dm_pa;
1455 ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; 2048 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
1456} 2049}
1457 2050
1458/** 2051/**
@@ -1490,7 +2083,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
1490int 2083int
1491bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) 2084bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
1492{ 2085{
1493return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 2086 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1494} 2087}
1495 2088
1496/** 2089/**
@@ -1500,8 +2093,8 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1500void 2093void
1501bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 2094bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1502{ 2095{
1503 ioc->dbg_fwsave = dbg_fwsave; 2096 ioc->dbg_fwsave = dbg_fwsave;
1504 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 2097 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
1505} 2098}
1506 2099
1507u32 2100u32
@@ -1525,8 +2118,8 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
1525void 2118void
1526bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) 2119bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
1527{ 2120{
1528 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2121 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1529 int mc; 2122 int mc;
1530 2123
1531 for (mc = 0; mc < BFI_MC_MAX; mc++) 2124 for (mc = 0; mc < BFI_MC_MAX; mc++)
1532 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2125 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
@@ -1539,10 +2132,10 @@ void
1539bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 2132bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1540 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2133 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1541{ 2134{
1542 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2135 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1543 2136
1544 mod->mbhdlr[mc].cbfn = cbfn; 2137 mod->mbhdlr[mc].cbfn = cbfn;
1545 mod->mbhdlr[mc].cbarg = cbarg; 2138 mod->mbhdlr[mc].cbarg = cbarg;
1546} 2139}
1547 2140
1548/** 2141/**
@@ -1555,8 +2148,8 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1555void 2148void
1556bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) 2149bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1557{ 2150{
1558 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2151 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1559 u32 stat; 2152 u32 stat;
1560 2153
1561 /** 2154 /**
1562 * If a previous command is pending, queue new command 2155 * If a previous command is pending, queue new command
@@ -1587,9 +2180,9 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1587void 2180void
1588bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) 2181bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
1589{ 2182{
1590 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2183 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1591 struct bfi_mbmsg_s m; 2184 struct bfi_mbmsg_s m;
1592 int mc; 2185 int mc;
1593 2186
1594 bfa_ioc_msgget(ioc, &m); 2187 bfa_ioc_msgget(ioc, &m);
1595 2188
@@ -1621,16 +2214,14 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
1621 ioc->port_id = bfa_ioc_pcifn(ioc); 2214 ioc->port_id = bfa_ioc_pcifn(ioc);
1622} 2215}
1623 2216
1624#ifndef BFA_BIOS_BUILD
1625
1626/** 2217/**
1627 * return true if IOC is disabled 2218 * return true if IOC is disabled
1628 */ 2219 */
1629bfa_boolean_t 2220bfa_boolean_t
1630bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) 2221bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1631{ 2222{
1632 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) 2223 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
1633 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2224 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
1634} 2225}
1635 2226
1636/** 2227/**
@@ -1639,9 +2230,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1639bfa_boolean_t 2230bfa_boolean_t
1640bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) 2231bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1641{ 2232{
1642 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) 2233 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
1643 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) 2234 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
1644 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); 2235 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
1645} 2236}
1646 2237
1647#define bfa_ioc_state_disabled(__sm) \ 2238#define bfa_ioc_state_disabled(__sm) \
@@ -1659,8 +2250,8 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1659bfa_boolean_t 2250bfa_boolean_t
1660bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 2251bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1661{ 2252{
1662 u32 ioc_state; 2253 u32 ioc_state;
1663 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 2254 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1664 2255
1665 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2256 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
1666 return BFA_FALSE; 2257 return BFA_FALSE;
@@ -1669,16 +2260,18 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1669 if (!bfa_ioc_state_disabled(ioc_state)) 2260 if (!bfa_ioc_state_disabled(ioc_state))
1670 return BFA_FALSE; 2261 return BFA_FALSE;
1671 2262
1672 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); 2263 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
1673 if (!bfa_ioc_state_disabled(ioc_state)) 2264 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
1674 return BFA_FALSE; 2265 if (!bfa_ioc_state_disabled(ioc_state))
2266 return BFA_FALSE;
2267 }
1675 2268
1676 return BFA_TRUE; 2269 return BFA_TRUE;
1677} 2270}
1678 2271
1679/** 2272/**
1680 * Add to IOC heartbeat failure notification queue. To be used by common 2273 * Add to IOC heartbeat failure notification queue. To be used by common
1681 * modules such as 2274 * modules such as cee, port, diag.
1682 */ 2275 */
1683void 2276void
1684bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 2277bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
@@ -1692,7 +2285,7 @@ void
1692bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 2285bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1693 struct bfa_adapter_attr_s *ad_attr) 2286 struct bfa_adapter_attr_s *ad_attr)
1694{ 2287{
1695 struct bfi_ioc_attr_s *ioc_attr; 2288 struct bfi_ioc_attr_s *ioc_attr;
1696 2289
1697 ioc_attr = ioc->attr; 2290 ioc_attr = ioc->attr;
1698 2291
@@ -1719,7 +2312,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1719 ad_attr->prototype = 0; 2312 ad_attr->prototype = 0;
1720 2313
1721 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2314 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1722 ad_attr->mac = bfa_ioc_get_mac(ioc); 2315 ad_attr->mac = bfa_ioc_get_mac(ioc);
1723 2316
1724 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2317 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1725 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2318 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -1729,6 +2322,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1729 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2322 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1730 2323
1731 ad_attr->cna_capable = ioc->cna; 2324 ad_attr->cna_capable = ioc->cna;
2325 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
1732} 2326}
1733 2327
1734enum bfa_ioc_type_e 2328enum bfa_ioc_type_e
@@ -1782,7 +2376,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
1782{ 2376{
1783 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2377 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
1784 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 2378 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
1785 BFA_VERSION_LEN); 2379 BFA_VERSION_LEN);
1786} 2380}
1787 2381
1788void 2382void
@@ -1795,7 +2389,7 @@ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1795void 2389void
1796bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) 2390bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1797{ 2391{
1798 struct bfi_ioc_attr_s *ioc_attr; 2392 struct bfi_ioc_attr_s *ioc_attr;
1799 2393
1800 bfa_assert(model); 2394 bfa_assert(model);
1801 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2395 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
@@ -1805,14 +2399,48 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1805 /** 2399 /**
1806 * model name 2400 * model name
1807 */ 2401 */
1808 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2402 bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1809 BFA_MFG_NAME, ioc_attr->card_type); 2403 BFA_MFG_NAME, ioc_attr->card_type);
1810} 2404}
1811 2405
1812enum bfa_ioc_state 2406enum bfa_ioc_state
1813bfa_ioc_get_state(struct bfa_ioc_s *ioc) 2407bfa_ioc_get_state(struct bfa_ioc_s *ioc)
1814{ 2408{
1815 return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2409 enum bfa_iocpf_state iocpf_st;
2410 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2411
2412 if (ioc_st == BFA_IOC_ENABLING ||
2413 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2414
2415 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2416
2417 switch (iocpf_st) {
2418 case BFA_IOCPF_SEMWAIT:
2419 ioc_st = BFA_IOC_SEMWAIT;
2420 break;
2421
2422 case BFA_IOCPF_HWINIT:
2423 ioc_st = BFA_IOC_HWINIT;
2424 break;
2425
2426 case BFA_IOCPF_FWMISMATCH:
2427 ioc_st = BFA_IOC_FWMISMATCH;
2428 break;
2429
2430 case BFA_IOCPF_FAIL:
2431 ioc_st = BFA_IOC_FAIL;
2432 break;
2433
2434 case BFA_IOCPF_INITFAIL:
2435 ioc_st = BFA_IOC_INITFAIL;
2436 break;
2437
2438 default:
2439 break;
2440 }
2441 }
2442
2443 return ioc_st;
1816} 2444}
1817 2445
1818void 2446void
@@ -1833,7 +2461,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
1833} 2461}
1834 2462
1835/** 2463/**
1836 * bfa_wwn_public 2464 * hal_wwn_public
1837 */ 2465 */
1838wwn_t 2466wwn_t
1839bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) 2467bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
@@ -1857,10 +2485,10 @@ mac_t
1857bfa_ioc_get_mac(struct bfa_ioc_s *ioc) 2485bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
1858{ 2486{
1859 /* 2487 /*
1860 * Currently mfg mac is used as FCoE enode mac (not configured by PBC) 2488 * Check the IOC type and return the appropriate MAC
1861 */ 2489 */
1862 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) 2490 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1863 return bfa_ioc_get_mfg_mac(ioc); 2491 return ioc->attr->fcoe_mac;
1864 else 2492 else
1865 return ioc->attr->mac; 2493 return ioc->attr->mac;
1866} 2494}
@@ -1880,12 +2508,16 @@ bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
1880mac_t 2508mac_t
1881bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) 2509bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
1882{ 2510{
1883 mac_t mac; 2511 mac_t m;
1884 2512
1885 mac = ioc->attr->mfg_mac; 2513 m = ioc->attr->mfg_mac;
1886 mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); 2514 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2515 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2516 else
2517 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2518 bfa_ioc_pcifn(ioc));
1887 2519
1888 return mac; 2520 return m;
1889} 2521}
1890 2522
1891bfa_boolean_t 2523bfa_boolean_t
@@ -1895,46 +2527,12 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
1895} 2527}
1896 2528
1897/** 2529/**
1898 * Send AEN notification
1899 */
1900void
1901bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
1902{
1903 union bfa_aen_data_u aen_data;
1904 struct bfa_log_mod_s *logmod = ioc->logm;
1905 s32 inst_num = 0;
1906 enum bfa_ioc_type_e ioc_type;
1907
1908 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
1909
1910 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
1911 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
1912 ioc_type = bfa_ioc_get_type(ioc);
1913 switch (ioc_type) {
1914 case BFA_IOC_TYPE_FC:
1915 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1916 break;
1917 case BFA_IOC_TYPE_FCoE:
1918 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1919 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1920 break;
1921 case BFA_IOC_TYPE_LL:
1922 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1923 break;
1924 default:
1925 bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
1926 break;
1927 }
1928 aen_data.ioc.ioc_type = ioc_type;
1929}
1930
1931/**
1932 * Retrieve saved firmware trace from a prior IOC failure. 2530 * Retrieve saved firmware trace from a prior IOC failure.
1933 */ 2531 */
1934bfa_status_t 2532bfa_status_t
1935bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) 2533bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1936{ 2534{
1937 int tlen; 2535 int tlen;
1938 2536
1939 if (ioc->dbg_fwsave_len == 0) 2537 if (ioc->dbg_fwsave_len == 0)
1940 return BFA_STATUS_ENOFSAVE; 2538 return BFA_STATUS_ENOFSAVE;
@@ -1963,57 +2561,145 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
1963bfa_status_t 2561bfa_status_t
1964bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) 2562bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1965{ 2563{
1966 u32 pgnum; 2564 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
1967 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); 2565 int tlen;
1968 int i, tlen; 2566 bfa_status_t status;
1969 u32 *tbuf = trcdata, r32;
1970 2567
1971 bfa_trc(ioc, *trclen); 2568 bfa_trc(ioc, *trclen);
1972 2569
1973 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1974 loff = bfa_ioc_smem_pgoff(ioc, loff);
1975
1976 /*
1977 * Hold semaphore to serialize pll init and fwtrc.
1978 */
1979 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
1980 return BFA_STATUS_FAILED;
1981
1982 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1983
1984 tlen = *trclen; 2570 tlen = *trclen;
1985 if (tlen > BFA_DBG_FWTRC_LEN) 2571 if (tlen > BFA_DBG_FWTRC_LEN)
1986 tlen = BFA_DBG_FWTRC_LEN; 2572 tlen = BFA_DBG_FWTRC_LEN;
1987 tlen /= sizeof(u32);
1988 2573
1989 bfa_trc(ioc, tlen); 2574 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2575 *trclen = tlen;
2576 return status;
2577}
1990 2578
1991 for (i = 0; i < tlen; i++) { 2579static void
1992 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 2580bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
1993 tbuf[i] = bfa_os_ntohl(r32); 2581{
1994 loff += sizeof(u32); 2582 struct bfa_mbox_cmd_s cmd;
2583 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
1995 2584
1996 /** 2585 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
1997 * handle page offset wrap around 2586 bfa_ioc_portid(ioc));
1998 */ 2587 req->ioc_class = ioc->ioc_mc;
1999 loff = PSS_SMEM_PGOFF(loff); 2588 bfa_ioc_mbox_queue(ioc, &cmd);
2000 if (loff == 0) { 2589}
2001 pgnum++; 2590
2002 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 2591static void
2003 } 2592bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2593{
2594 u32 fwsync_iter = 1000;
2595
2596 bfa_ioc_send_fwsync(ioc);
2597
2598 /**
2599 * After sending a fw sync mbox command wait for it to
2600 * take effect. We will not wait for a response because
2601 * 1. fw_sync mbox cmd doesn't have a response.
2602 * 2. Even if we implement that, interrupts might not
2603 * be enabled when we call this function.
2604 * So, just keep checking if any mbox cmd is pending, and
2605 * after waiting for a reasonable amount of time, go ahead.
2606 * It is possible that fw has crashed and the mbox command
2607 * is never acknowledged.
2608 */
2609 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2610 fwsync_iter--;
2611}
2612
2613/**
2614 * Dump firmware smem
2615 */
2616bfa_status_t
2617bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2618 u32 *offset, int *buflen)
2619{
2620 u32 loff;
2621 int dlen;
2622 bfa_status_t status;
2623 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2624
2625 if (*offset >= smem_len) {
2626 *offset = *buflen = 0;
2627 return BFA_STATUS_EINVAL;
2004 } 2628 }
2005 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2006 bfa_ioc_smem_pgnum(ioc, 0));
2007 2629
2008 /* 2630 loff = *offset;
2009 * release semaphore. 2631 dlen = *buflen;
2632
2633 /**
2634 * First smem read, sync smem before proceeding
2635 * No need to sync before reading every chunk.
2010 */ 2636 */
2011 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 2637 if (loff == 0)
2638 bfa_ioc_fwsync(ioc);
2012 2639
2013 bfa_trc(ioc, pgnum); 2640 if ((loff + dlen) >= smem_len)
2641 dlen = smem_len - loff;
2014 2642
2015 *trclen = tlen * sizeof(u32); 2643 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2016 return BFA_STATUS_OK; 2644
2645 if (status != BFA_STATUS_OK) {
2646 *offset = *buflen = 0;
2647 return status;
2648 }
2649
2650 *offset += dlen;
2651
2652 if (*offset >= smem_len)
2653 *offset = 0;
2654
2655 *buflen = dlen;
2656
2657 return status;
2658}
2659
2660/**
2661 * Firmware statistics
2662 */
2663bfa_status_t
2664bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2665{
2666 u32 loff = BFI_IOC_FWSTATS_OFF + \
2667 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2668 int tlen;
2669 bfa_status_t status;
2670
2671 if (ioc->stats_busy) {
2672 bfa_trc(ioc, ioc->stats_busy);
2673 return BFA_STATUS_DEVBUSY;
2674 }
2675 ioc->stats_busy = BFA_TRUE;
2676
2677 tlen = sizeof(struct bfa_fw_stats_s);
2678 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2679
2680 ioc->stats_busy = BFA_FALSE;
2681 return status;
2682}
2683
2684bfa_status_t
2685bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2686{
2687 u32 loff = BFI_IOC_FWSTATS_OFF + \
2688 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2689 int tlen;
2690 bfa_status_t status;
2691
2692 if (ioc->stats_busy) {
2693 bfa_trc(ioc, ioc->stats_busy);
2694 return BFA_STATUS_DEVBUSY;
2695 }
2696 ioc->stats_busy = BFA_TRUE;
2697
2698 tlen = sizeof(struct bfa_fw_stats_s);
2699 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2700
2701 ioc->stats_busy = BFA_FALSE;
2702 return status;
2017} 2703}
2018 2704
2019/** 2705/**
@@ -2022,7 +2708,7 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2022static void 2708static void
2023bfa_ioc_debug_save(struct bfa_ioc_s *ioc) 2709bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2024{ 2710{
2025 int tlen; 2711 int tlen;
2026 2712
2027 if (ioc->dbg_fwsave_len) { 2713 if (ioc->dbg_fwsave_len) {
2028 tlen = ioc->dbg_fwsave_len; 2714 tlen = ioc->dbg_fwsave_len;
@@ -2050,11 +2736,135 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2050{ 2736{
2051 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2737 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2052 return; 2738 return;
2739}
2740
2741/**
2742 * hal_iocpf_pvt BFA IOC PF private functions
2743 */
2053 2744
2054 if (ioc->attr->nwwn == 0) 2745static void
2055 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); 2746bfa_iocpf_enable(struct bfa_ioc_s *ioc)
2056 if (ioc->attr->pwwn == 0) 2747{
2057 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); 2748 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2058} 2749}
2059 2750
2060#endif 2751static void
2752bfa_iocpf_disable(struct bfa_ioc_s *ioc)
2753{
2754 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2755}
2756
2757static void
2758bfa_iocpf_fail(struct bfa_ioc_s *ioc)
2759{
2760 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2761}
2762
2763static void
2764bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
2765{
2766 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2767}
2768
2769static void
2770bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
2771{
2772 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2773}
2774
2775static void
2776bfa_iocpf_stop(struct bfa_ioc_s *ioc)
2777{
2778 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2779}
2780
2781static void
2782bfa_iocpf_timeout(void *ioc_arg)
2783{
2784 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2785
2786 bfa_trc(ioc, 0);
2787 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2788}
2789
2790static void
2791bfa_iocpf_sem_timeout(void *ioc_arg)
2792{
2793 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2794
2795 bfa_ioc_hw_sem_get(ioc);
2796}
2797
2798/**
2799 * bfa timer function
2800 */
2801void
2802bfa_timer_init(struct bfa_timer_mod_s *mod)
2803{
2804 INIT_LIST_HEAD(&mod->timer_q);
2805}
2806
2807void
2808bfa_timer_beat(struct bfa_timer_mod_s *mod)
2809{
2810 struct list_head *qh = &mod->timer_q;
2811 struct list_head *qe, *qe_next;
2812 struct bfa_timer_s *elem;
2813 struct list_head timedout_q;
2814
2815 INIT_LIST_HEAD(&timedout_q);
2816
2817 qe = bfa_q_next(qh);
2818
2819 while (qe != qh) {
2820 qe_next = bfa_q_next(qe);
2821
2822 elem = (struct bfa_timer_s *) qe;
2823 if (elem->timeout <= BFA_TIMER_FREQ) {
2824 elem->timeout = 0;
2825 list_del(&elem->qe);
2826 list_add_tail(&elem->qe, &timedout_q);
2827 } else {
2828 elem->timeout -= BFA_TIMER_FREQ;
2829 }
2830
2831 qe = qe_next; /* go to next elem */
2832 }
2833
2834 /*
2835 * Pop all the timeout entries
2836 */
2837 while (!list_empty(&timedout_q)) {
2838 bfa_q_deq(&timedout_q, &elem);
2839 elem->timercb(elem->arg);
2840 }
2841}
2842
2843/**
2844 * Should be called with lock protection
2845 */
2846void
2847bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2848 void (*timercb) (void *), void *arg, unsigned int timeout)
2849{
2850
2851 bfa_assert(timercb != NULL);
2852 bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
2853
2854 timer->timeout = timeout;
2855 timer->timercb = timercb;
2856 timer->arg = arg;
2857
2858 list_add_tail(&timer->qe, &mod->timer_q);
2859}
2860
2861/**
2862 * Should be called with lock protection
2863 */
2864void
2865bfa_timer_stop(struct bfa_timer_s *timer)
2866{
2867 bfa_assert(!list_empty(&timer->qe));
2868
2869 list_del(&timer->qe);
2870}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index cae05b251c99..288c5801aace 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,18 +18,74 @@
18#ifndef __BFA_IOC_H__ 18#ifndef __BFA_IOC_H__
19#define __BFA_IOC_H__ 19#define __BFA_IOC_H__
20 20
21#include <cs/bfa_sm.h> 21#include "bfa_os_inc.h"
22#include <bfi/bfi.h> 22#include "bfa_cs.h"
23#include <bfi/bfi_ioc.h> 23#include "bfi.h"
24#include <bfi/bfi_boot.h> 24
25#include <bfa_timer.h> 25/**
26 * BFA timer declarations
27 */
28typedef void (*bfa_timer_cbfn_t)(void *);
29
30/**
31 * BFA timer data structure
32 */
33struct bfa_timer_s {
34 struct list_head qe;
35 bfa_timer_cbfn_t timercb;
36 void *arg;
37 int timeout; /**< in millisecs. */
38};
39
40/**
41 * Timer module structure
42 */
43struct bfa_timer_mod_s {
44 struct list_head timer_q;
45};
46
47#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
48
49void bfa_timer_beat(struct bfa_timer_mod_s *mod);
50void bfa_timer_init(struct bfa_timer_mod_s *mod);
51void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
52 bfa_timer_cbfn_t timercb, void *arg,
53 unsigned int timeout);
54void bfa_timer_stop(struct bfa_timer_s *timer);
55
56/**
57 * Generic Scatter Gather Element used by driver
58 */
59struct bfa_sge_s {
60 u32 sg_len;
61 void *sg_addr;
62};
63
64#define bfa_sge_word_swap(__sge) do { \
65 ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \
66 ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \
67 ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \
68} while (0)
69
70#define bfa_swap_words(_x) ( \
71 ((_x) << 32) | ((_x) >> 32))
72
73#ifdef __BIGENDIAN
74#define bfa_sge_to_be(_x)
75#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x)
76#define bfa_sgaddr_le(_x) bfa_swap_words(_x)
77#else
78#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x)
79#define bfa_sge_to_le(_x)
80#define bfa_sgaddr_le(_x) (_x)
81#endif
26 82
27/** 83/**
28 * PCI device information required by IOC 84 * PCI device information required by IOC
29 */ 85 */
30struct bfa_pcidev_s { 86struct bfa_pcidev_s {
31 int pci_slot; 87 int pci_slot;
32 u8 pci_func; 88 u8 pci_func;
33 u16 device_id; 89 u16 device_id;
34 bfa_os_addr_t pci_bar_kva; 90 bfa_os_addr_t pci_bar_kva;
35}; 91};
@@ -39,13 +95,18 @@ struct bfa_pcidev_s {
39 * Address 95 * Address
40 */ 96 */
41struct bfa_dma_s { 97struct bfa_dma_s {
42 void *kva; /*! Kernel virtual address */ 98 void *kva; /* ! Kernel virtual address */
43 u64 pa; /*! Physical address */ 99 u64 pa; /* ! Physical address */
44}; 100};
45 101
46#define BFA_DMA_ALIGN_SZ 256 102#define BFA_DMA_ALIGN_SZ 256
47#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) 103#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
48 104
105/**
106 * smem size for Crossbow and Catapult
107 */
108#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
109#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
49 110
50 111
51#define bfa_dma_addr_set(dma_addr, pa) \ 112#define bfa_dma_addr_set(dma_addr, pa) \
@@ -101,7 +162,7 @@ struct bfa_ioc_regs_s {
101 * IOC Mailbox structures 162 * IOC Mailbox structures
102 */ 163 */
103struct bfa_mbox_cmd_s { 164struct bfa_mbox_cmd_s {
104 struct list_head qe; 165 struct list_head qe;
105 u32 msg[BFI_IOC_MSGSZ]; 166 u32 msg[BFI_IOC_MSGSZ];
106}; 167};
107 168
@@ -110,8 +171,8 @@ struct bfa_mbox_cmd_s {
110 */ 171 */
111typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); 172typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
112struct bfa_ioc_mbox_mod_s { 173struct bfa_ioc_mbox_mod_s {
113 struct list_head cmd_q; /* pending mbox queue */ 174 struct list_head cmd_q; /* pending mbox queue */
114 int nmclass; /* number of handlers */ 175 int nmclass; /* number of handlers */
115 struct { 176 struct {
116 bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */ 177 bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
117 void *cbarg; 178 void *cbarg;
@@ -149,49 +210,54 @@ struct bfa_ioc_hbfail_notify_s {
149 (__notify)->cbarg = (__cbarg); \ 210 (__notify)->cbarg = (__cbarg); \
150} while (0) 211} while (0)
151 212
213struct bfa_iocpf_s {
214 bfa_fsm_t fsm;
215 struct bfa_ioc_s *ioc;
216 u32 retry_count;
217 bfa_boolean_t auto_recover;
218};
219
152struct bfa_ioc_s { 220struct bfa_ioc_s {
153 bfa_fsm_t fsm; 221 bfa_fsm_t fsm;
154 struct bfa_s *bfa; 222 struct bfa_s *bfa;
155 struct bfa_pcidev_s pcidev; 223 struct bfa_pcidev_s pcidev;
156 struct bfa_timer_mod_s *timer_mod; 224 struct bfa_timer_mod_s *timer_mod;
157 struct bfa_timer_s ioc_timer; 225 struct bfa_timer_s ioc_timer;
158 struct bfa_timer_s sem_timer; 226 struct bfa_timer_s sem_timer;
227 struct bfa_timer_s hb_timer;
159 u32 hb_count; 228 u32 hb_count;
160 u32 retry_count;
161 struct list_head hb_notify_q; 229 struct list_head hb_notify_q;
162 void *dbg_fwsave; 230 void *dbg_fwsave;
163 int dbg_fwsave_len; 231 int dbg_fwsave_len;
164 bfa_boolean_t dbg_fwsave_once; 232 bfa_boolean_t dbg_fwsave_once;
165 enum bfi_mclass ioc_mc; 233 enum bfi_mclass ioc_mc;
166 struct bfa_ioc_regs_s ioc_regs; 234 struct bfa_ioc_regs_s ioc_regs;
167 struct bfa_trc_mod_s *trcmod; 235 struct bfa_trc_mod_s *trcmod;
168 struct bfa_aen_s *aen;
169 struct bfa_log_mod_s *logm;
170 struct bfa_ioc_drv_stats_s stats; 236 struct bfa_ioc_drv_stats_s stats;
171 bfa_boolean_t auto_recover;
172 bfa_boolean_t fcmode; 237 bfa_boolean_t fcmode;
173 bfa_boolean_t ctdev; 238 bfa_boolean_t ctdev;
174 bfa_boolean_t cna; 239 bfa_boolean_t cna;
175 bfa_boolean_t pllinit; 240 bfa_boolean_t pllinit;
241 bfa_boolean_t stats_busy; /* outstanding stats */
176 u8 port_id; 242 u8 port_id;
177
178 struct bfa_dma_s attr_dma; 243 struct bfa_dma_s attr_dma;
179 struct bfi_ioc_attr_s *attr; 244 struct bfi_ioc_attr_s *attr;
180 struct bfa_ioc_cbfn_s *cbfn; 245 struct bfa_ioc_cbfn_s *cbfn;
181 struct bfa_ioc_mbox_mod_s mbox_mod; 246 struct bfa_ioc_mbox_mod_s mbox_mod;
182 struct bfa_ioc_hwif_s *ioc_hwif; 247 struct bfa_ioc_hwif_s *ioc_hwif;
248 struct bfa_iocpf_s iocpf;
183}; 249};
184 250
185struct bfa_ioc_hwif_s { 251struct bfa_ioc_hwif_s {
186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); 252 bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 253 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 254 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
189 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 255 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
190 void (*ioc_map_port) (struct bfa_ioc_s *ioc); 256 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
191 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, 257 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
192 bfa_boolean_t msix); 258 bfa_boolean_t msix);
193 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); 259 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
194 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 260 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
195}; 261};
196 262
197#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 263#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -206,18 +272,19 @@ struct bfa_ioc_hwif_s {
206#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 272#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
207#define bfa_ioc_speed_sup(__ioc) \ 273#define bfa_ioc_speed_sup(__ioc) \
208 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 274 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
209#define bfa_ioc_get_nports(__ioc) \ 275#define bfa_ioc_get_nports(__ioc) \
210 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) 276 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
211 277
212#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 278#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
213#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 279#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
214#define BFA_IOC_FWIMG_TYPE(__ioc) \ 280#define BFA_IOC_FWIMG_TYPE(__ioc) \
215 (((__ioc)->ctdev) ? \ 281 (((__ioc)->ctdev) ? \
216 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ 282 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
217 BFI_IMAGE_CB_FC) 283 BFI_IMAGE_CB_FC)
218 284#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
219#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 285 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
220#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 286#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
287#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
221#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 288#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
222 289
223/** 290/**
@@ -235,18 +302,28 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
235/** 302/**
236 * IOC interfaces 303 * IOC interfaces
237 */ 304 */
238#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc)) 305
239#define bfa_ioc_isr_mode_set(__ioc, __msix) \ 306#define bfa_ioc_pll_init_asic(__ioc) \
307 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
308 (__ioc)->fcmode))
309
310bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
311bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
312bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
313bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
314
315#define bfa_ioc_isr_mode_set(__ioc, __msix) \
240 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 316 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
241#define bfa_ioc_ownership_reset(__ioc) \ 317#define bfa_ioc_ownership_reset(__ioc) \
242 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) 318 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
243 319
320
244void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); 321void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
245void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); 322void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
323
246void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 324void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
247 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, 325 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
248 struct bfa_trc_mod_s *trcmod, 326void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
249 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
250void bfa_ioc_detach(struct bfa_ioc_s *ioc); 327void bfa_ioc_detach(struct bfa_ioc_s *ioc);
251void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 328void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
252 enum bfi_mclass mc); 329 enum bfi_mclass mc);
@@ -256,21 +333,22 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);
256void bfa_ioc_disable(struct bfa_ioc_s *ioc); 333void bfa_ioc_disable(struct bfa_ioc_s *ioc);
257bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); 334bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
258 335
259void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); 336void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
337 u32 boot_param);
260void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 338void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
261void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 339void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
262bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 340bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
341bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
263bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 342bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
264bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 343bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
265bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 344bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
266void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
267enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); 345enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
268void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); 346void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
269void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); 347void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
270void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); 348void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
271void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); 349void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
272void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, 350void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
273 char *manufacturer); 351 char *manufacturer);
274void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev); 352void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
275enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); 353enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
276 354
@@ -284,6 +362,8 @@ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
284void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); 362void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
285bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 363bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
286 int *trclen); 364 int *trclen);
365bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
366 u32 *offset, int *buflen);
287u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); 367u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
288u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr); 368u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
289void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); 369void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
@@ -297,7 +377,8 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
297 struct bfi_ioc_image_hdr_s *fwhdr); 377 struct bfi_ioc_image_hdr_s *fwhdr);
298bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 378bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
299 struct bfi_ioc_image_hdr_s *fwhdr); 379 struct bfi_ioc_image_hdr_s *fwhdr);
300void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); 380bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
381bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
301 382
302/* 383/*
303 * bfa mfg wwn API functions 384 * bfa mfg wwn API functions
@@ -310,5 +391,68 @@ wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
310mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); 391mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
311u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc); 392u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
312 393
313#endif /* __BFA_IOC_H__ */ 394/*
395 * F/W Image Size & Chunk
396 */
397extern u32 bfi_image_ct_fc_size;
398extern u32 bfi_image_ct_cna_size;
399extern u32 bfi_image_cb_fc_size;
400extern u32 *bfi_image_ct_fc;
401extern u32 *bfi_image_ct_cna;
402extern u32 *bfi_image_cb_fc;
403
404static inline u32 *
405bfi_image_ct_fc_get_chunk(u32 off)
406{ return (u32 *)(bfi_image_ct_fc + off); }
407
408static inline u32 *
409bfi_image_ct_cna_get_chunk(u32 off)
410{ return (u32 *)(bfi_image_ct_cna + off); }
314 411
412static inline u32 *
413bfi_image_cb_fc_get_chunk(u32 off)
414{ return (u32 *)(bfi_image_cb_fc + off); }
415
416static inline u32*
417bfa_cb_image_get_chunk(int type, u32 off)
418{
419 switch (type) {
420 case BFI_IMAGE_CT_FC:
421 return bfi_image_ct_fc_get_chunk(off); break;
422 case BFI_IMAGE_CT_CNA:
423 return bfi_image_ct_cna_get_chunk(off); break;
424 case BFI_IMAGE_CB_FC:
425 return bfi_image_cb_fc_get_chunk(off); break;
426 default: return 0;
427 }
428}
429
430static inline u32
431bfa_cb_image_get_size(int type)
432{
433 switch (type) {
434 case BFI_IMAGE_CT_FC:
435 return bfi_image_ct_fc_size; break;
436 case BFI_IMAGE_CT_CNA:
437 return bfi_image_ct_cna_size; break;
438 case BFI_IMAGE_CB_FC:
439 return bfi_image_cb_fc_size; break;
440 default: return 0;
441 }
442}
443
444/**
445 * CNA TRCMOD declaration
446 */
447/*
448 * !!! Only append to the enums defined here to avoid any versioning
449 * !!! needed between trace utility and driver version
450 */
451enum {
452 BFA_TRC_CNA_PORT = 1,
453 BFA_TRC_CNA_IOC = 2,
454 BFA_TRC_CNA_IOC_CB = 3,
455 BFA_TRC_CNA_IOC_CT = 4,
456};
457
458#endif /* __BFA_IOC_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 324bdde7ea2e..d7ac864d8539 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,22 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_cbreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_cbreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27 21
28BFA_TRC_FILE(CNA, IOC_CB); 22BFA_TRC_FILE(CNA, IOC_CB);
29 23
30/* 24/*
31 * forward declarations 25 * forward declarations
32 */ 26 */
33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); 27static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); 28static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
36static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); 29static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
@@ -95,6 +88,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
95 * Host <-> LPU mailbox command/status registers 88 * Host <-> LPU mailbox command/status registers
96 */ 89 */
97static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { 90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
91
98 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, 92 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
99 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } 93 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
100}; 94};
@@ -154,6 +148,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
154/** 148/**
155 * Initialize IOC to port mapping. 149 * Initialize IOC to port mapping.
156 */ 150 */
151
157static void 152static void
158bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
159{ 154{
@@ -161,6 +156,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
161 * For crossbow, port id is same as pci function. 156 * For crossbow, port id is same as pci function.
162 */ 157 */
163 ioc->port_id = bfa_ioc_pcifn(ioc); 158 ioc->port_id = bfa_ioc_pcifn(ioc);
159
164 bfa_trc(ioc, ioc->port_id); 160 bfa_trc(ioc, ioc->port_id);
165} 161}
166 162
@@ -172,87 +168,69 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
172{ 168{
173} 169}
174 170
175static bfa_status_t 171/**
176bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) 172 * Cleanup hw semaphore and usecnt registers
173 */
174static void
175bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
177{ 176{
178 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
179 u32 pll_sclk, pll_fclk;
180 177
181 /* 178 /*
182 * Hold semaphore so that nobody can access the chip during init. 179 * Read the hw sem reg to make sure that it is locked
180 * before we clear it. If it is not locked, writing 1
181 * will lock it instead of clearing it.
183 */ 182 */
184 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 183 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
184 bfa_ioc_hw_sem_release(ioc);
185}
186
187
188
189bfa_status_t
190bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
191{
192 u32 pll_sclk, pll_fclk;
185 193
186 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | 194 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
187 __APP_PLL_212_P0_1(3U) | 195 __APP_PLL_212_P0_1(3U) |
188 __APP_PLL_212_JITLMT0_1(3U) | 196 __APP_PLL_212_JITLMT0_1(3U) |
189 __APP_PLL_212_CNTLMT0_1(3U); 197 __APP_PLL_212_CNTLMT0_1(3U);
190 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | 198 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
191 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 199 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
192 __APP_PLL_400_JITLMT0_1(3U) | 200 __APP_PLL_400_JITLMT0_1(3U) |
193 __APP_PLL_400_CNTLMT0_1(3U); 201 __APP_PLL_400_CNTLMT0_1(3U);
194
195 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 202 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
196 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 203 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
197
198 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 204 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
199 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 205 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
200 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 206 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
201 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 207 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
202 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 208 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
203 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 209 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
204 210 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
205 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 211 __APP_PLL_212_LOGIC_SOFT_RESET);
206 __APP_PLL_212_LOGIC_SOFT_RESET); 212 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
207 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 213 __APP_PLL_212_BYPASS |
208 __APP_PLL_212_BYPASS | 214 __APP_PLL_212_LOGIC_SOFT_RESET);
209 __APP_PLL_212_LOGIC_SOFT_RESET); 215 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
210 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 216 __APP_PLL_400_LOGIC_SOFT_RESET);
211 __APP_PLL_400_LOGIC_SOFT_RESET); 217 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
212 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 218 __APP_PLL_400_BYPASS |
213 __APP_PLL_400_BYPASS | 219 __APP_PLL_400_LOGIC_SOFT_RESET);
214 __APP_PLL_400_LOGIC_SOFT_RESET);
215 bfa_os_udelay(2); 220 bfa_os_udelay(2);
216 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 221 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
217 __APP_PLL_212_LOGIC_SOFT_RESET); 222 __APP_PLL_212_LOGIC_SOFT_RESET);
218 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 223 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
219 __APP_PLL_400_LOGIC_SOFT_RESET); 224 __APP_PLL_400_LOGIC_SOFT_RESET);
220 225 bfa_reg_write(rb + APP_PLL_212_CTL_REG,
221 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 226 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
222 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); 227 bfa_reg_write(rb + APP_PLL_400_CTL_REG,
223 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 228 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
224 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
225
226 /**
227 * Wait for PLLs to lock.
228 */
229 bfa_os_udelay(2000); 229 bfa_os_udelay(2000);
230 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 230 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
231 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 231 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
232 232 bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
233 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); 233 bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
234 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
235
236 /*
237 * release semaphore.
238 */
239 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
240 234
241 return BFA_STATUS_OK; 235 return BFA_STATUS_OK;
242} 236}
243
244/**
245 * Cleanup hw semaphore and usecnt registers
246 */
247static void
248bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
249{
250
251 /*
252 * Read the hw sem reg to make sure that it is locked
253 * before we clear it. If it is not locked, writing 1
254 * will lock it instead of clearing it.
255 */
256 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
257 bfa_ioc_hw_sem_release(ioc);
258}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 68f027da001e..f21b82c5f64c 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,22 +15,15 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <bfa.h> 18#include "bfa_ioc.h"
19#include <bfa_ioc.h> 19#include "bfi_ctreg.h"
20#include <bfa_fwimg_priv.h> 20#include "bfa_defs.h"
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27 21
28BFA_TRC_FILE(CNA, IOC_CT); 22BFA_TRC_FILE(CNA, IOC_CT);
29 23
30/* 24/*
31 * forward declarations 25 * forward declarations
32 */ 26 */
33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); 27static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); 28static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
36static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); 29static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
@@ -78,7 +71,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
78 /** 71 /**
79 * If bios boot (flash based) -- do not increment usage count 72 * If bios boot (flash based) -- do not increment usage count
80 */ 73 */
81 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
75 BFA_IOC_FWIMG_MINSZ)
82 return BFA_TRUE; 76 return BFA_TRUE;
83 77
84 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -136,7 +130,8 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
136 /** 130 /**
137 * If bios boot (flash based) -- do not decrement usage count 131 * If bios boot (flash based) -- do not decrement usage count
138 */ 132 */
139 if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
134 BFA_IOC_FWIMG_MINSZ)
140 return; 135 return;
141 136
142 /** 137 /**
@@ -308,16 +303,47 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
308 bfa_reg_write(rb + FNC_PERS_REG, r32); 303 bfa_reg_write(rb + FNC_PERS_REG, r32);
309} 304}
310 305
311static bfa_status_t 306/**
312bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) 307 * Cleanup hw semaphore and usecnt registers
308 */
309static void
310bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
313{ 311{
314 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 312
315 u32 pll_sclk, pll_fclk, r32; 313 if (ioc->cna) {
314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
315 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
317 }
316 318
317 /* 319 /*
318 * Hold semaphore so that nobody can access the chip during init. 320 * Read the hw sem reg to make sure that it is locked
321 * before we clear it. If it is not locked, writing 1
322 * will lock it instead of clearing it.
319 */ 323 */
320 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 324 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
325 bfa_ioc_hw_sem_release(ioc);
326}
327
328
329
330/*
331 * Check the firmware state to know if pll_init has been completed already
332 */
333bfa_boolean_t
334bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
335{
336 if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
337 (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
338 return BFA_TRUE;
339
340 return BFA_FALSE;
341}
342
343bfa_status_t
344bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
345{
346 u32 pll_sclk, pll_fclk, r32;
321 347
322 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | 348 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
323 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | 349 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
@@ -327,70 +353,50 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
327 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | 353 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
328 __APP_PLL_425_JITLMT0_1(3U) | 354 __APP_PLL_425_JITLMT0_1(3U) |
329 __APP_PLL_425_CNTLMT0_1(1U); 355 __APP_PLL_425_CNTLMT0_1(1U);
330 356 if (fcmode) {
331 /**
332 * For catapult, choose operational mode FC/FCoE
333 */
334 if (ioc->fcmode) {
335 bfa_reg_write((rb + OP_MODE), 0); 357 bfa_reg_write((rb + OP_MODE), 0);
336 bfa_reg_write((rb + ETH_MAC_SER_REG), 358 bfa_reg_write((rb + ETH_MAC_SER_REG),
337 __APP_EMS_CMLCKSEL | 359 __APP_EMS_CMLCKSEL |
338 __APP_EMS_REFCKBUFEN2 | 360 __APP_EMS_REFCKBUFEN2 |
339 __APP_EMS_CHANNEL_SEL); 361 __APP_EMS_CHANNEL_SEL);
340 } else { 362 } else {
341 ioc->pllinit = BFA_TRUE;
342 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 363 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
343 bfa_reg_write((rb + ETH_MAC_SER_REG), 364 bfa_reg_write((rb + ETH_MAC_SER_REG),
344 __APP_EMS_REFCKBUFEN1); 365 __APP_EMS_REFCKBUFEN1);
345 } 366 }
346
347 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 367 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
348 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 368 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
349
350 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 369 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
351 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 370 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
352 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 371 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
353 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 372 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
354 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 373 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
355 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 374 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
356 375 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
357 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
358 __APP_PLL_312_LOGIC_SOFT_RESET); 376 __APP_PLL_312_LOGIC_SOFT_RESET);
359 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 377 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
360 __APP_PLL_425_LOGIC_SOFT_RESET); 378 __APP_PLL_425_LOGIC_SOFT_RESET);
361 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | 379 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
362 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); 380 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
363 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 381 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
364 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); 382 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
365
366 /**
367 * Wait for PLLs to lock.
368 */
369 bfa_reg_read(rb + HOSTFN0_INT_MSK); 383 bfa_reg_read(rb + HOSTFN0_INT_MSK);
370 bfa_os_udelay(2000); 384 bfa_os_udelay(2000);
371 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 385 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
372 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 386 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
373 387 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
374 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
375 __APP_PLL_312_ENABLE); 388 __APP_PLL_312_ENABLE);
376 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 389 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
377 __APP_PLL_425_ENABLE); 390 __APP_PLL_425_ENABLE);
378 391 if (!fcmode) {
379 /**
380 * PSS memory reset is asserted at power-on-reset. Need to clear
381 * this before running EDRAM BISTR
382 */
383 if (ioc->cna) {
384 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); 392 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
385 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); 393 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
386 } 394 }
387
388 r32 = bfa_reg_read((rb + PSS_CTL_REG)); 395 r32 = bfa_reg_read((rb + PSS_CTL_REG));
389 r32 &= ~__PSS_LMEM_RESET; 396 r32 &= ~__PSS_LMEM_RESET;
390 bfa_reg_write((rb + PSS_CTL_REG), r32); 397 bfa_reg_write((rb + PSS_CTL_REG), r32);
391 bfa_os_udelay(1000); 398 bfa_os_udelay(1000);
392 399 if (!fcmode) {
393 if (ioc->cna) {
394 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); 400 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
395 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); 401 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
396 } 402 }
@@ -398,39 +404,6 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
398 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 404 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
399 bfa_os_udelay(1000); 405 bfa_os_udelay(1000);
400 r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 406 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
401 bfa_trc(ioc, r32);
402
403 /**
404 * Clear BISTR
405 */
406 bfa_reg_write((rb + MBIST_CTL_REG), 0); 407 bfa_reg_write((rb + MBIST_CTL_REG), 0);
407
408 /*
409 * release semaphore.
410 */
411 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
412
413 return BFA_STATUS_OK; 408 return BFA_STATUS_OK;
414} 409}
415
416/**
417 * Cleanup hw semaphore and usecnt registers
418 */
419static void
420bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
421{
422
423 if (ioc->cna) {
424 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
425 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
426 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
427 }
428
429 /*
430 * Read the hw sem reg to make sure that it is locked
431 * before we clear it. If it is not locked, writing 1
432 * will lock it instead of clearing it.
433 */
434 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
435 bfa_ioc_hw_sem_release(ioc);
436}
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
deleted file mode 100644
index 90820be99864..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ /dev/null
@@ -1,927 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_debug.h>
19#include <bfa_priv.h>
20#include <log/bfa_log_hal.h>
21#include <bfi/bfi_boot.h>
22#include <bfi/bfi_cbreg.h>
23#include <aen/bfa_aen_ioc.h>
24#include <defs/bfa_defs_iocfc.h>
25#include <defs/bfa_defs_pci.h>
26#include "bfa_callback_priv.h"
27#include "bfad_drv.h"
28
29BFA_TRC_FILE(HAL, IOCFC);
30
31/**
32 * IOC local definitions
33 */
34#define BFA_IOCFC_TOV 5000 /* msecs */
35
36enum {
37 BFA_IOCFC_ACT_NONE = 0,
38 BFA_IOCFC_ACT_INIT = 1,
39 BFA_IOCFC_ACT_STOP = 2,
40 BFA_IOCFC_ACT_DISABLE = 3,
41};
42
43/*
44 * forward declarations
45 */
46static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
47static void bfa_iocfc_disable_cbfn(void *bfa_arg);
48static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
49static void bfa_iocfc_reset_cbfn(void *bfa_arg);
50static void bfa_iocfc_stats_clear(void *bfa_arg);
51static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
52 struct bfa_fw_stats_s *s);
53static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
54static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
55static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
56static void bfa_iocfc_stats_timeout(void *bfa_arg);
57
58static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
59
60/**
61 * bfa_ioc_pvt BFA IOC private functions
62 */
63
64static void
65bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
66{
67 int i, per_reqq_sz, per_rspq_sz;
68
69 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
70 BFA_DMA_ALIGN_SZ);
71 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
72 BFA_DMA_ALIGN_SZ);
73
74 /*
75 * Calculate CQ size
76 */
77 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
78 *dm_len = *dm_len + per_reqq_sz;
79 *dm_len = *dm_len + per_rspq_sz;
80 }
81
82 /*
83 * Calculate Shadow CI/PI size
84 */
85 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
86 *dm_len += (2 * BFA_CACHELINE_SZ);
87}
88
89static void
90bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
91{
92 *dm_len +=
93 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
94 *dm_len +=
95 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
96 BFA_CACHELINE_SZ);
97 *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
98}
99
100/**
101 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
102 */
103static void
104bfa_iocfc_send_cfg(void *bfa_arg)
105{
106 struct bfa_s *bfa = bfa_arg;
107 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
108 struct bfi_iocfc_cfg_req_s cfg_req;
109 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
110 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
111 int i;
112
113 bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
114 bfa_trc(bfa, cfg->fwcfg.num_cqs);
115
116 bfa_iocfc_reset_queues(bfa);
117
118 /**
119 * initialize IOC configuration info
120 */
121 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
122 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
123
124 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
125 bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);
126
127 /**
128 * dma map REQ and RSP circular queues and shadow pointers
129 */
130 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
131 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
132 iocfc->req_cq_ba[i].pa);
133 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
134 iocfc->req_cq_shadow_ci[i].pa);
135 cfg_info->req_cq_elems[i] =
136 bfa_os_htons(cfg->drvcfg.num_reqq_elems);
137
138 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
139 iocfc->rsp_cq_ba[i].pa);
140 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
141 iocfc->rsp_cq_shadow_pi[i].pa);
142 cfg_info->rsp_cq_elems[i] =
143 bfa_os_htons(cfg->drvcfg.num_rspq_elems);
144 }
145
146 /**
147 * Enable interrupt coalescing if it is driver init path
148 * and not ioc disable/enable path.
149 */
150 if (!iocfc->cfgdone)
151 cfg_info->intr_attr.coalesce = BFA_TRUE;
152
153 iocfc->cfgdone = BFA_FALSE;
154
155 /**
156 * dma map IOC configuration itself
157 */
158 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
159 bfa_lpuid(bfa));
160 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
161
162 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
163 sizeof(struct bfi_iocfc_cfg_req_s));
164}
165
166static void
167bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
168 struct bfa_pcidev_s *pcidev)
169{
170 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
171
172 bfa->bfad = bfad;
173 iocfc->bfa = bfa;
174 iocfc->action = BFA_IOCFC_ACT_NONE;
175
176 bfa_os_assign(iocfc->cfg, *cfg);
177
178 /**
179 * Initialize chip specific handlers.
180 */
181 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
182 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
183 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
184 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
185 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
186 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
187 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
188 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
189 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
190 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
191 } else {
192 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
193 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
194 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
195 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
196 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
197 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
198 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
199 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
200 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
201 }
202
203 iocfc->hwif.hw_reginit(bfa);
204 bfa->msix.nvecs = 0;
205}
206
207static void
208bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
209 struct bfa_meminfo_s *meminfo)
210{
211 u8 *dm_kva;
212 u64 dm_pa;
213 int i, per_reqq_sz, per_rspq_sz;
214 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
215 int dbgsz;
216
217 dm_kva = bfa_meminfo_dma_virt(meminfo);
218 dm_pa = bfa_meminfo_dma_phys(meminfo);
219
220 /*
221 * First allocate dma memory for IOC.
222 */
223 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
224 dm_kva += bfa_ioc_meminfo();
225 dm_pa += bfa_ioc_meminfo();
226
227 /*
228 * Claim DMA-able memory for the request/response queues and for shadow
229 * ci/pi registers
230 */
231 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
232 BFA_DMA_ALIGN_SZ);
233 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
234 BFA_DMA_ALIGN_SZ);
235
236 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
237 iocfc->req_cq_ba[i].kva = dm_kva;
238 iocfc->req_cq_ba[i].pa = dm_pa;
239 bfa_os_memset(dm_kva, 0, per_reqq_sz);
240 dm_kva += per_reqq_sz;
241 dm_pa += per_reqq_sz;
242
243 iocfc->rsp_cq_ba[i].kva = dm_kva;
244 iocfc->rsp_cq_ba[i].pa = dm_pa;
245 bfa_os_memset(dm_kva, 0, per_rspq_sz);
246 dm_kva += per_rspq_sz;
247 dm_pa += per_rspq_sz;
248 }
249
250 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
251 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
252 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
253 dm_kva += BFA_CACHELINE_SZ;
254 dm_pa += BFA_CACHELINE_SZ;
255
256 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
257 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
258 dm_kva += BFA_CACHELINE_SZ;
259 dm_pa += BFA_CACHELINE_SZ;
260 }
261
262 /*
263 * Claim DMA-able memory for the config info page
264 */
265 bfa->iocfc.cfg_info.kva = dm_kva;
266 bfa->iocfc.cfg_info.pa = dm_pa;
267 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
268 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
269 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
270
271 /*
272 * Claim DMA-able memory for the config response
273 */
274 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
275 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
276 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
277
278 dm_kva +=
279 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
280 BFA_CACHELINE_SZ);
281 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
282 BFA_CACHELINE_SZ);
283
284 /*
285 * Claim DMA-able memory for iocfc stats
286 */
287 bfa->iocfc.stats_kva = dm_kva;
288 bfa->iocfc.stats_pa = dm_pa;
289 bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
290 dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
291 dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
292
293 bfa_meminfo_dma_virt(meminfo) = dm_kva;
294 bfa_meminfo_dma_phys(meminfo) = dm_pa;
295
296 dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
297 if (dbgsz > 0) {
298 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
299 bfa_meminfo_kva(meminfo) += dbgsz;
300 }
301}
302
303/**
304 * Start BFA submodules.
305 */
306static void
307bfa_iocfc_start_submod(struct bfa_s *bfa)
308{
309 int i;
310
311 bfa->rme_process = BFA_TRUE;
312
313 for (i = 0; hal_mods[i]; i++)
314 hal_mods[i]->start(bfa);
315}
316
317/**
318 * Disable BFA submodules.
319 */
320static void
321bfa_iocfc_disable_submod(struct bfa_s *bfa)
322{
323 int i;
324
325 for (i = 0; hal_mods[i]; i++)
326 hal_mods[i]->iocdisable(bfa);
327}
328
329static void
330bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
331{
332 struct bfa_s *bfa = bfa_arg;
333
334 if (complete) {
335 if (bfa->iocfc.cfgdone)
336 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
337 else
338 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
339 } else {
340 if (bfa->iocfc.cfgdone)
341 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
342 }
343}
344
345static void
346bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
347{
348 struct bfa_s *bfa = bfa_arg;
349 struct bfad_s *bfad = bfa->bfad;
350
351 if (compl)
352 complete(&bfad->comp);
353
354 else
355 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
356}
357
358static void
359bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
360{
361 struct bfa_s *bfa = bfa_arg;
362 struct bfad_s *bfad = bfa->bfad;
363
364 if (compl)
365 complete(&bfad->disable_comp);
366}
367
368/**
369 * Update BFA configuration from firmware configuration.
370 */
371static void
372bfa_iocfc_cfgrsp(struct bfa_s *bfa)
373{
374 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
375 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
376 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
377
378 fwcfg->num_cqs = fwcfg->num_cqs;
379 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
380 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
381 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
382 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
383 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
384
385 iocfc->cfgdone = BFA_TRUE;
386
387 /**
388 * Configuration is complete - initialize/start submodules
389 */
390 bfa_fcport_init(bfa);
391
392 if (iocfc->action == BFA_IOCFC_ACT_INIT)
393 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
394 else
395 bfa_iocfc_start_submod(bfa);
396}
397
398static void
399bfa_iocfc_stats_clear(void *bfa_arg)
400{
401 struct bfa_s *bfa = bfa_arg;
402 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
403 struct bfi_iocfc_stats_req_s stats_req;
404
405 bfa_timer_start(bfa, &iocfc->stats_timer,
406 bfa_iocfc_stats_clr_timeout, bfa,
407 BFA_IOCFC_TOV);
408
409 bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
410 bfa_lpuid(bfa));
411 bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
412 sizeof(struct bfi_iocfc_stats_req_s));
413}
414
415static void
416bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
417{
418 u32 *dip = (u32 *) d;
419 u32 *sip = (u32 *) s;
420 int i;
421
422 for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
423 dip[i] = bfa_os_ntohl(sip[i]);
424}
425
426static void
427bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
428{
429 struct bfa_s *bfa = bfa_arg;
430 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
431
432 if (complete) {
433 bfa_ioc_clr_stats(&bfa->ioc);
434 iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
435 } else {
436 iocfc->stats_busy = BFA_FALSE;
437 iocfc->stats_status = BFA_STATUS_OK;
438 }
439}
440
441static void
442bfa_iocfc_stats_clr_timeout(void *bfa_arg)
443{
444 struct bfa_s *bfa = bfa_arg;
445 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
446
447 bfa_trc(bfa, 0);
448
449 iocfc->stats_status = BFA_STATUS_ETIMER;
450 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
451}
452
453static void
454bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
455{
456 struct bfa_s *bfa = bfa_arg;
457 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
458
459 if (complete) {
460 if (iocfc->stats_status == BFA_STATUS_OK) {
461 bfa_os_memset(iocfc->stats_ret, 0,
462 sizeof(*iocfc->stats_ret));
463 bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
464 iocfc->fw_stats);
465 }
466 iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
467 } else {
468 iocfc->stats_busy = BFA_FALSE;
469 iocfc->stats_status = BFA_STATUS_OK;
470 }
471}
472
473static void
474bfa_iocfc_stats_timeout(void *bfa_arg)
475{
476 struct bfa_s *bfa = bfa_arg;
477 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
478
479 bfa_trc(bfa, 0);
480
481 iocfc->stats_status = BFA_STATUS_ETIMER;
482 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
483}
484
485static void
486bfa_iocfc_stats_query(struct bfa_s *bfa)
487{
488 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
489 struct bfi_iocfc_stats_req_s stats_req;
490
491 bfa_timer_start(bfa, &iocfc->stats_timer,
492 bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
493
494 bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
495 bfa_lpuid(bfa));
496 bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
497 sizeof(struct bfi_iocfc_stats_req_s));
498}
499
500void
501bfa_iocfc_reset_queues(struct bfa_s *bfa)
502{
503 int q;
504
505 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
506 bfa_reqq_ci(bfa, q) = 0;
507 bfa_reqq_pi(bfa, q) = 0;
508 bfa_rspq_ci(bfa, q) = 0;
509 bfa_rspq_pi(bfa, q) = 0;
510 }
511}
512
513/**
514 * IOC enable request is complete
515 */
516static void
517bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
518{
519 struct bfa_s *bfa = bfa_arg;
520
521 if (status != BFA_STATUS_OK) {
522 bfa_isr_disable(bfa);
523 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
524 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
525 bfa_iocfc_init_cb, bfa);
526 return;
527 }
528
529 bfa_iocfc_send_cfg(bfa);
530}
531
532/**
533 * IOC disable request is complete
534 */
535static void
536bfa_iocfc_disable_cbfn(void *bfa_arg)
537{
538 struct bfa_s *bfa = bfa_arg;
539
540 bfa_isr_disable(bfa);
541 bfa_iocfc_disable_submod(bfa);
542
543 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
544 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
545 bfa);
546 else {
547 bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
548 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
549 bfa);
550 }
551}
552
553/**
554 * Notify sub-modules of hardware failure.
555 */
556static void
557bfa_iocfc_hbfail_cbfn(void *bfa_arg)
558{
559 struct bfa_s *bfa = bfa_arg;
560
561 bfa->rme_process = BFA_FALSE;
562
563 bfa_isr_disable(bfa);
564 bfa_iocfc_disable_submod(bfa);
565
566 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
567 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
568 bfa);
569}
570
571/**
572 * Actions on chip-reset completion.
573 */
574static void
575bfa_iocfc_reset_cbfn(void *bfa_arg)
576{
577 struct bfa_s *bfa = bfa_arg;
578
579 bfa_iocfc_reset_queues(bfa);
580 bfa_isr_enable(bfa);
581}
582
583
584
585/**
586 * bfa_ioc_public
587 */
588
589/**
590 * Query IOC memory requirement information.
591 */
592void
593bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
594 u32 *dm_len)
595{
596 /* dma memory for IOC */
597 *dm_len += bfa_ioc_meminfo();
598
599 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
600 bfa_iocfc_cqs_sz(cfg, dm_len);
601 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
602}
603
604/**
605 * Query IOC memory requirement information.
606 */
607void
608bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
609 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
610{
611 int i;
612
613 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
614 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
615 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
616 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
617
618 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
619 bfa->trcmod, bfa->aen, bfa->logm);
620
621 /**
622 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
623 */
624 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
625 bfa_ioc_set_fcmode(&bfa->ioc);
626
627 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
628 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
629
630 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
631 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
632 bfa_timer_init(&bfa->timer_mod);
633
634 INIT_LIST_HEAD(&bfa->comp_q);
635 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
636 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
637}
638
639/**
640 * Query IOC memory requirement information.
641 */
642void
643bfa_iocfc_detach(struct bfa_s *bfa)
644{
645 bfa_ioc_detach(&bfa->ioc);
646}
647
648/**
649 * Query IOC memory requirement information.
650 */
651void
652bfa_iocfc_init(struct bfa_s *bfa)
653{
654 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
655 bfa_ioc_enable(&bfa->ioc);
656}
657
658/**
659 * IOC start called from bfa_start(). Called to start IOC operations
660 * at driver instantiation for this instance.
661 */
662void
663bfa_iocfc_start(struct bfa_s *bfa)
664{
665 if (bfa->iocfc.cfgdone)
666 bfa_iocfc_start_submod(bfa);
667}
668
669/**
670 * IOC stop called from bfa_stop(). Called only when driver is unloaded
671 * for this instance.
672 */
673void
674bfa_iocfc_stop(struct bfa_s *bfa)
675{
676 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
677
678 bfa->rme_process = BFA_FALSE;
679 bfa_ioc_disable(&bfa->ioc);
680}
681
682void
683bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
684{
685 struct bfa_s *bfa = bfaarg;
686 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
687 union bfi_iocfc_i2h_msg_u *msg;
688
689 msg = (union bfi_iocfc_i2h_msg_u *) m;
690 bfa_trc(bfa, msg->mh.msg_id);
691
692 switch (msg->mh.msg_id) {
693 case BFI_IOCFC_I2H_CFG_REPLY:
694 iocfc->cfg_reply = &msg->cfg_reply;
695 bfa_iocfc_cfgrsp(bfa);
696 break;
697
698 case BFI_IOCFC_I2H_GET_STATS_RSP:
699 if (iocfc->stats_busy == BFA_FALSE
700 || iocfc->stats_status == BFA_STATUS_ETIMER)
701 break;
702
703 bfa_timer_stop(&iocfc->stats_timer);
704 iocfc->stats_status = BFA_STATUS_OK;
705 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
706 bfa);
707 break;
708 case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
709 /*
710 * check for timer pop before processing the rsp
711 */
712 if (iocfc->stats_busy == BFA_FALSE
713 || iocfc->stats_status == BFA_STATUS_ETIMER)
714 break;
715
716 bfa_timer_stop(&iocfc->stats_timer);
717 iocfc->stats_status = BFA_STATUS_OK;
718 bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
719 bfa_iocfc_stats_clr_cb, bfa);
720 break;
721 case BFI_IOCFC_I2H_UPDATEQ_RSP:
722 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
723 break;
724 default:
725 bfa_assert(0);
726 }
727}
728
729#ifndef BFA_BIOS_BUILD
730void
731bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
732{
733 bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
734}
735
736u64
737bfa_adapter_get_id(struct bfa_s *bfa)
738{
739 return bfa_ioc_get_adid(&bfa->ioc);
740}
741
742void
743bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
744{
745 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
746
747 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
748
749 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
750 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
751 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
752
753 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
754 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
755 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
756
757 attr->config = iocfc->cfg;
758
759}
760
761bfa_status_t
762bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
763{
764 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
765 struct bfi_iocfc_set_intr_req_s *m;
766
767 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
768 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
769 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
770
771 if (!bfa_iocfc_is_operational(bfa))
772 return BFA_STATUS_OK;
773
774 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
775 if (!m)
776 return BFA_STATUS_DEVBUSY;
777
778 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
779 bfa_lpuid(bfa));
780 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
781 m->delay = iocfc->cfginfo->intr_attr.delay;
782 m->latency = iocfc->cfginfo->intr_attr.latency;
783
784
785 bfa_trc(bfa, attr->delay);
786 bfa_trc(bfa, attr->latency);
787
788 bfa_reqq_produce(bfa, BFA_REQQ_IOC);
789 return BFA_STATUS_OK;
790}
791
792void
793bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
794{
795 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
796
797 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
798 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
799}
800
801bfa_status_t
802bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
803 bfa_cb_ioc_t cbfn, void *cbarg)
804{
805 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
806
807 if (iocfc->stats_busy) {
808 bfa_trc(bfa, iocfc->stats_busy);
809 return BFA_STATUS_DEVBUSY;
810 }
811
812 if (!bfa_iocfc_is_operational(bfa)) {
813 bfa_trc(bfa, 0);
814 return BFA_STATUS_IOC_NON_OP;
815 }
816
817 iocfc->stats_busy = BFA_TRUE;
818 iocfc->stats_ret = stats;
819 iocfc->stats_cbfn = cbfn;
820 iocfc->stats_cbarg = cbarg;
821
822 bfa_iocfc_stats_query(bfa);
823
824 return BFA_STATUS_OK;
825}
826
827bfa_status_t
828bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
829{
830 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
831
832 if (iocfc->stats_busy) {
833 bfa_trc(bfa, iocfc->stats_busy);
834 return BFA_STATUS_DEVBUSY;
835 }
836
837 if (!bfa_iocfc_is_operational(bfa)) {
838 bfa_trc(bfa, 0);
839 return BFA_STATUS_IOC_NON_OP;
840 }
841
842 iocfc->stats_busy = BFA_TRUE;
843 iocfc->stats_cbfn = cbfn;
844 iocfc->stats_cbarg = cbarg;
845
846 bfa_iocfc_stats_clear(bfa);
847 return BFA_STATUS_OK;
848}
849
850/**
851 * Enable IOC after it is disabled.
852 */
853void
854bfa_iocfc_enable(struct bfa_s *bfa)
855{
856 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
857 "IOC Enable");
858 bfa_ioc_enable(&bfa->ioc);
859}
860
861void
862bfa_iocfc_disable(struct bfa_s *bfa)
863{
864 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
865 "IOC Disable");
866 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
867
868 bfa->rme_process = BFA_FALSE;
869 bfa_ioc_disable(&bfa->ioc);
870}
871
872
873bfa_boolean_t
874bfa_iocfc_is_operational(struct bfa_s *bfa)
875{
876 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
877}
878
879/**
880 * Return boot target port wwns -- read from boot information in flash.
881 */
882void
883bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
884{
885 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
886 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
887 int i;
888
889 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
890 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
891 *nwwns = cfgrsp->pbc_cfg.nbluns;
892 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
893 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
894
895 return;
896 }
897
898 *nwwns = cfgrsp->bootwwns.nwwns;
899 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
900}
901
902void
903bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
904{
905 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
906 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
907
908 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
909 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
910 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
911 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
912}
913
914int
915bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
916{
917 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
918 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
919
920 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
921 return cfgrsp->pbc_cfg.nvports;
922}
923
924
925#endif
926
927
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
deleted file mode 100644
index 74a6a048d1fd..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_IOCFC_H__
19#define __BFA_IOCFC_H__
20
21#include <bfa_ioc.h>
22#include <bfa.h>
23#include <bfi/bfi_iocfc.h>
24#include <bfi/bfi_pbc.h>
25#include <bfa_callback_priv.h>
26
27#define BFA_REQQ_NELEMS_MIN (4)
28#define BFA_RSPQ_NELEMS_MIN (4)
29
30struct bfa_iocfc_regs_s {
31 bfa_os_addr_t intr_status;
32 bfa_os_addr_t intr_mask;
33 bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
34 bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
35 bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
36 bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
37 bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
38 bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
39 bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
40 bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
41};
42
43/**
44 * MSIX vector handlers
45 */
46#define BFA_MSIX_MAX_VECTORS 22
47typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
48struct bfa_msix_s {
49 int nvecs;
50 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
51};
52
53/**
54 * Chip specific interfaces
55 */
56struct bfa_hwif_s {
57 void (*hw_reginit)(struct bfa_s *bfa);
58 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
59 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
60 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
61 void (*hw_msix_install)(struct bfa_s *bfa);
62 void (*hw_msix_uninstall)(struct bfa_s *bfa);
63 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
64 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
65 u32 *nvecs, u32 *maxvec);
66 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
67 u32 *end);
68};
69typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
70
71struct bfa_iocfc_s {
72 struct bfa_s *bfa;
73 struct bfa_iocfc_cfg_s cfg;
74 int action;
75
76 u32 req_cq_pi[BFI_IOC_MAX_CQS];
77 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
78
79 struct bfa_cb_qe_s init_hcb_qe;
80 struct bfa_cb_qe_s stop_hcb_qe;
81 struct bfa_cb_qe_s dis_hcb_qe;
82 struct bfa_cb_qe_s stats_hcb_qe;
83 bfa_boolean_t cfgdone;
84
85 struct bfa_dma_s cfg_info;
86 struct bfi_iocfc_cfg_s *cfginfo;
87 struct bfa_dma_s cfgrsp_dma;
88 struct bfi_iocfc_cfgrsp_s *cfgrsp;
89 struct bfi_iocfc_cfg_reply_s *cfg_reply;
90
91 u8 *stats_kva;
92 u64 stats_pa;
93 struct bfa_fw_stats_s *fw_stats;
94 struct bfa_timer_s stats_timer; /* timer */
95 struct bfa_iocfc_stats_s *stats_ret; /* driver stats location */
96 bfa_status_t stats_status; /* stats/statsclr status */
97 bfa_boolean_t stats_busy; /* outstanding stats */
98 bfa_cb_ioc_t stats_cbfn; /* driver callback function */
99 void *stats_cbarg; /* user callback arg */
100
101 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
102 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
103 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
104 struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
105 struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
106 struct bfa_hwif_s hwif;
107
108 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
109 void *updateq_cbarg; /* bios callback arg */
110 u32 intr_mask;
111};
112
113#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
114#define bfa_msix_init(__bfa, __nvecs) \
115 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
116#define bfa_msix_install(__bfa) \
117 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
118#define bfa_msix_uninstall(__bfa) \
119 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
120#define bfa_isr_mode_set(__bfa, __msix) \
121 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
122#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
123 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
124 __nvecs, __maxvec))
125#define bfa_msix_get_rme_range(__bfa, __start, __end) \
126 ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
127
128/*
129 * FC specific IOC functions.
130 */
131void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
132 u32 *dm_len);
133void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
134 struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
135 struct bfa_pcidev_s *pcidev);
136void bfa_iocfc_detach(struct bfa_s *bfa);
137void bfa_iocfc_init(struct bfa_s *bfa);
138void bfa_iocfc_start(struct bfa_s *bfa);
139void bfa_iocfc_stop(struct bfa_s *bfa);
140void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
141void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
142bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
143void bfa_iocfc_reset_queues(struct bfa_s *bfa);
144void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
145 u32 reqq_sci, u32 rspq_spi,
146 bfa_cb_iocfc_t cbfn, void *cbarg);
147
148void bfa_msix_all(struct bfa_s *bfa, int vec);
149void bfa_msix_reqq(struct bfa_s *bfa, int vec);
150void bfa_msix_rspq(struct bfa_s *bfa, int vec);
151void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
152
153void bfa_hwcb_reginit(struct bfa_s *bfa);
154void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
155void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
156void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
157void bfa_hwcb_msix_install(struct bfa_s *bfa);
158void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
159void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
160void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
161 u32 *nvecs, u32 *maxvec);
162void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
163void bfa_hwct_reginit(struct bfa_s *bfa);
164void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
165void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
166void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
167void bfa_hwct_msix_install(struct bfa_s *bfa);
168void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
169void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
170void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
171 u32 *nvecs, u32 *maxvec);
172void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
173
174void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
175void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
176 bfa_boolean_t mincfg);
177void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
178void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
179 struct bfa_boot_pbc_s *pbcfg);
180int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
181 struct bfi_pbc_vport_s *pbc_vport);
182
183#endif /* __BFA_IOCFC_H__ */
184
diff --git a/drivers/scsi/bfa/bfa_iocfc_q.c b/drivers/scsi/bfa/bfa_iocfc_q.c
deleted file mode 100644
index 500a17df40b2..000000000000
--- a/drivers/scsi/bfa/bfa_iocfc_q.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include "bfa_intr_priv.h"
20
21BFA_TRC_FILE(HAL, IOCFC_Q);
22
23void
24bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
25 u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
26 void *cbarg)
27{
28 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
29 struct bfi_iocfc_updateq_req_s updateq_req;
30
31 iocfc->updateq_cbfn = cbfn;
32 iocfc->updateq_cbarg = cbarg;
33
34 bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
35 bfa_lpuid(bfa));
36
37 updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
38 updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
39 updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
40 updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
41
42 bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
43 sizeof(struct bfi_iocfc_updateq_req_s));
44}
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
deleted file mode 100644
index bdfdc19915f8..000000000000
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ /dev/null
@@ -1,1364 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <cs/bfa_debug.h>
20#include <bfa_cb_ioim_macros.h>
21
22BFA_TRC_FILE(HAL, IOIM);
23
24/*
25 * forward declarations.
26 */
27static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
29static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
30static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
31static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
32static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
33static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
34static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
35static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
36static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
37
38/**
39 * bfa_ioim_sm
40 */
41
42/**
43 * IO state machine events
44 */
45enum bfa_ioim_event {
46 BFA_IOIM_SM_START = 1, /* io start request from host */
47 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
48 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
49 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
50 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
51 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
52 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
53 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
54 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
55 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
56 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
57 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
58 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
59 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
60 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
61 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
62 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
63 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
64};
65
66/*
67 * forward declaration of IO state machine
68 */
69static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
70 enum bfa_ioim_event event);
71static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
72 enum bfa_ioim_event event);
73static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
74 enum bfa_ioim_event event);
75static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
76 enum bfa_ioim_event event);
77static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
78 enum bfa_ioim_event event);
79static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
80 enum bfa_ioim_event event);
81static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
82 enum bfa_ioim_event event);
83static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
84 enum bfa_ioim_event event);
85static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
86 enum bfa_ioim_event event);
87static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
88 enum bfa_ioim_event event);
89static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
90 enum bfa_ioim_event event);
91
92/**
93 * IO is not started (unallocated).
94 */
95static void
96bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
97{
98 bfa_trc_fp(ioim->bfa, ioim->iotag);
99 bfa_trc_fp(ioim->bfa, event);
100
101 switch (event) {
102 case BFA_IOIM_SM_START:
103 if (!bfa_itnim_is_online(ioim->itnim)) {
104 if (!bfa_itnim_hold_io(ioim->itnim)) {
105 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
106 list_del(&ioim->qe);
107 list_add_tail(&ioim->qe,
108 &ioim->fcpim->ioim_comp_q);
109 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
110 __bfa_cb_ioim_pathtov, ioim);
111 } else {
112 list_del(&ioim->qe);
113 list_add_tail(&ioim->qe,
114 &ioim->itnim->pending_q);
115 }
116 break;
117 }
118
119 if (ioim->nsges > BFI_SGE_INLINE) {
120 if (!bfa_ioim_sge_setup(ioim)) {
121 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
122 return;
123 }
124 }
125
126 if (!bfa_ioim_send_ioreq(ioim)) {
127 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
128 break;
129 }
130
131 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
132 break;
133
134 case BFA_IOIM_SM_IOTOV:
135 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
136 list_del(&ioim->qe);
137 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
138 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
139 __bfa_cb_ioim_pathtov, ioim);
140 break;
141
142 case BFA_IOIM_SM_ABORT:
143 /**
144 * IO in pending queue can get abort requests. Complete abort
145 * requests immediately.
146 */
147 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
148 bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
149 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
150 ioim);
151 break;
152
153 default:
154 bfa_sm_fault(ioim->bfa, event);
155 }
156}
157
158/**
159 * IO is waiting for SG pages.
160 */
161static void
162bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
163{
164 bfa_trc(ioim->bfa, ioim->iotag);
165 bfa_trc(ioim->bfa, event);
166
167 switch (event) {
168 case BFA_IOIM_SM_SGALLOCED:
169 if (!bfa_ioim_send_ioreq(ioim)) {
170 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
171 break;
172 }
173 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
174 break;
175
176 case BFA_IOIM_SM_CLEANUP:
177 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
178 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
179 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
180 ioim);
181 bfa_ioim_notify_cleanup(ioim);
182 break;
183
184 case BFA_IOIM_SM_ABORT:
185 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
186 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
187 list_del(&ioim->qe);
188 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
189 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
190 ioim);
191 break;
192
193 case BFA_IOIM_SM_HWFAIL:
194 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
195 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
196 list_del(&ioim->qe);
197 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
198 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
199 ioim);
200 break;
201
202 default:
203 bfa_sm_fault(ioim->bfa, event);
204 }
205}
206
207/**
208 * IO is active.
209 */
210static void
211bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
212{
213 bfa_trc_fp(ioim->bfa, ioim->iotag);
214 bfa_trc_fp(ioim->bfa, event);
215
216 switch (event) {
217 case BFA_IOIM_SM_COMP_GOOD:
218 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
219 list_del(&ioim->qe);
220 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
221 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
222 __bfa_cb_ioim_good_comp, ioim);
223 break;
224
225 case BFA_IOIM_SM_COMP:
226 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
227 list_del(&ioim->qe);
228 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
229 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
230 ioim);
231 break;
232
233 case BFA_IOIM_SM_DONE:
234 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
235 list_del(&ioim->qe);
236 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
237 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
238 ioim);
239 break;
240
241 case BFA_IOIM_SM_ABORT:
242 ioim->iosp->abort_explicit = BFA_TRUE;
243 ioim->io_cbfn = __bfa_cb_ioim_abort;
244
245 if (bfa_ioim_send_abort(ioim))
246 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
247 else {
248 bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
249 bfa_reqq_wait(ioim->bfa, ioim->reqq,
250 &ioim->iosp->reqq_wait);
251 }
252 break;
253
254 case BFA_IOIM_SM_CLEANUP:
255 ioim->iosp->abort_explicit = BFA_FALSE;
256 ioim->io_cbfn = __bfa_cb_ioim_failed;
257
258 if (bfa_ioim_send_abort(ioim))
259 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
260 else {
261 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
262 bfa_reqq_wait(ioim->bfa, ioim->reqq,
263 &ioim->iosp->reqq_wait);
264 }
265 break;
266
267 case BFA_IOIM_SM_HWFAIL:
268 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
269 list_del(&ioim->qe);
270 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
271 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
272 ioim);
273 break;
274
275 default:
276 bfa_sm_fault(ioim->bfa, event);
277 }
278}
279
280/**
281 * IO is being aborted, waiting for completion from firmware.
282 */
283static void
284bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
285{
286 bfa_trc(ioim->bfa, ioim->iotag);
287 bfa_trc(ioim->bfa, event);
288
289 switch (event) {
290 case BFA_IOIM_SM_COMP_GOOD:
291 case BFA_IOIM_SM_COMP:
292 case BFA_IOIM_SM_DONE:
293 case BFA_IOIM_SM_FREE:
294 break;
295
296 case BFA_IOIM_SM_ABORT_DONE:
297 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
298 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
299 ioim);
300 break;
301
302 case BFA_IOIM_SM_ABORT_COMP:
303 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
304 list_del(&ioim->qe);
305 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
306 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
307 ioim);
308 break;
309
310 case BFA_IOIM_SM_COMP_UTAG:
311 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
312 list_del(&ioim->qe);
313 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
314 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
315 ioim);
316 break;
317
318 case BFA_IOIM_SM_CLEANUP:
319 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
320 ioim->iosp->abort_explicit = BFA_FALSE;
321
322 if (bfa_ioim_send_abort(ioim))
323 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
324 else {
325 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
326 bfa_reqq_wait(ioim->bfa, ioim->reqq,
327 &ioim->iosp->reqq_wait);
328 }
329 break;
330
331 case BFA_IOIM_SM_HWFAIL:
332 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
333 list_del(&ioim->qe);
334 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
335 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
336 ioim);
337 break;
338
339 default:
340 bfa_sm_fault(ioim->bfa, event);
341 }
342}
343
344/**
345 * IO is being cleaned up (implicit abort), waiting for completion from
346 * firmware.
347 */
348static void
349bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
350{
351 bfa_trc(ioim->bfa, ioim->iotag);
352 bfa_trc(ioim->bfa, event);
353
354 switch (event) {
355 case BFA_IOIM_SM_COMP_GOOD:
356 case BFA_IOIM_SM_COMP:
357 case BFA_IOIM_SM_DONE:
358 case BFA_IOIM_SM_FREE:
359 break;
360
361 case BFA_IOIM_SM_ABORT:
362 /**
363 * IO is already being aborted implicitly
364 */
365 ioim->io_cbfn = __bfa_cb_ioim_abort;
366 break;
367
368 case BFA_IOIM_SM_ABORT_DONE:
369 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
370 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
371 bfa_ioim_notify_cleanup(ioim);
372 break;
373
374 case BFA_IOIM_SM_ABORT_COMP:
375 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
376 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
377 bfa_ioim_notify_cleanup(ioim);
378 break;
379
380 case BFA_IOIM_SM_COMP_UTAG:
381 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
382 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
383 bfa_ioim_notify_cleanup(ioim);
384 break;
385
386 case BFA_IOIM_SM_HWFAIL:
387 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
388 list_del(&ioim->qe);
389 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
390 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
391 ioim);
392 break;
393
394 case BFA_IOIM_SM_CLEANUP:
395 /**
396 * IO can be in cleanup state already due to TM command. 2nd cleanup
397 * request comes from ITN offline event.
398 */
399 break;
400
401 default:
402 bfa_sm_fault(ioim->bfa, event);
403 }
404}
405
406/**
407 * IO is waiting for room in request CQ
408 */
409static void
410bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
411{
412 bfa_trc(ioim->bfa, ioim->iotag);
413 bfa_trc(ioim->bfa, event);
414
415 switch (event) {
416 case BFA_IOIM_SM_QRESUME:
417 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
418 bfa_ioim_send_ioreq(ioim);
419 break;
420
421 case BFA_IOIM_SM_ABORT:
422 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
423 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
424 list_del(&ioim->qe);
425 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
426 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
427 ioim);
428 break;
429
430 case BFA_IOIM_SM_CLEANUP:
431 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
432 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
433 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
434 ioim);
435 bfa_ioim_notify_cleanup(ioim);
436 break;
437
438 case BFA_IOIM_SM_HWFAIL:
439 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
440 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
441 list_del(&ioim->qe);
442 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
443 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
444 ioim);
445 break;
446
447 default:
448 bfa_sm_fault(ioim->bfa, event);
449 }
450}
451
452/**
453 * Active IO is being aborted, waiting for room in request CQ.
454 */
455static void
456bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
457{
458 bfa_trc(ioim->bfa, ioim->iotag);
459 bfa_trc(ioim->bfa, event);
460
461 switch (event) {
462 case BFA_IOIM_SM_QRESUME:
463 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
464 bfa_ioim_send_abort(ioim);
465 break;
466
467 case BFA_IOIM_SM_CLEANUP:
468 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
469 ioim->iosp->abort_explicit = BFA_FALSE;
470 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
471 break;
472
473 case BFA_IOIM_SM_COMP_GOOD:
474 case BFA_IOIM_SM_COMP:
475 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
476 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
477 list_del(&ioim->qe);
478 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
479 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
480 ioim);
481 break;
482
483 case BFA_IOIM_SM_DONE:
484 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
485 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
486 list_del(&ioim->qe);
487 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
488 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
489 ioim);
490 break;
491
492 case BFA_IOIM_SM_HWFAIL:
493 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
494 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
495 list_del(&ioim->qe);
496 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
497 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
498 ioim);
499 break;
500
501 default:
502 bfa_sm_fault(ioim->bfa, event);
503 }
504}
505
506/**
507 * Active IO is being cleaned up, waiting for room in request CQ.
508 */
509static void
510bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
511{
512 bfa_trc(ioim->bfa, ioim->iotag);
513 bfa_trc(ioim->bfa, event);
514
515 switch (event) {
516 case BFA_IOIM_SM_QRESUME:
517 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
518 bfa_ioim_send_abort(ioim);
519 break;
520
521 case BFA_IOIM_SM_ABORT:
522 /**
523 * IO is already being cleaned up implicitly
524 */
525 ioim->io_cbfn = __bfa_cb_ioim_abort;
526 break;
527
528 case BFA_IOIM_SM_COMP_GOOD:
529 case BFA_IOIM_SM_COMP:
530 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
531 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
532 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
533 bfa_ioim_notify_cleanup(ioim);
534 break;
535
536 case BFA_IOIM_SM_DONE:
537 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
538 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
539 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
540 bfa_ioim_notify_cleanup(ioim);
541 break;
542
543 case BFA_IOIM_SM_HWFAIL:
544 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
545 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
546 list_del(&ioim->qe);
547 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
548 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
549 ioim);
550 break;
551
552 default:
553 bfa_sm_fault(ioim->bfa, event);
554 }
555}
556
557/**
558 * IO bfa callback is pending.
559 */
560static void
561bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
562{
563 bfa_trc_fp(ioim->bfa, ioim->iotag);
564 bfa_trc_fp(ioim->bfa, event);
565
566 switch (event) {
567 case BFA_IOIM_SM_HCB:
568 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
569 bfa_ioim_free(ioim);
570 bfa_cb_ioim_resfree(ioim->bfa->bfad);
571 break;
572
573 case BFA_IOIM_SM_CLEANUP:
574 bfa_ioim_notify_cleanup(ioim);
575 break;
576
577 case BFA_IOIM_SM_HWFAIL:
578 break;
579
580 default:
581 bfa_sm_fault(ioim->bfa, event);
582 }
583}
584
585/**
586 * IO bfa callback is pending. IO resource cannot be freed.
587 */
588static void
589bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
590{
591 bfa_trc(ioim->bfa, ioim->iotag);
592 bfa_trc(ioim->bfa, event);
593
594 switch (event) {
595 case BFA_IOIM_SM_HCB:
596 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
597 list_del(&ioim->qe);
598 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
599 break;
600
601 case BFA_IOIM_SM_FREE:
602 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
603 break;
604
605 case BFA_IOIM_SM_CLEANUP:
606 bfa_ioim_notify_cleanup(ioim);
607 break;
608
609 case BFA_IOIM_SM_HWFAIL:
610 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
611 break;
612
613 default:
614 bfa_sm_fault(ioim->bfa, event);
615 }
616}
617
618/**
619 * IO is completed, waiting resource free from firmware.
620 */
621static void
622bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
623{
624 bfa_trc(ioim->bfa, ioim->iotag);
625 bfa_trc(ioim->bfa, event);
626
627 switch (event) {
628 case BFA_IOIM_SM_FREE:
629 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
630 bfa_ioim_free(ioim);
631 bfa_cb_ioim_resfree(ioim->bfa->bfad);
632 break;
633
634 case BFA_IOIM_SM_CLEANUP:
635 bfa_ioim_notify_cleanup(ioim);
636 break;
637
638 case BFA_IOIM_SM_HWFAIL:
639 break;
640
641 default:
642 bfa_sm_fault(ioim->bfa, event);
643 }
644}
645
646
647
648/**
649 * bfa_ioim_private
650 */
651
652static void
653__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
654{
655 struct bfa_ioim_s *ioim = cbarg;
656
657 if (!complete) {
658 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
659 return;
660 }
661
662 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
663}
664
665static void
666__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
667{
668 struct bfa_ioim_s *ioim = cbarg;
669 struct bfi_ioim_rsp_s *m;
670 u8 *snsinfo = NULL;
671 u8 sns_len = 0;
672 s32 residue = 0;
673
674 if (!complete) {
675 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
676 return;
677 }
678
679 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
680 if (m->io_status == BFI_IOIM_STS_OK) {
681 /**
682 * setup sense information, if present
683 */
684 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
685 && m->sns_len) {
686 sns_len = m->sns_len;
687 snsinfo = ioim->iosp->snsinfo;
688 }
689
690 /**
691 * setup residue value correctly for normal completions
692 */
693 if (m->resid_flags == FCP_RESID_UNDER)
694 residue = bfa_os_ntohl(m->residue);
695 if (m->resid_flags == FCP_RESID_OVER) {
696 residue = bfa_os_ntohl(m->residue);
697 residue = -residue;
698 }
699 }
700
701 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
702 m->scsi_status, sns_len, snsinfo, residue);
703}
704
705static void
706__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
707{
708 struct bfa_ioim_s *ioim = cbarg;
709
710 if (!complete) {
711 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
712 return;
713 }
714
715 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
716 0, 0, NULL, 0);
717}
718
719static void
720__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
721{
722 struct bfa_ioim_s *ioim = cbarg;
723
724 if (!complete) {
725 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
726 return;
727 }
728
729 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
730 0, 0, NULL, 0);
731}
732
733static void
734__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
735{
736 struct bfa_ioim_s *ioim = cbarg;
737
738 if (!complete) {
739 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
740 return;
741 }
742
743 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
744}
745
746static void
747bfa_ioim_sgpg_alloced(void *cbarg)
748{
749 struct bfa_ioim_s *ioim = cbarg;
750
751 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
752 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
753 bfa_ioim_sgpg_setup(ioim);
754 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
755}
756
757/**
758 * Send I/O request to firmware.
759 */
760static bfa_boolean_t
761bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
762{
763 struct bfa_itnim_s *itnim = ioim->itnim;
764 struct bfi_ioim_req_s *m;
765 static struct fcp_cmnd_s cmnd_z0 = { 0 };
766 struct bfi_sge_s *sge;
767 u32 pgdlen = 0;
768 u64 addr;
769 struct scatterlist *sg;
770 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
771
772 /**
773 * check for room in queue to send request now
774 */
775 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
776 if (!m) {
777 bfa_reqq_wait(ioim->bfa, ioim->reqq,
778 &ioim->iosp->reqq_wait);
779 return BFA_FALSE;
780 }
781
782 /**
783 * build i/o request message next
784 */
785 m->io_tag = bfa_os_htons(ioim->iotag);
786 m->rport_hdl = ioim->itnim->rport->fw_handle;
787 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
788
789 /**
790 * build inline IO SG element here
791 */
792 sge = &m->sges[0];
793 if (ioim->nsges) {
794 sg = (struct scatterlist *)scsi_sglist(cmnd);
795 addr = bfa_os_sgaddr(sg_dma_address(sg));
796 sge->sga = *(union bfi_addr_u *) &addr;
797 pgdlen = sg_dma_len(sg);
798 sge->sg_len = pgdlen;
799 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
800 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
801 bfa_sge_to_be(sge);
802 sge++;
803 }
804
805 if (ioim->nsges > BFI_SGE_INLINE) {
806 sge->sga = ioim->sgpg->sgpg_pa;
807 } else {
808 sge->sga.a32.addr_lo = 0;
809 sge->sga.a32.addr_hi = 0;
810 }
811 sge->sg_len = pgdlen;
812 sge->flags = BFI_SGE_PGDLEN;
813 bfa_sge_to_be(sge);
814
815 /**
816 * set up I/O command parameters
817 */
818 bfa_os_assign(m->cmnd, cmnd_z0);
819 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
820 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
821 bfa_os_assign(m->cmnd.cdb,
822 *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
823 m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
824
825 /**
826 * set up I/O message header
827 */
828 switch (m->cmnd.iodir) {
829 case FCP_IODIR_READ:
830 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
831 bfa_stats(itnim, input_reqs);
832 break;
833 case FCP_IODIR_WRITE:
834 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
835 bfa_stats(itnim, output_reqs);
836 break;
837 case FCP_IODIR_RW:
838 bfa_stats(itnim, input_reqs);
839 bfa_stats(itnim, output_reqs);
840 default:
841 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
842 }
843 if (itnim->seq_rec ||
844 (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
845 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
846
847#ifdef IOIM_ADVANCED
848 m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
849 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
850 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
851
852 /**
853 * Handle large CDB (>16 bytes).
854 */
855 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
856 FCP_CMND_CDB_LEN) / sizeof(u32);
857 if (m->cmnd.addl_cdb_len) {
858 bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
859 bfa_cb_ioim_get_cdb(ioim->dio) + 1,
860 m->cmnd.addl_cdb_len * sizeof(u32));
861 fcp_cmnd_fcpdl(&m->cmnd) =
862 bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
863 }
864#endif
865
866 /**
867 * queue I/O message to firmware
868 */
869 bfa_reqq_produce(ioim->bfa, ioim->reqq);
870 return BFA_TRUE;
871}
872
873/**
874 * Setup any additional SG pages needed.Inline SG element is setup
875 * at queuing time.
876 */
877static bfa_boolean_t
878bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
879{
880 u16 nsgpgs;
881
882 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
883
884 /**
885 * allocate SG pages needed
886 */
887 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
888 if (!nsgpgs)
889 return BFA_TRUE;
890
891 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
892 != BFA_STATUS_OK) {
893 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
894 return BFA_FALSE;
895 }
896
897 ioim->nsgpgs = nsgpgs;
898 bfa_ioim_sgpg_setup(ioim);
899
900 return BFA_TRUE;
901}
902
903static void
904bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
905{
906 int sgeid, nsges, i;
907 struct bfi_sge_s *sge;
908 struct bfa_sgpg_s *sgpg;
909 u32 pgcumsz;
910 u64 addr;
911 struct scatterlist *sg;
912 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
913
914 sgeid = BFI_SGE_INLINE;
915 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
916
917 sg = scsi_sglist(cmnd);
918 sg = sg_next(sg);
919
920 do {
921 sge = sgpg->sgpg->sges;
922 nsges = ioim->nsges - sgeid;
923 if (nsges > BFI_SGPG_DATA_SGES)
924 nsges = BFI_SGPG_DATA_SGES;
925
926 pgcumsz = 0;
927 for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
928 addr = bfa_os_sgaddr(sg_dma_address(sg));
929 sge->sga = *(union bfi_addr_u *) &addr;
930 sge->sg_len = sg_dma_len(sg);
931 pgcumsz += sge->sg_len;
932
933 /**
934 * set flags
935 */
936 if (i < (nsges - 1))
937 sge->flags = BFI_SGE_DATA;
938 else if (sgeid < (ioim->nsges - 1))
939 sge->flags = BFI_SGE_DATA_CPL;
940 else
941 sge->flags = BFI_SGE_DATA_LAST;
942 }
943
944 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
945
946 /**
947 * set the link element of each page
948 */
949 if (sgeid == ioim->nsges) {
950 sge->flags = BFI_SGE_PGDLEN;
951 sge->sga.a32.addr_lo = 0;
952 sge->sga.a32.addr_hi = 0;
953 } else {
954 sge->flags = BFI_SGE_LINK;
955 sge->sga = sgpg->sgpg_pa;
956 }
957 sge->sg_len = pgcumsz;
958 } while (sgeid < ioim->nsges);
959}
960
961/**
962 * Send I/O abort request to firmware.
963 */
964static bfa_boolean_t
965bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
966{
967 struct bfi_ioim_abort_req_s *m;
968 enum bfi_ioim_h2i msgop;
969
970 /**
971 * check for room in queue to send request now
972 */
973 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
974 if (!m)
975 return BFA_FALSE;
976
977 /**
978 * build i/o request message next
979 */
980 if (ioim->iosp->abort_explicit)
981 msgop = BFI_IOIM_H2I_IOABORT_REQ;
982 else
983 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
984
985 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
986 m->io_tag = bfa_os_htons(ioim->iotag);
987 m->abort_tag = ++ioim->abort_tag;
988
989 /**
990 * queue I/O message to firmware
991 */
992 bfa_reqq_produce(ioim->bfa, ioim->reqq);
993 return BFA_TRUE;
994}
995
996/**
997 * Call to resume any I/O requests waiting for room in request queue.
998 */
999static void
1000bfa_ioim_qresume(void *cbarg)
1001{
1002 struct bfa_ioim_s *ioim = cbarg;
1003
1004 bfa_fcpim_stats(ioim->fcpim, qresumes);
1005 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
1006}
1007
1008
1009static void
1010bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
1011{
1012 /**
1013 * Move IO from itnim queue to fcpim global queue since itnim will be
1014 * freed.
1015 */
1016 list_del(&ioim->qe);
1017 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1018
1019 if (!ioim->iosp->tskim) {
1020 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
1021 bfa_cb_dequeue(&ioim->hcb_qe);
1022 list_del(&ioim->qe);
1023 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
1024 }
1025 bfa_itnim_iodone(ioim->itnim);
1026 } else
1027 bfa_tskim_iodone(ioim->iosp->tskim);
1028}
1029
1030/**
1031 * or after the link comes back.
1032 */
1033void
1034bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
1035{
1036 /**
1037 * If path tov timer expired, failback with PATHTOV status - these
1038 * IO requests are not normally retried by IO stack.
1039 *
1040 * Otherwise device cameback online and fail it with normal failed
1041 * status so that IO stack retries these failed IO requests.
1042 */
1043 if (iotov)
1044 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
1045 else
1046 ioim->io_cbfn = __bfa_cb_ioim_failed;
1047
1048 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1049
1050 /**
1051 * Move IO to fcpim global queue since itnim will be
1052 * freed.
1053 */
1054 list_del(&ioim->qe);
1055 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1056}
1057
1058
1059
1060/**
1061 * bfa_ioim_friend
1062 */
1063
1064/**
1065 * Memory allocation and initialization.
1066 */
1067void
1068bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1069{
1070 struct bfa_ioim_s *ioim;
1071 struct bfa_ioim_sp_s *iosp;
1072 u16 i;
1073 u8 *snsinfo;
1074 u32 snsbufsz;
1075
1076 /**
1077 * claim memory first
1078 */
1079 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
1080 fcpim->ioim_arr = ioim;
1081 bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
1082
1083 iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
1084 fcpim->ioim_sp_arr = iosp;
1085 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
1086
1087 /**
1088 * Claim DMA memory for per IO sense data.
1089 */
1090 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
1091 fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
1092 bfa_meminfo_dma_phys(minfo) += snsbufsz;
1093
1094 fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
1095 bfa_meminfo_dma_virt(minfo) += snsbufsz;
1096 snsinfo = fcpim->snsbase.kva;
1097 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
1098
1099 /**
1100 * Initialize ioim free queues
1101 */
1102 INIT_LIST_HEAD(&fcpim->ioim_free_q);
1103 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
1104 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
1105
1106 for (i = 0; i < fcpim->num_ioim_reqs;
1107 i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
1108 /*
1109 * initialize IOIM
1110 */
1111 bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
1112 ioim->iotag = i;
1113 ioim->bfa = fcpim->bfa;
1114 ioim->fcpim = fcpim;
1115 ioim->iosp = iosp;
1116 iosp->snsinfo = snsinfo;
1117 INIT_LIST_HEAD(&ioim->sgpg_q);
1118 bfa_reqq_winit(&ioim->iosp->reqq_wait,
1119 bfa_ioim_qresume, ioim);
1120 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
1121 bfa_ioim_sgpg_alloced, ioim);
1122 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1123
1124 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
1125 }
1126}
1127
1128/**
1129 * Driver detach time call.
1130 */
1131void
1132bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
1133{
1134}
1135
/**
 * IO completion interrupt handler.
 *
 * Looks up the IO from the firmware response tag, maps the firmware
 * completion status to an IO state-machine event, and delivers it.
 *
 * @param[in] bfa - BFA instance
 * @param[in] m  - firmware message, actually a struct bfi_ioim_rsp_s
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* stash the raw response for active IOs; the completion path
	 * reads it back after the state transition */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	/*
	 * Map firmware status to a state-machine event.  reuse_io_tag
	 * distinguishes "tag stays with firmware" (COMP) from "tag fully
	 * released" (DONE).
	 */
	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		/* timeouts are folded into the aborted status */
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		/* stale abort completion for an earlier abort request --
		 * drop it without delivering an event */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		/* unknown status: asserts, and if asserts are compiled
		 * out the initial BFA_IOIM_SM_COMP event is delivered */
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
1218
/**
 * Fast-path interrupt handler for IOs that completed with good status;
 * delivers BFA_IOIM_SM_COMP_GOOD without inspecting the response body.
 *
 * @param[in] bfa - BFA instance
 * @param[in] m  - firmware message, actually a struct bfi_ioim_rsp_s
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
1235
1236/**
1237 * Called by itnim to clean up IO while going offline.
1238 */
1239void
1240bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
1241{
1242 bfa_trc(ioim->bfa, ioim->iotag);
1243 bfa_fcpim_stats(ioim->fcpim, io_cleanups);
1244
1245 ioim->iosp->tskim = NULL;
1246 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
1247}
1248
/**
 * Clean up an IO on behalf of a task-management command; the tskim is
 * recorded so it can be notified when this IO's cleanup completes.
 *
 * @param[in] ioim  - IO being cleaned up
 * @param[in] tskim - task management command driving the cleanup
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
1258
1259/**
1260 * IOC failure handling.
1261 */
1262void
1263bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
1264{
1265 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
1266}
1267
1268/**
1269 * IO offline TOV popped. Fail the pending IO.
1270 */
1271void
1272bfa_ioim_tov(struct bfa_ioim_s *ioim)
1273{
1274 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
1275}
1276
1277
1278
1279/**
1280 * bfa_ioim_api
1281 */
1282
1283/**
1284 * Allocate IOIM resource for initiator mode I/O request.
1285 */
1286struct bfa_ioim_s *
1287bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
1288 struct bfa_itnim_s *itnim, u16 nsges)
1289{
1290 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1291 struct bfa_ioim_s *ioim;
1292
1293 /**
1294 * alocate IOIM resource
1295 */
1296 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
1297 if (!ioim) {
1298 bfa_fcpim_stats(fcpim, no_iotags);
1299 return NULL;
1300 }
1301
1302 ioim->dio = dio;
1303 ioim->itnim = itnim;
1304 ioim->nsges = nsges;
1305 ioim->nsgpgs = 0;
1306
1307 bfa_stats(fcpim, total_ios);
1308 bfa_stats(itnim, ios);
1309 fcpim->ios_active++;
1310
1311 list_add_tail(&ioim->qe, &itnim->io_q);
1312 bfa_trc_fp(ioim->bfa, ioim->iotag);
1313
1314 return ioim;
1315}
1316
/**
 * Release an IO back to the module free queue.
 *
 * The IO must already be back in the uninit state.  Any scatter/gather
 * pages still held are returned to the sgpg pool first, then the IO is
 * moved from whatever queue it is on to ioim_free_q.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	/* inline-SGE IOs never queue sg pages */
	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		   || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
1337
1338void
1339bfa_ioim_start(struct bfa_ioim_s *ioim)
1340{
1341 bfa_trc_fp(ioim->bfa, ioim->iotag);
1342
1343 /**
1344 * Obtain the queue over which this request has to be issued
1345 */
1346 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
1347 bfa_cb_ioim_get_reqq(ioim->dio) :
1348 bfa_itnim_get_reqq(ioim);
1349
1350 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
1351}
1352
1353/**
1354 * Driver I/O abort request.
1355 */
1356void
1357bfa_ioim_abort(struct bfa_ioim_s *ioim)
1358{
1359 bfa_trc(ioim->bfa, ioim->iotag);
1360 bfa_fcpim_stats(ioim->fcpim, io_aborts);
1361 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
1362}
1363
1364
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
deleted file mode 100644
index a914ff255135..000000000000
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ /dev/null
@@ -1,1088 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_fcpim.h>
20#include "bfa_fcpim_priv.h"
21
22BFA_TRC_FILE(HAL, ITNIM);
23
/* Look up an itnim by rport tag; the tag is masked by num_itnims - 1
 * (assumes num_itnims is a power of two -- TODO confirm at config time). */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))

/* Add an itnim to the module itnim queue. */
#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
/* Remove an itnim from the module queue; all of its IO/cleanup/pending
 * queues must already be empty. */
#define bfa_fcpim_delitn(__itnim) do { \
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	list_del(&(__itnim)->qe); \
	bfa_assert(list_empty(&(__itnim)->io_q)); \
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
	bfa_assert(list_empty(&(__itnim)->pending_q)); \
} while (0)

/* Notify online: synchronously when called from FCS context, otherwise
 * deferred through the hcb callback queue. */
#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

/* Notify offline: same sync/deferred split as bfa_itnim_online_cb. */
#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

/* Notify second-level error recovery: same sync/deferred split. */
#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)
63
64/*
65 * forward declarations
66 */
67static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
68static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
69static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
70static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
71static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
72static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
73static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
74static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
75static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
76static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
77static void bfa_itnim_iotov(void *itnim_arg);
78static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
79static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
80static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
81
82/**
83 * bfa_itnim_sm BFA itnim state machine
84 */
85
86
87enum bfa_itnim_event {
88 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
89 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
90 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
91 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
92 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
93 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
94 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
95 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
96 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
97};
98
99static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
100 enum bfa_itnim_event event);
101static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
102 enum bfa_itnim_event event);
103static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
104 enum bfa_itnim_event event);
105static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
106 enum bfa_itnim_event event);
107static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
108 enum bfa_itnim_event event);
109static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
110 enum bfa_itnim_event event);
111static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
112 enum bfa_itnim_event event);
113static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
114 enum bfa_itnim_event event);
115static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
116 enum bfa_itnim_event event);
117static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
118 enum bfa_itnim_event event);
119static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
120 enum bfa_itnim_event event);
121static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
122 enum bfa_itnim_event event);
123static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
124 enum bfa_itnim_event event);
125static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
126 enum bfa_itnim_event event);
127static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
128 enum bfa_itnim_event event);
129
130/**
131 * Beginning/unallocated state - no events expected.
132 */
133static void
134bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
135{
136 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
137 bfa_trc(itnim->bfa, event);
138
139 switch (event) {
140 case BFA_ITNIM_SM_CREATE:
141 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
142 itnim->is_online = BFA_FALSE;
143 bfa_fcpim_additn(itnim);
144 break;
145
146 default:
147 bfa_sm_fault(itnim->bfa, event);
148 }
149}
150
151/**
152 * Beginning state, only online event expected.
153 */
154static void
155bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
156{
157 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
158 bfa_trc(itnim->bfa, event);
159
160 switch (event) {
161 case BFA_ITNIM_SM_ONLINE:
162 if (bfa_itnim_send_fwcreate(itnim))
163 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
164 else
165 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
166 break;
167
168 case BFA_ITNIM_SM_DELETE:
169 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
170 bfa_fcpim_delitn(itnim);
171 break;
172
173 case BFA_ITNIM_SM_HWFAIL:
174 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
175 break;
176
177 default:
178 bfa_sm_fault(itnim->bfa, event);
179 }
180}
181
182/**
183 * Waiting for itnim create response from firmware.
184 */
185static void
186bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
187{
188 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
189 bfa_trc(itnim->bfa, event);
190
191 switch (event) {
192 case BFA_ITNIM_SM_FWRSP:
193 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
194 itnim->is_online = BFA_TRUE;
195 bfa_itnim_iotov_online(itnim);
196 bfa_itnim_online_cb(itnim);
197 break;
198
199 case BFA_ITNIM_SM_DELETE:
200 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
201 break;
202
203 case BFA_ITNIM_SM_OFFLINE:
204 if (bfa_itnim_send_fwdelete(itnim))
205 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
206 else
207 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
208 break;
209
210 case BFA_ITNIM_SM_HWFAIL:
211 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
212 break;
213
214 default:
215 bfa_sm_fault(itnim->bfa, event);
216 }
217}
218
/**
 * Request queue was full when the firmware create was attempted;
 * waiting for queue space.  Every exit path other than QRESUME must
 * cancel the reqq wait element.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
253
254/**
255 * Waiting for itnim create response from firmware, a delete is pending.
256 */
257static void
258bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
259 enum bfa_itnim_event event)
260{
261 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
262 bfa_trc(itnim->bfa, event);
263
264 switch (event) {
265 case BFA_ITNIM_SM_FWRSP:
266 if (bfa_itnim_send_fwdelete(itnim))
267 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
268 else
269 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
270 break;
271
272 case BFA_ITNIM_SM_HWFAIL:
273 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
274 bfa_fcpim_delitn(itnim);
275 break;
276
277 default:
278 bfa_sm_fault(itnim->bfa, event);
279 }
280}
281
282/**
283 * Online state - normal parking state.
284 */
285static void
286bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
287{
288 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
289 bfa_trc(itnim->bfa, event);
290
291 switch (event) {
292 case BFA_ITNIM_SM_OFFLINE:
293 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
294 itnim->is_online = BFA_FALSE;
295 bfa_itnim_iotov_start(itnim);
296 bfa_itnim_cleanup(itnim);
297 break;
298
299 case BFA_ITNIM_SM_DELETE:
300 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
301 itnim->is_online = BFA_FALSE;
302 bfa_itnim_cleanup(itnim);
303 break;
304
305 case BFA_ITNIM_SM_SLER:
306 bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
307 itnim->is_online = BFA_FALSE;
308 bfa_itnim_iotov_start(itnim);
309 bfa_itnim_sler_cb(itnim);
310 break;
311
312 case BFA_ITNIM_SM_HWFAIL:
313 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
314 itnim->is_online = BFA_FALSE;
315 bfa_itnim_iotov_start(itnim);
316 bfa_itnim_iocdisable_cleanup(itnim);
317 break;
318
319 default:
320 bfa_sm_fault(itnim->bfa, event);
321 }
322}
323
324/**
325 * Second level error recovery need.
326 */
327static void
328bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
329{
330 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
331 bfa_trc(itnim->bfa, event);
332
333 switch (event) {
334 case BFA_ITNIM_SM_OFFLINE:
335 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
336 bfa_itnim_cleanup(itnim);
337 break;
338
339 case BFA_ITNIM_SM_DELETE:
340 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
341 bfa_itnim_cleanup(itnim);
342 bfa_itnim_iotov_delete(itnim);
343 break;
344
345 case BFA_ITNIM_SM_HWFAIL:
346 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
347 bfa_itnim_iocdisable_cleanup(itnim);
348 break;
349
350 default:
351 bfa_sm_fault(itnim->bfa, event);
352 }
353}
354
355/**
356 * Going offline. Waiting for active IO cleanup.
357 */
358static void
359bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
360 enum bfa_itnim_event event)
361{
362 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
363 bfa_trc(itnim->bfa, event);
364
365 switch (event) {
366 case BFA_ITNIM_SM_CLEANUP:
367 if (bfa_itnim_send_fwdelete(itnim))
368 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
369 else
370 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
371 break;
372
373 case BFA_ITNIM_SM_DELETE:
374 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
375 bfa_itnim_iotov_delete(itnim);
376 break;
377
378 case BFA_ITNIM_SM_HWFAIL:
379 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
380 bfa_itnim_iocdisable_cleanup(itnim);
381 bfa_itnim_offline_cb(itnim);
382 break;
383
384 case BFA_ITNIM_SM_SLER:
385 break;
386
387 default:
388 bfa_sm_fault(itnim->bfa, event);
389 }
390}
391
392/**
393 * Deleting itnim. Waiting for active IO cleanup.
394 */
395static void
396bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
397 enum bfa_itnim_event event)
398{
399 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
400 bfa_trc(itnim->bfa, event);
401
402 switch (event) {
403 case BFA_ITNIM_SM_CLEANUP:
404 if (bfa_itnim_send_fwdelete(itnim))
405 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
406 else
407 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
408 break;
409
410 case BFA_ITNIM_SM_HWFAIL:
411 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
412 bfa_itnim_iocdisable_cleanup(itnim);
413 break;
414
415 default:
416 bfa_sm_fault(itnim->bfa, event);
417 }
418}
419
420/**
421 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
422 */
423static void
424bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
425{
426 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
427 bfa_trc(itnim->bfa, event);
428
429 switch (event) {
430 case BFA_ITNIM_SM_FWRSP:
431 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
432 bfa_itnim_offline_cb(itnim);
433 break;
434
435 case BFA_ITNIM_SM_DELETE:
436 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
437 break;
438
439 case BFA_ITNIM_SM_HWFAIL:
440 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
441 bfa_itnim_offline_cb(itnim);
442 break;
443
444 default:
445 bfa_sm_fault(itnim->bfa, event);
446 }
447}
448
/**
 * Request queue was full when the firmware delete was attempted;
 * waiting for queue space.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
476
477/**
478 * Offline state.
479 */
480static void
481bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
482{
483 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
484 bfa_trc(itnim->bfa, event);
485
486 switch (event) {
487 case BFA_ITNIM_SM_DELETE:
488 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
489 bfa_itnim_iotov_delete(itnim);
490 bfa_fcpim_delitn(itnim);
491 break;
492
493 case BFA_ITNIM_SM_ONLINE:
494 if (bfa_itnim_send_fwcreate(itnim))
495 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
496 else
497 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
498 break;
499
500 case BFA_ITNIM_SM_HWFAIL:
501 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
502 break;
503
504 default:
505 bfa_sm_fault(itnim->bfa, event);
506 }
507}
508
509/**
510 * IOC h/w failed state.
511 */
512static void
513bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
514 enum bfa_itnim_event event)
515{
516 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
517 bfa_trc(itnim->bfa, event);
518
519 switch (event) {
520 case BFA_ITNIM_SM_DELETE:
521 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
522 bfa_itnim_iotov_delete(itnim);
523 bfa_fcpim_delitn(itnim);
524 break;
525
526 case BFA_ITNIM_SM_OFFLINE:
527 bfa_itnim_offline_cb(itnim);
528 break;
529
530 case BFA_ITNIM_SM_ONLINE:
531 if (bfa_itnim_send_fwcreate(itnim))
532 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
533 else
534 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
535 break;
536
537 case BFA_ITNIM_SM_HWFAIL:
538 break;
539
540 default:
541 bfa_sm_fault(itnim->bfa, event);
542 }
543}
544
545/**
546 * Itnim is deleted, waiting for firmware response to delete.
547 */
548static void
549bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
550{
551 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
552 bfa_trc(itnim->bfa, event);
553
554 switch (event) {
555 case BFA_ITNIM_SM_FWRSP:
556 case BFA_ITNIM_SM_HWFAIL:
557 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
558 bfa_fcpim_delitn(itnim);
559 break;
560
561 default:
562 bfa_sm_fault(itnim->bfa, event);
563 }
564}
565
/**
 * Deleting, but the firmware delete request is still waiting for
 * request-queue space.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
589
590
591
592/**
593 * bfa_itnim_private
594 */
595
596/**
597 * Initiate cleanup of all IOs on an IOC failure.
598 */
599static void
600bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
601{
602 struct bfa_tskim_s *tskim;
603 struct bfa_ioim_s *ioim;
604 struct list_head *qe, *qen;
605
606 list_for_each_safe(qe, qen, &itnim->tsk_q) {
607 tskim = (struct bfa_tskim_s *) qe;
608 bfa_tskim_iocdisable(tskim);
609 }
610
611 list_for_each_safe(qe, qen, &itnim->io_q) {
612 ioim = (struct bfa_ioim_s *) qe;
613 bfa_ioim_iocdisable(ioim);
614 }
615
616 /**
617 * For IO request in pending queue, we pretend an early timeout.
618 */
619 list_for_each_safe(qe, qen, &itnim->pending_q) {
620 ioim = (struct bfa_ioim_s *) qe;
621 bfa_ioim_tov(ioim);
622 }
623
624 list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
625 ioim = (struct bfa_ioim_s *) qe;
626 bfa_ioim_iocdisable(ioim);
627 }
628}
629
630/**
631 * IO cleanup completion
632 */
633static void
634bfa_itnim_cleanp_comp(void *itnim_cbarg)
635{
636 struct bfa_itnim_s *itnim = itnim_cbarg;
637
638 bfa_stats(itnim, cleanup_comps);
639 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
640}
641
642/**
643 * Initiate cleanup of all IOs.
644 */
645static void
646bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
647{
648 struct bfa_ioim_s *ioim;
649 struct bfa_tskim_s *tskim;
650 struct list_head *qe, *qen;
651
652 bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
653
654 list_for_each_safe(qe, qen, &itnim->io_q) {
655 ioim = (struct bfa_ioim_s *) qe;
656
657 /**
658 * Move IO to a cleanup queue from active queue so that a later
659 * TM will not pickup this IO.
660 */
661 list_del(&ioim->qe);
662 list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
663
664 bfa_wc_up(&itnim->wc);
665 bfa_ioim_cleanup(ioim);
666 }
667
668 list_for_each_safe(qe, qen, &itnim->tsk_q) {
669 tskim = (struct bfa_tskim_s *) qe;
670 bfa_wc_up(&itnim->wc);
671 bfa_tskim_cleanup(tskim);
672 }
673
674 bfa_wc_wait(&itnim->wc);
675}
676
677static void
678__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
679{
680 struct bfa_itnim_s *itnim = cbarg;
681
682 if (complete)
683 bfa_cb_itnim_online(itnim->ditn);
684}
685
686static void
687__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
688{
689 struct bfa_itnim_s *itnim = cbarg;
690
691 if (complete)
692 bfa_cb_itnim_offline(itnim->ditn);
693}
694
695static void
696__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
697{
698 struct bfa_itnim_s *itnim = cbarg;
699
700 if (complete)
701 bfa_cb_itnim_sler(itnim->ditn);
702}
703
704/**
705 * Call to resume any I/O requests waiting for room in request queue.
706 */
707static void
708bfa_itnim_qresume(void *cbarg)
709{
710 struct bfa_itnim_s *itnim = cbarg;
711
712 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
713}
714
715
716
717
718/**
719 * bfa_itnim_public
720 */
721
/**
 * One IO cleanup finished: drop a cleanup wait-counter reference.
 */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
727
/**
 * One TM cleanup finished: drop a cleanup wait-counter reference.
 */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
733
/**
 * Report kernel-virtual memory needed by this module: one itnim per
 * configured rport.  dm_len is unused -- itnims need no DMA memory.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/**
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
743
/**
 * Module attach: carve the itnim array out of the meminfo region and
 * initialize every itnim in the uninit state.  The itnim at index i is
 * permanently paired with the rport of the same tag.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* advance the meminfo cursor past the claimed array */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
775
/**
 * IOC failure: deliver the h/w failure event to the itnim state machine.
 */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
782
/**
 * Build and queue the firmware itnim-create request.
 *
 * @return BFA_TRUE if the message was queued, BFA_FALSE if the request
 *	   queue was full (a reqq wait element is then armed, and
 *	   bfa_itnim_qresume() fires when space frees up).
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	/* msg_no is bumped on every attempt, including qfull retries */
	itnim->msg_no++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
812
/**
 * Build and queue the firmware itnim-delete request.
 *
 * @return BFA_TRUE if the message was queued, BFA_FALSE if the request
 *	   queue was full (reqq wait element armed for a later retry).
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
837
838/**
839 * Cleanup all pending failed inflight requests.
840 */
841static void
842bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
843{
844 struct bfa_ioim_s *ioim;
845 struct list_head *qe, *qen;
846
847 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
848 ioim = (struct bfa_ioim_s *)qe;
849 bfa_ioim_delayed_comp(ioim, iotov);
850 }
851}
852
853/**
854 * Start all pending IO requests.
855 */
856static void
857bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
858{
859 struct bfa_ioim_s *ioim;
860
861 bfa_itnim_iotov_stop(itnim);
862
863 /**
864 * Abort all inflight IO requests in the queue
865 */
866 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
867
868 /**
869 * Start all pending IO requests.
870 */
871 while (!list_empty(&itnim->pending_q)) {
872 bfa_q_deq(&itnim->pending_q, &ioim);
873 list_add_tail(&ioim->qe, &itnim->io_q);
874 bfa_ioim_start(ioim);
875 }
876}
877
878/**
879 * Fail all pending IO requests
880 */
881static void
882bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
883{
884 struct bfa_ioim_s *ioim;
885
886 /**
887 * Fail all inflight IO requests in the queue
888 */
889 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
890
891 /**
892 * Fail any pending IO requests.
893 */
894 while (!list_empty(&itnim->pending_q)) {
895 bfa_q_deq(&itnim->pending_q, &ioim);
896 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
897 bfa_ioim_tov(ioim);
898 }
899}
900
901/**
902 * IO TOV timer callback. Fail any pending IO requests.
903 */
904static void
905bfa_itnim_iotov(void *itnim_arg)
906{
907 struct bfa_itnim_s *itnim = itnim_arg;
908
909 itnim->iotov_active = BFA_FALSE;
910
911 bfa_cb_itnim_tov_begin(itnim->ditn);
912 bfa_itnim_iotov_cleanup(itnim);
913 bfa_cb_itnim_tov(itnim->ditn);
914}
915
916/**
917 * Start IO TOV timer for failing back pending IO requests in offline state.
918 */
919static void
920bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
921{
922 if (itnim->fcpim->path_tov > 0) {
923
924 itnim->iotov_active = BFA_TRUE;
925 bfa_assert(bfa_itnim_hold_io(itnim));
926 bfa_timer_start(itnim->bfa, &itnim->timer,
927 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
928 }
929}
930
931/**
932 * Stop IO TOV timer.
933 */
934static void
935bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
936{
937 if (itnim->iotov_active) {
938 itnim->iotov_active = BFA_FALSE;
939 bfa_timer_stop(&itnim->timer);
940 }
941}
942
943/**
944 * Stop IO TOV timer.
945 */
946static void
947bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
948{
949 bfa_boolean_t pathtov_active = BFA_FALSE;
950
951 if (itnim->iotov_active)
952 pathtov_active = BFA_TRUE;
953
954 bfa_itnim_iotov_stop(itnim);
955 if (pathtov_active)
956 bfa_cb_itnim_tov_begin(itnim->ditn);
957 bfa_itnim_iotov_cleanup(itnim);
958 if (pathtov_active)
959 bfa_cb_itnim_tov(itnim->ditn);
960}
961
962
963
964/**
965 * bfa_itnim_public
966 */
967
968/**
969 * Itnim interrupt processing.
970 */
971void
972bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
973{
974 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
975 union bfi_itnim_i2h_msg_u msg;
976 struct bfa_itnim_s *itnim;
977
978 bfa_trc(bfa, m->mhdr.msg_id);
979
980 msg.msg = m;
981
982 switch (m->mhdr.msg_id) {
983 case BFI_ITNIM_I2H_CREATE_RSP:
984 itnim = BFA_ITNIM_FROM_TAG(fcpim,
985 msg.create_rsp->bfa_handle);
986 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
987 bfa_stats(itnim, create_comps);
988 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
989 break;
990
991 case BFI_ITNIM_I2H_DELETE_RSP:
992 itnim = BFA_ITNIM_FROM_TAG(fcpim,
993 msg.delete_rsp->bfa_handle);
994 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
995 bfa_stats(itnim, delete_comps);
996 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
997 break;
998
999 case BFI_ITNIM_I2H_SLER_EVENT:
1000 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1001 msg.sler_event->bfa_handle);
1002 bfa_stats(itnim, sler_events);
1003 bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
1004 break;
1005
1006 default:
1007 bfa_trc(bfa, m->mhdr.msg_id);
1008 bfa_assert(0);
1009 }
1010}
1011
1012
1013
1014/**
1015 * bfa_itnim_api
1016 */
1017
/**
 * Create an itnim for the given rport.
 *
 * The itnim slot is fixed by the rport tag (pre-allocated at attach
 * time); this binds the driver handle and kicks off the create event.
 *
 * @param[in] bfa   - BFA instance
 * @param[in] rport - remote port the itnim represents
 * @param[in] ditn  - opaque driver itnim handle for callbacks
 *
 * @return the itnim paired with this rport
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1034
/**
 * Delete an itnim: deliver the delete event to its state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1041
/**
 * Bring an itnim online.
 *
 * @param[in] seq_rec - sequence-recovery setting carried into the
 *		        firmware create request
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1049
/**
 * Take an itnim offline: deliver the offline event to its state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1056
1057/**
1058 * Return true if itnim is considered offline for holding off IO request.
1059 * IO is not held if itnim is being deleted.
1060 */
1061bfa_boolean_t
1062bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1063{
1064 return
1065 itnim->fcpim->path_tov && itnim->iotov_active &&
1066 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1067 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1068 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1069 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1070 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1071 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
1072 ;
1073}
1074
/**
 * Copy the itnim HAL statistics into the caller-provided buffer.
 */
void
bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
	struct bfa_itnim_hal_stats_s *stats)
{
	*stats = itnim->stats;
}
1081
/**
 * Zero the itnim HAL statistics.
 */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
}
1087
1088
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c
deleted file mode 100644
index e7514016c9c6..000000000000
--- a/drivers/scsi/bfa/bfa_log.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.c BFA log library
20 */
21
22#include <bfa_os_inc.h>
23#include <cs/bfa_log.h>
24
25/*
26 * global log info structure
27 */
28struct bfa_log_info_s {
29 u32 start_idx; /* start index for a module */
30 u32 total_count; /* total count for a module */
31 enum bfa_log_severity level; /* global log level */
32 bfa_log_cb_t cbfn; /* callback function */
33};
34
35static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1];
36static u32 bfa_log_msg_total_count;
37static int bfa_log_initialized;
38
39static char *bfa_log_severity[] =
40 { "[none]", "[critical]", "[error]", "[warn]", "[info]", "" };
41
42/**
43 * BFA log library initialization
44 *
45 * The log library initialization includes the following,
46 * - set log instance name and callback function
47 * - read the message array generated from xml files
48 * - calculate start index for each module
49 * - calculate message count for each module
50 * - perform error checking
51 *
52 * @param[in] log_mod - log module info
53 * @param[in] instance_name - instance name
54 * @param[in] cbfn - callback function
55 *
56 * It return 0 on success, or -1 on failure
57 */
58int
59bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name,
60 bfa_log_cb_t cbfn)
61{
62 struct bfa_log_msgdef_s *msg;
63 u32 pre_mod_id = 0;
64 u32 cur_mod_id = 0;
65 u32 i, pre_idx, idx, msg_id;
66
67 /*
68 * set instance name
69 */
70 if (log_mod) {
71 strncpy(log_mod->instance_info, instance_name,
72 sizeof(log_mod->instance_info));
73 log_mod->cbfn = cbfn;
74 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++)
75 log_mod->log_level[i] = BFA_LOG_WARNING;
76 }
77
78 if (bfa_log_initialized)
79 return 0;
80
81 for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) {
82 bfa_log_info[i].start_idx = 0;
83 bfa_log_info[i].total_count = 0;
84 bfa_log_info[i].level = BFA_LOG_WARNING;
85 bfa_log_info[i].cbfn = cbfn;
86 }
87
88 pre_idx = 0;
89 idx = 0;
90 msg = bfa_log_msg_array;
91 msg_id = BFA_LOG_GET_MSG_ID(msg);
92 pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
93 while (msg_id != 0) {
94 cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
95
96 if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) {
97 cbfn(log_mod, msg_id,
98 "%s%s log: module id %u out of range\n",
99 BFA_LOG_CAT_NAME,
100 bfa_log_severity[BFA_LOG_ERROR],
101 cur_mod_id);
102 return -1;
103 }
104
105 if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) {
106 cbfn(log_mod, msg_id,
107 "%s%s log: module id %u out of range\n",
108 BFA_LOG_CAT_NAME,
109 bfa_log_severity[BFA_LOG_ERROR],
110 pre_mod_id);
111 return -1;
112 }
113
114 if (cur_mod_id != pre_mod_id) {
115 bfa_log_info[pre_mod_id].start_idx = pre_idx;
116 bfa_log_info[pre_mod_id].total_count = idx - pre_idx;
117 pre_mod_id = cur_mod_id;
118 pre_idx = idx;
119 }
120
121 idx++;
122 msg++;
123 msg_id = BFA_LOG_GET_MSG_ID(msg);
124 }
125
126 bfa_log_info[cur_mod_id].start_idx = pre_idx;
127 bfa_log_info[cur_mod_id].total_count = idx - pre_idx;
128 bfa_log_msg_total_count = idx;
129
130 cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n",
131 BFA_LOG_CAT_NAME,
132 bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count);
133
134 bfa_log_initialized = 1;
135
136 return 0;
137}
138
139/**
140 * BFA log set log level for a module
141 *
142 * @param[in] log_mod - log module info
143 * @param[in] mod_id - module id
144 * @param[in] log_level - log severity level
145 *
146 * It return BFA_STATUS_OK on success, or > 0 on failure
147 */
148bfa_status_t
149bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id,
150 enum bfa_log_severity log_level)
151{
152 if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
153 return BFA_STATUS_EINVAL;
154
155 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
156 return BFA_STATUS_EINVAL;
157
158 if (log_mod)
159 log_mod->log_level[mod_id] = log_level;
160 else
161 bfa_log_info[mod_id].level = log_level;
162
163 return BFA_STATUS_OK;
164}
165
166/**
167 * BFA log set log level for all modules
168 *
169 * @param[in] log_mod - log module info
170 * @param[in] log_level - log severity level
171 *
172 * It return BFA_STATUS_OK on success, or > 0 on failure
173 */
174bfa_status_t
175bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
176 enum bfa_log_severity log_level)
177{
178 int mod_id = BFA_LOG_UNUSED_ID + 1;
179
180 if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
181 return BFA_STATUS_EINVAL;
182
183 if (log_mod) {
184 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
185 log_mod->log_level[mod_id] = log_level;
186 } else {
187 for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
188 bfa_log_info[mod_id].level = log_level;
189 }
190
191 return BFA_STATUS_OK;
192}
193
194/**
195 * BFA log set log level for all aen sub-modules
196 *
197 * @param[in] log_mod - log module info
198 * @param[in] log_level - log severity level
199 *
200 * It return BFA_STATUS_OK on success, or > 0 on failure
201 */
202bfa_status_t
203bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
204 enum bfa_log_severity log_level)
205{
206 int mod_id = BFA_LOG_AEN_MIN + 1;
207
208 if (log_mod) {
209 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
210 log_mod->log_level[mod_id] = log_level;
211 } else {
212 for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
213 bfa_log_info[mod_id].level = log_level;
214 }
215
216 return BFA_STATUS_OK;
217}
218
/**
 * BFA log get log level for a module
 *
 * Returns the per-instance level when log_mod is given, otherwise the
 * global level for the module.
 *
 * @param[in] log_mod - log module info (may be NULL)
 * @param[in] mod_id - module id
 *
 * It returns log level or BFA_LOG_INVALID on error
 */
enum bfa_log_severity
bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
{
	if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
		return BFA_LOG_INVALID;

	if (log_mod)
		return log_mod->log_level[mod_id];
	else
		return bfa_log_info[mod_id].level;
}
238
/**
 * Return the severity encoded in the message definition for msg_id, or
 * BFA_LOG_INVALID if the library is uninitialized, the module id is out
 * of range, or msg_id does not match the generated message array
 * (inconsistencies are reported through the module's callback).
 */
enum bfa_log_severity
bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id)
{
	struct bfa_log_msgdef_s *msg;
	u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
	/* msg indices are 1-based in msg_id; convert to array offset */
	u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;

	if (!bfa_log_initialized)
		return BFA_LOG_INVALID;

	if (mod > BFA_LOG_MODULE_ID_MAX)
		return BFA_LOG_INVALID;

	if (idx >= bfa_log_info[mod].total_count) {
		bfa_log_info[mod].cbfn(log_mod, msg_id,
			"%s%s log: inconsistent idx %u vs. total count %u\n",
			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
			bfa_log_info[mod].total_count);
		return BFA_LOG_INVALID;
	}

	msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
	if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
		/* cross-check the array entry really carries this msg_id */
		bfa_log_info[mod].cbfn(log_mod, msg_id,
			"%s%s log: inconsistent msg id %u array msg id %u\n",
			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
			msg_id, BFA_LOG_GET_MSG_ID(msg));
		return BFA_LOG_INVALID;
	}

	return BFA_LOG_GET_SEVERITY(msg);
}
271
272/**
273 * BFA log message handling
274 *
275 * BFA log message handling finds the message based on message id and prints
276 * out the message based on its format and arguments. It also does prefix
277 * the severity etc.
278 *
279 * @param[in] log_mod - log module info
280 * @param[in] msg_id - message id
281 * @param[in] ... - message arguments
282 *
283 * It return 0 on success, or -1 on errors
284 */
285int
286bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...)
287{
288 va_list ap;
289 char buf[256];
290 struct bfa_log_msgdef_s *msg;
291 int log_level;
292 u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
293 u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
294
295 if (!bfa_log_initialized)
296 return -1;
297
298 if (mod > BFA_LOG_MODULE_ID_MAX)
299 return -1;
300
301 if (idx >= bfa_log_info[mod].total_count) {
302 bfa_log_info[mod].
303 cbfn
304 (log_mod, msg_id,
305 "%s%s log: inconsistent idx %u vs. total count %u\n",
306 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
307 bfa_log_info[mod].total_count);
308 return -1;
309 }
310
311 msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
312 if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
313 bfa_log_info[mod].
314 cbfn
315 (log_mod, msg_id,
316 "%s%s log: inconsistent msg id %u array msg id %u\n",
317 BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
318 msg_id, BFA_LOG_GET_MSG_ID(msg));
319 return -1;
320 }
321
322 log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level;
323 if ((BFA_LOG_GET_SEVERITY(msg) > log_level) &&
324 (msg->attributes != BFA_LOG_ATTR_NONE))
325 return 0;
326
327 va_start(ap, msg_id);
328 bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap);
329 va_end(ap);
330
331 if (log_mod)
332 log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n",
333 BFA_LOG_CAT_NAME, log_mod->instance_info,
334 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
335 (msg->attributes & BFA_LOG_ATTR_AUDIT)
336 ? " (audit) " : "", msg->msg_value, buf);
337 else
338 bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n",
339 BFA_LOG_CAT_NAME,
340 bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
341 (msg->attributes & BFA_LOG_ATTR_AUDIT) ?
342 " (audit) " : "", msg->msg_value, buf);
343
344 return 0;
345}
346
diff --git a/drivers/scsi/bfa/bfa_log_module.c b/drivers/scsi/bfa/bfa_log_module.c
deleted file mode 100644
index cf577ef7cb97..000000000000
--- a/drivers/scsi/bfa/bfa_log_module.c
+++ /dev/null
@@ -1,537 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <cs/bfa_log.h>
19#include <aen/bfa_aen_adapter.h>
20#include <aen/bfa_aen_audit.h>
21#include <aen/bfa_aen_ethport.h>
22#include <aen/bfa_aen_ioc.h>
23#include <aen/bfa_aen_itnim.h>
24#include <aen/bfa_aen_lport.h>
25#include <aen/bfa_aen_port.h>
26#include <aen/bfa_aen_rport.h>
27#include <log/bfa_log_fcs.h>
28#include <log/bfa_log_hal.h>
29#include <log/bfa_log_linux.h>
30#include <log/bfa_log_wdrv.h>
31
32struct bfa_log_msgdef_s bfa_log_msg_array[] = {
33
34
35/* messages define for BFA_AEN_CAT_ADAPTER Module */
36{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
37 "BFA_AEN_ADAPTER_ADD",
38 "New adapter found: SN = %s, base port WWN = %s.",
39 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
40
41{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
42 BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE",
43 "Adapter removed: SN = %s.",
44 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
45
46
47
48
49/* messages define for BFA_AEN_CAT_AUDIT Module */
50{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
51 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE",
52 "Authentication enabled for base port: WWN = %s.",
53 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
54
55{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
56 BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE",
57 "Authentication disabled for base port: WWN = %s.",
58 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
59
60
61
62
63/* messages define for BFA_AEN_CAT_ETHPORT Module */
64{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
65 "BFA_AEN_ETHPORT_LINKUP",
66 "Base port ethernet linkup: mac = %s.",
67 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
68
69{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
70 "BFA_AEN_ETHPORT_LINKDOWN",
71 "Base port ethernet linkdown: mac = %s.",
72 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
73
74{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
75 "BFA_AEN_ETHPORT_ENABLE",
76 "Base port ethernet interface enabled: mac = %s.",
77 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
78
79{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
80 "BFA_AEN_ETHPORT_DISABLE",
81 "Base port ethernet interface disabled: mac = %s.",
82 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
83
84
85
86
87/* messages define for BFA_AEN_CAT_IOC Module */
88{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
89 "BFA_AEN_IOC_HBGOOD",
90 "Heart Beat of IOC %d is good.",
91 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
92
93{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL,
94 "BFA_AEN_IOC_HBFAIL",
95 "Heart Beat of IOC %d has failed.",
96 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
97
98{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
99 "BFA_AEN_IOC_ENABLE",
100 "IOC %d is enabled.",
101 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
102
103{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
104 "BFA_AEN_IOC_DISABLE",
105 "IOC %d is disabled.",
106 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
107
108{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
109 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH",
110 "Running firmware version is incompatible with the driver version.",
111 (0), 0},
112
113{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
114 BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
115 "Link initialization failed due to firmware configuration read error:"
116 " WWN = %s.",
117 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
118
119{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
120 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
121 "Unsupported switch vendor. Link initialization failed: WWN = %s.",
122 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
123
124{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
125 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
126 "Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
127 (0), 0},
128
129{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
130 BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
131 "Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
132 (0), 0},
133
134
135
136
137/* messages define for BFA_AEN_CAT_ITNIM Module */
138{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
139 "BFA_AEN_ITNIM_ONLINE",
140 "Target (WWN = %s) is online for initiator (WWN = %s).",
141 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
142
143{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
144 "BFA_AEN_ITNIM_OFFLINE",
145 "Target (WWN = %s) offlined by initiator (WWN = %s).",
146 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
147
148{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
149 BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT",
150 "Target (WWN = %s) connectivity lost for initiator (WWN = %s).",
151 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
152
153
154
155
156/* messages define for BFA_AEN_CAT_LPORT Module */
157{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
158 "BFA_AEN_LPORT_NEW",
159 "New logical port created: WWN = %s, Role = %s.",
160 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
161
162{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
163 "BFA_AEN_LPORT_DELETE",
164 "Logical port deleted: WWN = %s, Role = %s.",
165 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
166
167{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
168 "BFA_AEN_LPORT_ONLINE",
169 "Logical port online: WWN = %s, Role = %s.",
170 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
171
172{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
173 "BFA_AEN_LPORT_OFFLINE",
174 "Logical port taken offline: WWN = %s, Role = %s.",
175 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
176
177{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
178 BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT",
179 "Logical port lost fabric connectivity: WWN = %s, Role = %s.",
180 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
181
182{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
183 "BFA_AEN_LPORT_NEW_PROP",
184 "New virtual port created using proprietary interface: WWN = %s, Role = %s.",
185 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
186
187{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
188 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP",
189 "Virtual port deleted using proprietary interface: WWN = %s, Role = %s.",
190 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
191
192{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
193 BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD",
194 "New virtual port created using standard interface: WWN = %s, Role = %s.",
195 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
196
197{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
198 BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD",
199 "Virtual port deleted using standard interface: WWN = %s, Role = %s.",
200 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
201
202{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
203 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN",
204 "Virtual port login failed. Duplicate WWN = %s reported by fabric.",
205 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
206
207{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
208 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX",
209 "Virtual port (WWN = %s) login failed. Max NPIV ports already exist in"
210 " fabric/fport.",
211 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
212
213{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
214 BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN",
215 "Virtual port (WWN = %s) login failed.",
216 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
217
218
219
220
221/* messages define for BFA_AEN_CAT_PORT Module */
222{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE",
223 "Base port online: WWN = %s.",
224 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
225
226{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
227 "BFA_AEN_PORT_OFFLINE",
228 "Base port offline: WWN = %s.",
229 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
230
231{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
232 "BFA_AEN_PORT_RLIR",
233 "RLIR event not supported.",
234 (0), 0},
235
236{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
237 "BFA_AEN_PORT_SFP_INSERT",
238 "New SFP found: WWN/MAC = %s.",
239 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
240
241{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
242 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE",
243 "SFP removed: WWN/MAC = %s.",
244 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
245
246{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
247 "BFA_AEN_PORT_SFP_POM",
248 "SFP POM level to %s: WWN/MAC = %s.",
249 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
250
251{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
252 "BFA_AEN_PORT_ENABLE",
253 "Base port enabled: WWN = %s.",
254 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
255
256{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
257 "BFA_AEN_PORT_DISABLE",
258 "Base port disabled: WWN = %s.",
259 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
260
261{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
262 "BFA_AEN_PORT_AUTH_ON",
263 "Authentication successful for base port: WWN = %s.",
264 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
265
266{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
267 "BFA_AEN_PORT_AUTH_OFF",
268 "Authentication unsuccessful for base port: WWN = %s.",
269 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
270
271{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
272 "BFA_AEN_PORT_DISCONNECT",
273 "Base port (WWN = %s) lost fabric connectivity.",
274 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
275
276{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
277 "BFA_AEN_PORT_QOS_NEG",
278 "QOS negotiation failed for base port: WWN = %s.",
279 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
280
281{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
282 BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE",
283 "Base port WWN = %s, Fabric WWN = %s.",
284 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
285
286{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
287 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR",
288 "SFP access error: WWN/MAC = %s.",
289 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
290
291{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
292 BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT",
293 "Unsupported SFP found: WWN/MAC = %s.",
294 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
295
296
297
298
299/* messages define for BFA_AEN_CAT_RPORT Module */
300{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
301 "BFA_AEN_RPORT_ONLINE",
302 "Remote port (WWN = %s) online for logical port (WWN = %s).",
303 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
304
305{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
306 "BFA_AEN_RPORT_OFFLINE",
307 "Remote port (WWN = %s) offlined by logical port (WWN = %s).",
308 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
309
310{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
311 BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT",
312 "Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).",
313 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
314
315{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
316 "BFA_AEN_RPORT_QOS_PRIO",
317 "QOS priority changed to %s: RPWWN = %s and LPWWN = %s.",
318 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
319 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
320
321{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
322 "BFA_AEN_RPORT_QOS_FLOWID",
323 "QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.",
324 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
325 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
326
327
328
329
330/* messages define for FCS Module */
331{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
332 BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH",
333 "No switched fabric presence is detected.",
334 (0), 0},
335
336{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
337 BFA_LOG_INFO, "FCS_FABRIC_ISOLATED",
338 "Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and"
339 " switch port VF_ID: %04x.",
340 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) |
341 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
342
343
344
345
346/* messages define for HAL Module */
347{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
348 "HAL_ASSERT",
349 "Assertion failure: %s:%d: %s",
350 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
351 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
352
353{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
354 BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE",
355 "Firmware heartbeat failure at %d",
356 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
357
358{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
359 BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID",
360 "Driver configuration %s value %d is invalid. Value should be within"
361 " %d and %d.",
362 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
363 (BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4},
364
365{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
366 "HAL_SM_ASSERT",
367 "SM Assertion failure: %s:%d: event = %d",
368 ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
369 (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
370
371{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
372 BFA_LOG_INFO, "HAL_DRIVER_ERROR",
373 "%s",
374 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
375
376{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
377 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
378 "HAL_DRIVER_CONFIG_ERROR",
379 "%s",
380 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
381
382{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
383 BFA_LOG_INFO, "HAL_MBOX_ERROR",
384 "%s",
385 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
386
387
388
389
390/* messages define for LINUX Module */
391{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
392 BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED",
393 "bfa device at %s claimed.",
394 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
395
396{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
397 BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED",
398 "Hash table initialization failure for the port %s.",
399 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
400
401{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
402 BFA_LOG_INFO, "LINUX_SYSFS_FAILED",
403 "sysfs file creation failure for the port %s.",
404 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
405
406{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
407 BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED",
408 "Memory allocation failed: %s. ",
409 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
410
411{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED,
412 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
413 "LINUX_DRIVER_REGISTRATION_FAILED",
414 "%s. ",
415 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
416
417{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
418 "LINUX_ITNIM_FREE",
419 "scsi%d: FCID: %s WWPN: %s",
420 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
421 (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
422
423{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
424 BFA_LOG_INFO, "LINUX_ITNIM_ONLINE",
425 "Target: %d:0:%d FCID: %s WWPN: %s",
426 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
427 (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
428
429{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
430 BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE",
431 "Target: %d:0:%d FCID: %s WWPN: %s",
432 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
433 (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
434
435{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
436 BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE",
437 "Free scsi%d",
438 ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
439
440{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
441 "LINUX_SCSI_ABORT",
442 "scsi%d: abort cmnd %p, iotag %x",
443 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
444 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
445
446{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
447 BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP",
448 "scsi%d: complete abort 0x%p, iotag 0x%x",
449 ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
450 (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
451
452{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
453 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
454 "LINUX_DRIVER_CONFIG_ERROR",
455 "%s",
456 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
457
458{BFA_LOG_LINUX_BNA_STATE_MACHINE,
459 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
460 "LINUX_BNA_STATE_MACHINE",
461 "%s",
462 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
463
464{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
465 BFA_LOG_INFO, "LINUX_IOC_ERROR",
466 "%s",
467 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
468
469{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
470 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
471 "LINUX_RESOURCE_ALLOC_ERROR",
472 "%s",
473 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
474
475{BFA_LOG_LINUX_RING_BUFFER_ERROR,
476 BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
477 "LINUX_RING_BUFFER_ERROR",
478 "%s",
479 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
480
481{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
482 BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
483 "%s",
484 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
485
486{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
487 BFA_LOG_INFO, "LINUX_DRIVER_INFO",
488 "%s",
489 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
490
491{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
492 BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
493 "%s",
494 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
495
496{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
497 BFA_LOG_INFO, "LINUX_DRIVER_AEN",
498 "%s",
499 ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
500
501
502
503
504/* messages define for WDRV Module */
505{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
506 BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR",
507 "IOC initialization has failed.",
508 (0), 0},
509
510{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
511 BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR",
512 "IOC internal error. ",
513 (0), 0},
514
515{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
516 BFA_LOG_INFO, "WDRV_IOC_START_ERROR",
517 "IOC could not be started. ",
518 (0), 0},
519
520{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
521 BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR",
522 "IOC could not be stopped. ",
523 (0), 0},
524
525{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
526 BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES",
527 "Insufficient memory. ",
528 (0), 0},
529
530{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
531 BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR",
532 "Unable to map the IOC onto the system address space. ",
533 (0), 0},
534
535
536{0, 0, 0, "", "", 0, 0},
537};
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
deleted file mode 100644
index acabb44f092f..000000000000
--- a/drivers/scsi/bfa/bfa_lps.c
+++ /dev/null
@@ -1,892 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfi/bfi_lps.h>
20#include <cs/bfa_debug.h>
21#include <defs/bfa_defs_pci.h>
22
23BFA_TRC_FILE(HAL, LPS);
24BFA_MODULE(lps);
25
26#define BFA_LPS_MIN_LPORTS (1)
27#define BFA_LPS_MAX_LPORTS (256)
28
29/*
30 * Maximum Vports supported per physical port or vf.
31 */
32#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
33#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
34
35/**
36 * forward declarations
37 */
38static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
39 u32 *dm_len);
40static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
41 struct bfa_iocfc_cfg_s *cfg,
42 struct bfa_meminfo_s *meminfo,
43 struct bfa_pcidev_s *pcidev);
44static void bfa_lps_detach(struct bfa_s *bfa);
45static void bfa_lps_start(struct bfa_s *bfa);
46static void bfa_lps_stop(struct bfa_s *bfa);
47static void bfa_lps_iocdisable(struct bfa_s *bfa);
48static void bfa_lps_login_rsp(struct bfa_s *bfa,
49 struct bfi_lps_login_rsp_s *rsp);
50static void bfa_lps_logout_rsp(struct bfa_s *bfa,
51 struct bfi_lps_logout_rsp_s *rsp);
52static void bfa_lps_reqq_resume(void *lps_arg);
53static void bfa_lps_free(struct bfa_lps_s *lps);
54static void bfa_lps_send_login(struct bfa_lps_s *lps);
55static void bfa_lps_send_logout(struct bfa_lps_s *lps);
56static void bfa_lps_login_comp(struct bfa_lps_s *lps);
57static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
58static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
59
60/**
61 * lps_pvt BFA LPS private functions
62 */
63
64enum bfa_lps_event {
65 BFA_LPS_SM_LOGIN = 1, /* login request from user */
66 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
67 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
68 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
69 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
70 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
71 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
72};
73
74static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
75static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
76static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
77 enum bfa_lps_event event);
78static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
79static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
80static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
81 enum bfa_lps_event event);
82
/**
 * Init state -- no login in progress.
 *
 * A LOGIN request either goes straight out (state: login) or waits for
 * request-queue space (state: loginwait). A LOGOUT in this state has
 * nothing to undo, so it completes immediately.
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			/* no room in the request queue -- park and wait */
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* never logged in -- logout is trivially complete */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects loopback and discards
		 * the lps request; the firmware will eventually send the
		 * timeout response. Just ignore it here.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
132
133/**
134 * login is in progress -- awaiting response from firmware
135 */
136static void
137bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
138{
139 bfa_trc(lps->bfa, lps->lp_tag);
140 bfa_trc(lps->bfa, event);
141
142 switch (event) {
143 case BFA_LPS_SM_FWRSP:
144 if (lps->status == BFA_STATUS_OK) {
145 bfa_sm_set_state(lps, bfa_lps_sm_online);
146 if (lps->fdisc)
147 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
148 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
149 else
150 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
151 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
152 } else {
153 bfa_sm_set_state(lps, bfa_lps_sm_init);
154 if (lps->fdisc)
155 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
156 BFA_PL_EID_LOGIN, 0,
157 "FDISC Fail (RJT or timeout)");
158 else
159 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
160 BFA_PL_EID_LOGIN, 0,
161 "FLOGI Fail (RJT or timeout)");
162 }
163 bfa_lps_login_comp(lps);
164 break;
165
166 case BFA_LPS_SM_OFFLINE:
167 bfa_sm_set_state(lps, bfa_lps_sm_init);
168 break;
169
170 default:
171 bfa_sm_fault(lps->bfa, event);
172 }
173}
174
/**
 * Login pending -- awaiting space in the request queue.
 *
 * The lps has a wait-queue element (wqe) registered; RESUME fires when
 * queue space frees up, via bfa_lps_reqq_resume().
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		/* cancel the pending request-queue wait */
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
206
/**
 * Login complete -- lport is online.
 *
 * Handles logout requests (direct send or wait for queue space), FCoE
 * Clear-Virtual-Link events, and offline/delete teardown.
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			/* no room in the request queue -- park and wait */
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* NOTE(review): DELETE here only resets state; the lps is
		 * returned to the free list via the init state -- confirm */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
247
248/**
249 * logout in progress - awaiting firmware response
250 */
251static void
252bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
253{
254 bfa_trc(lps->bfa, lps->lp_tag);
255 bfa_trc(lps->bfa, event);
256
257 switch (event) {
258 case BFA_LPS_SM_FWRSP:
259 bfa_sm_set_state(lps, bfa_lps_sm_init);
260 bfa_lps_logout_comp(lps);
261 break;
262
263 case BFA_LPS_SM_OFFLINE:
264 bfa_sm_set_state(lps, bfa_lps_sm_init);
265 break;
266
267 default:
268 bfa_sm_fault(lps->bfa, event);
269 }
270}
271
272/**
273 * logout pending -- awaiting space in request queue
274 */
275static void
276bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
277{
278 bfa_trc(lps->bfa, lps->lp_tag);
279 bfa_trc(lps->bfa, event);
280
281 switch (event) {
282 case BFA_LPS_SM_RESUME:
283 bfa_sm_set_state(lps, bfa_lps_sm_logout);
284 bfa_lps_send_logout(lps);
285 break;
286
287 case BFA_LPS_SM_OFFLINE:
288 bfa_sm_set_state(lps, bfa_lps_sm_init);
289 bfa_reqq_wcancel(&lps->wqe);
290 break;
291
292 default:
293 bfa_sm_fault(lps->bfa, event);
294 }
295}
296
297
298
299/**
300 * lps_pvt BFA LPS private functions
301 */
302
303/**
304 * return memory requirement
305 */
306static void
307bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
308{
309 if (cfg->drvcfg.min_cfg)
310 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
311 else
312 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
313}
314
315/**
316 * bfa module attach at initialization time
317 */
318static void
319bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
320 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
321{
322 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
323 struct bfa_lps_s *lps;
324 int i;
325
326 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
327 mod->num_lps = BFA_LPS_MAX_LPORTS;
328 if (cfg->drvcfg.min_cfg)
329 mod->num_lps = BFA_LPS_MIN_LPORTS;
330 else
331 mod->num_lps = BFA_LPS_MAX_LPORTS;
332 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
333
334 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
335
336 INIT_LIST_HEAD(&mod->lps_free_q);
337 INIT_LIST_HEAD(&mod->lps_active_q);
338
339 for (i = 0; i < mod->num_lps; i++, lps++) {
340 lps->bfa = bfa;
341 lps->lp_tag = (u8) i;
342 lps->reqq = BFA_REQQ_LPS;
343 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
344 list_add_tail(&lps->qe, &mod->lps_free_q);
345 }
346}
347
/* Module detach hook -- intentionally empty; nothing to tear down. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
352
/* Module start hook -- intentionally empty. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
357
/* Module stop hook -- intentionally empty. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
362
363/**
364 * IOC in disabled state -- consider all lps offline
365 */
366static void
367bfa_lps_iocdisable(struct bfa_s *bfa)
368{
369 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
370 struct bfa_lps_s *lps;
371 struct list_head *qe, *qen;
372
373 list_for_each_safe(qe, qen, &mod->lps_active_q) {
374 lps = (struct bfa_lps_s *) qe;
375 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
376 }
377}
378
/**
 * Firmware login response handler.
 *
 * Copies the response fields relevant to the completion status into the
 * lps, then drives the state machine with FWRSP. Bounds-checks the tag
 * before indexing the lps array.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* successful login -- latch the fabric-assigned parameters */
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		lps->pr_bbcred	= bfa_os_ntohs(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* fabric rejected the login -- record LS_RJT codes */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
425
426/**
427 * Firmware logout response
428 */
429static void
430bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
431{
432 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
433 struct bfa_lps_s *lps;
434
435 bfa_assert(rsp->lp_tag < mod->num_lps);
436 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
437
438 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
439}
440
441/**
442 * Firmware received a Clear virtual link request (for FCoE)
443 */
444static void
445bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
446{
447 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
448 struct bfa_lps_s *lps;
449
450 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
451
452 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
453}
454
455/**
456 * Space is available in request queue, resume queueing request to firmware.
457 */
458static void
459bfa_lps_reqq_resume(void *lps_arg)
460{
461 struct bfa_lps_s *lps = lps_arg;
462
463 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
464}
465
/**
 * Return a lps to the free queue -- triggered by vport delete.
 * Moves the element from the active queue back to the free queue.
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}
477
/**
 * Build and post a login request to the firmware.
 *
 * Callers (the state machine) only enter here after checking
 * bfa_reqq_full(), so the queue slot is asserted to exist.
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
			bfa_lpuid(lps->bfa));

	/* fields were staged on the lps by bfa_lps_flogi()/bfa_lps_fdisc() */
	m->lp_tag	= lps->lp_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= bfa_os_htons(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
502
/**
 * Build and post a logout request to the firmware.
 *
 * As with login, the state machine guarantees queue space beforehand.
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
			bfa_lpuid(lps->bfa));

	m->lp_tag    = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
521
522/**
523 * Indirect login completion handler for non-fcs
524 */
525static void
526bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
527{
528 struct bfa_lps_s *lps = arg;
529
530 if (!complete)
531 return;
532
533 if (lps->fdisc)
534 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
535 else
536 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
537}
538
/**
 * Login completion dispatch -- direct callback when FCS is attached,
 * queued via the completion queue otherwise.
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		/* non-FCS path: defer to bfa_lps_login_comp_cb() */
		bfa_cb_queue(lps->bfa, &lps->hcb_qe,
				bfa_lps_login_comp_cb, lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
556
/**
 * Deferred (queued) logout completion for non-FCS consumers.
 * Invoked through the hcb queue; no-op unless @complete is set.
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps = arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
	else
		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
573
574/**
575 * Logout completion handler -- direct call for fcs, queue for others
576 */
577static void
578bfa_lps_logout_comp(struct bfa_lps_s *lps)
579{
580 if (!lps->bfa->fcs) {
581 bfa_cb_queue(lps->bfa, &lps->hcb_qe,
582 bfa_lps_logout_comp_cb, lps);
583 return;
584 }
585 if (lps->fdisc)
586 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
587 else
588 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
589}
590
/**
 * Deferred (queued) Clear-Virtual-Link delivery for non-FCS consumers.
 * Only FDISC (vport) sessions are notified; a CVL on the base port
 * shows up as link down instead.
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps = arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
606
607/**
608 * Received Clear virtual link event --direct call for fcs,
609 * queue for others
610 */
611static void
612bfa_lps_cvl_event(struct bfa_lps_s *lps)
613{
614 if (!lps->bfa->fcs) {
615 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
616 lps);
617 return;
618 }
619
620 /* Clear virtual link to base port will result in link down */
621 if (lps->fdisc)
622 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
623}
624
625u32
626bfa_lps_get_max_vport(struct bfa_s *bfa)
627{
628 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
629 return BFA_LPS_MAX_VPORTS_SUPP_CT;
630 else
631 return BFA_LPS_MAX_VPORTS_SUPP_CB;
632}
633
634/**
635 * lps_public BFA LPS public functions
636 */
637
/**
 * Allocate a lport service tag.
 *
 * Dequeues a lps from the free queue, moves it to the active queue and
 * resets its state machine. Returns NULL when none are free.
 */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
657
/**
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
667
/**
 * Initiate a lport login (FLOGI).
 *
 * Stages the login parameters on the lps and kicks the state machine;
 * completion is delivered via bfa_cb_lps_flogi_comp().
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
684
/**
 * Initiate a lport FDISC login (for vports).
 *
 * Same as bfa_lps_flogi() but with fdisc set, alpa fixed at 0 and
 * authentication disabled; completion via bfa_cb_lps_fdisc_comp().
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
701
/**
 * Initiate a lport logout (FLOGI session).
 */
void
bfa_lps_flogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
710
/**
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
719
/**
 * Discard a pending login request -- should be called only for
 * link down handling.
 */
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
729
/**
 * Return the lport services tag (assigned at attach time; doubles as
 * the index into the module's lps array).
 */
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
	return lps->lp_tag;
}
738
739/**
740 * Return lport services tag given the pid
741 */
742u8
743bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
744{
745 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
746 struct bfa_lps_s *lps;
747 int i;
748
749 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
750 if (lps->lp_pid == pid)
751 return lps->lp_tag;
752 }
753
754 /* Return base port tag anyway */
755 return 0;
756}
757
/**
 * Return whether the fabric login response indicated NPIV support
 * (latched from the firmware login response).
 */
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
	return lps->npiv_en;
}
766
/**
 * Return TRUE if attached to F-Port, else return FALSE
 * (latched from the firmware login response).
 */
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
	return lps->fport;
}
775
/**
 * Return TRUE if attached to a Brocade Fabric
 * (latched from the firmware login response).
 */
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
	return lps->brcd_switch;
}
/**
 * Return TRUE if the fabric requires authentication
 * (latched from the firmware login response).
 */
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
	return lps->auth_req;
}
792
/* Return the extended (protocol error) status from the login response. */
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
	return lps->ext_status;
}
798
/**
 * Return the port id assigned to the lport on successful login.
 */
u32
bfa_lps_get_pid(struct bfa_lps_s *lps)
{
	return lps->lp_pid;
}
807
/**
 * Return the peer bb_credit granted in the FLOGI response
 * (byte-swapped from wire order in bfa_lps_login_rsp()).
 */
u16
bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
{
	return lps->pr_bbcred;
}
816
/**
 * Return the peer port name from the login response.
 */
wwn_t
bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
{
	return lps->pr_pwwn;
}
825
/**
 * Return the peer node name from the login response.
 */
wwn_t
bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
{
	return lps->pr_nwwn;
}
834
/**
 * Return the LS_RJT reason code when the login request was rejected
 * by the fabric (valid only after a BFA_STATUS_FABRIC_RJT response).
 */
u8
bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
{
	return lps->lsrjt_rsn;
}
843
/**
 * Return the LS_RJT explanation code when the login request was
 * rejected (valid only after a BFA_STATUS_FABRIC_RJT response).
 */
u8
bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
{
	return lps->lsrjt_expl;
}
852
/**
 * Return the FPMA/SPMA MAC assigned to the lport (FCoE; latched from
 * the firmware login response).
 */
struct mac_s
bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
{
	return lps->lp_mac;
}
861
/**
 * LPS firmware message class handler -- demultiplexes incoming
 * firmware-to-host messages to the matching response handler.
 *
 * NOTE(review): the case labels are named BFI_LPS_H2I_* but the union
 * used is bfi_lps_i2h_msg_u and the messages are i2h responses --
 * presumably a naming quirk in the bfi headers; confirm there.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* unknown message id -- trace and assert */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
891
892
diff --git a/drivers/scsi/bfa/bfa_lps_priv.h b/drivers/scsi/bfa/bfa_lps_priv.h
deleted file mode 100644
index d16c6ce995df..000000000000
--- a/drivers/scsi/bfa/bfa_lps_priv.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_LPS_PRIV_H__
19#define __BFA_LPS_PRIV_H__
20
21#include <bfa_svc.h>
22
/* LPS module state: the lps array plus free/active bookkeeping queues. */
struct bfa_lps_mod_s {
	struct list_head		lps_free_q;	/* unallocated lps */
	struct list_head		lps_active_q;	/* allocated lps */
	struct bfa_lps_s	*lps_arr;	/* lps array, indexed by tag */
	int			num_lps;	/* number of entries in lps_arr */
};
29
30#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
31#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
32
33/*
34 * external functions
35 */
36void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
37
38#endif /* __BFA_LPS_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_modules.h
index bf4939b1676c..2cd527338677 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,26 +15,52 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#ifndef __BFA_PRIV_H__ 18/**
19#define __BFA_PRIV_H__ 19 * bfa_modules.h BFA modules
20 */
21
22#ifndef __BFA_MODULES_H__
23#define __BFA_MODULES_H__
24
25#include "bfa_cs.h"
26#include "bfa.h"
27#include "bfa_svc.h"
28#include "bfa_fcpim.h"
29#include "bfa_port.h"
30
31struct bfa_modules_s {
32 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
36 struct bfa_rport_mod_s rport_mod; /* remote port module */
37 struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
38 struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
39 struct bfa_port_s port; /* Physical port module */
40};
41
42/*
43 * !!! Only append to the enums defined here to avoid any versioning
44 * !!! needed between trace utility and driver version
45 */
46enum {
47 BFA_TRC_HAL_CORE = 1,
48 BFA_TRC_HAL_FCXP = 2,
49 BFA_TRC_HAL_FCPIM = 3,
50 BFA_TRC_HAL_IOCFC_CT = 4,
51 BFA_TRC_HAL_IOCFC_CB = 5,
52};
20 53
21#include "bfa_iocfc.h"
22#include "bfa_intr_priv.h"
23#include "bfa_trcmod_priv.h"
24#include "bfa_modules_priv.h"
25#include "bfa_fwimg_priv.h"
26#include <cs/bfa_log.h>
27#include <bfa_timer.h>
28 54
29/** 55/**
30 * Macro to define a new BFA module 56 * Macro to define a new BFA module
31 */ 57 */
32#define BFA_MODULE(__mod) \ 58#define BFA_MODULE(__mod) \
33 static void bfa_ ## __mod ## _meminfo( \ 59 static void bfa_ ## __mod ## _meminfo( \
34 struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ 60 struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
35 u32 *dm_len); \ 61 u32 *dm_len); \
36 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ 62 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
37 void *bfad, struct bfa_iocfc_cfg_s *cfg, \ 63 void *bfad, struct bfa_iocfc_cfg_s *cfg, \
38 struct bfa_meminfo_s *meminfo, \ 64 struct bfa_meminfo_s *meminfo, \
39 struct bfa_pcidev_s *pcidev); \ 65 struct bfa_pcidev_s *pcidev); \
40 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ 66 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
@@ -77,17 +103,15 @@ extern struct bfa_module_s *hal_mods[];
77 103
78struct bfa_s { 104struct bfa_s {
79 void *bfad; /* BFA driver instance */ 105 void *bfad; /* BFA driver instance */
80 struct bfa_aen_s *aen; /* AEN module */
81 struct bfa_plog_s *plog; /* portlog buffer */ 106 struct bfa_plog_s *plog; /* portlog buffer */
82 struct bfa_log_mod_s *logm; /* driver logging modulen */
83 struct bfa_trc_mod_s *trcmod; /* driver tracing */ 107 struct bfa_trc_mod_s *trcmod; /* driver tracing */
84 struct bfa_ioc_s ioc; /* IOC module */ 108 struct bfa_ioc_s ioc; /* IOC module */
85 struct bfa_iocfc_s iocfc; /* IOCFC module */ 109 struct bfa_iocfc_s iocfc; /* IOCFC module */
86 struct bfa_timer_mod_s timer_mod; /* timer module */ 110 struct bfa_timer_mod_s timer_mod; /* timer module */
87 struct bfa_modules_s modules; /* BFA modules */ 111 struct bfa_modules_s modules; /* BFA modules */
88 struct list_head comp_q; /* pending completions */ 112 struct list_head comp_q; /* pending completions */
89 bfa_boolean_t rme_process; /* RME processing enabled */ 113 bfa_boolean_t rme_process; /* RME processing enabled */
90 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 114 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
91 bfa_boolean_t fcs; /* FCS is attached to BFA */ 115 bfa_boolean_t fcs; /* FCS is attached to BFA */
92 struct bfa_msix_s msix; 116 struct bfa_msix_s msix;
93}; 117};
@@ -95,8 +119,6 @@ struct bfa_s {
95extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX]; 119extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
96extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[]; 120extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[];
97extern bfa_boolean_t bfa_auto_recover; 121extern bfa_boolean_t bfa_auto_recover;
98extern struct bfa_module_s hal_mod_flash;
99extern struct bfa_module_s hal_mod_fcdiag;
100extern struct bfa_module_s hal_mod_sgpg; 122extern struct bfa_module_s hal_mod_sgpg;
101extern struct bfa_module_s hal_mod_fcport; 123extern struct bfa_module_s hal_mod_fcport;
102extern struct bfa_module_s hal_mod_fcxp; 124extern struct bfa_module_s hal_mod_fcxp;
@@ -104,7 +126,5 @@ extern struct bfa_module_s hal_mod_lps;
104extern struct bfa_module_s hal_mod_uf; 126extern struct bfa_module_s hal_mod_uf;
105extern struct bfa_module_s hal_mod_rport; 127extern struct bfa_module_s hal_mod_rport;
106extern struct bfa_module_s hal_mod_fcpim; 128extern struct bfa_module_s hal_mod_fcpim;
107extern struct bfa_module_s hal_mod_pbind;
108
109#endif /* __BFA_PRIV_H__ */
110 129
130#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
deleted file mode 100644
index f554c2fad6a9..000000000000
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_MODULES_PRIV_H__
19#define __BFA_MODULES_PRIV_H__
20
21#include "bfa_uf_priv.h"
22#include "bfa_port_priv.h"
23#include "bfa_rport_priv.h"
24#include "bfa_fcxp_priv.h"
25#include "bfa_lps_priv.h"
26#include "bfa_fcpim_priv.h"
27#include <cee/bfa_cee.h>
28#include <port/bfa_port.h>
29
30
/* Aggregate of all BFA sub-module state, embedded in struct bfa_s. */
struct bfa_modules_s {
	struct bfa_fcport_s	fcport;		/*  fc port module	*/
	struct bfa_fcxp_mod_s	fcxp_mod;	/*  fcxp module		*/
	struct bfa_lps_mod_s	lps_mod;	/*  lport service module */
	struct bfa_uf_mod_s	uf_mod;		/*  unsolicited frame module */
	struct bfa_rport_mod_s	rport_mod;	/*  remote port module	*/
	struct bfa_fcpim_mod_s	fcpim_mod;	/*  FCP initiator module */
	struct bfa_sgpg_mod_s	sgpg_mod;	/*  SG page module	*/
	struct bfa_cee_s	cee;		/*  CEE Module		*/
	struct bfa_port_s	port;		/*  Physical port module */
};
42
43#endif /* __BFA_MODULES_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index bd1cd3ee3022..788a250ffb8a 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -22,30 +22,20 @@
22#ifndef __BFA_OS_INC_H__ 22#ifndef __BFA_OS_INC_H__
23#define __BFA_OS_INC_H__ 23#define __BFA_OS_INC_H__
24 24
25#ifndef __KERNEL__
26#include <stdint.h>
27#else
28#include <linux/types.h> 25#include <linux/types.h>
29
30#include <linux/version.h> 26#include <linux/version.h>
31#include <linux/pci.h> 27#include <linux/pci.h>
32
33#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
34#define SET_MODULE_VERSION(VER)
35
36#include <linux/idr.h> 29#include <linux/idr.h>
37
38#include <linux/interrupt.h> 30#include <linux/interrupt.h>
39#include <linux/cdev.h> 31#include <linux/cdev.h>
40#include <linux/fs.h> 32#include <linux/fs.h>
41#include <linux/delay.h> 33#include <linux/delay.h>
42#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
43
44#include <linux/workqueue.h> 35#include <linux/workqueue.h>
45 36#include <linux/bitops.h>
46#include <scsi/scsi.h> 37#include <scsi/scsi.h>
47#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
48
49#include <scsi/scsi_tcq.h> 39#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_transport_fc.h> 40#include <scsi/scsi_transport_fc.h>
51#include <scsi/scsi_transport.h> 41#include <scsi/scsi_transport.h>
@@ -54,97 +44,75 @@
54#define __BIGENDIAN 44#define __BIGENDIAN
55#endif 45#endif
56 46
57#define BFA_ERR KERN_ERR 47static inline u64 bfa_os_get_clock(void)
58#define BFA_WARNING KERN_WARNING 48{
59#define BFA_NOTICE KERN_NOTICE 49 return jiffies;
60#define BFA_INFO KERN_INFO 50}
61#define BFA_DEBUG KERN_DEBUG 51
62 52static inline u64 bfa_os_get_log_time(void)
63#define LOG_BFAD_INIT 0x00000001 53{
64#define LOG_FCP_IO 0x00000002 54 u64 system_time = 0;
65 55 struct timeval tv;
66#ifdef DEBUG 56 do_gettimeofday(&tv);
67#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) \ 57
68 BFA_LOG(bfad, level, mask, fmt, ## arg) 58 /* We are interested in seconds only. */
69#define BFA_DEV_TRACE(bfad, level, fmt, arg...) \ 59 system_time = tv.tv_sec;
70 BFA_DEV_PRINTF(bfad, level, fmt, ## arg) 60 return system_time;
71#define BFA_TRACE(level, fmt, arg...) \ 61}
72 BFA_PRINTF(level, fmt, ## arg) 62
73#else 63#define bfa_io_lat_clock_res_div HZ
74#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) 64#define bfa_io_lat_clock_res_mul 1000
75#define BFA_DEV_TRACE(bfad, level, fmt, arg...)
76#define BFA_TRACE(level, fmt, arg...)
77#endif
78 65
79#define BFA_ASSERT(p) do { \ 66#define BFA_ASSERT(p) do { \
80 if (!(p)) { \ 67 if (!(p)) { \
81 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \ 68 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
82 #p, __FILE__, __LINE__); \ 69 #p, __FILE__, __LINE__); \
83 BUG(); \
84 } \ 70 } \
85} while (0) 71} while (0)
86 72
87 73#define BFA_LOG(level, bfad, mask, fmt, arg...) \
88#define BFA_LOG(bfad, level, mask, fmt, arg...) \ 74do { \
89do { \ 75 if (((mask) == 4) || (level[1] <= '4')) \
90 if (((mask) & (((struct bfad_s *)(bfad))-> \ 76 dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
91 cfg_data[cfg_log_mask])) || (level[1] <= '3')) \
92 dev_printk(level, &(((struct bfad_s *) \
93 (bfad))->pcidev->dev), fmt, ##arg); \
94} while (0) 77} while (0)
95 78
96#ifndef BFA_DEV_PRINTF
97#define BFA_DEV_PRINTF(bfad, level, fmt, arg...) \
98 dev_printk(level, &(((struct bfad_s *) \
99 (bfad))->pcidev->dev), fmt, ##arg);
100#endif
101
102#define BFA_PRINTF(level, fmt, arg...) \
103 printk(level fmt, ##arg);
104
105int bfa_os_MWB(void *);
106
107#define bfa_os_mmiowb() mmiowb()
108
109#define bfa_swap_3b(_x) \ 79#define bfa_swap_3b(_x) \
110 ((((_x) & 0xff) << 16) | \ 80 ((((_x) & 0xff) << 16) | \
111 ((_x) & 0x00ff00) | \ 81 ((_x) & 0x00ff00) | \
112 (((_x) & 0xff0000) >> 16)) 82 (((_x) & 0xff0000) >> 16))
113 83
114#define bfa_swap_8b(_x) \ 84#define bfa_swap_8b(_x) \
115 ((((_x) & 0xff00000000000000ull) >> 56) \ 85 ((((_x) & 0xff00000000000000ull) >> 56) \
116 | (((_x) & 0x00ff000000000000ull) >> 40) \ 86 | (((_x) & 0x00ff000000000000ull) >> 40) \
117 | (((_x) & 0x0000ff0000000000ull) >> 24) \ 87 | (((_x) & 0x0000ff0000000000ull) >> 24) \
118 | (((_x) & 0x000000ff00000000ull) >> 8) \ 88 | (((_x) & 0x000000ff00000000ull) >> 8) \
119 | (((_x) & 0x00000000ff000000ull) << 8) \ 89 | (((_x) & 0x00000000ff000000ull) << 8) \
120 | (((_x) & 0x0000000000ff0000ull) << 24) \ 90 | (((_x) & 0x0000000000ff0000ull) << 24) \
121 | (((_x) & 0x000000000000ff00ull) << 40) \ 91 | (((_x) & 0x000000000000ff00ull) << 40) \
122 | (((_x) & 0x00000000000000ffull) << 56)) 92 | (((_x) & 0x00000000000000ffull) << 56))
123 93
124#define bfa_os_swap32(_x) \ 94#define bfa_os_swap32(_x) \
125 ((((_x) & 0xff) << 24) | \ 95 ((((_x) & 0xff) << 24) | \
126 (((_x) & 0x0000ff00) << 8) | \ 96 (((_x) & 0x0000ff00) << 8) | \
127 (((_x) & 0x00ff0000) >> 8) | \ 97 (((_x) & 0x00ff0000) >> 8) | \
128 (((_x) & 0xff000000) >> 24)) 98 (((_x) & 0xff000000) >> 24))
129 99
130#define bfa_os_swap_sgaddr(_x) ((u64)( \ 100#define bfa_os_swap_sgaddr(_x) ((u64)( \
131 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ 101 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
132 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ 102 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
133 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \ 103 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
134 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \ 104 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
135 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \ 105 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
136 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \ 106 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
137 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \ 107 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
138 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) 108 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
139 109
140#ifndef __BIGENDIAN 110#ifndef __BIGENDIAN
141#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ 111#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
142 (((_x) & 0x00ff) << 8))) 112 (((_x) & 0x00ff) << 8)))
143
144#define bfa_os_htonl(_x) bfa_os_swap32(_x) 113#define bfa_os_htonl(_x) bfa_os_swap32(_x)
145#define bfa_os_htonll(_x) bfa_swap_8b(_x) 114#define bfa_os_htonll(_x) bfa_swap_8b(_x)
146#define bfa_os_hton3b(_x) bfa_swap_3b(_x) 115#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
147
148#define bfa_os_wtole(_x) (_x) 116#define bfa_os_wtole(_x) (_x)
149#define bfa_os_sgaddr(_x) (_x) 117#define bfa_os_sgaddr(_x) (_x)
150 118
@@ -170,17 +138,16 @@ int bfa_os_MWB(void *);
170#define bfa_os_memcpy memcpy 138#define bfa_os_memcpy memcpy
171#define bfa_os_udelay udelay 139#define bfa_os_udelay udelay
172#define bfa_os_vsprintf vsprintf 140#define bfa_os_vsprintf vsprintf
141#define bfa_os_snprintf snprintf
173 142
174#define bfa_os_assign(__t, __s) __t = __s 143#define bfa_os_assign(__t, __s) __t = __s
175 144#define bfa_os_addr_t void __iomem *
176#define bfa_os_addr_t char __iomem *
177#define bfa_os_panic()
178 145
179#define bfa_os_reg_read(_raddr) readl(_raddr) 146#define bfa_os_reg_read(_raddr) readl(_raddr)
180#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr)) 147#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
181#define bfa_os_mem_read(_raddr, _off) \ 148#define bfa_os_mem_read(_raddr, _off) \
182 bfa_os_swap32(readl(((_raddr) + (_off)))) 149 bfa_os_swap32(readl(((_raddr) + (_off))))
183#define bfa_os_mem_write(_raddr, _off, _val) \ 150#define bfa_os_mem_write(_raddr, _off, _val) \
184 writel(bfa_os_swap32((_val)), ((_raddr) + (_off))) 151 writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
185 152
186#define BFA_TRC_TS(_trcm) \ 153#define BFA_TRC_TS(_trcm) \
@@ -191,11 +158,6 @@ int bfa_os_MWB(void *);
191 (tv.tv_sec*1000000+tv.tv_usec); \ 158 (tv.tv_sec*1000000+tv.tv_usec); \
192 }) 159 })
193 160
194struct bfa_log_mod_s;
195void bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
196 const char *fmt, ...);
197#endif
198
199#define boolean_t int 161#define boolean_t int
200 162
201/** 163/**
@@ -206,7 +168,15 @@ struct bfa_timeval_s {
206 u32 tv_usec; /* microseconds */ 168 u32 tv_usec; /* microseconds */
207}; 169};
208 170
209void bfa_os_gettimeofday(struct bfa_timeval_s *tv); 171static inline void
172bfa_os_gettimeofday(struct bfa_timeval_s *tv)
173{
174 struct timeval tmp_tv;
175
176 do_gettimeofday(&tmp_tv);
177 tv->tv_sec = (u32) tmp_tv.tv_sec;
178 tv->tv_usec = (u32) tmp_tv.tv_usec;
179}
210 180
211static inline void 181static inline void
212wwn2str(char *wwn_str, u64 wwn) 182wwn2str(char *wwn_str, u64 wwn)
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
index f5bef63b5877..501f0ed35cf0 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -17,8 +17,8 @@
17#ifndef __BFA_PORTLOG_H__ 17#ifndef __BFA_PORTLOG_H__
18#define __BFA_PORTLOG_H__ 18#define __BFA_PORTLOG_H__
19 19
20#include "protocol/fc.h" 20#include "bfa_fc.h"
21#include <defs/bfa_defs_types.h> 21#include "bfa_defs.h"
22 22
23#define BFA_PL_NLOG_ENTS 256 23#define BFA_PL_NLOG_ENTS 256
24#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS) 24#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
@@ -27,38 +27,30 @@
27#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */ 27#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */
28 28
29enum bfa_plog_log_type { 29enum bfa_plog_log_type {
30 BFA_PL_LOG_TYPE_INVALID = 0, 30 BFA_PL_LOG_TYPE_INVALID = 0,
31 BFA_PL_LOG_TYPE_INT = 1, 31 BFA_PL_LOG_TYPE_INT = 1,
32 BFA_PL_LOG_TYPE_STRING = 2, 32 BFA_PL_LOG_TYPE_STRING = 2,
33}; 33};
34 34
35/* 35/*
36 * the (fixed size) record format for each entry in the portlog 36 * the (fixed size) record format for each entry in the portlog
37 */ 37 */
38struct bfa_plog_rec_s { 38struct bfa_plog_rec_s {
39 u32 tv; /* Filled by the portlog driver when the * 39 u64 tv; /* timestamp */
40 * entry is added to the circular log. */ 40 u8 port; /* Source port that logged this entry */
41 u8 port; /* Source port that logged this entry. CM 41 u8 mid; /* module id */
42 * entities will use 0xFF */ 42 u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */
43 u8 mid; /* Integer value to be used by all entities * 43 u8 log_type; /* string/integer log, bfa_plog_log_type_t */
44 * while logging. The module id to string * 44 u8 log_num_ints;
45 * conversion will be done by BFAL. See
46 * enum bfa_plog_mid */
47 u8 eid; /* indicates Rx, Tx, IOCTL, etc. See
48 * enum bfa_plog_eid */
49 u8 log_type; /* indicates string log or integer log.
50 * see bfa_plog_log_type_t */
51 u8 log_num_ints;
52 /* 45 /*
53 * interpreted only if log_type is INT_LOG. indicates number of 46 * interpreted only if log_type is INT_LOG. indicates number of
54 * integers in the int_log[] (0-PL_INT_LOG_SZ). 47 * integers in the int_log[] (0-PL_INT_LOG_SZ).
55 */ 48 */
56 u8 rsvd; 49 u8 rsvd;
57 u16 misc; /* can be used to indicate fc frame length, 50 u16 misc; /* can be used to indicate fc frame length */
58 *etc.. */
59 union { 51 union {
60 char string_log[BFA_PL_STRING_LOG_SZ]; 52 char string_log[BFA_PL_STRING_LOG_SZ];
61 u32 int_log[BFA_PL_INT_LOG_SZ]; 53 u32 int_log[BFA_PL_INT_LOG_SZ];
62 } log_entry; 54 } log_entry;
63 55
64}; 56};
@@ -73,20 +65,20 @@ struct bfa_plog_rec_s {
73 * - Do not remove any entry or rearrange the order. 65 * - Do not remove any entry or rearrange the order.
74 */ 66 */
75enum bfa_plog_mid { 67enum bfa_plog_mid {
76 BFA_PL_MID_INVALID = 0, 68 BFA_PL_MID_INVALID = 0,
77 BFA_PL_MID_DEBUG = 1, 69 BFA_PL_MID_DEBUG = 1,
78 BFA_PL_MID_DRVR = 2, 70 BFA_PL_MID_DRVR = 2,
79 BFA_PL_MID_HAL = 3, 71 BFA_PL_MID_HAL = 3,
80 BFA_PL_MID_HAL_FCXP = 4, 72 BFA_PL_MID_HAL_FCXP = 4,
81 BFA_PL_MID_HAL_UF = 5, 73 BFA_PL_MID_HAL_UF = 5,
82 BFA_PL_MID_FCS = 6, 74 BFA_PL_MID_FCS = 6,
83 BFA_PL_MID_LPS = 7, 75 BFA_PL_MID_LPS = 7,
84 BFA_PL_MID_MAX = 8 76 BFA_PL_MID_MAX = 8
85}; 77};
86 78
87#define BFA_PL_MID_STRLEN 8 79#define BFA_PL_MID_STRLEN 8
88struct bfa_plog_mid_strings_s { 80struct bfa_plog_mid_strings_s {
89 char m_str[BFA_PL_MID_STRLEN]; 81 char m_str[BFA_PL_MID_STRLEN];
90}; 82};
91 83
92/* 84/*
@@ -99,36 +91,37 @@ struct bfa_plog_mid_strings_s {
99 * - Do not remove any entry or rearrange the order. 91 * - Do not remove any entry or rearrange the order.
100 */ 92 */
101enum bfa_plog_eid { 93enum bfa_plog_eid {
102 BFA_PL_EID_INVALID = 0, 94 BFA_PL_EID_INVALID = 0,
103 BFA_PL_EID_IOC_DISABLE = 1, 95 BFA_PL_EID_IOC_DISABLE = 1,
104 BFA_PL_EID_IOC_ENABLE = 2, 96 BFA_PL_EID_IOC_ENABLE = 2,
105 BFA_PL_EID_PORT_DISABLE = 3, 97 BFA_PL_EID_PORT_DISABLE = 3,
106 BFA_PL_EID_PORT_ENABLE = 4, 98 BFA_PL_EID_PORT_ENABLE = 4,
107 BFA_PL_EID_PORT_ST_CHANGE = 5, 99 BFA_PL_EID_PORT_ST_CHANGE = 5,
108 BFA_PL_EID_TX = 6, 100 BFA_PL_EID_TX = 6,
109 BFA_PL_EID_TX_ACK1 = 7, 101 BFA_PL_EID_TX_ACK1 = 7,
110 BFA_PL_EID_TX_RJT = 8, 102 BFA_PL_EID_TX_RJT = 8,
111 BFA_PL_EID_TX_BSY = 9, 103 BFA_PL_EID_TX_BSY = 9,
112 BFA_PL_EID_RX = 10, 104 BFA_PL_EID_RX = 10,
113 BFA_PL_EID_RX_ACK1 = 11, 105 BFA_PL_EID_RX_ACK1 = 11,
114 BFA_PL_EID_RX_RJT = 12, 106 BFA_PL_EID_RX_RJT = 12,
115 BFA_PL_EID_RX_BSY = 13, 107 BFA_PL_EID_RX_BSY = 13,
116 BFA_PL_EID_CT_IN = 14, 108 BFA_PL_EID_CT_IN = 14,
117 BFA_PL_EID_CT_OUT = 15, 109 BFA_PL_EID_CT_OUT = 15,
118 BFA_PL_EID_DRIVER_START = 16, 110 BFA_PL_EID_DRIVER_START = 16,
119 BFA_PL_EID_RSCN = 17, 111 BFA_PL_EID_RSCN = 17,
120 BFA_PL_EID_DEBUG = 18, 112 BFA_PL_EID_DEBUG = 18,
121 BFA_PL_EID_MISC = 19, 113 BFA_PL_EID_MISC = 19,
122 BFA_PL_EID_FIP_FCF_DISC = 20, 114 BFA_PL_EID_FIP_FCF_DISC = 20,
123 BFA_PL_EID_FIP_FCF_CVL = 21, 115 BFA_PL_EID_FIP_FCF_CVL = 21,
124 BFA_PL_EID_LOGIN = 22, 116 BFA_PL_EID_LOGIN = 22,
125 BFA_PL_EID_LOGO = 23, 117 BFA_PL_EID_LOGO = 23,
126 BFA_PL_EID_MAX = 24 118 BFA_PL_EID_TRUNK_SCN = 24,
119 BFA_PL_EID_MAX
127}; 120};
128 121
129#define BFA_PL_ENAME_STRLEN 8 122#define BFA_PL_ENAME_STRLEN 8
130struct bfa_plog_eid_strings_s { 123struct bfa_plog_eid_strings_s {
131 char e_str[BFA_PL_ENAME_STRLEN]; 124 char e_str[BFA_PL_ENAME_STRLEN];
132}; 125};
133 126
134#define BFA_PL_SIG_LEN 8 127#define BFA_PL_SIG_LEN 8
@@ -138,12 +131,12 @@ struct bfa_plog_eid_strings_s {
138 * per port circular log buffer 131 * per port circular log buffer
139 */ 132 */
140struct bfa_plog_s { 133struct bfa_plog_s {
141 char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */ 134 char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
142 u8 plog_enabled; 135 u8 plog_enabled;
143 u8 rsvd[7]; 136 u8 rsvd[7];
144 u32 ticks; 137 u32 ticks;
145 u16 head; 138 u16 head;
146 u16 tail; 139 u16 tail;
147 struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS]; 140 struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS];
148}; 141};
149 142
@@ -154,8 +147,7 @@ void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
154 enum bfa_plog_eid event, u16 misc, 147 enum bfa_plog_eid event, u16 misc,
155 u32 *intarr, u32 num_ints); 148 u32 *intarr, u32 num_ints);
156void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 149void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
157 enum bfa_plog_eid event, u16 misc, 150 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
158 struct fchs_s *fchdr);
159void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 151void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
160 enum bfa_plog_eid event, u16 misc, 152 enum bfa_plog_eid event, u16 misc,
161 struct fchs_s *fchdr, u32 pld_w0); 153 struct fchs_s *fchdr, u32 pld_w0);
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index c7e69f1e56e3..b6d170a13bea 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -15,30 +15,25 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18#include <defs/bfa_defs_port.h> 18#include "bfa_defs_svc.h"
19#include <cs/bfa_trc.h> 19#include "bfa_port.h"
20#include <cs/bfa_log.h> 20#include "bfi.h"
21#include <cs/bfa_debug.h> 21#include "bfa_ioc.h"
22#include <port/bfa_port.h> 22
23#include <bfi/bfi.h>
24#include <bfi/bfi_port.h>
25#include <bfa_ioc.h>
26#include <cna/bfa_cna_trcmod.h>
27 23
28BFA_TRC_FILE(CNA, PORT); 24BFA_TRC_FILE(CNA, PORT);
29 25
30#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 26#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
31#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
32 27
33static void 28static void
34bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats) 29bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
35{ 30{
36 u32 *dip = (u32 *) stats; 31 u32 *dip = (u32 *) stats;
37 u32 t0, t1; 32 u32 t0, t1;
38 int i; 33 int i;
39 34
40 for (i = 0; i < sizeof(union bfa_pport_stats_u) / sizeof(u32); 35 for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
41 i += 2) { 36 i += 2) {
42 t0 = dip[i]; 37 t0 = dip[i];
43 t1 = dip[i + 1]; 38 t1 = dip[i + 1];
44#ifdef __BIGENDIAN 39#ifdef __BIGENDIAN
@@ -49,11 +44,6 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
49 dip[i + 1] = bfa_os_ntohl(t0); 44 dip[i + 1] = bfa_os_ntohl(t0);
50#endif 45#endif
51 } 46 }
52
53 /** todo
54 * QoS stats r also swapped as 64bit; that structure also
55 * has to use 64 bit counters
56 */
57} 47}
58 48
59/** 49/**
@@ -68,7 +58,9 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
68static void 58static void
69bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) 59bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
70{ 60{
71 bfa_assert(0); 61 bfa_trc(port, status);
62 port->endis_pending = BFA_FALSE;
63 port->endis_cbfn(port->endis_cbarg, status);
72} 64}
73 65
74/** 66/**
@@ -83,7 +75,9 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
83static void 75static void
84bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) 76bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
85{ 77{
86 bfa_assert(0); 78 bfa_trc(port, status);
79 port->endis_pending = BFA_FALSE;
80 port->endis_cbfn(port->endis_cbarg, status);
87} 81}
88 82
89/** 83/**
@@ -105,7 +99,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
105 struct bfa_timeval_s tv; 99 struct bfa_timeval_s tv;
106 100
107 memcpy(port->stats, port->stats_dma.kva, 101 memcpy(port->stats, port->stats_dma.kva,
108 sizeof(union bfa_pport_stats_u)); 102 sizeof(union bfa_port_stats_u));
109 bfa_port_stats_swap(port, port->stats); 103 bfa_port_stats_swap(port, port->stats);
110 104
111 bfa_os_gettimeofday(&tv); 105 bfa_os_gettimeofday(&tv);
@@ -133,11 +127,11 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
133 struct bfa_timeval_s tv; 127 struct bfa_timeval_s tv;
134 128
135 port->stats_status = status; 129 port->stats_status = status;
136 port->stats_busy = BFA_FALSE; 130 port->stats_busy = BFA_FALSE;
137 131
138 /** 132 /**
139 * re-initialize time stamp for stats reset 133 * re-initialize time stamp for stats reset
140 */ 134 */
141 bfa_os_gettimeofday(&tv); 135 bfa_os_gettimeofday(&tv);
142 port->stats_reset_time = tv.tv_sec; 136 port->stats_reset_time = tv.tv_sec;
143 137
@@ -158,10 +152,10 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
158static void 152static void
159bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) 153bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
160{ 154{
161 struct bfa_port_s *port = (struct bfa_port_s *)cbarg; 155 struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
162 union bfi_port_i2h_msg_u *i2hmsg; 156 union bfi_port_i2h_msg_u *i2hmsg;
163 157
164 i2hmsg = (union bfi_port_i2h_msg_u *)m; 158 i2hmsg = (union bfi_port_i2h_msg_u *) m;
165 bfa_trc(port, m->mh.msg_id); 159 bfa_trc(port, m->mh.msg_id);
166 160
167 switch (m->mh.msg_id) { 161 switch (m->mh.msg_id) {
@@ -178,9 +172,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
178 break; 172 break;
179 173
180 case BFI_PORT_I2H_GET_STATS_RSP: 174 case BFI_PORT_I2H_GET_STATS_RSP:
181 /* 175 /* Stats busy flag is still set? (may be cmd timed out) */
182 * Stats busy flag is still set? (may be cmd timed out)
183 */
184 if (port->stats_busy == BFA_FALSE) 176 if (port->stats_busy == BFA_FALSE)
185 break; 177 break;
186 bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status); 178 bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
@@ -208,7 +200,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
208u32 200u32
209bfa_port_meminfo(void) 201bfa_port_meminfo(void)
210{ 202{
211 return BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), BFA_DMA_ALIGN_SZ); 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
212} 204}
213 205
214/** 206/**
@@ -216,8 +208,8 @@ bfa_port_meminfo(void)
216 * 208 *
217 * 209 *
218 * @param[in] port Port module pointer 210 * @param[in] port Port module pointer
219 * dma_kva Kernel Virtual Address of Port DMA Memory 211 * dma_kva Kernel Virtual Address of Port DMA Memory
220 * dma_pa Physical Address of Port DMA Memory 212 * dma_pa Physical Address of Port DMA Memory
221 * 213 *
222 * @return void 214 * @return void
223 */ 215 */
@@ -225,7 +217,7 @@ void
225bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) 217bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
226{ 218{
227 port->stats_dma.kva = dma_kva; 219 port->stats_dma.kva = dma_kva;
228 port->stats_dma.pa = dma_pa; 220 port->stats_dma.pa = dma_pa;
229} 221}
230 222
231/** 223/**
@@ -239,12 +231,14 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
239 */ 231 */
240bfa_status_t 232bfa_status_t
241bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, 233bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
242 void *cbarg) 234 void *cbarg)
243{ 235{
244 struct bfi_port_generic_req_s *m; 236 struct bfi_port_generic_req_s *m;
245 237
246 /** todo Not implemented */ 238 if (bfa_ioc_is_disabled(port->ioc)) {
247 bfa_assert(0); 239 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
240 return BFA_STATUS_IOC_DISABLED;
241 }
248 242
249 if (!bfa_ioc_is_operational(port->ioc)) { 243 if (!bfa_ioc_is_operational(port->ioc)) {
250 bfa_trc(port, BFA_STATUS_IOC_FAILURE); 244 bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -256,11 +250,11 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
256 return BFA_STATUS_DEVBUSY; 250 return BFA_STATUS_DEVBUSY;
257 } 251 }
258 252
259 m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; 253 m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
260 254
261 port->msgtag++; 255 port->msgtag++;
262 port->endis_cbfn = cbfn; 256 port->endis_cbfn = cbfn;
263 port->endis_cbarg = cbarg; 257 port->endis_cbarg = cbarg;
264 port->endis_pending = BFA_TRUE; 258 port->endis_pending = BFA_TRUE;
265 259
266 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ, 260 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
@@ -281,12 +275,14 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
281 */ 275 */
282bfa_status_t 276bfa_status_t
283bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, 277bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
284 void *cbarg) 278 void *cbarg)
285{ 279{
286 struct bfi_port_generic_req_s *m; 280 struct bfi_port_generic_req_s *m;
287 281
288 /** todo Not implemented */ 282 if (bfa_ioc_is_disabled(port->ioc)) {
289 bfa_assert(0); 283 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
284 return BFA_STATUS_IOC_DISABLED;
285 }
290 286
291 if (!bfa_ioc_is_operational(port->ioc)) { 287 if (!bfa_ioc_is_operational(port->ioc)) {
292 bfa_trc(port, BFA_STATUS_IOC_FAILURE); 288 bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -298,11 +294,11 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
298 return BFA_STATUS_DEVBUSY; 294 return BFA_STATUS_DEVBUSY;
299 } 295 }
300 296
301 m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; 297 m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
302 298
303 port->msgtag++; 299 port->msgtag++;
304 port->endis_cbfn = cbfn; 300 port->endis_cbfn = cbfn;
305 port->endis_cbarg = cbarg; 301 port->endis_cbarg = cbarg;
306 port->endis_pending = BFA_TRUE; 302 port->endis_pending = BFA_TRUE;
307 303
308 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ, 304 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
@@ -322,8 +318,8 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
322 * @return Status 318 * @return Status
323 */ 319 */
324bfa_status_t 320bfa_status_t
325bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats, 321bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
326 bfa_port_stats_cbfn_t cbfn, void *cbarg) 322 bfa_port_stats_cbfn_t cbfn, void *cbarg)
327{ 323{
328 struct bfi_port_get_stats_req_s *m; 324 struct bfi_port_get_stats_req_s *m;
329 325
@@ -337,12 +333,12 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
337 return BFA_STATUS_DEVBUSY; 333 return BFA_STATUS_DEVBUSY;
338 } 334 }
339 335
340 m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg; 336 m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;
341 337
342 port->stats = stats; 338 port->stats = stats;
343 port->stats_cbfn = cbfn; 339 port->stats_cbfn = cbfn;
344 port->stats_cbarg = cbarg; 340 port->stats_cbarg = cbarg;
345 port->stats_busy = BFA_TRUE; 341 port->stats_busy = BFA_TRUE;
346 bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa); 342 bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);
347 343
348 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ, 344 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
@@ -362,7 +358,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
362 */ 358 */
363bfa_status_t 359bfa_status_t
364bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, 360bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
365 void *cbarg) 361 void *cbarg)
366{ 362{
367 struct bfi_port_generic_req_s *m; 363 struct bfi_port_generic_req_s *m;
368 364
@@ -376,11 +372,11 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
376 return BFA_STATUS_DEVBUSY; 372 return BFA_STATUS_DEVBUSY;
377 } 373 }
378 374
379 m = (struct bfi_port_generic_req_s *)port->stats_mb.msg; 375 m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;
380 376
381 port->stats_cbfn = cbfn; 377 port->stats_cbfn = cbfn;
382 port->stats_cbarg = cbarg; 378 port->stats_cbarg = cbarg;
383 port->stats_busy = BFA_TRUE; 379 port->stats_busy = BFA_TRUE;
384 380
385 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ, 381 bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
386 bfa_ioc_portid(port->ioc)); 382 bfa_ioc_portid(port->ioc));
@@ -400,11 +396,9 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
400void 396void
401bfa_port_hbfail(void *arg) 397bfa_port_hbfail(void *arg)
402{ 398{
403 struct bfa_port_s *port = (struct bfa_port_s *)arg; 399 struct bfa_port_s *port = (struct bfa_port_s *) arg;
404 400
405 /* 401 /* Fail any pending get_stats/clear_stats requests */
406 * Fail any pending get_stats/clear_stats requests
407 */
408 if (port->stats_busy) { 402 if (port->stats_busy) {
409 if (port->stats_cbfn) 403 if (port->stats_cbfn)
410 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); 404 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
@@ -412,9 +406,7 @@ bfa_port_hbfail(void *arg)
412 port->stats_busy = BFA_FALSE; 406 port->stats_busy = BFA_FALSE;
413 } 407 }
414 408
415 /* 409 /* Clear any enable/disable is pending */
416 * Clear any enable/disable is pending
417 */
418 if (port->endis_pending) { 410 if (port->endis_pending) {
419 if (port->endis_cbfn) 411 if (port->endis_cbfn)
420 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); 412 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
@@ -433,22 +425,20 @@ bfa_port_hbfail(void *arg)
433 * The device driver specific mbox ISR functions have 425 * The device driver specific mbox ISR functions have
434 * this pointer as one of the parameters. 426 * this pointer as one of the parameters.
435 * trcmod - 427 * trcmod -
436 * logmod -
437 * 428 *
438 * @return void 429 * @return void
439 */ 430 */
440void 431void
441bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, 432bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
442 struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod) 433 void *dev, struct bfa_trc_mod_s *trcmod)
443{ 434{
444 struct bfa_timeval_s tv; 435 struct bfa_timeval_s tv;
445 436
446 bfa_assert(port); 437 bfa_assert(port);
447 438
448 port->dev = dev; 439 port->dev = dev;
449 port->ioc = ioc; 440 port->ioc = ioc;
450 port->trcmod = trcmod; 441 port->trcmod = trcmod;
451 port->logmod = logmod;
452 442
453 port->stats_busy = BFA_FALSE; 443 port->stats_busy = BFA_FALSE;
454 port->endis_pending = BFA_FALSE; 444 port->endis_pending = BFA_FALSE;
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
new file mode 100644
index 000000000000..dbce9dfd056b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_H__
19#define __BFA_PORT_H__
20
21#include "bfa_defs_svc.h"
22#include "bfa_ioc.h"
23#include "bfa_cs.h"
24
25typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
26typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
27
28struct bfa_port_s {
29 void *dev;
30 struct bfa_ioc_s *ioc;
31 struct bfa_trc_mod_s *trcmod;
32 u32 msgtag;
33 bfa_boolean_t stats_busy;
34 struct bfa_mbox_cmd_s stats_mb;
35 bfa_port_stats_cbfn_t stats_cbfn;
36 void *stats_cbarg;
37 bfa_status_t stats_status;
38 u32 stats_reset_time;
39 union bfa_port_stats_u *stats;
40 struct bfa_dma_s stats_dma;
41 bfa_boolean_t endis_pending;
42 struct bfa_mbox_cmd_s endis_mb;
43 bfa_port_endis_cbfn_t endis_cbfn;
44 void *endis_cbarg;
45 bfa_status_t endis_status;
46 struct bfa_ioc_hbfail_notify_s hbfail;
47};
48
49void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
50 void *dev, struct bfa_trc_mod_s *trcmod);
51void bfa_port_detach(struct bfa_port_s *port);
52void bfa_port_hbfail(void *arg);
53
54bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
55 union bfa_port_stats_u *stats,
56 bfa_port_stats_cbfn_t cbfn, void *cbarg);
57bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
58 bfa_port_stats_cbfn_t cbfn, void *cbarg);
59bfa_status_t bfa_port_enable(struct bfa_port_s *port,
60 bfa_port_endis_cbfn_t cbfn, void *cbarg);
61bfa_status_t bfa_port_disable(struct bfa_port_s *port,
62 bfa_port_endis_cbfn_t cbfn, void *cbarg);
63u32 bfa_port_meminfo(void);
64void bfa_port_mem_claim(struct bfa_port_s *port,
65 u8 *dma_kva, u64 dma_pa);
66#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
deleted file mode 100644
index c9ebe0426fa6..000000000000
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_PRIV_H__
19#define __BFA_PORT_PRIV_H__
20
21#include <defs/bfa_defs_pport.h>
22#include <bfi/bfi_pport.h>
23#include "bfa_intr_priv.h"
24
25/**
26 * Link notification data structure
27 */
28struct bfa_fcport_ln_s {
29 struct bfa_fcport_s *fcport;
30 bfa_sm_t sm;
31 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
32 enum bfa_pport_linkstate ln_event; /* ln event for callback */
33};
34
35/**
36 * BFA FC port data structure
37 */
38struct bfa_fcport_s {
39 struct bfa_s *bfa; /* parent BFA instance */
40 bfa_sm_t sm; /* port state machine */
41 wwn_t nwwn; /* node wwn of physical port */
42 wwn_t pwwn; /* port wwn of physical oprt */
43 enum bfa_pport_speed speed_sup;
44 /* supported speeds */
45 enum bfa_pport_speed speed; /* current speed */
46 enum bfa_pport_topology topology; /* current topology */
47 u8 myalpa; /* my ALPA in LOOP topology */
48 u8 rsvd[3];
49 u32 mypid:24;
50 u32 rsvd_b:8;
51 struct bfa_pport_cfg_s cfg; /* current port configuration */
52 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
53 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
54 struct bfa_reqq_wait_s reqq_wait;
55 /* to wait for room in reqq */
56 struct bfa_reqq_wait_s svcreq_wait;
57 /* to wait for room in reqq */
58 struct bfa_reqq_wait_s stats_reqq_wait;
59 /* to wait for room in reqq (stats) */
60 void *event_cbarg;
61 void (*event_cbfn) (void *cbarg,
62 bfa_pport_event_t event);
63 union {
64 union bfi_fcport_i2h_msg_u i2hmsg;
65 } event_arg;
66 void *bfad; /* BFA driver handle */
67 struct bfa_fcport_ln_s ln; /* Link Notification */
68 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
69 struct bfa_timer_s timer; /* timer */
70 u32 msgtag; /* fimrware msg tag for reply */
71 u8 *stats_kva;
72 u64 stats_pa;
73 union bfa_fcport_stats_u *stats;
74 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
75 bfa_status_t stats_status; /* stats/statsclr status */
76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
77 bfa_boolean_t stats_qfull;
78 u32 stats_reset_time; /* stats reset time stamp */
79 bfa_cb_pport_t stats_cbfn; /* driver callback function */
80 void *stats_cbarg; /* user callback arg */
81 bfa_boolean_t diag_busy; /* diag busy status */
82 bfa_boolean_t beacon; /* port beacon status */
83 bfa_boolean_t link_e2e_beacon; /* link beacon status */
84};
85
86#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
87
88/*
89 * public functions
90 */
91void bfa_fcport_init(struct bfa_s *bfa);
92void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
93
94#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
deleted file mode 100644
index ccd0680f6f16..000000000000
--- a/drivers/scsi/bfa/bfa_rport.c
+++ /dev/null
@@ -1,906 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <cs/bfa_debug.h>
21#include <bfi/bfi_rport.h>
22#include "bfa_intr_priv.h"
23
24BFA_TRC_FILE(HAL, RPORT);
25BFA_MODULE(rport);
26
27#define bfa_rport_offline_cb(__rp) do { \
28 if ((__rp)->bfa->fcs) \
29 bfa_cb_rport_offline((__rp)->rport_drv); \
30 else { \
31 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
32 __bfa_cb_rport_offline, (__rp)); \
33 } \
34} while (0)
35
36#define bfa_rport_online_cb(__rp) do { \
37 if ((__rp)->bfa->fcs) \
38 bfa_cb_rport_online((__rp)->rport_drv); \
39 else { \
40 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
41 __bfa_cb_rport_online, (__rp)); \
42 } \
43} while (0)
44
45/*
46 * forward declarations
47 */
48static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
49static void bfa_rport_free(struct bfa_rport_s *rport);
50static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
51static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
52static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
53static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
54static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
55
56/**
57 * bfa_rport_sm BFA rport state machine
58 */
59
60
61enum bfa_rport_event {
62 BFA_RPORT_SM_CREATE = 1, /* rport create event */
63 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
64 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
65 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
66 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
67 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
68 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
69 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
70 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
71};
72
73static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
74 enum bfa_rport_event event);
75static void bfa_rport_sm_created(struct bfa_rport_s *rp,
76 enum bfa_rport_event event);
77static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
78 enum bfa_rport_event event);
79static void bfa_rport_sm_online(struct bfa_rport_s *rp,
80 enum bfa_rport_event event);
81static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
82 enum bfa_rport_event event);
83static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
84 enum bfa_rport_event event);
85static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
86 enum bfa_rport_event event);
87static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
88 enum bfa_rport_event event);
89static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
90 enum bfa_rport_event event);
91static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
92 enum bfa_rport_event event);
93static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
94 enum bfa_rport_event event);
95static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
96 enum bfa_rport_event event);
97static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
98 enum bfa_rport_event event);
99
100/**
101 * Beginning state, only online event expected.
102 */
103static void
104bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
105{
106 bfa_trc(rp->bfa, rp->rport_tag);
107 bfa_trc(rp->bfa, event);
108
109 switch (event) {
110 case BFA_RPORT_SM_CREATE:
111 bfa_stats(rp, sm_un_cr);
112 bfa_sm_set_state(rp, bfa_rport_sm_created);
113 break;
114
115 default:
116 bfa_stats(rp, sm_un_unexp);
117 bfa_sm_fault(rp->bfa, event);
118 }
119}
120
121static void
122bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
123{
124 bfa_trc(rp->bfa, rp->rport_tag);
125 bfa_trc(rp->bfa, event);
126
127 switch (event) {
128 case BFA_RPORT_SM_ONLINE:
129 bfa_stats(rp, sm_cr_on);
130 if (bfa_rport_send_fwcreate(rp))
131 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
132 else
133 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
134 break;
135
136 case BFA_RPORT_SM_DELETE:
137 bfa_stats(rp, sm_cr_del);
138 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
139 bfa_rport_free(rp);
140 break;
141
142 case BFA_RPORT_SM_HWFAIL:
143 bfa_stats(rp, sm_cr_hwf);
144 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
145 break;
146
147 default:
148 bfa_stats(rp, sm_cr_unexp);
149 bfa_sm_fault(rp->bfa, event);
150 }
151}
152
153/**
154 * Waiting for rport create response from firmware.
155 */
156static void
157bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
158{
159 bfa_trc(rp->bfa, rp->rport_tag);
160 bfa_trc(rp->bfa, event);
161
162 switch (event) {
163 case BFA_RPORT_SM_FWRSP:
164 bfa_stats(rp, sm_fwc_rsp);
165 bfa_sm_set_state(rp, bfa_rport_sm_online);
166 bfa_rport_online_cb(rp);
167 break;
168
169 case BFA_RPORT_SM_DELETE:
170 bfa_stats(rp, sm_fwc_del);
171 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
172 break;
173
174 case BFA_RPORT_SM_OFFLINE:
175 bfa_stats(rp, sm_fwc_off);
176 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
177 break;
178
179 case BFA_RPORT_SM_HWFAIL:
180 bfa_stats(rp, sm_fwc_hwf);
181 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
182 break;
183
184 default:
185 bfa_stats(rp, sm_fwc_unexp);
186 bfa_sm_fault(rp->bfa, event);
187 }
188}
189
190/**
191 * Request queue is full, awaiting queue resume to send create request.
192 */
193static void
194bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
195{
196 bfa_trc(rp->bfa, rp->rport_tag);
197 bfa_trc(rp->bfa, event);
198
199 switch (event) {
200 case BFA_RPORT_SM_QRESUME:
201 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
202 bfa_rport_send_fwcreate(rp);
203 break;
204
205 case BFA_RPORT_SM_DELETE:
206 bfa_stats(rp, sm_fwc_del);
207 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
208 bfa_reqq_wcancel(&rp->reqq_wait);
209 bfa_rport_free(rp);
210 break;
211
212 case BFA_RPORT_SM_OFFLINE:
213 bfa_stats(rp, sm_fwc_off);
214 bfa_sm_set_state(rp, bfa_rport_sm_offline);
215 bfa_reqq_wcancel(&rp->reqq_wait);
216 bfa_rport_offline_cb(rp);
217 break;
218
219 case BFA_RPORT_SM_HWFAIL:
220 bfa_stats(rp, sm_fwc_hwf);
221 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
222 bfa_reqq_wcancel(&rp->reqq_wait);
223 break;
224
225 default:
226 bfa_stats(rp, sm_fwc_unexp);
227 bfa_sm_fault(rp->bfa, event);
228 }
229}
230
231/**
232 * Online state - normal parking state.
233 */
234static void
235bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
236{
237 struct bfi_rport_qos_scn_s *qos_scn;
238
239 bfa_trc(rp->bfa, rp->rport_tag);
240 bfa_trc(rp->bfa, event);
241
242 switch (event) {
243 case BFA_RPORT_SM_OFFLINE:
244 bfa_stats(rp, sm_on_off);
245 if (bfa_rport_send_fwdelete(rp))
246 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
247 else
248 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
249 break;
250
251 case BFA_RPORT_SM_DELETE:
252 bfa_stats(rp, sm_on_del);
253 if (bfa_rport_send_fwdelete(rp))
254 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
255 else
256 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
257 break;
258
259 case BFA_RPORT_SM_HWFAIL:
260 bfa_stats(rp, sm_on_hwf);
261 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
262 break;
263
264 case BFA_RPORT_SM_SET_SPEED:
265 bfa_rport_send_fwspeed(rp);
266 break;
267
268 case BFA_RPORT_SM_QOS_SCN:
269 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
270 rp->qos_attr = qos_scn->new_qos_attr;
271 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
272 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
273 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
274 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
275
276 qos_scn->old_qos_attr.qos_flow_id =
277 bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
278 qos_scn->new_qos_attr.qos_flow_id =
279 bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
280 qos_scn->old_qos_attr.qos_priority =
281 bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
282 qos_scn->new_qos_attr.qos_priority =
283 bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);
284
285 if (qos_scn->old_qos_attr.qos_flow_id !=
286 qos_scn->new_qos_attr.qos_flow_id)
287 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
288 qos_scn->old_qos_attr,
289 qos_scn->new_qos_attr);
290 if (qos_scn->old_qos_attr.qos_priority !=
291 qos_scn->new_qos_attr.qos_priority)
292 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
293 qos_scn->old_qos_attr,
294 qos_scn->new_qos_attr);
295 break;
296
297 default:
298 bfa_stats(rp, sm_on_unexp);
299 bfa_sm_fault(rp->bfa, event);
300 }
301}
302
303/**
304 * Firmware rport is being deleted - awaiting f/w response.
305 */
306static void
307bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
308{
309 bfa_trc(rp->bfa, rp->rport_tag);
310 bfa_trc(rp->bfa, event);
311
312 switch (event) {
313 case BFA_RPORT_SM_FWRSP:
314 bfa_stats(rp, sm_fwd_rsp);
315 bfa_sm_set_state(rp, bfa_rport_sm_offline);
316 bfa_rport_offline_cb(rp);
317 break;
318
319 case BFA_RPORT_SM_DELETE:
320 bfa_stats(rp, sm_fwd_del);
321 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
322 break;
323
324 case BFA_RPORT_SM_HWFAIL:
325 bfa_stats(rp, sm_fwd_hwf);
326 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
327 bfa_rport_offline_cb(rp);
328 break;
329
330 default:
331 bfa_stats(rp, sm_fwd_unexp);
332 bfa_sm_fault(rp->bfa, event);
333 }
334}
335
336static void
337bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
338{
339 bfa_trc(rp->bfa, rp->rport_tag);
340 bfa_trc(rp->bfa, event);
341
342 switch (event) {
343 case BFA_RPORT_SM_QRESUME:
344 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
345 bfa_rport_send_fwdelete(rp);
346 break;
347
348 case BFA_RPORT_SM_DELETE:
349 bfa_stats(rp, sm_fwd_del);
350 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
351 break;
352
353 case BFA_RPORT_SM_HWFAIL:
354 bfa_stats(rp, sm_fwd_hwf);
355 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
356 bfa_reqq_wcancel(&rp->reqq_wait);
357 bfa_rport_offline_cb(rp);
358 break;
359
360 default:
361 bfa_stats(rp, sm_fwd_unexp);
362 bfa_sm_fault(rp->bfa, event);
363 }
364}
365
366/**
367 * Offline state.
368 */
369static void
370bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
371{
372 bfa_trc(rp->bfa, rp->rport_tag);
373 bfa_trc(rp->bfa, event);
374
375 switch (event) {
376 case BFA_RPORT_SM_DELETE:
377 bfa_stats(rp, sm_off_del);
378 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
379 bfa_rport_free(rp);
380 break;
381
382 case BFA_RPORT_SM_ONLINE:
383 bfa_stats(rp, sm_off_on);
384 if (bfa_rport_send_fwcreate(rp))
385 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
386 else
387 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
388 break;
389
390 case BFA_RPORT_SM_HWFAIL:
391 bfa_stats(rp, sm_off_hwf);
392 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
393 break;
394
395 default:
396 bfa_stats(rp, sm_off_unexp);
397 bfa_sm_fault(rp->bfa, event);
398 }
399}
400
401/**
402 * Rport is deleted, waiting for firmware response to delete.
403 */
404static void
405bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
406{
407 bfa_trc(rp->bfa, rp->rport_tag);
408 bfa_trc(rp->bfa, event);
409
410 switch (event) {
411 case BFA_RPORT_SM_FWRSP:
412 bfa_stats(rp, sm_del_fwrsp);
413 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
414 bfa_rport_free(rp);
415 break;
416
417 case BFA_RPORT_SM_HWFAIL:
418 bfa_stats(rp, sm_del_hwf);
419 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
420 bfa_rport_free(rp);
421 break;
422
423 default:
424 bfa_sm_fault(rp->bfa, event);
425 }
426}
427
428static void
429bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
430{
431 bfa_trc(rp->bfa, rp->rport_tag);
432 bfa_trc(rp->bfa, event);
433
434 switch (event) {
435 case BFA_RPORT_SM_QRESUME:
436 bfa_stats(rp, sm_del_fwrsp);
437 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
438 bfa_rport_send_fwdelete(rp);
439 break;
440
441 case BFA_RPORT_SM_HWFAIL:
442 bfa_stats(rp, sm_del_hwf);
443 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
444 bfa_reqq_wcancel(&rp->reqq_wait);
445 bfa_rport_free(rp);
446 break;
447
448 default:
449 bfa_sm_fault(rp->bfa, event);
450 }
451}
452
453/**
454 * Waiting for rport create response from firmware. A delete is pending.
455 */
456static void
457bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
458 enum bfa_rport_event event)
459{
460 bfa_trc(rp->bfa, rp->rport_tag);
461 bfa_trc(rp->bfa, event);
462
463 switch (event) {
464 case BFA_RPORT_SM_FWRSP:
465 bfa_stats(rp, sm_delp_fwrsp);
466 if (bfa_rport_send_fwdelete(rp))
467 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
468 else
469 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
470 break;
471
472 case BFA_RPORT_SM_HWFAIL:
473 bfa_stats(rp, sm_delp_hwf);
474 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
475 bfa_rport_free(rp);
476 break;
477
478 default:
479 bfa_stats(rp, sm_delp_unexp);
480 bfa_sm_fault(rp->bfa, event);
481 }
482}
483
484/**
485 * Waiting for rport create response from firmware. Rport offline is pending.
486 */
487static void
488bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
489 enum bfa_rport_event event)
490{
491 bfa_trc(rp->bfa, rp->rport_tag);
492 bfa_trc(rp->bfa, event);
493
494 switch (event) {
495 case BFA_RPORT_SM_FWRSP:
496 bfa_stats(rp, sm_offp_fwrsp);
497 if (bfa_rport_send_fwdelete(rp))
498 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
499 else
500 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
501 break;
502
503 case BFA_RPORT_SM_DELETE:
504 bfa_stats(rp, sm_offp_del);
505 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
506 break;
507
508 case BFA_RPORT_SM_HWFAIL:
509 bfa_stats(rp, sm_offp_hwf);
510 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
511 break;
512
513 default:
514 bfa_stats(rp, sm_offp_unexp);
515 bfa_sm_fault(rp->bfa, event);
516 }
517}
518
519/**
520 * IOC h/w failed.
521 */
522static void
523bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
524{
525 bfa_trc(rp->bfa, rp->rport_tag);
526 bfa_trc(rp->bfa, event);
527
528 switch (event) {
529 case BFA_RPORT_SM_OFFLINE:
530 bfa_stats(rp, sm_iocd_off);
531 bfa_rport_offline_cb(rp);
532 break;
533
534 case BFA_RPORT_SM_DELETE:
535 bfa_stats(rp, sm_iocd_del);
536 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
537 bfa_rport_free(rp);
538 break;
539
540 case BFA_RPORT_SM_ONLINE:
541 bfa_stats(rp, sm_iocd_on);
542 if (bfa_rport_send_fwcreate(rp))
543 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
544 else
545 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
546 break;
547
548 case BFA_RPORT_SM_HWFAIL:
549 break;
550
551 default:
552 bfa_stats(rp, sm_iocd_unexp);
553 bfa_sm_fault(rp->bfa, event);
554 }
555}
556
557
558
559/**
560 * bfa_rport_private BFA rport private functions
561 */
562
563static void
564__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
565{
566 struct bfa_rport_s *rp = cbarg;
567
568 if (complete)
569 bfa_cb_rport_online(rp->rport_drv);
570}
571
572static void
573__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
574{
575 struct bfa_rport_s *rp = cbarg;
576
577 if (complete)
578 bfa_cb_rport_offline(rp->rport_drv);
579}
580
581static void
582bfa_rport_qresume(void *cbarg)
583{
584 struct bfa_rport_s *rp = cbarg;
585
586 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
587}
588
589static void
590bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
591 u32 *dm_len)
592{
593 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
594 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
595
596 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
597}
598
599static void
600bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
601 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
602{
603 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
604 struct bfa_rport_s *rp;
605 u16 i;
606
607 INIT_LIST_HEAD(&mod->rp_free_q);
608 INIT_LIST_HEAD(&mod->rp_active_q);
609
610 rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
611 mod->rps_list = rp;
612 mod->num_rports = cfg->fwcfg.num_rports;
613
614 bfa_assert(mod->num_rports
615 && !(mod->num_rports & (mod->num_rports - 1)));
616
617 for (i = 0; i < mod->num_rports; i++, rp++) {
618 bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
619 rp->bfa = bfa;
620 rp->rport_tag = i;
621 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
622
623 /**
624 * - is unused
625 */
626 if (i)
627 list_add_tail(&rp->qe, &mod->rp_free_q);
628
629 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
630 }
631
632 /**
633 * consume memory
634 */
635 bfa_meminfo_kva(meminfo) = (u8 *) rp;
636}
637
638static void
639bfa_rport_detach(struct bfa_s *bfa)
640{
641}
642
643static void
644bfa_rport_start(struct bfa_s *bfa)
645{
646}
647
648static void
649bfa_rport_stop(struct bfa_s *bfa)
650{
651}
652
653static void
654bfa_rport_iocdisable(struct bfa_s *bfa)
655{
656 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
657 struct bfa_rport_s *rport;
658 struct list_head *qe, *qen;
659
660 list_for_each_safe(qe, qen, &mod->rp_active_q) {
661 rport = (struct bfa_rport_s *) qe;
662 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
663 }
664}
665
666static struct bfa_rport_s *
667bfa_rport_alloc(struct bfa_rport_mod_s *mod)
668{
669 struct bfa_rport_s *rport;
670
671 bfa_q_deq(&mod->rp_free_q, &rport);
672 if (rport)
673 list_add_tail(&rport->qe, &mod->rp_active_q);
674
675 return rport;
676}
677
678static void
679bfa_rport_free(struct bfa_rport_s *rport)
680{
681 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
682
683 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
684 list_del(&rport->qe);
685 list_add_tail(&rport->qe, &mod->rp_free_q);
686}
687
688static bfa_boolean_t
689bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
690{
691 struct bfi_rport_create_req_s *m;
692
693 /**
694 * check for room in queue to send request now
695 */
696 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
697 if (!m) {
698 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
699 return BFA_FALSE;
700 }
701
702 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
703 bfa_lpuid(rp->bfa));
704 m->bfa_handle = rp->rport_tag;
705 m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
706 m->pid = rp->rport_info.pid;
707 m->lp_tag = rp->rport_info.lp_tag;
708 m->local_pid = rp->rport_info.local_pid;
709 m->fc_class = rp->rport_info.fc_class;
710 m->vf_en = rp->rport_info.vf_en;
711 m->vf_id = rp->rport_info.vf_id;
712 m->cisc = rp->rport_info.cisc;
713
714 /**
715 * queue I/O message to firmware
716 */
717 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
718 return BFA_TRUE;
719}
720
721static bfa_boolean_t
722bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
723{
724 struct bfi_rport_delete_req_s *m;
725
726 /**
727 * check for room in queue to send request now
728 */
729 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
730 if (!m) {
731 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
732 return BFA_FALSE;
733 }
734
735 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
736 bfa_lpuid(rp->bfa));
737 m->fw_handle = rp->fw_handle;
738
739 /**
740 * queue I/O message to firmware
741 */
742 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
743 return BFA_TRUE;
744}
745
746static bfa_boolean_t
747bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
748{
749 struct bfa_rport_speed_req_s *m;
750
751 /**
752 * check for room in queue to send request now
753 */
754 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
755 if (!m) {
756 bfa_trc(rp->bfa, rp->rport_info.speed);
757 return BFA_FALSE;
758 }
759
760 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
761 bfa_lpuid(rp->bfa));
762 m->fw_handle = rp->fw_handle;
763 m->speed = (u8)rp->rport_info.speed;
764
765 /**
766 * queue I/O message to firmware
767 */
768 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
769 return BFA_TRUE;
770}
771
772
773
774/**
775 * bfa_rport_public
776 */
777
778/**
779 * Rport interrupt processing.
780 */
781void
782bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
783{
784 union bfi_rport_i2h_msg_u msg;
785 struct bfa_rport_s *rp;
786
787 bfa_trc(bfa, m->mhdr.msg_id);
788
789 msg.msg = m;
790
791 switch (m->mhdr.msg_id) {
792 case BFI_RPORT_I2H_CREATE_RSP:
793 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
794 rp->fw_handle = msg.create_rsp->fw_handle;
795 rp->qos_attr = msg.create_rsp->qos_attr;
796 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
797 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
798 break;
799
800 case BFI_RPORT_I2H_DELETE_RSP:
801 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
802 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
803 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
804 break;
805
806 case BFI_RPORT_I2H_QOS_SCN:
807 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
808 rp->event_arg.fw_msg = msg.qos_scn_evt;
809 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
810 break;
811
812 default:
813 bfa_trc(bfa, m->mhdr.msg_id);
814 bfa_assert(0);
815 }
816}
817
818
819
820/**
821 * bfa_rport_api
822 */
823
824struct bfa_rport_s *
825bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
826{
827 struct bfa_rport_s *rp;
828
829 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
830
831 if (rp == NULL)
832 return NULL;
833
834 rp->bfa = bfa;
835 rp->rport_drv = rport_drv;
836 bfa_rport_clear_stats(rp);
837
838 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
839 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
840
841 return rp;
842}
843
844void
845bfa_rport_delete(struct bfa_rport_s *rport)
846{
847 bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
848}
849
850void
851bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
852{
853 bfa_assert(rport_info->max_frmsz != 0);
854
855 /**
856 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
857 * responses. Default to minimum size.
858 */
859 if (rport_info->max_frmsz == 0) {
860 bfa_trc(rport->bfa, rport->rport_tag);
861 rport_info->max_frmsz = FC_MIN_PDUSZ;
862 }
863
864 bfa_os_assign(rport->rport_info, *rport_info);
865 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
866}
867
868void
869bfa_rport_offline(struct bfa_rport_s *rport)
870{
871 bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
872}
873
874void
875bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
876{
877 bfa_assert(speed != 0);
878 bfa_assert(speed != BFA_PPORT_SPEED_AUTO);
879
880 rport->rport_info.speed = speed;
881 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
882}
883
884void
885bfa_rport_get_stats(struct bfa_rport_s *rport,
886 struct bfa_rport_hal_stats_s *stats)
887{
888 *stats = rport->stats;
889}
890
891void
892bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
893 struct bfa_rport_qos_attr_s *qos_attr)
894{
895 qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
896 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
897
898}
899
900void
901bfa_rport_clear_stats(struct bfa_rport_s *rport)
902{
903 bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
904}
905
906
diff --git a/drivers/scsi/bfa/bfa_rport_priv.h b/drivers/scsi/bfa/bfa_rport_priv.h
deleted file mode 100644
index 6490ce2e990d..000000000000
--- a/drivers/scsi/bfa/bfa_rport_priv.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_RPORT_PRIV_H__
19#define __BFA_RPORT_PRIV_H__
20
21#include <bfa_svc.h>
22
23#define BFA_RPORT_MIN 4
24
25struct bfa_rport_mod_s {
26 struct bfa_rport_s *rps_list; /* list of rports */
27 struct list_head rp_free_q; /* free bfa_rports */
28 struct list_head rp_active_q; /* free bfa_rports */
29 u16 num_rports; /* number of rports */
30};
31
32#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
33
34/**
35 * Convert rport tag to RPORT
36 */
37#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
38 (BFA_RPORT_MOD(__bfa)->rps_list + \
39 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
40
41/*
42 * external functions
43 */
44void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
45#endif /* __BFA_RPORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sgpg.c b/drivers/scsi/bfa/bfa_sgpg.c
deleted file mode 100644
index ae452c42e40e..000000000000
--- a/drivers/scsi/bfa/bfa_sgpg.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19
20BFA_TRC_FILE(HAL, SGPG);
21BFA_MODULE(sgpg);
22
23/**
24 * bfa_sgpg_mod BFA SGPG Mode module
25 */
26
27/**
28 * Compute and return memory needed by FCP(im) module.
29 */
30static void
31bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
32 u32 *dm_len)
33{
34 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
35 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
36
37 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
38 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
39}
40
41
42static void
43bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
44 struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
45{
46 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
47 int i;
48 struct bfa_sgpg_s *hsgpg;
49 struct bfi_sgpg_s *sgpg;
50 u64 align_len;
51
52 union {
53 u64 pa;
54 union bfi_addr_u addr;
55 } sgpg_pa;
56
57 INIT_LIST_HEAD(&mod->sgpg_q);
58 INIT_LIST_HEAD(&mod->sgpg_wait_q);
59
60 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
61
62 mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
63 mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
64 align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
65 mod->sgpg_arr_pa += align_len;
66 mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
67 align_len);
68 mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
69 align_len);
70
71 hsgpg = mod->hsgpg_arr;
72 sgpg = mod->sgpg_arr;
73 sgpg_pa.pa = mod->sgpg_arr_pa;
74 mod->free_sgpgs = mod->num_sgpgs;
75
76 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
77
78 for (i = 0; i < mod->num_sgpgs; i++) {
79 bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
80 bfa_os_memset(sgpg, 0, sizeof(*sgpg));
81
82 hsgpg->sgpg = sgpg;
83 hsgpg->sgpg_pa = sgpg_pa.addr;
84 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
85
86 hsgpg++;
87 sgpg++;
88 sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
89 }
90
91 bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
92 bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
93 bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
94}
95
96static void
97bfa_sgpg_detach(struct bfa_s *bfa)
98{
99}
100
101static void
102bfa_sgpg_start(struct bfa_s *bfa)
103{
104}
105
106static void
107bfa_sgpg_stop(struct bfa_s *bfa)
108{
109}
110
111static void
112bfa_sgpg_iocdisable(struct bfa_s *bfa)
113{
114}
115
116
117
118/**
119 * bfa_sgpg_public BFA SGPG public functions
120 */
121
122bfa_status_t
123bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
124{
125 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
126 struct bfa_sgpg_s *hsgpg;
127 int i;
128
129 bfa_trc_fp(bfa, nsgpgs);
130
131 if (mod->free_sgpgs < nsgpgs)
132 return BFA_STATUS_ENOMEM;
133
134 for (i = 0; i < nsgpgs; i++) {
135 bfa_q_deq(&mod->sgpg_q, &hsgpg);
136 bfa_assert(hsgpg);
137 list_add_tail(&hsgpg->qe, sgpg_q);
138 }
139
140 mod->free_sgpgs -= nsgpgs;
141 return BFA_STATUS_OK;
142}
143
144void
145bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
146{
147 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
148 struct bfa_sgpg_wqe_s *wqe;
149
150 bfa_trc_fp(bfa, nsgpg);
151
152 mod->free_sgpgs += nsgpg;
153 bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
154
155 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
156
157 if (list_empty(&mod->sgpg_wait_q))
158 return;
159
160 /**
161 * satisfy as many waiting requests as possible
162 */
163 do {
164 wqe = bfa_q_first(&mod->sgpg_wait_q);
165 if (mod->free_sgpgs < wqe->nsgpg)
166 nsgpg = mod->free_sgpgs;
167 else
168 nsgpg = wqe->nsgpg;
169 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
170 wqe->nsgpg -= nsgpg;
171 if (wqe->nsgpg == 0) {
172 list_del(&wqe->qe);
173 wqe->cbfn(wqe->cbarg);
174 }
175 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
176}
177
178void
179bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
180{
181 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
182
183 bfa_assert(nsgpg > 0);
184 bfa_assert(nsgpg > mod->free_sgpgs);
185
186 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
187
188 /**
189 * allocate any left to this one first
190 */
191 if (mod->free_sgpgs) {
192 /**
193 * no one else is waiting for SGPG
194 */
195 bfa_assert(list_empty(&mod->sgpg_wait_q));
196 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
197 wqe->nsgpg -= mod->free_sgpgs;
198 mod->free_sgpgs = 0;
199 }
200
201 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
202}
203
204void
205bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
206{
207 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
208
209 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
210 list_del(&wqe->qe);
211
212 if (wqe->nsgpg_total != wqe->nsgpg)
213 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
214 wqe->nsgpg_total - wqe->nsgpg);
215}
216
217void
218bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
219 void *cbarg)
220{
221 INIT_LIST_HEAD(&wqe->sgpg_q);
222 wqe->cbfn = cbfn;
223 wqe->cbarg = cbarg;
224}
225
226
diff --git a/drivers/scsi/bfa/bfa_sgpg_priv.h b/drivers/scsi/bfa/bfa_sgpg_priv.h
deleted file mode 100644
index 9c2a8cbe7522..000000000000
--- a/drivers/scsi/bfa/bfa_sgpg_priv.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_sgpg.h BFA SG page module
20 */
21
22#ifndef __BFA_SGPG_PRIV_H__
23#define __BFA_SGPG_PRIV_H__
24
25#include <cs/bfa_q.h>
26
27#define BFA_SGPG_MIN (16)
28
29/**
30 * Alignment macro for SG page allocation
31 */
32#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
33 & ~(sizeof(struct bfi_sgpg_s) - 1))
34
35struct bfa_sgpg_wqe_s {
36 struct list_head qe; /* queue sg page element */
37 int nsgpg; /* pages to be allocated */
38 int nsgpg_total; /* total pages required */
39 void (*cbfn) (void *cbarg);
40 /* callback function */
41 void *cbarg; /* callback arg */
42 struct list_head sgpg_q; /* queue of alloced sgpgs */
43};
44
45struct bfa_sgpg_s {
46 struct list_head qe; /* queue sg page element */
47 struct bfi_sgpg_s *sgpg; /* va of SG page */
48 union bfi_addr_u sgpg_pa;/* pa of SG page */
49};
50
51/**
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required.
54 */
55#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56
57struct bfa_sgpg_mod_s {
58 struct bfa_s *bfa;
59 int num_sgpgs; /* number of SG pages */
60 int free_sgpgs; /* number of free SG pages */
61 struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
62 struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
63 u64 sgpg_arr_pa; /* SG page array DMA addr */
64 struct list_head sgpg_q; /* queue of free SG pages */
65 struct list_head sgpg_wait_q; /* wait queue for SG pages */
66};
67#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
68
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs);
71void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q,
72 int nsgpgs);
73void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
74 void (*cbfn) (void *cbarg), void *cbarg);
75void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
76 int nsgpgs);
77void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
78
79#endif /* __BFA_SGPG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_sm.c b/drivers/scsi/bfa/bfa_sm.c
deleted file mode 100644
index 5420f4f45e58..000000000000
--- a/drivers/scsi/bfa/bfa_sm.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfasm.c BFA State machine utility functions
20 */
21
22#include <cs/bfa_sm.h>
23
24/**
25 * cs_sm_api
26 */
27
28int
29bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
30{
31 int i = 0;
32
33 while (smt[i].sm && smt[i].sm != sm)
34 i++;
35 return smt[i].state;
36}
37
38
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
new file mode 100644
index 000000000000..aa1dc749b281
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -0,0 +1,5423 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfa_os_inc.h"
19#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
22#include "bfad_drv.h"
23
24BFA_TRC_FILE(HAL, FCXP);
25BFA_MODULE(fcxp);
26BFA_MODULE(sgpg);
27BFA_MODULE(lps);
28BFA_MODULE(fcport);
29BFA_MODULE(rport);
30BFA_MODULE(uf);
31
32/**
33 * LPS related definitions
34 */
35#define BFA_LPS_MIN_LPORTS (1)
36#define BFA_LPS_MAX_LPORTS (256)
37
38/*
39 * Maximum Vports supported per physical port or vf.
40 */
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43
44/**
45 * lps_pvt BFA LPS private functions
46 */
47
48enum bfa_lps_event {
49 BFA_LPS_SM_LOGIN = 1, /* login request from user */
50 BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
51 BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
52 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
53 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
54 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
56};
57
58/**
59 * FC PORT related definitions
60 */
61/*
62 * The port is considered disabled if corresponding physical port or IOC are
63 * disabled explicitly
64 */
65#define BFA_PORT_IS_DISABLED(bfa) \
66 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68
69
70/**
71 * BFA port state machine events
72 */
73enum bfa_fcport_sm_event {
74 BFA_FCPORT_SM_START = 1, /* start port state machine */
75 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
76 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
77 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
78 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
79 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
80 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
81 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
83};
84
85/**
86 * BFA port link notification state machine events
87 */
88
89enum bfa_fcport_ln_sm_event {
90 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
91 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
93};
94
95/**
96 * RPORT related definitions
97 */
98#define bfa_rport_offline_cb(__rp) do { \
99 if ((__rp)->bfa->fcs) \
100 bfa_cb_rport_offline((__rp)->rport_drv); \
101 else { \
102 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
103 __bfa_cb_rport_offline, (__rp)); \
104 } \
105} while (0)
106
107#define bfa_rport_online_cb(__rp) do { \
108 if ((__rp)->bfa->fcs) \
109 bfa_cb_rport_online((__rp)->rport_drv); \
110 else { \
111 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
112 __bfa_cb_rport_online, (__rp)); \
113 } \
114} while (0)
115
116
117enum bfa_rport_event {
118 BFA_RPORT_SM_CREATE = 1, /* rport create event */
119 BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
120 BFA_RPORT_SM_ONLINE = 3, /* rport is online */
121 BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
122 BFA_RPORT_SM_FWRSP = 5, /* firmware response */
123 BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
124 BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
125 BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
127};
128
129/**
130 * forward declarations FCXP related functions
131 */
132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
133static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
134 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
135static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
136 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
137static void bfa_fcxp_qresume(void *cbarg);
138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req);
140
141/**
142 * forward declarations for LPS functions
143 */
144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
145 u32 *dm_len);
146static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
147 struct bfa_iocfc_cfg_s *cfg,
148 struct bfa_meminfo_s *meminfo,
149 struct bfa_pcidev_s *pcidev);
150static void bfa_lps_detach(struct bfa_s *bfa);
151static void bfa_lps_start(struct bfa_s *bfa);
152static void bfa_lps_stop(struct bfa_s *bfa);
153static void bfa_lps_iocdisable(struct bfa_s *bfa);
154static void bfa_lps_login_rsp(struct bfa_s *bfa,
155 struct bfi_lps_login_rsp_s *rsp);
156static void bfa_lps_logout_rsp(struct bfa_s *bfa,
157 struct bfi_lps_logout_rsp_s *rsp);
158static void bfa_lps_reqq_resume(void *lps_arg);
159static void bfa_lps_free(struct bfa_lps_s *lps);
160static void bfa_lps_send_login(struct bfa_lps_s *lps);
161static void bfa_lps_send_logout(struct bfa_lps_s *lps);
162static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
165
166/**
167 * forward declaration for LPS state machine
168 */
169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
170static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
172 event);
173static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
174static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event);
177
178/**
179 * forward declaration for FC Port functions
180 */
181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
182static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
183static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
184static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
185static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
186static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
187static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
188 enum bfa_port_linkstate event, bfa_boolean_t trunk);
189static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
190 enum bfa_port_linkstate event);
191static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
192static void bfa_fcport_stats_get_timeout(void *cbarg);
193static void bfa_fcport_stats_clr_timeout(void *cbarg);
194static void bfa_trunk_iocdisable(struct bfa_s *bfa);
195
196/**
197 * forward declaration for FC PORT state machine
198 */
199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
200 enum bfa_fcport_sm_event event);
201static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
202 enum bfa_fcport_sm_event event);
203static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
207static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
208 enum bfa_fcport_sm_event event);
209static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
210 enum bfa_fcport_sm_event event);
211static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
212 enum bfa_fcport_sm_event event);
213static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
214 enum bfa_fcport_sm_event event);
215static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
216 enum bfa_fcport_sm_event event);
217static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
218 enum bfa_fcport_sm_event event);
219static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
220 enum bfa_fcport_sm_event event);
221static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
222 enum bfa_fcport_sm_event event);
223
224static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
225 enum bfa_fcport_ln_sm_event event);
226static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
227 enum bfa_fcport_ln_sm_event event);
228static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
229 enum bfa_fcport_ln_sm_event event);
230static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
231 enum bfa_fcport_ln_sm_event event);
232static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
233 enum bfa_fcport_ln_sm_event event);
234static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
235 enum bfa_fcport_ln_sm_event event);
236static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
237 enum bfa_fcport_ln_sm_event event);
238
239static struct bfa_sm_table_s hal_port_sm_table[] = {
240 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
241 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
242 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
243 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
244 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
245 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
246 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
247 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
248 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
249 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
250 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
251 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
252};
253
254
255/**
256 * forward declaration for RPORT related functions
257 */
258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
259static void bfa_rport_free(struct bfa_rport_s *rport);
260static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
261static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
262static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
263static void __bfa_cb_rport_online(void *cbarg,
264 bfa_boolean_t complete);
265static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete);
267
268/**
269 * forward declaration for RPORT state machine
270 */
271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273static void bfa_rport_sm_created(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277static void bfa_rport_sm_online(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
280 enum bfa_rport_event event);
281static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
282 enum bfa_rport_event event);
283static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
284 enum bfa_rport_event event);
285static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
286 enum bfa_rport_event event);
287static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
288 enum bfa_rport_event event);
289static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
290 enum bfa_rport_event event);
291static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
292 enum bfa_rport_event event);
293static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
294 enum bfa_rport_event event);
295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event);
297
298/**
299 * PLOG related definitions
300 */
/**
 * Sanity-check a port-log record before it is copied into the ring.
 *
 * Returns 1 (invalid) when the record type is neither INT nor STRING,
 * or when a non-INT record carries an out-of-range integer count;
 * returns 0 when the record looks usable.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	/*
	 * NOTE(review): this test only constrains non-INT records; an INT
	 * record with log_num_ints > BFA_PL_INT_LOG_SZ passes validation.
	 * bfa_plog_intarr() clamps the count before building the record,
	 * so this is benign today, but the `!=` reads like it may have
	 * been intended as `==` — confirm before relying on it.
	 */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
314
/**
 * Append one record to the circular port log.
 *
 * No-op when logging is disabled; an invalid record trips an assert
 * and is dropped.  When the ring wraps, the oldest record is
 * overwritten (head is pushed forward together with tail).
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	/* Copy the caller's record into the ring and timestamp it there. */
	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_os_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring full: advance head so it still points at the oldest entry. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
341
342void
343bfa_plog_init(struct bfa_plog_s *plog)
344{
345 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
346
347 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348 plog->head = plog->tail = 0;
349 plog->plog_enabled = 1;
350}
351
352void
353bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
354 enum bfa_plog_eid event,
355 u16 misc, char *log_str)
356{
357 struct bfa_plog_rec_s lp;
358
359 if (plog->plog_enabled) {
360 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361 lp.mid = mid;
362 lp.eid = event;
363 lp.log_type = BFA_PL_LOG_TYPE_STRING;
364 lp.misc = misc;
365 strncpy(lp.log_entry.string_log, log_str,
366 BFA_PL_STRING_LOG_SZ - 1);
367 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
368 bfa_plog_add(plog, &lp);
369 }
370}
371
/**
 * Append an integer-array record to the port log.
 *
 * @param[in] plog      port log to append to
 * @param[in] mid       module id of the logging entity
 * @param[in] event     event id for the record
 * @param[in] misc      caller-supplied 16-bit payload
 * @param[in] intarr    integer values to record
 * @param[in] num_ints  count of values; silently clamped to
 *                      BFA_PL_INT_LOG_SZ
 */
void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s lp;
	u32 i;

	/* Clamp before validation so plkd_validate_logrec() cannot trip. */
	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			bfa_os_assign(lp.log_entry.int_log[i],
					intarr[i]);

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}
399
400void
401bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
402 enum bfa_plog_eid event,
403 u16 misc, struct fchs_s *fchdr)
404{
405 struct bfa_plog_rec_s lp;
406 u32 *tmp_int = (u32 *) fchdr;
407 u32 ints[BFA_PL_INT_LOG_SZ];
408
409 if (plog->plog_enabled) {
410 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
411
412 ints[0] = tmp_int[0];
413 ints[1] = tmp_int[1];
414 ints[2] = tmp_int[4];
415
416 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
417 }
418}
419
420void
421bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
422 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
423 u32 pld_w0)
424{
425 struct bfa_plog_rec_s lp;
426 u32 *tmp_int = (u32 *) fchdr;
427 u32 ints[BFA_PL_INT_LOG_SZ];
428
429 if (plog->plog_enabled) {
430 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
431
432 ints[0] = tmp_int[0];
433 ints[1] = tmp_int[1];
434 ints[2] = tmp_int[4];
435 ints[3] = pld_w0;
436
437 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
438 }
439}
440
/**
 * Empty the port log by resetting both ring indices; the record
 * storage itself is left untouched.
 */
void
bfa_plog_clear(struct bfa_plog_s *plog)
{
	plog->head = plog->tail = 0;
}

/**
 * Enable recording; subsequent bfa_plog_add() calls are stored.
 */
void
bfa_plog_enable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 1;
}

/**
 * Disable recording; bfa_plog_add() becomes a no-op.
 */
void
bfa_plog_disable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 0;
}

/**
 * Return whether port logging is currently enabled.
 */
bfa_boolean_t
bfa_plog_get_setting(struct bfa_plog_s *plog)
{
	return (bfa_boolean_t)plog->plog_enabled;
}
464
465/**
466 * fcxp_pvt BFA FCXP private functions
467 */
468
/**
 * Carve the FCXP request and response payload pools out of the DMA
 * region described by @mi, then advance the meminfo cursors past the
 * claimed space.  Order matters: the request pool is taken first,
 * immediately followed by the response pool.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8 *dm_kva = NULL;
	u64 dm_pa;
	u32 buf_pool_sz;

	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	/* Publish the advanced cursors back into the meminfo. */
	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}
503
504static void
505claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
506{
507 u16 i;
508 struct bfa_fcxp_s *fcxp;
509
510 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
511 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
512
513 INIT_LIST_HEAD(&mod->fcxp_free_q);
514 INIT_LIST_HEAD(&mod->fcxp_active_q);
515
516 mod->fcxp_list = fcxp;
517
518 for (i = 0; i < mod->num_fcxps; i++) {
519 fcxp->fcxp_mod = mod;
520 fcxp->fcxp_tag = i;
521
522 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
523 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
524 fcxp->reqq_waiting = BFA_FALSE;
525
526 fcxp = fcxp + 1;
527 }
528
529 bfa_meminfo_kva(mi) = (void *)fcxp;
530}
531
/**
 * Report the memory the FCXP module needs for the given config.
 *
 * @param[in]     cfg      IOC configuration (num_fcxp_reqs, min_cfg)
 * @param[in,out] ndm_len  accumulates non-DMA memory length
 * @param[in,out] dm_len   accumulates DMA memory length
 *
 * Request payloads always use the small (IBUF) size; response
 * payloads use the small size only in min-config mode, otherwise the
 * large (LBUF) size — mirroring bfa_fcxp_attach().
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;

	if (num_fcxp_reqs == 0)
		return;

	/*
	 * Account for req/rsp payload
	 */
	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	if (cfg->drvcfg.min_cfg)
		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	else
		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;

	/*
	 * Account for fcxp structs
	 */
	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}
555
/**
 * FCXP module attach: set up the module state and claim payload and
 * struct memory from the meminfo regions sized by bfa_fcxp_meminfo().
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/**
	 * Initialize FCXP request and response payload sizes.
	 * In min-config mode both stay at the small (IBUF) size.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxp_req_rsp_mem(mod, meminfo);
	claim_fcxps_mem(mod, meminfo);
}
578
/**
 * FCXP module detach hook — intentionally empty; nothing to undo.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

/**
 * FCXP module start hook — intentionally empty.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

/**
 * FCXP module stop hook — intentionally empty.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
593
/**
 * IOC failure notification: fail every FCXP still on the active list.
 *
 * FCXPs with a NULL caller are completed inline with
 * BFA_STATUS_IOC_FAILURE and freed immediately; the rest get the
 * failure status cached and their completion deferred through the
 * callback queue (__bfa_fcxp_send_cbfn).
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
614
615static struct bfa_fcxp_s *
616bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
617{
618 struct bfa_fcxp_s *fcxp;
619
620 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
621
622 if (fcxp)
623 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
624
625 return fcxp;
626}
627
628static void
629bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
630 struct bfa_s *bfa,
631 u8 *use_ibuf,
632 u32 *nr_sgles,
633 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
634 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
635 struct list_head *r_sgpg_q,
636 int n_sgles,
637 bfa_fcxp_get_sgaddr_t sga_cbfn,
638 bfa_fcxp_get_sglen_t sglen_cbfn)
639{
640
641 bfa_assert(bfa != NULL);
642
643 bfa_trc(bfa, fcxp->fcxp_tag);
644
645 if (n_sgles == 0) {
646 *use_ibuf = 1;
647 } else {
648 bfa_assert(*sga_cbfn != NULL);
649 bfa_assert(*sglen_cbfn != NULL);
650
651 *use_ibuf = 0;
652 *r_sga_cbfn = sga_cbfn;
653 *r_sglen_cbfn = sglen_cbfn;
654
655 *nr_sgles = n_sgles;
656
657 /*
658 * alloc required sgpgs
659 */
660 if (n_sgles > BFI_SGE_INLINE)
661 bfa_assert(0);
662 }
663
664}
665
/**
 * Initialize a freshly allocated FCXP: record the caller token and
 * set up both the request and response sides through
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	  void *caller, struct bfa_s *bfa, int nreq_sgles,
	  int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	  bfa_fcxp_get_sglen_t req_sglen_cbfn,
	  bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	  bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request direction. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response direction. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
692
/**
 * Return an FCXP to the module.
 *
 * If an allocation waiter is queued, the FCXP is re-initialized with
 * the waiter's parameters and handed straight to it instead of going
 * back on the free list; otherwise it moves from the active list to
 * the free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
716
/**
 * No-op completion callback installed for FCXPs whose completion was
 * discarded; all response arguments are ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
724
725static void
726__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
727{
728 struct bfa_fcxp_s *fcxp = cbarg;
729
730 if (complete) {
731 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
732 fcxp->rsp_status, fcxp->rsp_len,
733 fcxp->residue_len, &fcxp->rsp_fchs);
734 } else {
735 bfa_fcxp_free(fcxp);
736 }
737}
738
/**
 * Firmware completion handler for an FCXP send request.
 *
 * Converts wire-endian fields, logs the received frame, then either
 * completes the request inline (caller == NULL) and frees the FCXP,
 * or caches the response in the FCXP and defers completion through
 * the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);

	/**
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Cache the response; completion runs later. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
788
789static void
790hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
791{
792 union bfi_addr_u sga_zero = { {0} };
793
794 sge->sg_len = reqlen;
795 sge->flags = BFI_SGE_DATA_LAST;
796 bfa_dma_addr_set(sge[0].sga, req_pa);
797 bfa_sge_to_be(sge);
798 sge++;
799
800 sge->sga = sga_zero;
801 sge->sg_len = reqlen;
802 sge->flags = BFI_SGE_PGDLEN;
803 bfa_sge_to_be(sge);
804}
805
806static void
807hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
808 struct fchs_s *fchs)
809{
810 /*
811 * TODO: TX ox_id
812 */
813 if (reqlen > 0) {
814 if (fcxp->use_ireqbuf) {
815 u32 pld_w0 =
816 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
817
818 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
819 BFA_PL_EID_TX,
820 reqlen + sizeof(struct fchs_s), fchs,
821 pld_w0);
822 } else {
823 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
824 BFA_PL_EID_TX,
825 reqlen + sizeof(struct fchs_s),
826 fchs);
827 }
828 } else {
829 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
830 reqlen + sizeof(struct fchs_s), fchs);
831 }
832}
833
834static void
835hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
836 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
837{
838 if (fcxp_rsp->rsp_len > 0) {
839 if (fcxp->use_irspbuf) {
840 u32 pld_w0 =
841 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
842
843 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
844 BFA_PL_EID_RX,
845 (u16) fcxp_rsp->rsp_len,
846 &fcxp_rsp->fchs, pld_w0);
847 } else {
848 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
849 BFA_PL_EID_RX,
850 (u16) fcxp_rsp->rsp_len,
851 &fcxp_rsp->fchs);
852 }
853 } else {
854 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
855 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
856 }
857}
858
/**
 * Handler to resume sending an fcxp when space is available in the
 * request queue.  Installed via bfa_reqq_winit() at attach time.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s *fcxp = cbarg;
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/* Grab the next request-queue element and retry the send. */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
873
/**
 * Queue fcxp send request to firmware.  Fills in the BFI send-request
 * message (rport routing, timing, header, payload lengths and SGEs)
 * and produces it on the FCXP request queue.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
	struct bfa_rport_s *rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
			bfa_lpuid(bfa));

	send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
		/* Fall back to the FC maximum when the rport has no size. */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
	}

	send_req->vf_id = bfa_os_htons(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
	send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			/*
			 * NOTE(review): this zero-length SGE is written to
			 * rsp_sge, not req_sge, leaving req_sge untouched in
			 * this path (rsp_sge is overwritten again below).
			 * Looks like a copy/paste slip — confirm whether the
			 * firmware ignores req SGEs when req_len is 0.
			 */
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	/* Hand the message to hardware and trace the queue indices. */
	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
957
958/**
959 * hal_fcxp_api BFA FCXP API
960 */
961
962/**
963 * Allocate an FCXP instance to send a response or to send a request
964 * that has a response. Request/response buffers are allocated by caller.
965 *
966 * @param[in] bfa BFA bfa instance
967 * @param[in] nreq_sgles Number of SG elements required for request
968 * buffer. 0, if fcxp internal buffers are used.
969 * Use bfa_fcxp_get_reqbuf() to get the
970 * internal req buffer.
971 * @param[in] req_sgles SG elements describing request buffer. Will be
972 * copied in by BFA and hence can be freed on
973 * return from this function.
974 * @param[in] get_req_sga function ptr to be called to get a request SG
975 * Address (given the sge index).
976 * @param[in] get_req_sglen function ptr to be called to get a request SG
977 * len (given the sge index).
978 * @param[in] get_rsp_sga function ptr to be called to get a response SG
979 * Address (given the sge index).
980 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
981 * len (given the sge index).
982 *
983 * @return FCXP instance. NULL on failure.
984 */
985struct bfa_fcxp_s *
986bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
987 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
988 bfa_fcxp_get_sglen_t req_sglen_cbfn,
989 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
990 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
991{
992 struct bfa_fcxp_s *fcxp = NULL;
993
994 bfa_assert(bfa != NULL);
995
996 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
997 if (fcxp == NULL)
998 return NULL;
999
1000 bfa_trc(bfa, fcxp->fcxp_tag);
1001
1002 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
1003 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
1004
1005 return fcxp;
1006}
1007
1008/**
1009 * Get the internal request buffer pointer
1010 *
1011 * @param[in] fcxp BFA fcxp pointer
1012 *
1013 * @return pointer to the internal request buffer
1014 */
1015void *
1016bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1017{
1018 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1019 void *reqbuf;
1020
1021 bfa_assert(fcxp->use_ireqbuf == 1);
1022 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1023 fcxp->fcxp_tag * mod->req_pld_sz;
1024 return reqbuf;
1025}
1026
1027u32
1028bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1029{
1030 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1031
1032 return mod->req_pld_sz;
1033}
1034
1035/**
1036 * Get the internal response buffer pointer
1037 *
1038 * @param[in] fcxp BFA fcxp pointer
1039 *
1040 * @return pointer to the internal request buffer
1041 */
1042void *
1043bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1044{
1045 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1046 void *rspbuf;
1047
1048 bfa_assert(fcxp->use_irspbuf == 1);
1049
1050 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1051 fcxp->fcxp_tag * mod->rsp_pld_sz;
1052 return rspbuf;
1053}
1054
1055/**
1056 * Free the BFA FCXP
1057 *
1058 * @param[in] fcxp BFA fcxp pointer
1059 *
1060 * @return void
1061 */
1062void
1063bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1064{
1065 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1066
1067 bfa_assert(fcxp != NULL);
1068 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1069 bfa_fcxp_put(fcxp);
1070}
1071
1072/**
1073 * Send a FCXP request
1074 *
1075 * @param[in] fcxp BFA fcxp pointer
1076 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1077 * @param[in] vf_id virtual Fabric ID
1078 * @param[in] lp_tag lport tag
1079 * @param[in] cts use Continous sequence
1080 * @param[in] cos fc Class of Service
1081 * @param[in] reqlen request length, does not include FCHS length
1082 * @param[in] fchs fc Header Pointer. The header content will be copied
1083 * in by BFA.
1084 *
1085 * @param[in] cbfn call back function to be called on receiving
1086 * the response
1087 * @param[in] cbarg arg for cbfn
1088 * @param[in] rsp_timeout
1089 * response timeout
1090 *
1091 * @return bfa_status_t
1092 */
1093void
1094bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1095 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1096 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1097 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1098{
1099 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1100 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1101 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1102 struct bfi_fcxp_send_req_s *send_req;
1103
1104 bfa_trc(bfa, fcxp->fcxp_tag);
1105
1106 /**
1107 * setup request/response info
1108 */
1109 reqi->bfa_rport = rport;
1110 reqi->vf_id = vf_id;
1111 reqi->lp_tag = lp_tag;
1112 reqi->class = cos;
1113 rspi->rsp_timeout = rsp_timeout;
1114 reqi->cts = cts;
1115 reqi->fchs = *fchs;
1116 reqi->req_tot_len = reqlen;
1117 rspi->rsp_maxlen = rsp_maxlen;
1118 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1119 fcxp->send_cbarg = cbarg;
1120
1121 /**
1122 * If no room in CPE queue, wait for space in request queue
1123 */
1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1125 if (!send_req) {
1126 bfa_trc(bfa, fcxp->fcxp_tag);
1127 fcxp->reqq_waiting = BFA_TRUE;
1128 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1129 return;
1130 }
1131
1132 bfa_fcxp_queue(fcxp, send_req);
1133}
1134
1135/**
1136 * Abort a BFA FCXP
1137 *
1138 * @param[in] fcxp BFA fcxp pointer
1139 *
1140 * @return void
1141 */
1142bfa_status_t
1143bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1144{
1145 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1146 bfa_assert(0);
1147 return BFA_STATUS_OK;
1148}
1149
1150void
1151bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1152 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1153 void *caller, int nreq_sgles,
1154 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1155 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1156 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1157 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1158{
1159 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1160
1161 bfa_assert(list_empty(&mod->fcxp_free_q));
1162
1163 wqe->alloc_cbfn = alloc_cbfn;
1164 wqe->alloc_cbarg = alloc_cbarg;
1165 wqe->caller = caller;
1166 wqe->bfa = bfa;
1167 wqe->nreq_sgles = nreq_sgles;
1168 wqe->nrsp_sgles = nrsp_sgles;
1169 wqe->req_sga_cbfn = req_sga_cbfn;
1170 wqe->req_sglen_cbfn = req_sglen_cbfn;
1171 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1172 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1173
1174 list_add_tail(&wqe->qe, &mod->wait_q);
1175}
1176
1177void
1178bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1179{
1180 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1181
1182 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
1183 list_del(&wqe->qe);
1184}
1185
1186void
1187bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1188{
1189 /**
1190 * If waiting for room in request queue, cancel reqq wait
1191 * and free fcxp.
1192 */
1193 if (fcxp->reqq_waiting) {
1194 fcxp->reqq_waiting = BFA_FALSE;
1195 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1196 bfa_fcxp_free(fcxp);
1197 return;
1198 }
1199
1200 fcxp->send_cbfn = bfa_fcxp_null_comp;
1201}
1202
1203
1204
1205/**
1206 * hal_fcxp_public BFA FCXP public functions
1207 */
1208
1209void
1210bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1211{
1212 switch (msg->mhdr.msg_id) {
1213 case BFI_FCXP_I2H_SEND_RSP:
1214 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1215 break;
1216
1217 default:
1218 bfa_trc(bfa, msg->mhdr.msg_id);
1219 bfa_assert(0);
1220 }
1221}
1222
1223u32
1224bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1225{
1226 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1227
1228 return mod->rsp_pld_sz;
1229}
1230
1231
1232/**
1233 * BFA LPS state machine functions
1234 */
1235
1236/**
1237 * Init state -- no login
1238 */
1239static void
1240bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1241{
1242 bfa_trc(lps->bfa, lps->lp_tag);
1243 bfa_trc(lps->bfa, event);
1244
1245 switch (event) {
1246 case BFA_LPS_SM_LOGIN:
1247 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1248 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1249 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1250 } else {
1251 bfa_sm_set_state(lps, bfa_lps_sm_login);
1252 bfa_lps_send_login(lps);
1253 }
1254
1255 if (lps->fdisc)
1256 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1257 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1258 else
1259 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1260 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1261 break;
1262
1263 case BFA_LPS_SM_LOGOUT:
1264 bfa_lps_logout_comp(lps);
1265 break;
1266
1267 case BFA_LPS_SM_DELETE:
1268 bfa_lps_free(lps);
1269 break;
1270
1271 case BFA_LPS_SM_RX_CVL:
1272 case BFA_LPS_SM_OFFLINE:
1273 break;
1274
1275 case BFA_LPS_SM_FWRSP:
1276 /*
1277 * Could happen when fabric detects loopback and discards
1278 * the lps request. Fw will eventually sent out the timeout
1279 * Just ignore
1280 */
1281 break;
1282
1283 default:
1284 bfa_sm_fault(lps->bfa, event);
1285 }
1286}
1287
1288/**
1289 * login is in progress -- awaiting response from firmware
1290 */
1291static void
1292bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1293{
1294 bfa_trc(lps->bfa, lps->lp_tag);
1295 bfa_trc(lps->bfa, event);
1296
1297 switch (event) {
1298 case BFA_LPS_SM_FWRSP:
1299 if (lps->status == BFA_STATUS_OK) {
1300 bfa_sm_set_state(lps, bfa_lps_sm_online);
1301 if (lps->fdisc)
1302 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1303 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1304 else
1305 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1306 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1307 } else {
1308 bfa_sm_set_state(lps, bfa_lps_sm_init);
1309 if (lps->fdisc)
1310 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1311 BFA_PL_EID_LOGIN, 0,
1312 "FDISC Fail (RJT or timeout)");
1313 else
1314 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1315 BFA_PL_EID_LOGIN, 0,
1316 "FLOGI Fail (RJT or timeout)");
1317 }
1318 bfa_lps_login_comp(lps);
1319 break;
1320
1321 case BFA_LPS_SM_OFFLINE:
1322 bfa_sm_set_state(lps, bfa_lps_sm_init);
1323 break;
1324
1325 default:
1326 bfa_sm_fault(lps->bfa, event);
1327 }
1328}
1329
1330/**
1331 * login pending - awaiting space in request queue
1332 */
1333static void
1334bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1335{
1336 bfa_trc(lps->bfa, lps->lp_tag);
1337 bfa_trc(lps->bfa, event);
1338
1339 switch (event) {
1340 case BFA_LPS_SM_RESUME:
1341 bfa_sm_set_state(lps, bfa_lps_sm_login);
1342 break;
1343
1344 case BFA_LPS_SM_OFFLINE:
1345 bfa_sm_set_state(lps, bfa_lps_sm_init);
1346 bfa_reqq_wcancel(&lps->wqe);
1347 break;
1348
1349 case BFA_LPS_SM_RX_CVL:
1350 /*
1351 * Login was not even sent out; so when getting out
1352 * of this state, it will appear like a login retry
1353 * after Clear virtual link
1354 */
1355 break;
1356
1357 default:
1358 bfa_sm_fault(lps->bfa, event);
1359 }
1360}
1361
1362/**
1363 * login complete
1364 */
1365static void
1366bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1367{
1368 bfa_trc(lps->bfa, lps->lp_tag);
1369 bfa_trc(lps->bfa, event);
1370
1371 switch (event) {
1372 case BFA_LPS_SM_LOGOUT:
1373 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1374 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1375 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1376 } else {
1377 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1378 bfa_lps_send_logout(lps);
1379 }
1380 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1381 BFA_PL_EID_LOGO, 0, "Logout");
1382 break;
1383
1384 case BFA_LPS_SM_RX_CVL:
1385 bfa_sm_set_state(lps, bfa_lps_sm_init);
1386
1387 /* Let the vport module know about this event */
1388 bfa_lps_cvl_event(lps);
1389 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1390 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1391 break;
1392
1393 case BFA_LPS_SM_OFFLINE:
1394 case BFA_LPS_SM_DELETE:
1395 bfa_sm_set_state(lps, bfa_lps_sm_init);
1396 break;
1397
1398 default:
1399 bfa_sm_fault(lps->bfa, event);
1400 }
1401}
1402
1403/**
1404 * logout in progress - awaiting firmware response
1405 */
1406static void
1407bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1408{
1409 bfa_trc(lps->bfa, lps->lp_tag);
1410 bfa_trc(lps->bfa, event);
1411
1412 switch (event) {
1413 case BFA_LPS_SM_FWRSP:
1414 bfa_sm_set_state(lps, bfa_lps_sm_init);
1415 bfa_lps_logout_comp(lps);
1416 break;
1417
1418 case BFA_LPS_SM_OFFLINE:
1419 bfa_sm_set_state(lps, bfa_lps_sm_init);
1420 break;
1421
1422 default:
1423 bfa_sm_fault(lps->bfa, event);
1424 }
1425}
1426
1427/**
1428 * logout pending -- awaiting space in request queue
1429 */
1430static void
1431bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1432{
1433 bfa_trc(lps->bfa, lps->lp_tag);
1434 bfa_trc(lps->bfa, event);
1435
1436 switch (event) {
1437 case BFA_LPS_SM_RESUME:
1438 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1439 bfa_lps_send_logout(lps);
1440 break;
1441
1442 case BFA_LPS_SM_OFFLINE:
1443 bfa_sm_set_state(lps, bfa_lps_sm_init);
1444 bfa_reqq_wcancel(&lps->wqe);
1445 break;
1446
1447 default:
1448 bfa_sm_fault(lps->bfa, event);
1449 }
1450}
1451
1452
1453
1454/**
1455 * lps_pvt BFA LPS private functions
1456 */
1457
1458/**
1459 * return memory requirement
1460 */
1461static void
1462bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1463 u32 *dm_len)
1464{
1465 if (cfg->drvcfg.min_cfg)
1466 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1467 else
1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1469}
1470
1471/**
1472 * bfa module attach at initialization time
1473 */
1474static void
1475bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1476 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1477{
1478 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1479 struct bfa_lps_s *lps;
1480 int i;
1481
1482 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 if (cfg->drvcfg.min_cfg)
1485 mod->num_lps = BFA_LPS_MIN_LPORTS;
1486 else
1487 mod->num_lps = BFA_LPS_MAX_LPORTS;
1488 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1489
1490 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1491
1492 INIT_LIST_HEAD(&mod->lps_free_q);
1493 INIT_LIST_HEAD(&mod->lps_active_q);
1494
1495 for (i = 0; i < mod->num_lps; i++, lps++) {
1496 lps->bfa = bfa;
1497 lps->lp_tag = (u8) i;
1498 lps->reqq = BFA_REQQ_LPS;
1499 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1500 list_add_tail(&lps->qe, &mod->lps_free_q);
1501 }
1502}
1503
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* No per-module teardown needed; intentionally empty. */
}
1508
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* Nothing to do at start time; intentionally empty. */
}
1513
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* Nothing to do at stop time; intentionally empty. */
}
1518
1519/**
1520 * IOC in disabled state -- consider all lps offline
1521 */
1522static void
1523bfa_lps_iocdisable(struct bfa_s *bfa)
1524{
1525 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1526 struct bfa_lps_s *lps;
1527 struct list_head *qe, *qen;
1528
1529 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1530 lps = (struct bfa_lps_s *) qe;
1531 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1532 }
1533}
1534
1535/**
1536 * Firmware login response
1537 */
1538static void
1539bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1540{
1541 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1542 struct bfa_lps_s *lps;
1543
1544 bfa_assert(rsp->lp_tag < mod->num_lps);
1545 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1546
1547 lps->status = rsp->status;
1548 switch (rsp->status) {
1549 case BFA_STATUS_OK:
1550 lps->fport = rsp->f_port;
1551 lps->npiv_en = rsp->npiv_en;
1552 lps->lp_pid = rsp->lp_pid;
1553 lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
1554 lps->pr_pwwn = rsp->port_name;
1555 lps->pr_nwwn = rsp->node_name;
1556 lps->auth_req = rsp->auth_req;
1557 lps->lp_mac = rsp->lp_mac;
1558 lps->brcd_switch = rsp->brcd_switch;
1559 lps->fcf_mac = rsp->fcf_mac;
1560
1561 break;
1562
1563 case BFA_STATUS_FABRIC_RJT:
1564 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1565 lps->lsrjt_expl = rsp->lsrjt_expl;
1566
1567 break;
1568
1569 case BFA_STATUS_EPROTOCOL:
1570 lps->ext_status = rsp->ext_status;
1571
1572 break;
1573
1574 default:
1575 /* Nothing to do with other status */
1576 break;
1577 }
1578
1579 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1580}
1581
1582/**
1583 * Firmware logout response
1584 */
1585static void
1586bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1587{
1588 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1589 struct bfa_lps_s *lps;
1590
1591 bfa_assert(rsp->lp_tag < mod->num_lps);
1592 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1593
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1595}
1596
1597/**
1598 * Firmware received a Clear virtual link request (for FCoE)
1599 */
1600static void
1601bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1602{
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps;
1605
1606 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1607
1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1609}
1610
1611/**
1612 * Space is available in request queue, resume queueing request to firmware.
1613 */
1614static void
1615bfa_lps_reqq_resume(void *lps_arg)
1616{
1617 struct bfa_lps_s *lps = lps_arg;
1618
1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1620}
1621
1622/**
1623 * lps is freed -- triggered by vport delete
1624 */
1625static void
1626bfa_lps_free(struct bfa_lps_s *lps)
1627{
1628 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1629
1630 lps->lp_pid = 0;
1631 list_del(&lps->qe);
1632 list_add_tail(&lps->qe, &mod->lps_free_q);
1633}
1634
1635/**
1636 * send login request to firmware
1637 */
1638static void
1639bfa_lps_send_login(struct bfa_lps_s *lps)
1640{
1641 struct bfi_lps_login_req_s *m;
1642
1643 m = bfa_reqq_next(lps->bfa, lps->reqq);
1644 bfa_assert(m);
1645
1646 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1647 bfa_lpuid(lps->bfa));
1648
1649 m->lp_tag = lps->lp_tag;
1650 m->alpa = lps->alpa;
1651 m->pdu_size = bfa_os_htons(lps->pdusz);
1652 m->pwwn = lps->pwwn;
1653 m->nwwn = lps->nwwn;
1654 m->fdisc = lps->fdisc;
1655 m->auth_en = lps->auth_en;
1656
1657 bfa_reqq_produce(lps->bfa, lps->reqq);
1658}
1659
1660/**
1661 * send logout request to firmware
1662 */
1663static void
1664bfa_lps_send_logout(struct bfa_lps_s *lps)
1665{
1666 struct bfi_lps_logout_req_s *m;
1667
1668 m = bfa_reqq_next(lps->bfa, lps->reqq);
1669 bfa_assert(m);
1670
1671 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1672 bfa_lpuid(lps->bfa));
1673
1674 m->lp_tag = lps->lp_tag;
1675 m->port_name = lps->pwwn;
1676 bfa_reqq_produce(lps->bfa, lps->reqq);
1677}
1678
1679/**
1680 * Indirect login completion handler for non-fcs
1681 */
1682static void
1683bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1684{
1685 struct bfa_lps_s *lps = arg;
1686
1687 if (!complete)
1688 return;
1689
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1692 else
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1694}
1695
1696/**
1697 * Login completion handler -- direct call for fcs, queue for others
1698 */
1699static void
1700bfa_lps_login_comp(struct bfa_lps_s *lps)
1701{
1702 if (!lps->bfa->fcs) {
1703 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1704 lps);
1705 return;
1706 }
1707
1708 if (lps->fdisc)
1709 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1710 else
1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1712}
1713
1714/**
1715 * Indirect logout completion handler for non-fcs
1716 */
1717static void
1718bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1719{
1720 struct bfa_lps_s *lps = arg;
1721
1722 if (!complete)
1723 return;
1724
1725 if (lps->fdisc)
1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1727}
1728
1729/**
1730 * Logout completion handler -- direct call for fcs, queue for others
1731 */
1732static void
1733bfa_lps_logout_comp(struct bfa_lps_s *lps)
1734{
1735 if (!lps->bfa->fcs) {
1736 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1737 lps);
1738 return;
1739 }
1740 if (lps->fdisc)
1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1742}
1743
1744/**
1745 * Clear virtual link completion handler for non-fcs
1746 */
1747static void
1748bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1749{
1750 struct bfa_lps_s *lps = arg;
1751
1752 if (!complete)
1753 return;
1754
1755 /* Clear virtual link to base port will result in link down */
1756 if (lps->fdisc)
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1758}
1759
1760/**
1761 * Received Clear virtual link event --direct call for fcs,
1762 * queue for others
1763 */
1764static void
1765bfa_lps_cvl_event(struct bfa_lps_s *lps)
1766{
1767 if (!lps->bfa->fcs) {
1768 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1769 lps);
1770 return;
1771 }
1772
1773 /* Clear virtual link to base port will result in link down */
1774 if (lps->fdisc)
1775 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1776}
1777
1778
1779
1780/**
1781 * lps_public BFA LPS public functions
1782 */
1783
1784u32
1785bfa_lps_get_max_vport(struct bfa_s *bfa)
1786{
1787 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1788 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1789 else
1790 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1791}
1792
1793/**
1794 * Allocate a lport srvice tag.
1795 */
1796struct bfa_lps_s *
1797bfa_lps_alloc(struct bfa_s *bfa)
1798{
1799 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1800 struct bfa_lps_s *lps = NULL;
1801
1802 bfa_q_deq(&mod->lps_free_q, &lps);
1803
1804 if (lps == NULL)
1805 return NULL;
1806
1807 list_add_tail(&lps->qe, &mod->lps_active_q);
1808
1809 bfa_sm_set_state(lps, bfa_lps_sm_init);
1810 return lps;
1811}
1812
1813/**
1814 * Free lport service tag. This can be called anytime after an alloc.
1815 * No need to wait for any pending login/logout completions.
1816 */
1817void
1818bfa_lps_delete(struct bfa_lps_s *lps)
1819{
1820 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1821}
1822
1823/**
1824 * Initiate a lport login.
1825 */
1826void
1827bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1828 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1829{
1830 lps->uarg = uarg;
1831 lps->alpa = alpa;
1832 lps->pdusz = pdusz;
1833 lps->pwwn = pwwn;
1834 lps->nwwn = nwwn;
1835 lps->fdisc = BFA_FALSE;
1836 lps->auth_en = auth_en;
1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1838}
1839
1840/**
1841 * Initiate a lport fdisc login.
1842 */
1843void
1844bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1845 wwn_t nwwn)
1846{
1847 lps->uarg = uarg;
1848 lps->alpa = 0;
1849 lps->pdusz = pdusz;
1850 lps->pwwn = pwwn;
1851 lps->nwwn = nwwn;
1852 lps->fdisc = BFA_TRUE;
1853 lps->auth_en = BFA_FALSE;
1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1855}
1856
1857/**
1858 * Initiate a lport logout (flogi).
1859 */
1860void
1861bfa_lps_flogo(struct bfa_lps_s *lps)
1862{
1863 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1864}
1865
1866/**
1867 * Initiate a lport FDSIC logout.
1868 */
1869void
1870bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1871{
1872 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1873}
1874
1875/**
1876 * Discard a pending login request -- should be called only for
1877 * link down handling.
1878 */
1879void
1880bfa_lps_discard(struct bfa_lps_s *lps)
1881{
1882 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1883}
1884
1885/**
1886 * Return lport services tag
1887 */
1888u8
1889bfa_lps_get_tag(struct bfa_lps_s *lps)
1890{
1891 return lps->lp_tag;
1892}
1893
1894/**
1895 * Return lport services tag given the pid
1896 */
1897u8
1898bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1899{
1900 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1901 struct bfa_lps_s *lps;
1902 int i;
1903
1904 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1905 if (lps->lp_pid == pid)
1906 return lps->lp_tag;
1907 }
1908
1909 /* Return base port tag anyway */
1910 return 0;
1911}
1912
1913/**
1914 * return if fabric login indicates support for NPIV
1915 */
1916bfa_boolean_t
1917bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1918{
1919 return lps->npiv_en;
1920}
1921
1922/**
1923 * Return TRUE if attached to F-Port, else return FALSE
1924 */
1925bfa_boolean_t
1926bfa_lps_is_fport(struct bfa_lps_s *lps)
1927{
1928 return lps->fport;
1929}
1930
1931/**
1932 * Return TRUE if attached to a Brocade Fabric
1933 */
1934bfa_boolean_t
1935bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1936{
1937 return lps->brcd_switch;
1938}
1939/**
1940 * return TRUE if authentication is required
1941 */
1942bfa_boolean_t
1943bfa_lps_is_authreq(struct bfa_lps_s *lps)
1944{
1945 return lps->auth_req;
1946}
1947
1948bfa_eproto_status_t
1949bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1950{
1951 return lps->ext_status;
1952}
1953
1954/**
1955 * return port id assigned to the lport
1956 */
1957u32
1958bfa_lps_get_pid(struct bfa_lps_s *lps)
1959{
1960 return lps->lp_pid;
1961}
1962
1963/**
1964 * return port id assigned to the base lport
1965 */
1966u32
1967bfa_lps_get_base_pid(struct bfa_s *bfa)
1968{
1969 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1970
1971 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1972}
1973
1974/**
1975 * Return bb_credit assigned in FLOGI response
1976 */
1977u16
1978bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1979{
1980 return lps->pr_bbcred;
1981}
1982
1983/**
1984 * Return peer port name
1985 */
1986wwn_t
1987bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1988{
1989 return lps->pr_pwwn;
1990}
1991
1992/**
1993 * Return peer node name
1994 */
1995wwn_t
1996bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1997{
1998 return lps->pr_nwwn;
1999}
2000
2001/**
2002 * return reason code if login request is rejected
2003 */
2004u8
2005bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2006{
2007 return lps->lsrjt_rsn;
2008}
2009
2010/**
2011 * return explanation code if login request is rejected
2012 */
2013u8
2014bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2015{
2016 return lps->lsrjt_expl;
2017}
2018
2019/**
2020 * Return fpma/spma MAC for lport
2021 */
2022mac_t
2023bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2024{
2025 return lps->lp_mac;
2026}
2027
2028/**
2029 * LPS firmware message class handler.
2030 */
2031void
2032bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2033{
2034 union bfi_lps_i2h_msg_u msg;
2035
2036 bfa_trc(bfa, m->mhdr.msg_id);
2037 msg.msg = m;
2038
2039 switch (m->mhdr.msg_id) {
2040 case BFI_LPS_H2I_LOGIN_RSP:
2041 bfa_lps_login_rsp(bfa, msg.login_rsp);
2042 break;
2043
2044 case BFI_LPS_H2I_LOGOUT_RSP:
2045 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2046 break;
2047
2048 case BFI_LPS_H2I_CVL_EVENT:
2049 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2050 break;
2051
2052 default:
2053 bfa_trc(bfa, m->mhdr.msg_id);
2054 bfa_assert(0);
2055 }
2056}
2057
2058/**
2059 * FC PORT state machine functions
2060 */
2061static void
2062bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2063 enum bfa_fcport_sm_event event)
2064{
2065 bfa_trc(fcport->bfa, event);
2066
2067 switch (event) {
2068 case BFA_FCPORT_SM_START:
2069 /**
2070 * Start event after IOC is configured and BFA is started.
2071 */
2072 if (bfa_fcport_send_enable(fcport)) {
2073 bfa_trc(fcport->bfa, BFA_TRUE);
2074 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2075 } else {
2076 bfa_trc(fcport->bfa, BFA_FALSE);
2077 bfa_sm_set_state(fcport,
2078 bfa_fcport_sm_enabling_qwait);
2079 }
2080 break;
2081
2082 case BFA_FCPORT_SM_ENABLE:
2083 /**
2084 * Port is persistently configured to be in enabled state. Do
2085 * not change state. Port enabling is done when START event is
2086 * received.
2087 */
2088 break;
2089
2090 case BFA_FCPORT_SM_DISABLE:
2091 /**
2092 * If a port is persistently configured to be disabled, the
2093 * first event will a port disable request.
2094 */
2095 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2096 break;
2097
2098 case BFA_FCPORT_SM_HWFAIL:
2099 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2100 break;
2101
2102 default:
2103 bfa_sm_fault(fcport->bfa, event);
2104 }
2105}
2106
2107static void
2108bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2109 enum bfa_fcport_sm_event event)
2110{
2111 char pwwn_buf[BFA_STRING_32];
2112 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2113 bfa_trc(fcport->bfa, event);
2114
2115 switch (event) {
2116 case BFA_FCPORT_SM_QRESUME:
2117 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2118 bfa_fcport_send_enable(fcport);
2119 break;
2120
2121 case BFA_FCPORT_SM_STOP:
2122 bfa_reqq_wcancel(&fcport->reqq_wait);
2123 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2124 break;
2125
2126 case BFA_FCPORT_SM_ENABLE:
2127 /**
2128 * Already enable is in progress.
2129 */
2130 break;
2131
2132 case BFA_FCPORT_SM_DISABLE:
2133 /**
2134 * Just send disable request to firmware when room becomes
2135 * available in request queue.
2136 */
2137 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2138 bfa_reqq_wcancel(&fcport->reqq_wait);
2139 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2140 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2141 wwn2str(pwwn_buf, fcport->pwwn);
2142 BFA_LOG(KERN_INFO, bfad, log_level,
2143 "Base port disabled: WWN = %s\n", pwwn_buf);
2144 break;
2145
2146 case BFA_FCPORT_SM_LINKUP:
2147 case BFA_FCPORT_SM_LINKDOWN:
2148 /**
2149 * Possible to get link events when doing back-to-back
2150 * enable/disables.
2151 */
2152 break;
2153
2154 case BFA_FCPORT_SM_HWFAIL:
2155 bfa_reqq_wcancel(&fcport->reqq_wait);
2156 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2157 break;
2158
2159 default:
2160 bfa_sm_fault(fcport->bfa, event);
2161 }
2162}
2163
2164static void
2165bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2166 enum bfa_fcport_sm_event event)
2167{
2168 char pwwn_buf[BFA_STRING_32];
2169 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2170 bfa_trc(fcport->bfa, event);
2171
2172 switch (event) {
2173 case BFA_FCPORT_SM_FWRSP:
2174 case BFA_FCPORT_SM_LINKDOWN:
2175 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2176 break;
2177
2178 case BFA_FCPORT_SM_LINKUP:
2179 bfa_fcport_update_linkinfo(fcport);
2180 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2181
2182 bfa_assert(fcport->event_cbfn);
2183 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2184 break;
2185
2186 case BFA_FCPORT_SM_ENABLE:
2187 /**
2188 * Already being enabled.
2189 */
2190 break;
2191
2192 case BFA_FCPORT_SM_DISABLE:
2193 if (bfa_fcport_send_disable(fcport))
2194 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2195 else
2196 bfa_sm_set_state(fcport,
2197 bfa_fcport_sm_disabling_qwait);
2198
2199 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2200 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2201 wwn2str(pwwn_buf, fcport->pwwn);
2202 BFA_LOG(KERN_INFO, bfad, log_level,
2203 "Base port disabled: WWN = %s\n", pwwn_buf);
2204 break;
2205
2206 case BFA_FCPORT_SM_STOP:
2207 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2208 break;
2209
2210 case BFA_FCPORT_SM_HWFAIL:
2211 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2212 break;
2213
2214 default:
2215 bfa_sm_fault(fcport->bfa, event);
2216 }
2217}
2218
/**
 * Port is enabled but link is down -- waiting for a link-up event.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		       enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		/* latch speed/topology etc. before announcing link up */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			/* FCoE mode: record FIP FCF discovery outcome */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		/* notify upper layers, then log to the kernel ring */
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* send now if queue room exists, else wait for space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2297
/**
 * Port is enabled and the link is up: handle link loss, disable, stop
 * and HW-failure transitions, logging the resulting port state.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		/* Link is implicitly lost on disable: clear cached info
		 * and send a link-down SCN before logging. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* An unexpected link loss (port not disabled) is an error. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2379
/**
 * A disable is pending but the request queue was full: wait for queue
 * space (QRESUME) before sending the disable to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: actually send the disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the outstanding queue wait before stopping. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived while disable is queued: toggle. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2424
/**
 * A disable followed by an enable is pending and the request queue was
 * full: on QRESUME both messages are issued back-to-back (disable, then
 * enable), ending in an enabling state.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the queued disable first, then the enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already the pending outcome: nothing to do. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable: back to plain disable-wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2471
/**
 * A disable request has been sent to firmware: wait for the firmware
 * response before entering the disabled state.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested before disable completed. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2525
/**
 * Port is disabled: only an enable request (or stop/HW failure) moves
 * the port out of this state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* qwait variant is used when the request queue is full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2573
2574static void
2575bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2576 enum bfa_fcport_sm_event event)
2577{
2578 bfa_trc(fcport->bfa, event);
2579
2580 switch (event) {
2581 case BFA_FCPORT_SM_START:
2582 if (bfa_fcport_send_enable(fcport))
2583 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2584 else
2585 bfa_sm_set_state(fcport,
2586 bfa_fcport_sm_enabling_qwait);
2587 break;
2588
2589 default:
2590 /**
2591 * Ignore all other events.
2592 */
2593 ;
2594 }
2595}
2596
2597/**
2598 * Port is enabled. IOC is down/failed.
2599 */
2600static void
2601bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2602 enum bfa_fcport_sm_event event)
2603{
2604 bfa_trc(fcport->bfa, event);
2605
2606 switch (event) {
2607 case BFA_FCPORT_SM_START:
2608 if (bfa_fcport_send_enable(fcport))
2609 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2610 else
2611 bfa_sm_set_state(fcport,
2612 bfa_fcport_sm_enabling_qwait);
2613 break;
2614
2615 default:
2616 /**
2617 * Ignore all events.
2618 */
2619 ;
2620 }
2621}
2622
2623/**
2624 * Port is disabled. IOC is down/failed.
2625 */
2626static void
2627bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2628 enum bfa_fcport_sm_event event)
2629{
2630 bfa_trc(fcport->bfa, event);
2631
2632 switch (event) {
2633 case BFA_FCPORT_SM_START:
2634 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2635 break;
2636
2637 case BFA_FCPORT_SM_ENABLE:
2638 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2639 break;
2640
2641 default:
2642 /**
2643 * Ignore all events.
2644 */
2645 ;
2646 }
2647}
2648
2649/**
2650 * Link state is down
2651 */
2652static void
2653bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2654 enum bfa_fcport_ln_sm_event event)
2655{
2656 bfa_trc(ln->fcport->bfa, event);
2657
2658 switch (event) {
2659 case BFA_FCPORT_LN_SM_LINKUP:
2660 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2661 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2662 break;
2663
2664 default:
2665 bfa_sm_fault(ln->fcport->bfa, event);
2666 }
2667}
2668
2669/**
2670 * Link state is waiting for down notification
2671 */
2672static void
2673bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2674 enum bfa_fcport_ln_sm_event event)
2675{
2676 bfa_trc(ln->fcport->bfa, event);
2677
2678 switch (event) {
2679 case BFA_FCPORT_LN_SM_LINKUP:
2680 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2681 break;
2682
2683 case BFA_FCPORT_LN_SM_NOTIFICATION:
2684 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2685 break;
2686
2687 default:
2688 bfa_sm_fault(ln->fcport->bfa, event);
2689 }
2690}
2691
2692/**
2693 * Link state is waiting for down notification and there is a pending up
2694 */
2695static void
2696bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2697 enum bfa_fcport_ln_sm_event event)
2698{
2699 bfa_trc(ln->fcport->bfa, event);
2700
2701 switch (event) {
2702 case BFA_FCPORT_LN_SM_LINKDOWN:
2703 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2704 break;
2705
2706 case BFA_FCPORT_LN_SM_NOTIFICATION:
2707 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2708 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2709 break;
2710
2711 default:
2712 bfa_sm_fault(ln->fcport->bfa, event);
2713 }
2714}
2715
2716/**
2717 * Link state is up
2718 */
2719static void
2720bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2721 enum bfa_fcport_ln_sm_event event)
2722{
2723 bfa_trc(ln->fcport->bfa, event);
2724
2725 switch (event) {
2726 case BFA_FCPORT_LN_SM_LINKDOWN:
2727 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2728 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2729 break;
2730
2731 default:
2732 bfa_sm_fault(ln->fcport->bfa, event);
2733 }
2734}
2735
2736/**
2737 * Link state is waiting for up notification
2738 */
2739static void
2740bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2741 enum bfa_fcport_ln_sm_event event)
2742{
2743 bfa_trc(ln->fcport->bfa, event);
2744
2745 switch (event) {
2746 case BFA_FCPORT_LN_SM_LINKDOWN:
2747 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2748 break;
2749
2750 case BFA_FCPORT_LN_SM_NOTIFICATION:
2751 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2752 break;
2753
2754 default:
2755 bfa_sm_fault(ln->fcport->bfa, event);
2756 }
2757}
2758
2759/**
2760 * Link state is waiting for up notification and there is a pending down
2761 */
2762static void
2763bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2764 enum bfa_fcport_ln_sm_event event)
2765{
2766 bfa_trc(ln->fcport->bfa, event);
2767
2768 switch (event) {
2769 case BFA_FCPORT_LN_SM_LINKUP:
2770 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2771 break;
2772
2773 case BFA_FCPORT_LN_SM_NOTIFICATION:
2774 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2775 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2776 break;
2777
2778 default:
2779 bfa_sm_fault(ln->fcport->bfa, event);
2780 }
2781}
2782
2783/**
2784 * Link state is waiting for up notification and there are pending down and up
2785 */
2786static void
2787bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2788 enum bfa_fcport_ln_sm_event event)
2789{
2790 bfa_trc(ln->fcport->bfa, event);
2791
2792 switch (event) {
2793 case BFA_FCPORT_LN_SM_LINKDOWN:
2794 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2795 break;
2796
2797 case BFA_FCPORT_LN_SM_NOTIFICATION:
2798 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2799 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2800 break;
2801
2802 default:
2803 bfa_sm_fault(ln->fcport->bfa, event);
2804 }
2805}
2806
2807
2808
2809/**
2810 * hal_port_private
2811 */
2812
2813static void
2814__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2815{
2816 struct bfa_fcport_ln_s *ln = cbarg;
2817
2818 if (complete)
2819 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2820 else
2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2822}
2823
2824/**
2825 * Send SCN notification to upper layers.
2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2827 */
2828static void
2829bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2830 bfa_boolean_t trunk)
2831{
2832 if (fcport->cfg.trunked && !trunk)
2833 return;
2834
2835 switch (event) {
2836 case BFA_PORT_LINKUP:
2837 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2838 break;
2839 case BFA_PORT_LINKDOWN:
2840 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2841 break;
2842 default:
2843 bfa_assert(0);
2844 }
2845}
2846
2847static void
2848bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2849{
2850 struct bfa_fcport_s *fcport = ln->fcport;
2851
2852 if (fcport->bfa->fcs) {
2853 fcport->event_cbfn(fcport->event_cbarg, event);
2854 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2855 } else {
2856 ln->ln_event = event;
2857 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2858 __bfa_cb_fcport_event, ln);
2859 }
2860}
2861
2862#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2863 BFA_CACHELINE_SZ))
2864
2865static void
2866bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2867 u32 *dm_len)
2868{
2869 *dm_len += FCPORT_STATS_DMA_SZ;
2870}
2871
/**
 * Request-queue space became available: resume whatever operation the
 * port state machine had queued behind reqq_wait.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2879
2880static void
2881bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2882{
2883 u8 *dm_kva;
2884 u64 dm_pa;
2885
2886 dm_kva = bfa_meminfo_dma_virt(meminfo);
2887 dm_pa = bfa_meminfo_dma_phys(meminfo);
2888
2889 fcport->stats_kva = dm_kva;
2890 fcport->stats_pa = dm_pa;
2891 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2892
2893 dm_kva += FCPORT_STATS_DMA_SZ;
2894 dm_pa += FCPORT_STATS_DMA_SZ;
2895
2896 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2897 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2898}
2899
2900/**
2901 * Memory initialization.
2902 */
2903static void
2904bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2905 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2906{
2907 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2908 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2909 struct bfa_fcport_ln_s *ln = &fcport->ln;
2910 struct bfa_timeval_s tv;
2911
2912 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
2913 fcport->bfa = bfa;
2914 ln->fcport = fcport;
2915
2916 bfa_fcport_mem_claim(fcport, meminfo);
2917
2918 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2919 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2920
2921 /**
2922 * initialize time stamp for stats reset
2923 */
2924 bfa_os_gettimeofday(&tv);
2925 fcport->stats_reset_time = tv.tv_sec;
2926
2927 /**
2928 * initialize and set default configuration
2929 */
2930 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2931 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2932 port_cfg->trunked = BFA_FALSE;
2933 port_cfg->maxfrsize = 0;
2934
2935 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2936
2937 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2938}
2939
/**
 * Module detach entry point; fcport holds no resources that need
 * explicit teardown.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2944
2945/**
2946 * Called when IOC is ready.
2947 */
2948static void
2949bfa_fcport_start(struct bfa_s *bfa)
2950{
2951 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2952}
2953
2954/**
2955 * Called before IOC is stopped.
2956 */
2957static void
2958bfa_fcport_stop(struct bfa_s *bfa)
2959{
2960 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2961 bfa_trunk_iocdisable(bfa);
2962}
2963
2964/**
2965 * Called when IOC failure is detected.
2966 */
2967static void
2968bfa_fcport_iocdisable(struct bfa_s *bfa)
2969{
2970 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2971
2972 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2973 bfa_trunk_iocdisable(bfa);
2974}
2975
/**
 * Cache link attributes (speed, topology, QoS, FCoE VLAN) from the
 * firmware link-state event saved in event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop topology clears myalpa here; presumably the
	 * real ALPA is picked up elsewhere - confirm */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr,
		pevent->link_state.vc_fcf.qos_vc_attr);

	/**
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3005
3006static void
3007bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3008{
3009 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3010 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3011}
3012
3013/**
3014 * Send port enable message to firmware.
3015 */
3016static bfa_boolean_t
3017bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3018{
3019 struct bfi_fcport_enable_req_s *m;
3020
3021 /**
3022 * Increment message tag before queue check, so that responses to old
3023 * requests are discarded.
3024 */
3025 fcport->msgtag++;
3026
3027 /**
3028 * check for room in queue to send request now
3029 */
3030 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3031 if (!m) {
3032 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3033 &fcport->reqq_wait);
3034 return BFA_FALSE;
3035 }
3036
3037 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3038 bfa_lpuid(fcport->bfa));
3039 m->nwwn = fcport->nwwn;
3040 m->pwwn = fcport->pwwn;
3041 m->port_cfg = fcport->cfg;
3042 m->msgtag = fcport->msgtag;
3043 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
3044 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3045 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3046 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3047
3048 /**
3049 * queue I/O message to firmware
3050 */
3051 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3052 return BFA_TRUE;
3053}
3054
3055/**
3056 * Send port disable message to firmware.
3057 */
3058static bfa_boolean_t
3059bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3060{
3061 struct bfi_fcport_req_s *m;
3062
3063 /**
3064 * Increment message tag before queue check, so that responses to old
3065 * requests are discarded.
3066 */
3067 fcport->msgtag++;
3068
3069 /**
3070 * check for room in queue to send request now
3071 */
3072 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3073 if (!m) {
3074 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3075 &fcport->reqq_wait);
3076 return BFA_FALSE;
3077 }
3078
3079 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3080 bfa_lpuid(fcport->bfa));
3081 m->msgtag = fcport->msgtag;
3082
3083 /**
3084 * queue I/O message to firmware
3085 */
3086 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3087
3088 return BFA_TRUE;
3089}
3090
/**
 * Cache the port and node WWNs reported by the IOC.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3100
/**
 * Push the configured transmit BB-credit to firmware. Best-effort: if
 * the request queue is full the update is dropped (only traced), with
 * no queue-wait armed.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	/* wire format is 16-bit network byte order */
	m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3126
3127static void
3128bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3129 struct bfa_qos_stats_s *s)
3130{
3131 u32 *dip = (u32 *) d;
3132 u32 *sip = (u32 *) s;
3133 int i;
3134
3135 /* Now swap the 32 bit fields */
3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3137 dip[i] = bfa_os_ntohl(sip[i]);
3138}
3139
/**
 * Copy an FCoE stats block from s to d, converting each 64-bit counter
 * (stored as a pair of 32-bit words) from network to host order: each
 * word is swapped, and on little-endian hosts the two words of the
 * pair are exchanged as well.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	/* step by two words: each iteration handles one 64-bit counter */
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}
}
3159
/**
 * Deferred completion for a stats-get request: on success byte-swap
 * the DMA'd stats into the caller's buffer and invoke the caller's
 * callback; on cancellation just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* report seconds since the last stats reset */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* canceled: allow a new stats request */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3190
/**
 * Stats-get request timed out: cancel any pending queue wait and
 * complete the request with BFA_STATUS_ETIMER via the deferred
 * callback.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3207
/**
 * Send a stats-get request to firmware. If the request queue is full,
 * arm a queue wait that re-enters this function on QRESUME.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full: retry from the QRESUME callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3231
/**
 * Deferred completion for a stats-clear request: on success restart
 * the stats-reset timestamp and notify the caller; on cancellation
 * just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct bfa_timeval_s tv;

		/**
		 * re-initialize time stamp for stats reset
		 */
		bfa_os_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* canceled: allow a new stats request */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3252
/**
 * Stats-clear request timed out: cancel any pending queue wait and
 * complete the request with BFA_STATUS_ETIMER via the deferred
 * callback.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3269
/**
 * Send a stats-clear request to firmware. If the request queue is
 * full, arm a queue wait that re-enters this function on QRESUME.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full: retry from the QRESUME callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3293
3294/**
3295 * Handle trunk SCN event from firmware.
3296 */
3297static void
3298bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3299{
3300 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3301 struct bfi_fcport_trunk_link_s *tlink;
3302 struct bfa_trunk_link_attr_s *lattr;
3303 enum bfa_trunk_state state_prev;
3304 int i;
3305 int link_bm = 0;
3306
3307 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3308 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3309 scn->trunk_state == BFA_TRUNK_OFFLINE);
3310
3311 bfa_trc(fcport->bfa, trunk->attr.state);
3312 bfa_trc(fcport->bfa, scn->trunk_state);
3313 bfa_trc(fcport->bfa, scn->trunk_speed);
3314
3315 /**
3316 * Save off new state for trunk attribute query
3317 */
3318 state_prev = trunk->attr.state;
3319 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3320 trunk->attr.state = scn->trunk_state;
3321 trunk->attr.speed = scn->trunk_speed;
3322 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3323 lattr = &trunk->attr.link_attr[i];
3324 tlink = &scn->tlink[i];
3325
3326 lattr->link_state = tlink->state;
3327 lattr->trunk_wwn = tlink->trunk_wwn;
3328 lattr->fctl = tlink->fctl;
3329 lattr->speed = tlink->speed;
3330 lattr->deskew = bfa_os_ntohl(tlink->deskew);
3331
3332 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3333 fcport->speed = tlink->speed;
3334 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3335 link_bm |= 1 << i;
3336 }
3337
3338 bfa_trc(fcport->bfa, lattr->link_state);
3339 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3340 bfa_trc(fcport->bfa, lattr->fctl);
3341 bfa_trc(fcport->bfa, lattr->speed);
3342 bfa_trc(fcport->bfa, lattr->deskew);
3343 }
3344
3345 switch (link_bm) {
3346 case 3:
3347 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3348 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3349 break;
3350 case 2:
3351 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3352 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3353 break;
3354 case 1:
3355 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3356 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3357 break;
3358 default:
3359 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3360 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3361 }
3362
3363 /**
3364 * Notify upper layers if trunk state changed.
3365 */
3366 if ((state_prev != trunk->attr.state) ||
3367 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3368 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3369 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3370 }
3371}
3372
/**
 * IOC is going away: if trunking is configured, send a link-down SCN
 * (when the trunk was online) and reset all cached trunk/link
 * attributes to their offline defaults.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/**
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3400
3401
3402
3403/**
3404 * hal_port_public
3405 */
3406
3407/**
3408 * Called to initialize port attributes
3409 */
3410void
3411bfa_fcport_init(struct bfa_s *bfa)
3412{
3413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3414
3415 /**
3416 * Initialize port attributes from IOC hardware data.
3417 */
3418 bfa_fcport_set_wwns(fcport);
3419 if (fcport->cfg.maxfrsize == 0)
3420 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3421 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3422 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3423
3424 bfa_assert(fcport->cfg.maxfrsize);
3425 bfa_assert(fcport->cfg.rx_bbcredit);
3426 bfa_assert(fcport->speed_sup);
3427}
3428
3429/**
3430 * Firmware message handler.
3431 */
3432void
3433bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3434{
3435 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3436 union bfi_fcport_i2h_msg_u i2hmsg;
3437
3438 i2hmsg.msg = msg;
3439 fcport->event_arg.i2hmsg = i2hmsg;
3440
3441 bfa_trc(bfa, msg->mhdr.msg_id);
3442 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3443
3444 switch (msg->mhdr.msg_id) {
3445 case BFI_FCPORT_I2H_ENABLE_RSP:
3446 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3447 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3448 break;
3449
3450 case BFI_FCPORT_I2H_DISABLE_RSP:
3451 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3452 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3453 break;
3454
3455 case BFI_FCPORT_I2H_EVENT:
3456 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3457 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3458 else
3459 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3460 break;
3461
3462 case BFI_FCPORT_I2H_TRUNK_SCN:
3463 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3464 break;
3465
3466 case BFI_FCPORT_I2H_STATS_GET_RSP:
3467 /*
3468 * check for timer pop before processing the rsp
3469 */
3470 if (fcport->stats_busy == BFA_FALSE ||
3471 fcport->stats_status == BFA_STATUS_ETIMER)
3472 break;
3473
3474 bfa_timer_stop(&fcport->timer);
3475 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3476 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3477 __bfa_cb_fcport_stats_get, fcport);
3478 break;
3479
3480 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3481 /*
3482 * check for timer pop before processing the rsp
3483 */
3484 if (fcport->stats_busy == BFA_FALSE ||
3485 fcport->stats_status == BFA_STATUS_ETIMER)
3486 break;
3487
3488 bfa_timer_stop(&fcport->timer);
3489 fcport->stats_status = BFA_STATUS_OK;
3490 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3491 __bfa_cb_fcport_stats_clr, fcport);
3492 break;
3493
3494 case BFI_FCPORT_I2H_ENABLE_AEN:
3495 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3496 break;
3497
3498 case BFI_FCPORT_I2H_DISABLE_AEN:
3499 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3500 break;
3501
3502 default:
3503 bfa_assert(0);
3504 break;
3505 }
3506}
3507
3508
3509
3510/**
3511 * hal_port_api
3512 */
3513
3514/**
3515 * Registered callback for port events.
3516 */
3517void
3518bfa_fcport_event_register(struct bfa_s *bfa,
3519 void (*cbfn) (void *cbarg,
3520 enum bfa_port_linkstate event),
3521 void *cbarg)
3522{
3523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3524
3525 fcport->event_cbfn = cbfn;
3526 fcport->event_cbarg = cbarg;
3527}
3528
3529bfa_status_t
3530bfa_fcport_enable(struct bfa_s *bfa)
3531{
3532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3533
3534 if (bfa_ioc_is_disabled(&bfa->ioc))
3535 return BFA_STATUS_IOC_DISABLED;
3536
3537 if (fcport->diag_busy)
3538 return BFA_STATUS_DIAG_BUSY;
3539
3540 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3541 return BFA_STATUS_OK;
3542}
3543
3544bfa_status_t
3545bfa_fcport_disable(struct bfa_s *bfa)
3546{
3547
3548 if (bfa_ioc_is_disabled(&bfa->ioc))
3549 return BFA_STATUS_IOC_DISABLED;
3550
3551 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3552 return BFA_STATUS_OK;
3553}
3554
3555/**
3556 * Configure port speed.
3557 */
3558bfa_status_t
3559bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3560{
3561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3562
3563 bfa_trc(bfa, speed);
3564
3565 if (fcport->cfg.trunked == BFA_TRUE)
3566 return BFA_STATUS_TRUNK_ENABLED;
3567 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3568 bfa_trc(bfa, fcport->speed_sup);
3569 return BFA_STATUS_UNSUPP_SPEED;
3570 }
3571
3572 fcport->cfg.speed = speed;
3573
3574 return BFA_STATUS_OK;
3575}
3576
3577/**
3578 * Get current speed.
3579 */
3580enum bfa_port_speed
3581bfa_fcport_get_speed(struct bfa_s *bfa)
3582{
3583 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3584
3585 return fcport->speed;
3586}
3587
3588/**
3589 * Configure port topology.
3590 */
3591bfa_status_t
3592bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3593{
3594 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3595
3596 bfa_trc(bfa, topology);
3597 bfa_trc(bfa, fcport->cfg.topology);
3598
3599 switch (topology) {
3600 case BFA_PORT_TOPOLOGY_P2P:
3601 case BFA_PORT_TOPOLOGY_LOOP:
3602 case BFA_PORT_TOPOLOGY_AUTO:
3603 break;
3604
3605 default:
3606 return BFA_STATUS_EINVAL;
3607 }
3608
3609 fcport->cfg.topology = topology;
3610 return BFA_STATUS_OK;
3611}
3612
3613/**
3614 * Get current topology.
3615 */
3616enum bfa_port_topology
3617bfa_fcport_get_topology(struct bfa_s *bfa)
3618{
3619 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3620
3621 return fcport->topology;
3622}
3623
3624bfa_status_t
3625bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3626{
3627 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3628
3629 bfa_trc(bfa, alpa);
3630 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3631 bfa_trc(bfa, fcport->cfg.hardalpa);
3632
3633 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3634 fcport->cfg.hardalpa = alpa;
3635
3636 return BFA_STATUS_OK;
3637}
3638
3639bfa_status_t
3640bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3641{
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643
3644 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3645 bfa_trc(bfa, fcport->cfg.hardalpa);
3646
3647 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3648 return BFA_STATUS_OK;
3649}
3650
3651bfa_boolean_t
3652bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3653{
3654 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3655
3656 *alpa = fcport->cfg.hardalpa;
3657 return fcport->cfg.cfg_hardalpa;
3658}
3659
3660u8
3661bfa_fcport_get_myalpa(struct bfa_s *bfa)
3662{
3663 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3664
3665 return fcport->myalpa;
3666}
3667
3668bfa_status_t
3669bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3670{
3671 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3672
3673 bfa_trc(bfa, maxfrsize);
3674 bfa_trc(bfa, fcport->cfg.maxfrsize);
3675
3676 /* with in range */
3677 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3678 return BFA_STATUS_INVLD_DFSZ;
3679
3680 /* power of 2, if not the max frame size of 2112 */
3681 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3682 return BFA_STATUS_INVLD_DFSZ;
3683
3684 fcport->cfg.maxfrsize = maxfrsize;
3685 return BFA_STATUS_OK;
3686}
3687
3688u16
3689bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3690{
3691 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3692
3693 return fcport->cfg.maxfrsize;
3694}
3695
3696u8
3697bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3698{
3699 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3700
3701 return fcport->cfg.rx_bbcredit;
3702}
3703
3704void
3705bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3706{
3707 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3708
3709 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3710 bfa_fcport_send_txcredit(fcport);
3711}
3712
3713/**
3714 * Get port attributes.
3715 */
3716
3717wwn_t
3718bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3719{
3720 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3721 if (node)
3722 return fcport->nwwn;
3723 else
3724 return fcport->pwwn;
3725}
3726
/**
 * Fill @attr with a snapshot of the port's configuration and
 * operational state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs come from the IOC, not the port */
	attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
	attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);

	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* an IOC-level problem overrides the SM-derived port state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3770
3771#define BFA_FCPORT_STATS_TOV 1000
3772
3773/**
3774 * Fetch port statistics (FCQoS or FCoE).
3775 */
3776bfa_status_t
3777bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3778 bfa_cb_port_t cbfn, void *cbarg)
3779{
3780 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3781
3782 if (fcport->stats_busy) {
3783 bfa_trc(bfa, fcport->stats_busy);
3784 return BFA_STATUS_DEVBUSY;
3785 }
3786
3787 fcport->stats_busy = BFA_TRUE;
3788 fcport->stats_ret = stats;
3789 fcport->stats_cbfn = cbfn;
3790 fcport->stats_cbarg = cbarg;
3791
3792 bfa_fcport_send_stats_get(fcport);
3793
3794 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3795 fcport, BFA_FCPORT_STATS_TOV);
3796 return BFA_STATUS_OK;
3797}
3798
3799/**
3800 * Reset port statistics (FCQoS or FCoE).
3801 */
3802bfa_status_t
3803bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3804{
3805 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3806
3807 if (fcport->stats_busy) {
3808 bfa_trc(bfa, fcport->stats_busy);
3809 return BFA_STATUS_DEVBUSY;
3810 }
3811
3812 fcport->stats_busy = BFA_TRUE;
3813 fcport->stats_cbfn = cbfn;
3814 fcport->stats_cbarg = cbarg;
3815
3816 bfa_fcport_send_stats_clear(fcport);
3817
3818 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3819 fcport, BFA_FCPORT_STATS_TOV);
3820 return BFA_STATUS_OK;
3821}
3822
3823/**
3824 * Fetch FCQoS port statistics
3825 */
3826bfa_status_t
3827bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3828 bfa_cb_port_t cbfn, void *cbarg)
3829{
3830 /* Meaningful only for FC mode */
3831 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3832
3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3834}
3835
3836/**
3837 * Reset FCoE port statistics
3838 */
3839bfa_status_t
3840bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3841{
3842 /* Meaningful only for FC mode */
3843 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3844
3845 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3846}
3847
3848/**
3849 * Fetch FCQoS port statistics
3850 */
3851bfa_status_t
3852bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3853 bfa_cb_port_t cbfn, void *cbarg)
3854{
3855 /* Meaningful only for FCoE mode */
3856 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3857
3858 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3859}
3860
3861/**
3862 * Reset FCoE port statistics
3863 */
3864bfa_status_t
3865bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3866{
3867 /* Meaningful only for FCoE mode */
3868 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3869
3870 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3871}
3872
3873void
3874bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3875{
3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3877
3878 qos_attr->state = fcport->qos_attr.state;
3879 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
3880}
3881
3882void
3883bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3884 struct bfa_qos_vc_attr_s *qos_vc_attr)
3885{
3886 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3888 u32 i = 0;
3889
3890 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
3891 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
3892 qos_vc_attr->elp_opmode_flags =
3893 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
3894
3895 /* Individual VC info */
3896 while (i < qos_vc_attr->total_vc_count) {
3897 qos_vc_attr->vc_info[i].vc_credit =
3898 bfa_vc_attr->vc_info[i].vc_credit;
3899 qos_vc_attr->vc_info[i].borrow_credit =
3900 bfa_vc_attr->vc_info[i].borrow_credit;
3901 qos_vc_attr->vc_info[i].priority =
3902 bfa_vc_attr->vc_info[i].priority;
3903 ++i;
3904 }
3905}
3906
3907/**
3908 * Fetch port attributes.
3909 */
3910bfa_boolean_t
3911bfa_fcport_is_disabled(struct bfa_s *bfa)
3912{
3913 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3914
3915 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3916 BFA_PORT_ST_DISABLED;
3917
3918}
3919
3920bfa_boolean_t
3921bfa_fcport_is_ratelim(struct bfa_s *bfa)
3922{
3923 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3924
3925 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3926
3927}
3928
3929void
3930bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3931{
3932 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3933 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3934
3935 bfa_trc(bfa, on_off);
3936 bfa_trc(bfa, fcport->cfg.qos_enabled);
3937
3938 bfa_trc(bfa, ioc_type);
3939
3940 if (ioc_type == BFA_IOC_TYPE_FC) {
3941 fcport->cfg.qos_enabled = on_off;
3942 /**
3943 * Notify fcpim of the change in QoS state
3944 */
3945 bfa_fcpim_update_ioredirect(bfa);
3946 }
3947}
3948
3949void
3950bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3951{
3952 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3953
3954 bfa_trc(bfa, on_off);
3955 bfa_trc(bfa, fcport->cfg.ratelimit);
3956
3957 fcport->cfg.ratelimit = on_off;
3958 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3960}
3961
3962/**
3963 * Configure default minimum ratelim speed
3964 */
3965bfa_status_t
3966bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3967{
3968 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3969
3970 bfa_trc(bfa, speed);
3971
3972 /* Auto and speeds greater than the supported speed, are invalid */
3973 if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3974 bfa_trc(bfa, fcport->speed_sup);
3975 return BFA_STATUS_UNSUPP_SPEED;
3976 }
3977
3978 fcport->cfg.trl_def_speed = speed;
3979
3980 return BFA_STATUS_OK;
3981}
3982
3983/**
3984 * Get default minimum ratelim speed
3985 */
3986enum bfa_port_speed
3987bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3988{
3989 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3990
3991 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3992 return fcport->cfg.trl_def_speed;
3993
3994}
3995void
3996bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3997{
3998 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3999
4000 bfa_trc(bfa, status);
4001 bfa_trc(bfa, fcport->diag_busy);
4002
4003 fcport->diag_busy = status;
4004}
4005
4006void
4007bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4008 bfa_boolean_t link_e2e_beacon)
4009{
4010 struct bfa_s *bfa = dev;
4011 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4012
4013 bfa_trc(bfa, beacon);
4014 bfa_trc(bfa, link_e2e_beacon);
4015 bfa_trc(bfa, fcport->beacon);
4016 bfa_trc(bfa, fcport->link_e2e_beacon);
4017
4018 fcport->beacon = beacon;
4019 fcport->link_e2e_beacon = link_e2e_beacon;
4020}
4021
4022bfa_boolean_t
4023bfa_fcport_is_linkup(struct bfa_s *bfa)
4024{
4025 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4026
4027 return (!fcport->cfg.trunked &&
4028 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4029 (fcport->cfg.trunked &&
4030 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4031}
4032
4033bfa_boolean_t
4034bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4035{
4036 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4037
4038 return fcport->cfg.qos_enabled;
4039}
4040
4041bfa_status_t
4042bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4043
4044{
4045 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4046 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4047
4048 bfa_trc(bfa, fcport->cfg.trunked);
4049 bfa_trc(bfa, trunk->attr.state);
4050 *attr = trunk->attr;
4051 attr->port_id = bfa_lps_get_base_pid(bfa);
4052
4053 return BFA_STATUS_OK;
4054}
4055
4056void
4057bfa_trunk_enable_cfg(struct bfa_s *bfa)
4058{
4059 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4061
4062 bfa_trc(bfa, 1);
4063 trunk->attr.state = BFA_TRUNK_OFFLINE;
4064 fcport->cfg.trunked = BFA_TRUE;
4065}
4066
4067bfa_status_t
4068bfa_trunk_enable(struct bfa_s *bfa)
4069{
4070 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4071 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4072
4073 bfa_trc(bfa, 1);
4074
4075 trunk->attr.state = BFA_TRUNK_OFFLINE;
4076 bfa_fcport_disable(bfa);
4077 fcport->cfg.trunked = BFA_TRUE;
4078 bfa_fcport_enable(bfa);
4079
4080 return BFA_STATUS_OK;
4081}
4082
4083bfa_status_t
4084bfa_trunk_disable(struct bfa_s *bfa)
4085{
4086 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4087 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4088
4089 bfa_trc(bfa, 0);
4090 trunk->attr.state = BFA_TRUNK_DISABLED;
4091 bfa_fcport_disable(bfa);
4092 fcport->cfg.trunked = BFA_FALSE;
4093 bfa_fcport_enable(bfa);
4094 return BFA_STATUS_OK;
4095}
4096
4097
4098/**
4099 * Rport State machine functions
4100 */
4101/**
4102 * Beginning state, only online event expected.
4103 */
4104static void
4105bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4106{
4107 bfa_trc(rp->bfa, rp->rport_tag);
4108 bfa_trc(rp->bfa, event);
4109
4110 switch (event) {
4111 case BFA_RPORT_SM_CREATE:
4112 bfa_stats(rp, sm_un_cr);
4113 bfa_sm_set_state(rp, bfa_rport_sm_created);
4114 break;
4115
4116 default:
4117 bfa_stats(rp, sm_un_unexp);
4118 bfa_sm_fault(rp->bfa, event);
4119 }
4120}
4121
/**
 * Created but not yet online.
 * ONLINE -> fwcreate (or fwcreate_qfull if the request queue is full);
 * DELETE frees the rport; HWFAIL parks in iocdisable.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4153
4154/**
4155 * Waiting for rport create response from firmware.
4156 */
4157static void
4158bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4159{
4160 bfa_trc(rp->bfa, rp->rport_tag);
4161 bfa_trc(rp->bfa, event);
4162
4163 switch (event) {
4164 case BFA_RPORT_SM_FWRSP:
4165 bfa_stats(rp, sm_fwc_rsp);
4166 bfa_sm_set_state(rp, bfa_rport_sm_online);
4167 bfa_rport_online_cb(rp);
4168 break;
4169
4170 case BFA_RPORT_SM_DELETE:
4171 bfa_stats(rp, sm_fwc_del);
4172 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4173 break;
4174
4175 case BFA_RPORT_SM_OFFLINE:
4176 bfa_stats(rp, sm_fwc_off);
4177 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4178 break;
4179
4180 case BFA_RPORT_SM_HWFAIL:
4181 bfa_stats(rp, sm_fwc_hwf);
4182 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4183 break;
4184
4185 default:
4186 bfa_stats(rp, sm_fwc_unexp);
4187 bfa_sm_fault(rp->bfa, event);
4188 }
4189}
4190
4191/**
4192 * Request queue is full, awaiting queue resume to send create request.
4193 */
4194static void
4195bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4196{
4197 bfa_trc(rp->bfa, rp->rport_tag);
4198 bfa_trc(rp->bfa, event);
4199
4200 switch (event) {
4201 case BFA_RPORT_SM_QRESUME:
4202 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4203 bfa_rport_send_fwcreate(rp);
4204 break;
4205
4206 case BFA_RPORT_SM_DELETE:
4207 bfa_stats(rp, sm_fwc_del);
4208 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4209 bfa_reqq_wcancel(&rp->reqq_wait);
4210 bfa_rport_free(rp);
4211 break;
4212
4213 case BFA_RPORT_SM_OFFLINE:
4214 bfa_stats(rp, sm_fwc_off);
4215 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4216 bfa_reqq_wcancel(&rp->reqq_wait);
4217 bfa_rport_offline_cb(rp);
4218 break;
4219
4220 case BFA_RPORT_SM_HWFAIL:
4221 bfa_stats(rp, sm_fwc_hwf);
4222 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4223 bfa_reqq_wcancel(&rp->reqq_wait);
4224 break;
4225
4226 default:
4227 bfa_stats(rp, sm_fwc_unexp);
4228 bfa_sm_fault(rp->bfa, event);
4229 }
4230}
4231
4232/**
4233 * Online state - normal parking state.
4234 */
4235static void
4236bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4237{
4238 struct bfi_rport_qos_scn_s *qos_scn;
4239
4240 bfa_trc(rp->bfa, rp->rport_tag);
4241 bfa_trc(rp->bfa, event);
4242
4243 switch (event) {
4244 case BFA_RPORT_SM_OFFLINE:
4245 bfa_stats(rp, sm_on_off);
4246 if (bfa_rport_send_fwdelete(rp))
4247 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4248 else
4249 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4250 break;
4251
4252 case BFA_RPORT_SM_DELETE:
4253 bfa_stats(rp, sm_on_del);
4254 if (bfa_rport_send_fwdelete(rp))
4255 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4256 else
4257 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4258 break;
4259
4260 case BFA_RPORT_SM_HWFAIL:
4261 bfa_stats(rp, sm_on_hwf);
4262 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4263 break;
4264
4265 case BFA_RPORT_SM_SET_SPEED:
4266 bfa_rport_send_fwspeed(rp);
4267 break;
4268
4269 case BFA_RPORT_SM_QOS_SCN:
4270 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4271 rp->qos_attr = qos_scn->new_qos_attr;
4272 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4273 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4274 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4275 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4276
4277 qos_scn->old_qos_attr.qos_flow_id =
4278 bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
4279 qos_scn->new_qos_attr.qos_flow_id =
4280 bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
4281
4282 if (qos_scn->old_qos_attr.qos_flow_id !=
4283 qos_scn->new_qos_attr.qos_flow_id)
4284 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4285 qos_scn->old_qos_attr,
4286 qos_scn->new_qos_attr);
4287 if (qos_scn->old_qos_attr.qos_priority !=
4288 qos_scn->new_qos_attr.qos_priority)
4289 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4290 qos_scn->old_qos_attr,
4291 qos_scn->new_qos_attr);
4292 break;
4293
4294 default:
4295 bfa_stats(rp, sm_on_unexp);
4296 bfa_sm_fault(rp->bfa, event);
4297 }
4298}
4299
4300/**
4301 * Firmware rport is being deleted - awaiting f/w response.
4302 */
4303static void
4304bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4305{
4306 bfa_trc(rp->bfa, rp->rport_tag);
4307 bfa_trc(rp->bfa, event);
4308
4309 switch (event) {
4310 case BFA_RPORT_SM_FWRSP:
4311 bfa_stats(rp, sm_fwd_rsp);
4312 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4313 bfa_rport_offline_cb(rp);
4314 break;
4315
4316 case BFA_RPORT_SM_DELETE:
4317 bfa_stats(rp, sm_fwd_del);
4318 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4319 break;
4320
4321 case BFA_RPORT_SM_HWFAIL:
4322 bfa_stats(rp, sm_fwd_hwf);
4323 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4324 bfa_rport_offline_cb(rp);
4325 break;
4326
4327 default:
4328 bfa_stats(rp, sm_fwd_unexp);
4329 bfa_sm_fault(rp->bfa, event);
4330 }
4331}
4332
/**
 * Firmware delete is pended on a full request queue.
 * QRESUME retries the delete; HWFAIL cancels the queue wait first.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4362
4363/**
4364 * Offline state.
4365 */
4366static void
4367bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4368{
4369 bfa_trc(rp->bfa, rp->rport_tag);
4370 bfa_trc(rp->bfa, event);
4371
4372 switch (event) {
4373 case BFA_RPORT_SM_DELETE:
4374 bfa_stats(rp, sm_off_del);
4375 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4376 bfa_rport_free(rp);
4377 break;
4378
4379 case BFA_RPORT_SM_ONLINE:
4380 bfa_stats(rp, sm_off_on);
4381 if (bfa_rport_send_fwcreate(rp))
4382 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4383 else
4384 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4385 break;
4386
4387 case BFA_RPORT_SM_HWFAIL:
4388 bfa_stats(rp, sm_off_hwf);
4389 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4390 break;
4391
4392 default:
4393 bfa_stats(rp, sm_off_unexp);
4394 bfa_sm_fault(rp->bfa, event);
4395 }
4396}
4397
4398/**
4399 * Rport is deleted, waiting for firmware response to delete.
4400 */
4401static void
4402bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4403{
4404 bfa_trc(rp->bfa, rp->rport_tag);
4405 bfa_trc(rp->bfa, event);
4406
4407 switch (event) {
4408 case BFA_RPORT_SM_FWRSP:
4409 bfa_stats(rp, sm_del_fwrsp);
4410 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4411 bfa_rport_free(rp);
4412 break;
4413
4414 case BFA_RPORT_SM_HWFAIL:
4415 bfa_stats(rp, sm_del_hwf);
4416 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4417 bfa_rport_free(rp);
4418 break;
4419
4420 default:
4421 bfa_sm_fault(rp->bfa, event);
4422 }
4423}
4424
/**
 * Delete is pended on a full request queue.
 * QRESUME retries the firmware delete; HWFAIL cancels the queue wait
 * and frees the rport immediately.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4449
4450/**
4451 * Waiting for rport create response from firmware. A delete is pending.
4452 */
4453static void
4454bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4455 enum bfa_rport_event event)
4456{
4457 bfa_trc(rp->bfa, rp->rport_tag);
4458 bfa_trc(rp->bfa, event);
4459
4460 switch (event) {
4461 case BFA_RPORT_SM_FWRSP:
4462 bfa_stats(rp, sm_delp_fwrsp);
4463 if (bfa_rport_send_fwdelete(rp))
4464 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4465 else
4466 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4467 break;
4468
4469 case BFA_RPORT_SM_HWFAIL:
4470 bfa_stats(rp, sm_delp_hwf);
4471 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4472 bfa_rport_free(rp);
4473 break;
4474
4475 default:
4476 bfa_stats(rp, sm_delp_unexp);
4477 bfa_sm_fault(rp->bfa, event);
4478 }
4479}
4480
4481/**
4482 * Waiting for rport create response from firmware. Rport offline is pending.
4483 */
4484static void
4485bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4486 enum bfa_rport_event event)
4487{
4488 bfa_trc(rp->bfa, rp->rport_tag);
4489 bfa_trc(rp->bfa, event);
4490
4491 switch (event) {
4492 case BFA_RPORT_SM_FWRSP:
4493 bfa_stats(rp, sm_offp_fwrsp);
4494 if (bfa_rport_send_fwdelete(rp))
4495 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4496 else
4497 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4498 break;
4499
4500 case BFA_RPORT_SM_DELETE:
4501 bfa_stats(rp, sm_offp_del);
4502 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4503 break;
4504
4505 case BFA_RPORT_SM_HWFAIL:
4506 bfa_stats(rp, sm_offp_hwf);
4507 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4508 break;
4509
4510 default:
4511 bfa_stats(rp, sm_offp_unexp);
4512 bfa_sm_fault(rp->bfa, event);
4513 }
4514}
4515
4516/**
4517 * IOC h/w failed.
4518 */
4519static void
4520bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4521{
4522 bfa_trc(rp->bfa, rp->rport_tag);
4523 bfa_trc(rp->bfa, event);
4524
4525 switch (event) {
4526 case BFA_RPORT_SM_OFFLINE:
4527 bfa_stats(rp, sm_iocd_off);
4528 bfa_rport_offline_cb(rp);
4529 break;
4530
4531 case BFA_RPORT_SM_DELETE:
4532 bfa_stats(rp, sm_iocd_del);
4533 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4534 bfa_rport_free(rp);
4535 break;
4536
4537 case BFA_RPORT_SM_ONLINE:
4538 bfa_stats(rp, sm_iocd_on);
4539 if (bfa_rport_send_fwcreate(rp))
4540 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4541 else
4542 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4543 break;
4544
4545 case BFA_RPORT_SM_HWFAIL:
4546 break;
4547
4548 default:
4549 bfa_stats(rp, sm_iocd_unexp);
4550 bfa_sm_fault(rp->bfa, event);
4551 }
4552}
4553
4554
4555
4556/**
4557 * bfa_rport_private BFA rport private functions
4558 */
4559
4560static void
4561__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4562{
4563 struct bfa_rport_s *rp = cbarg;
4564
4565 if (complete)
4566 bfa_cb_rport_online(rp->rport_drv);
4567}
4568
4569static void
4570__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4571{
4572 struct bfa_rport_s *rp = cbarg;
4573
4574 if (complete)
4575 bfa_cb_rport_offline(rp->rport_drv);
4576}
4577
4578static void
4579bfa_rport_qresume(void *cbarg)
4580{
4581 struct bfa_rport_s *rp = cbarg;
4582
4583 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4584}
4585
/**
 * Report the kernel-memory needs of the rport module, enforcing the
 * minimum rport count. @dm_len (DMA memory) is left unchanged - this
 * module needs none.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
4595
/**
 * Module attach: carve the rport array out of the pre-allocated
 * kernel-memory region sized by bfa_rport_meminfo(), initialize each
 * rport and put all but rport 0 on the free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/**
		 *  - is unused
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/**
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4634
/* Module detach hook: no per-module teardown is required */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4639
/* Module start hook: nothing to do for rports */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4644
/* Module stop hook: nothing to do for rports */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4649
4650static void
4651bfa_rport_iocdisable(struct bfa_s *bfa)
4652{
4653 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4654 struct bfa_rport_s *rport;
4655 struct list_head *qe, *qen;
4656
4657 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4658 rport = (struct bfa_rport_s *) qe;
4659 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4660 }
4661}
4662
4663static struct bfa_rport_s *
4664bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4665{
4666 struct bfa_rport_s *rport;
4667
4668 bfa_q_deq(&mod->rp_free_q, &rport);
4669 if (rport)
4670 list_add_tail(&rport->qe, &mod->rp_active_q);
4671
4672 return rport;
4673}
4674
4675static void
4676bfa_rport_free(struct bfa_rport_s *rport)
4677{
4678 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4679
4680 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4681 list_del(&rport->qe);
4682 list_add_tail(&rport->qe, &mod->rp_free_q);
4683}
4684
/*
 * Send a rport-create request to firmware. Returns BFA_FALSE and
 * queues a resume callback (bfa_rport_qresume) when the request
 * queue has no room; the caller's state machine retries then.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* Build the create request from the cached rport_info. */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}

/*
 * Send a rport-delete request to firmware; identifies the rport by
 * its firmware handle. Waits for queue space like fwcreate.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}

/*
 * Send a set-speed request to firmware. Unlike create/delete, this
 * does NOT wait for queue space: on a full queue it only traces and
 * returns BFA_FALSE (speed update is best-effort).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4768
4769
4770
4771/**
4772 * bfa_rport_public
4773 */
4774
4775/**
4776 * Rport interrupt processing.
4777 */
/*
 * Rport interrupt handler: dispatch firmware-to-host rport messages
 * (create/delete responses and QoS state-change notifications) to the
 * owning rport's state machine. Unknown message ids assert.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* latch the firmware handle and QoS attrs for this rport */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* stash raw event for the state machine to interpret */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4814
4815
4816
4817/**
4818 * bfa_rport_api
4819 */
4820
/*
 * Allocate a BFA rport and kick its state machine with CREATE.
 * rport_drv is the opaque fcs/driver object stored back-pointer style.
 * Returns NULL when the free list is exhausted.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	bfa_rport_clear_stats(rp);

	/* a freshly allocated rport must still be in uninit state */
	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}

/*
 * Delete an rport; actual teardown is driven by the state machine.
 */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
4846
/*
 * Bring an rport online with the supplied login parameters; copies
 * rport_info into the rport and sends the ONLINE event.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* NOTE(review): this assert contradicts the fallback just below;
	 * presumably bfa_assert compiles out in non-debug builds so the
	 * JBOD workaround still runs in production -- confirm. */
	bfa_assert(rport_info->max_frmsz != 0);

	/**
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	bfa_os_assign(rport->rport_info, *rport_info);
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4864
/*
 * Take an rport offline; teardown is driven by the state machine.
 */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}

/*
 * Set the rport's operating speed. A concrete speed is required;
 * AUTO must be resolved by the caller first.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}

/*
 * Copy out the rport's HAL statistics (struct copy).
 */
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
	struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}

/*
 * Copy out QoS attributes; flow id is converted from wire (network)
 * byte order for the caller.
 */
void
bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
					struct bfa_rport_qos_attr_s *qos_attr)
{
	qos_attr->qos_priority = rport->qos_attr.qos_priority;
	qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);

}

/*
 * Zero the rport's HAL statistics.
 */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}
4902
4903
4904/**
4905 * SGPG related functions
4906 */
4907
/**
 * Compute memory needed by the SGPG module: one bfa_sgpg_s tracking
 * struct (kernel va) and one bfi_sgpg_s page (DMA-able) per SG page,
 * plus one extra of each for alignment slack. Enforces the
 * BFA_SGPG_MIN floor on the configured page count.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;

	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
}
4921
4922
/*
 * SGPG module attach: carve the SG-page arrays out of the meminfo
 * regions, align the DMA array to the SG-page size, link every page
 * onto the free queue, and advance the meminfo cursors past the
 * consumed memory.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* view the same physical address as raw u64 or as the
	 * firmware's address union */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/* round the DMA base up to an SG-page boundary; apply the same
	 * offset to the kva and dma-virt views so all three stay in step */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* base must be SG-page aligned after the adjustment above */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
		bfa_os_memset(sgpg, 0, sizeof(*sgpg));

		/* tracking struct records va and (endian-adjusted) pa */
		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* consume the claimed memory */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4977
/* Module no-op hooks: SGPG needs no work at detach/start/stop or on
 * IOC disable; entries exist to satisfy the module function table. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4997
4998
4999
5000/**
5001 * hal_sgpg_public BFA SGPG public functions
5002 */
5003
/*
 * Allocate nsgpgs SG pages onto the caller's sgpg_q. All-or-nothing:
 * returns BFA_STATUS_ENOMEM without taking any pages when fewer than
 * nsgpgs are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	bfa_trc_fp(bfa, nsgpgs);

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		/* free count said there are enough; dequeue cannot fail */
		bfa_assert(hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}

/*
 * Return nsgpg SG pages from sgpg_q to the free pool, then hand as
 * many freed pages as possible to waiters queued by bfa_sgpg_wait(),
 * invoking each waiter's callback once its request is fully satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/**
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;	/* partial grant */
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request fully satisfied: dequeue and notify */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5059
/*
 * Queue a wait for nsgpg SG pages. Callers use this only when a
 * bfa_sgpg_malloc() could not be satisfied (asserted below). Any
 * currently-free pages are granted immediately; the remainder is
 * delivered by bfa_sgpg_mfree() as pages are returned, and wqe->cbfn
 * fires once the full request is satisfied.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/**
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/**
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}

/*
 * Cancel a pending SG-page wait; returns any partially-granted pages
 * back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				wqe->nsgpg_total - wqe->nsgpg);
}
5098
5099void
5100bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5101 void *cbarg)
5102{
5103 INIT_LIST_HEAD(&wqe->sgpg_q);
5104 wqe->cbfn = cbfn;
5105 wqe->cbarg = cbarg;
5106}
5107
5108/**
5109 * UF related functions
5110 */
5111/*
5112 *****************************************************************************
5113 * Internal functions
5114 *****************************************************************************
5115 */
/*
 * Completion callback for a received UF: invokes the registered
 * receive handler (see bfa_uf_recv_register) unless the callback was
 * canceled (complete == BFA_FALSE).
 */
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_uf_s *uf = cbarg;
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);

	if (complete)
		ufm->ufrecv(ufm->cbarg, uf);
}

/*
 * Claim the DMA-able block that holds the posted UF receive buffers
 * (one bfa_uf_buf_s per UF, rounded up to DMA alignment), zero it,
 * and advance the meminfo DMA cursors past it.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
5141
5142static void
5143claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5144{
5145 struct bfi_uf_buf_post_s *uf_bp_msg;
5146 struct bfi_sge_s *sge;
5147 union bfi_addr_u sga_zero = { {0} };
5148 u16 i;
5149 u16 buf_len;
5150
5151 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
5152 uf_bp_msg = ufm->uf_buf_posts;
5153
5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5155 i++, uf_bp_msg++) {
5156 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5157
5158 uf_bp_msg->buf_tag = i;
5159 buf_len = sizeof(struct bfa_uf_buf_s);
5160 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5162 bfa_lpuid(ufm->bfa));
5163
5164 sge = uf_bp_msg->sge;
5165 sge[0].sg_len = buf_len;
5166 sge[0].flags = BFI_SGE_DATA_LAST;
5167 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
5168 bfa_sge_to_be(sge);
5169
5170 sge[1].sg_len = buf_len;
5171 sge[1].flags = BFI_SGE_PGDLEN;
5172 sge[1].sga = sga_zero;
5173 bfa_sge_to_be(&sge[1]);
5174 }
5175
5176 /**
5177 * advance pointer beyond consumed memory
5178 */
5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
5180}
5181
/*
 * Claim kernel-virtual memory for the UF descriptor array and link
 * every initialized UF onto the free queue. Each UF records the
 * kva/pa of its posted buffer claimed earlier by claim_uf_pbs().
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/**
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}

/*
 * Claim all UF memory. Order matters: the posted buffers must be
 * claimed first because claim_ufs()/claim_uf_post_msgs() record their
 * kva/pa addresses.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}

/*
 * Report memory needed by the UF module: DMA-able posted buffers plus
 * kernel-virtual UF descriptors and pre-built post messages.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
							BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}

/*
 * UF module attach: reset module state, init queues and claim all
 * UF memory from the meminfo regions.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		  struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
5252
/* No module-specific teardown needed at detach time. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}

/* Dequeue one UF from the free list; returns NULL when empty. */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}

/* Return a UF to the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5272
/*
 * Post one UF receive buffer to firmware by copying its pre-built
 * post message (see claim_uf_post_msgs) into the request queue, then
 * move the UF onto the posted queue. Returns BFA_STATUS_FAILED when
 * the request queue has no room; the UF is NOT requeued here --
 * callers must handle it.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5291
5292static void
5293bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5294{
5295 struct bfa_uf_s *uf;
5296
5297 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5298 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5299 break;
5300 }
5301}
5302
5303static void
5304uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5305{
5306 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5307 u16 uf_tag = m->buf_tag;
5308 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
5309 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5310 u8 *buf = &uf_buf->d[0];
5311 struct fchs_s *fchs;
5312
5313 m->frm_len = bfa_os_ntohs(m->frm_len);
5314 m->xfr_len = bfa_os_ntohs(m->xfr_len);
5315
5316 fchs = (struct fchs_s *)uf_buf;
5317
5318 list_del(&uf->qe); /* dequeue from posted queue */
5319
5320 uf->data_ptr = buf;
5321 uf->data_len = m->xfr_len;
5322
5323 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
5324
5325 if (uf->data_len == sizeof(struct fchs_s)) {
5326 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5327 uf->data_len, (struct fchs_s *)buf);
5328 } else {
5329 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5330 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5331 BFA_PL_EID_RX, uf->data_len,
5332 (struct fchs_s *)buf, pld_w0);
5333 }
5334
5335 if (bfa->fcs)
5336 __bfa_cb_uf_recv(uf, BFA_TRUE);
5337 else
5338 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5339}
5340
/* No module-specific work needed at stop time. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}

/*
 * IOC failure: reclaim all UFs still posted to the (now dead)
 * firmware back onto the free list.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}

/* Module start: hand all free UF receive buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5365
5366
5367
5368/**
5369 * hal_uf_api
5370 */
5371
/**
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}

/**
 * Free an unsolicited frame back to BFA. Also reposts any pending
 * free UFs to firmware.
 *
 * @param[in]		uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5401
5402
5403
5404/**
5405 * uf_pub BFA uf module public functions
5406 */
/*
 * UF interrupt handler: dispatch firmware-to-host UF messages. Only
 * frame-received is expected; anything else asserts.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
5422
5423
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
new file mode 100644
index 000000000000..9921dad0d039
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -0,0 +1,657 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_SVC_H__
19#define __BFA_SVC_H__
20
21#include "bfa_cs.h"
22#include "bfi_ms.h"
23
24
25/**
26 * Scatter-gather DMA related defines
27 */
28#define BFA_SGPG_MIN (16)
29
30/**
31 * Alignment macro for SG page allocation
32 */
33#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
34 & ~(sizeof(struct bfi_sgpg_s) - 1))
35
36struct bfa_sgpg_wqe_s {
37 struct list_head qe; /* queue sg page element */
38 int nsgpg; /* pages to be allocated */
39 int nsgpg_total; /* total pages required */
40 void (*cbfn) (void *cbarg); /* callback function */
41 void *cbarg; /* callback arg */
42 struct list_head sgpg_q; /* queue of alloced sgpgs */
43};
44
45struct bfa_sgpg_s {
46 struct list_head qe; /* queue sg page element */
47 struct bfi_sgpg_s *sgpg; /* va of SG page */
48 union bfi_addr_u sgpg_pa; /* pa of SG page */
49};
50
51/**
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required.
54 */
55#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56
57struct bfa_sgpg_mod_s {
58 struct bfa_s *bfa;
59 int num_sgpgs; /* number of SG pages */
60 int free_sgpgs; /* number of free SG pages */
61 struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
62 struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
63 u64 sgpg_arr_pa; /* SG page array DMA addr */
64 struct list_head sgpg_q; /* queue of free SG pages */
65 struct list_head sgpg_wait_q; /* wait queue for SG pages */
66};
67#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
68
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs);
71void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
72void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
73 void (*cbfn) (void *cbarg), void *cbarg);
74void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
75void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
76
77
78/**
79 * FCXP related defines
80 */
81#define BFA_FCXP_MIN (1)
82#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
83#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
84
85struct bfa_fcxp_mod_s {
86 struct bfa_s *bfa; /* backpointer to BFA */
87 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
88 u16 num_fcxps; /* max num FCXP requests */
89 struct list_head fcxp_free_q; /* free FCXPs */
90 struct list_head fcxp_active_q; /* active FCXPs */
91 void *req_pld_list_kva; /* list of FCXP req pld */
92 u64 req_pld_list_pa; /* list of FCXP req pld */
93 void *rsp_pld_list_kva; /* list of FCXP resp pld */
94 u64 rsp_pld_list_pa; /* list of FCXP resp pld */
95 struct list_head wait_q; /* wait queue for free fcxp */
96 u32 req_pld_sz;
97 u32 rsp_pld_sz;
98};
99
100#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
101#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
102
103typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
104 void *cb_arg, bfa_status_t req_status,
105 u32 rsp_len, u32 resid_len,
106 struct fchs_s *rsp_fchs);
107
108typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
109typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
110typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
111 void *cbarg, enum bfa_status req_status,
112 u32 rsp_len, u32 resid_len,
113 struct fchs_s *rsp_fchs);
114typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
115
116
117
118/**
119 * Information needed for a FCXP request
120 */
struct bfa_fcxp_req_info_s {
	struct bfa_rport_s *bfa_rport;
					/** Pointer to the bfa rport that was
					 * returned from bfa_rport_create().
					 * This could be left NULL for WKA or
					 * for FCXP interactions before the
					 * rport nexus is established
					 */
	struct fchs_s	fchs;	/* request FC header structure */
	u8		cts;	/* continuous sequence */
	u8		class;	/* FC class for the request/response */
	u16	max_frmsz;	/* max send frame size */
	u16	vf_id;		/* vsan tag if applicable */
	u8		lp_tag;	/* lport tag */
	u32	req_tot_len;	/* request payload total length */
};
137
138struct bfa_fcxp_rsp_info_s {
139 struct fchs_s rsp_fchs;
140 /** !< Response frame's FC header will
141 * be sent back in this field */
142 u8 rsp_timeout;
143 /** !< timeout in seconds, 0-no response
144 */
145 u8 rsvd2[3];
146 u32 rsp_maxlen; /* max response length expected */
147};
148
149struct bfa_fcxp_s {
150 struct list_head qe; /* fcxp queue element */
151 bfa_sm_t sm; /* state machine */
152 void *caller; /* driver or fcs */
153 struct bfa_fcxp_mod_s *fcxp_mod;
154 /* back pointer to fcxp mod */
155 u16 fcxp_tag; /* internal tag */
156 struct bfa_fcxp_req_info_s req_info;
157 /* request info */
158 struct bfa_fcxp_rsp_info_s rsp_info;
159 /* response info */
160 u8 use_ireqbuf; /* use internal req buf */
161 u8 use_irspbuf; /* use internal rsp buf */
162 u32 nreq_sgles; /* num request SGLEs */
163 u32 nrsp_sgles; /* num response SGLEs */
164 struct list_head req_sgpg_q; /* SG pages for request buf */
165 struct list_head req_sgpg_wqe; /* wait queue for req SG page */
166 struct list_head rsp_sgpg_q; /* SG pages for response buf */
167 struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
168
169 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
170 /* SG elem addr user function */
171 bfa_fcxp_get_sglen_t req_sglen_cbfn;
172 /* SG elem len user function */
173 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
174 /* SG elem addr user function */
175 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
176 /* SG elem len user function */
177 bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
178 void *send_cbarg; /* callback arg */
179 struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
180 /* req SG elems */
181 struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
182 /* rsp SG elems */
183 u8 rsp_status; /* comp: rsp status */
184 u32 rsp_len; /* comp: actual response len */
185 u32 residue_len; /* comp: residual rsp length */
186 struct fchs_s rsp_fchs; /* comp: response fchs */
187 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
188 struct bfa_reqq_wait_s reqq_wqe;
189 bfa_boolean_t reqq_waiting;
190};
191
192struct bfa_fcxp_wqe_s {
193 struct list_head qe;
194 bfa_fcxp_alloc_cbfn_t alloc_cbfn;
195 void *alloc_cbarg;
196 void *caller;
197 struct bfa_s *bfa;
198 int nreq_sgles;
199 int nrsp_sgles;
200 bfa_fcxp_get_sgaddr_t req_sga_cbfn;
201 bfa_fcxp_get_sglen_t req_sglen_cbfn;
202 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
203 bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
204};
205
206#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
207#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
208#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
209
210#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
211 ((_fcxp)->fcxp_mod->req_pld_list_pa + \
212 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
213
214#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
215 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
216 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
217
218void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
219
220
221/**
222 * RPORT related defines
223 */
224#define BFA_RPORT_MIN 4
225
struct bfa_rport_mod_s {
	struct bfa_rport_s *rps_list;	/* list of rports */
	struct list_head	rp_free_q;	/* free bfa_rports */
	struct list_head	rp_active_q;	/* active bfa_rports */
	u16	num_rports;	/* number of rports */
};
232
233#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
234
235/**
236 * Convert rport tag to RPORT
237 */
238#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
239 (BFA_RPORT_MOD(__bfa)->rps_list + \
240 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
241
242/*
243 * protected functions
244 */
245void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
246
247/**
248 * BFA rport information.
249 */
250struct bfa_rport_info_s {
251 u16 max_frmsz; /* max rcv pdu size */
252 u32 pid:24, /* remote port ID */
253 lp_tag:8; /* tag */
254 u32 local_pid:24, /* local port ID */
255 cisc:8; /* CIRO supported */
256 u8 fc_class; /* supported FC classes. enum fc_cos */
257 u8 vf_en; /* virtual fabric enable */
258 u16 vf_id; /* virtual fabric ID */
259 enum bfa_port_speed speed; /* Rport's current speed */
260};
261
262/**
263 * BFA rport data structure
264 */
265struct bfa_rport_s {
266 struct list_head qe; /* queue element */
267 bfa_sm_t sm; /* state machine */
268 struct bfa_s *bfa; /* backpointer to BFA */
269 void *rport_drv; /* fcs/driver rport object */
270 u16 fw_handle; /* firmware rport handle */
271 u16 rport_tag; /* BFA rport tag */
272 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
273 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
274 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
275 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
276 struct bfa_rport_qos_attr_s qos_attr;
277 union a {
278 bfa_status_t status; /* f/w status */
279 void *fw_msg; /* QoS scn event */
280 } event_arg;
281};
282#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
283
284
285/**
286 * UF - unsolicited receive related defines
287 */
288
289#define BFA_UF_MIN (4)
290
291
292struct bfa_uf_s {
293 struct list_head qe; /* queue element */
294 struct bfa_s *bfa; /* bfa instance */
295 u16 uf_tag; /* identifying tag fw msgs */
296 u16 vf_id;
297 u16 src_rport_handle;
298 u16 rsvd;
299 u8 *data_ptr;
300 u16 data_len; /* actual receive length */
301 u16 pb_len; /* posted buffer length */
302 void *buf_kva; /* buffer virtual address */
303 u64 buf_pa; /* buffer physical address */
304 struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
305 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
306};
307
308/**
309 * Callback prototype for unsolicited frame receive handler.
310 *
311 * @param[in] cbarg callback arg for receive handler
312 * @param[in] uf unsolicited frame descriptor
313 *
314 * @return None
315 */
316typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
317
318struct bfa_uf_mod_s {
319 struct bfa_s *bfa; /* back pointer to BFA */
320 struct bfa_uf_s *uf_list; /* array of UFs */
321 u16 num_ufs; /* num unsolicited rx frames */
322 struct list_head uf_free_q; /* free UFs */
323 struct list_head uf_posted_q; /* UFs posted to IOC */
324 struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
325 u64 uf_pbs_pa; /* phy addr for UF bufs */
326 struct bfi_uf_buf_post_s *uf_buf_posts;
327 /* pre-built UF post msgs */
328 bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
329 void *cbarg; /* uf receive handler arg */
330};
331
332#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
333
334#define ufm_pbs_pa(_ufmod, _uftag) \
335 ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
336
337void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
338
339#define BFA_UF_BUFSZ (2 * 1024 + 256)
340
341/**
342 * @todo private
343 */
344struct bfa_uf_buf_s {
345 u8 d[BFA_UF_BUFSZ];
346};
347
348
349/**
350 * LPS - bfa lport login/logout service interface
351 */
352struct bfa_lps_s {
353 struct list_head qe; /* queue element */
354 struct bfa_s *bfa; /* parent bfa instance */
355 bfa_sm_t sm; /* finite state machine */
356 u8 lp_tag; /* lport tag */
357 u8 reqq; /* lport request queue */
358 u8 alpa; /* ALPA for loop topologies */
359 u32 lp_pid; /* lport port ID */
360 bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */
361 bfa_boolean_t auth_en; /* enable authentication */
362 bfa_boolean_t auth_req; /* authentication required */
363 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
364 bfa_boolean_t fport; /* attached peer is F_PORT */
365 bfa_boolean_t brcd_switch; /* attached peer is brcd sw */
366 bfa_status_t status; /* login status */
367 u16 pdusz; /* max receive PDU size */
368 u16 pr_bbcred; /* BB_CREDIT from peer */
369 u8 lsrjt_rsn; /* LSRJT reason */
370 u8 lsrjt_expl; /* LSRJT explanation */
371 wwn_t pwwn; /* port wwn of lport */
372 wwn_t nwwn; /* node wwn of lport */
373 wwn_t pr_pwwn; /* port wwn of lport peer */
374 wwn_t pr_nwwn; /* node wwn of lport peer */
375 mac_t lp_mac; /* fpma/spma MAC for lport */
376 mac_t fcf_mac; /* FCF MAC of lport */
377 struct bfa_reqq_wait_s wqe; /* request wait queue element */
378 void *uarg; /* user callback arg */
379 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
380 struct bfi_lps_login_rsp_s *loginrsp;
381 bfa_eproto_status_t ext_status;
382};
383
384struct bfa_lps_mod_s {
385 struct list_head lps_free_q;
386 struct list_head lps_active_q;
387 struct bfa_lps_s *lps_arr;
388 int num_lps;
389};
390
391#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
392#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
393
394/*
395 * external functions
396 */
397void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
398
399
400/**
401 * FCPORT related defines
402 */
403
404#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
405typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
406
407/**
408 * Link notification data structure
409 */
410struct bfa_fcport_ln_s {
411 struct bfa_fcport_s *fcport;
412 bfa_sm_t sm;
413 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
414 enum bfa_port_linkstate ln_event; /* ln event for callback */
415};
416
417struct bfa_fcport_trunk_s {
418 struct bfa_trunk_attr_s attr;
419};
420
421/**
422 * BFA FC port data structure
423 */
424struct bfa_fcport_s {
425 struct bfa_s *bfa; /* parent BFA instance */
426 bfa_sm_t sm; /* port state machine */
427 wwn_t nwwn; /* node wwn of physical port */
428 wwn_t pwwn; /* port wwn of physical oprt */
429 enum bfa_port_speed speed_sup;
430 /* supported speeds */
431 enum bfa_port_speed speed; /* current speed */
432 enum bfa_port_topology topology; /* current topology */
433 u8 myalpa; /* my ALPA in LOOP topology */
434 u8 rsvd[3];
435 struct bfa_port_cfg_s cfg; /* current port configuration */
436 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
437 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
438 struct bfa_reqq_wait_s reqq_wait;
439 /* to wait for room in reqq */
440 struct bfa_reqq_wait_s svcreq_wait;
441 /* to wait for room in reqq */
442 struct bfa_reqq_wait_s stats_reqq_wait;
443 /* to wait for room in reqq (stats) */
444 void *event_cbarg;
445 void (*event_cbfn) (void *cbarg,
446 enum bfa_port_linkstate event);
447 union {
448 union bfi_fcport_i2h_msg_u i2hmsg;
449 } event_arg;
450 void *bfad; /* BFA driver handle */
451 struct bfa_fcport_ln_s ln; /* Link Notification */
452 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
453 struct bfa_timer_s timer; /* timer */
454 u32 msgtag; /* fimrware msg tag for reply */
455 u8 *stats_kva;
456 u64 stats_pa;
457 union bfa_fcport_stats_u *stats;
458 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
459 bfa_status_t stats_status; /* stats/statsclr status */
460 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
461 bfa_boolean_t stats_qfull;
462 u32 stats_reset_time; /* stats reset time stamp */
463 bfa_cb_port_t stats_cbfn; /* driver callback function */
464 void *stats_cbarg; /* *!< user callback arg */
465 bfa_boolean_t diag_busy; /* diag busy status */
466 bfa_boolean_t beacon; /* port beacon status */
467 bfa_boolean_t link_e2e_beacon; /* link beacon status */
468 struct bfa_fcport_trunk_s trunk;
469 u16 fcoe_vlan;
470};
471
472#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
473
474/*
475 * protected functions
476 */
477void bfa_fcport_init(struct bfa_s *bfa);
478void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
479
480/*
481 * bfa fcport API functions
482 */
483bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
484bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
485bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
486 enum bfa_port_speed speed);
487enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
488bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
489 enum bfa_port_topology topo);
490enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
491bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
492bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
493u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
494bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
495bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
496u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
497u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
498void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
499wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
500void bfa_fcport_event_register(struct bfa_s *bfa,
501 void (*event_cbfn) (void *cbarg,
502 enum bfa_port_linkstate event), void *event_cbarg);
503bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
504void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
505void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
506bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
507 enum bfa_port_speed speed);
508enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
509
510void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
511void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
512void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
513 bfa_boolean_t link_e2e_beacon);
514void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
515 struct bfa_qos_attr_s *qos_attr);
516void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
517 struct bfa_qos_vc_attr_s *qos_vc_attr);
518bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
519 union bfa_fcport_stats_u *stats,
520 bfa_cb_port_t cbfn, void *cbarg);
521bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
522 void *cbarg);
523bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
524 union bfa_fcport_stats_u *stats,
525 bfa_cb_port_t cbfn, void *cbarg);
526bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
527 void *cbarg);
528bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
529bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
530bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
531 union bfa_fcport_stats_u *stats,
532 bfa_cb_port_t cbfn, void *cbarg);
533bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
534 void *cbarg);
535bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
536
537/*
538 * bfa rport API functions
539 */
540struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
541void bfa_rport_delete(struct bfa_rport_s *rport);
542void bfa_rport_online(struct bfa_rport_s *rport,
543 struct bfa_rport_info_s *rport_info);
544void bfa_rport_offline(struct bfa_rport_s *rport);
545void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
546void bfa_rport_get_stats(struct bfa_rport_s *rport,
547 struct bfa_rport_hal_stats_s *stats);
548void bfa_rport_clear_stats(struct bfa_rport_s *rport);
549void bfa_cb_rport_online(void *rport);
550void bfa_cb_rport_offline(void *rport);
551void bfa_cb_rport_qos_scn_flowid(void *rport,
552 struct bfa_rport_qos_attr_s old_qos_attr,
553 struct bfa_rport_qos_attr_s new_qos_attr);
554void bfa_cb_rport_qos_scn_prio(void *rport,
555 struct bfa_rport_qos_attr_s old_qos_attr,
556 struct bfa_rport_qos_attr_s new_qos_attr);
557void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
558 struct bfa_rport_qos_attr_s *qos_attr);
559
560/*
561 * bfa fcxp API functions
562 */
563struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
564 int nreq_sgles, int nrsp_sgles,
565 bfa_fcxp_get_sgaddr_t get_req_sga,
566 bfa_fcxp_get_sglen_t get_req_sglen,
567 bfa_fcxp_get_sgaddr_t get_rsp_sga,
568 bfa_fcxp_get_sglen_t get_rsp_sglen);
569void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
570 bfa_fcxp_alloc_cbfn_t alloc_cbfn,
571 void *cbarg, void *bfad_fcxp,
572 int nreq_sgles, int nrsp_sgles,
573 bfa_fcxp_get_sgaddr_t get_req_sga,
574 bfa_fcxp_get_sglen_t get_req_sglen,
575 bfa_fcxp_get_sgaddr_t get_rsp_sga,
576 bfa_fcxp_get_sglen_t get_rsp_sglen);
577void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
578 struct bfa_fcxp_wqe_s *wqe);
579void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
580
581void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
582void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
583
584void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
585
586void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
587 u16 vf_id, u8 lp_tag,
588 bfa_boolean_t cts, enum fc_cos cos,
589 u32 reqlen, struct fchs_s *fchs,
590 bfa_cb_fcxp_send_t cbfn,
591 void *cbarg,
592 u32 rsp_maxlen, u8 rsp_timeout);
593bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
594u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
595u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
596
597static inline void *
598bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
599{
600 return uf->data_ptr;
601}
602
603static inline u16
604bfa_uf_get_frmlen(struct bfa_uf_s *uf)
605{
606 return uf->data_len;
607}
608
609/*
610 * bfa uf API functions
611 */
612void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
613 void *cbarg);
614void bfa_uf_free(struct bfa_uf_s *uf);
615
616/**
617 * bfa lport service api
618 */
619
620u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
621struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
622void bfa_lps_delete(struct bfa_lps_s *lps);
623void bfa_lps_discard(struct bfa_lps_s *lps);
624void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
625 u16 pdusz, wwn_t pwwn, wwn_t nwwn,
626 bfa_boolean_t auth_en);
627void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
628 wwn_t pwwn, wwn_t nwwn);
629void bfa_lps_flogo(struct bfa_lps_s *lps);
630void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
631u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
632bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
633bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
634bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
635bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
636bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
637u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
638u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
639u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
640u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
641wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
642wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
643u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
644u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
645mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
646void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
647void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
648void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
649void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
650
651void bfa_trunk_enable_cfg(struct bfa_s *bfa);
652bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
653bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
654bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
655 struct bfa_trunk_attr_s *attr);
656
657#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_timer.c b/drivers/scsi/bfa/bfa_timer.c
deleted file mode 100644
index cb76481f5cb1..000000000000
--- a/drivers/scsi/bfa/bfa_timer.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_timer.h>
19#include <cs/bfa_debug.h>
20
21void
22bfa_timer_init(struct bfa_timer_mod_s *mod)
23{
24 INIT_LIST_HEAD(&mod->timer_q);
25}
26
27void
28bfa_timer_beat(struct bfa_timer_mod_s *mod)
29{
30 struct list_head *qh = &mod->timer_q;
31 struct list_head *qe, *qe_next;
32 struct bfa_timer_s *elem;
33 struct list_head timedout_q;
34
35 INIT_LIST_HEAD(&timedout_q);
36
37 qe = bfa_q_next(qh);
38
39 while (qe != qh) {
40 qe_next = bfa_q_next(qe);
41
42 elem = (struct bfa_timer_s *) qe;
43 if (elem->timeout <= BFA_TIMER_FREQ) {
44 elem->timeout = 0;
45 list_del(&elem->qe);
46 list_add_tail(&elem->qe, &timedout_q);
47 } else {
48 elem->timeout -= BFA_TIMER_FREQ;
49 }
50
51 qe = qe_next; /* go to next elem */
52 }
53
54 /*
55 * Pop all the timeout entries
56 */
57 while (!list_empty(&timedout_q)) {
58 bfa_q_deq(&timedout_q, &elem);
59 elem->timercb(elem->arg);
60 }
61}
62
63/**
64 * Should be called with lock protection
65 */
66void
67bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
68 void (*timercb) (void *), void *arg, unsigned int timeout)
69{
70
71 bfa_assert(timercb != NULL);
72 bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
73
74 timer->timeout = timeout;
75 timer->timercb = timercb;
76 timer->arg = arg;
77
78 list_add_tail(&timer->qe, &mod->timer_q);
79}
80
81/**
82 * Should be called with lock protection
83 */
84void
85bfa_timer_stop(struct bfa_timer_s *timer)
86{
87 bfa_assert(!list_empty(&timer->qe));
88
89 list_del(&timer->qe);
90}
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
deleted file mode 100644
index a7a82610db85..000000000000
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * hal_trcmod.h BFA trace modules
20 */
21
22#ifndef __BFA_TRCMOD_PRIV_H__
23#define __BFA_TRCMOD_PRIV_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_HAL_INTR = 1,
33 BFA_TRC_HAL_FCXP = 2,
34 BFA_TRC_HAL_UF = 3,
35 BFA_TRC_HAL_RPORT = 4,
36 BFA_TRC_HAL_FCPIM = 5,
37 BFA_TRC_HAL_IOIM = 6,
38 BFA_TRC_HAL_TSKIM = 7,
39 BFA_TRC_HAL_ITNIM = 8,
40 BFA_TRC_HAL_FCPORT = 9,
41 BFA_TRC_HAL_SGPG = 10,
42 BFA_TRC_HAL_FLASH = 11,
43 BFA_TRC_HAL_DEBUG = 12,
44 BFA_TRC_HAL_WWN = 13,
45 BFA_TRC_HAL_FLASH_RAW = 14,
46 BFA_TRC_HAL_SBOOT = 15,
47 BFA_TRC_HAL_SBOOT_IO = 16,
48 BFA_TRC_HAL_SBOOT_INTR = 17,
49 BFA_TRC_HAL_SBTEST = 18,
50 BFA_TRC_HAL_IPFC = 19,
51 BFA_TRC_HAL_IOCFC = 20,
52 BFA_TRC_HAL_FCPTM = 21,
53 BFA_TRC_HAL_IOTM = 22,
54 BFA_TRC_HAL_TSKTM = 23,
55 BFA_TRC_HAL_TIN = 24,
56 BFA_TRC_HAL_LPS = 25,
57 BFA_TRC_HAL_FCDIAG = 26,
58 BFA_TRC_HAL_PBIND = 27,
59 BFA_TRC_HAL_IOCFC_CT = 28,
60 BFA_TRC_HAL_IOCFC_CB = 29,
61 BFA_TRC_HAL_IOCFC_Q = 30,
62};
63
64#endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
deleted file mode 100644
index ad9aaaedd3f1..000000000000
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ /dev/null
@@ -1,690 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_cb_ioim_macros.h>
20
21BFA_TRC_FILE(HAL, TSKIM);
22
23/**
24 * task management completion handling
25 */
26#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
27 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \
28 __cbfn, (__tskim)); \
29 bfa_tskim_notify_comp(__tskim); \
30} while (0)
31
32#define bfa_tskim_notify_comp(__tskim) do { \
33 if ((__tskim)->notify) \
34 bfa_itnim_tskdone((__tskim)->itnim); \
35} while (0)
36
37/*
38 * forward declarations
39 */
40static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
41static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
42static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
43 lun_t lun);
44static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
45static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
46static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
47static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
48static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
49static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
50
51/**
52 * bfa_tskim_sm
53 */
54
55enum bfa_tskim_event {
56 BFA_TSKIM_SM_START = 1, /* TM command start */
57 BFA_TSKIM_SM_DONE = 2, /* TM completion */
58 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
59 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
60 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
61 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
62 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
63 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
64};
65
66static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
67 enum bfa_tskim_event event);
68static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
69 enum bfa_tskim_event event);
70static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
71 enum bfa_tskim_event event);
72static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
73 enum bfa_tskim_event event);
74static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
75 enum bfa_tskim_event event);
76static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
77 enum bfa_tskim_event event);
78static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
79 enum bfa_tskim_event event);
80
81/**
82 * Task management command beginning state.
83 */
84static void
85bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
86{
87 bfa_trc(tskim->bfa, event);
88
89 switch (event) {
90 case BFA_TSKIM_SM_START:
91 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
92 bfa_tskim_gather_ios(tskim);
93
94 /**
95 * If device is offline, do not send TM on wire. Just cleanup
96 * any pending IO requests and complete TM request.
97 */
98 if (!bfa_itnim_is_online(tskim->itnim)) {
99 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
100 tskim->tsk_status = BFI_TSKIM_STS_OK;
101 bfa_tskim_cleanup_ios(tskim);
102 return;
103 }
104
105 if (!bfa_tskim_send(tskim)) {
106 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
107 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
108 &tskim->reqq_wait);
109 }
110 break;
111
112 default:
113 bfa_sm_fault(tskim->bfa, event);
114 }
115}
116
117/**
118 * brief
119 * TM command is active, awaiting completion from firmware to
120 * cleanup IO requests in TM scope.
121 */
122static void
123bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
124{
125 bfa_trc(tskim->bfa, event);
126
127 switch (event) {
128 case BFA_TSKIM_SM_DONE:
129 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
130 bfa_tskim_cleanup_ios(tskim);
131 break;
132
133 case BFA_TSKIM_SM_CLEANUP:
134 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
135 if (!bfa_tskim_send_abort(tskim)) {
136 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
137 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
138 &tskim->reqq_wait);
139 }
140 break;
141
142 case BFA_TSKIM_SM_HWFAIL:
143 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
144 bfa_tskim_iocdisable_ios(tskim);
145 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
146 break;
147
148 default:
149 bfa_sm_fault(tskim->bfa, event);
150 }
151}
152
153/**
154 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
155 * completion event from firmware.
156 */
157static void
158bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
159{
160 bfa_trc(tskim->bfa, event);
161
162 switch (event) {
163 case BFA_TSKIM_SM_DONE:
164 /**
165 * Ignore and wait for ABORT completion from firmware.
166 */
167 break;
168
169 case BFA_TSKIM_SM_CLEANUP_DONE:
170 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
171 bfa_tskim_cleanup_ios(tskim);
172 break;
173
174 case BFA_TSKIM_SM_HWFAIL:
175 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
176 bfa_tskim_iocdisable_ios(tskim);
177 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
178 break;
179
180 default:
181 bfa_sm_fault(tskim->bfa, event);
182 }
183}
184
185static void
186bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
187{
188 bfa_trc(tskim->bfa, event);
189
190 switch (event) {
191 case BFA_TSKIM_SM_IOS_DONE:
192 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
193 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
194 break;
195
196 case BFA_TSKIM_SM_CLEANUP:
197 /**
198 * Ignore, TM command completed on wire.
199 * Notify TM conmpletion on IO cleanup completion.
200 */
201 break;
202
203 case BFA_TSKIM_SM_HWFAIL:
204 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
205 bfa_tskim_iocdisable_ios(tskim);
206 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
207 break;
208
209 default:
210 bfa_sm_fault(tskim->bfa, event);
211 }
212}
213
214/**
215 * Task management command is waiting for room in request CQ
216 */
217static void
218bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
219{
220 bfa_trc(tskim->bfa, event);
221
222 switch (event) {
223 case BFA_TSKIM_SM_QRESUME:
224 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
225 bfa_tskim_send(tskim);
226 break;
227
228 case BFA_TSKIM_SM_CLEANUP:
229 /**
230 * No need to send TM on wire since ITN is offline.
231 */
232 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
233 bfa_reqq_wcancel(&tskim->reqq_wait);
234 bfa_tskim_cleanup_ios(tskim);
235 break;
236
237 case BFA_TSKIM_SM_HWFAIL:
238 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
239 bfa_reqq_wcancel(&tskim->reqq_wait);
240 bfa_tskim_iocdisable_ios(tskim);
241 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
242 break;
243
244 default:
245 bfa_sm_fault(tskim->bfa, event);
246 }
247}
248
249/**
250 * Task management command is active, awaiting for room in request CQ
251 * to send clean up request.
252 */
253static void
254bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
255 enum bfa_tskim_event event)
256{
257 bfa_trc(tskim->bfa, event);
258
259 switch (event) {
260 case BFA_TSKIM_SM_DONE:
261 bfa_reqq_wcancel(&tskim->reqq_wait);
262 /**
263 *
264 * Fall through !!!
265 */
266
267 case BFA_TSKIM_SM_QRESUME:
268 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
269 bfa_tskim_send_abort(tskim);
270 break;
271
272 case BFA_TSKIM_SM_HWFAIL:
273 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
274 bfa_reqq_wcancel(&tskim->reqq_wait);
275 bfa_tskim_iocdisable_ios(tskim);
276 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
277 break;
278
279 default:
280 bfa_sm_fault(tskim->bfa, event);
281 }
282}
283
284/**
285 * BFA callback is pending
286 */
287static void
288bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
289{
290 bfa_trc(tskim->bfa, event);
291
292 switch (event) {
293 case BFA_TSKIM_SM_HCB:
294 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
295 bfa_tskim_free(tskim);
296 break;
297
298 case BFA_TSKIM_SM_CLEANUP:
299 bfa_tskim_notify_comp(tskim);
300 break;
301
302 case BFA_TSKIM_SM_HWFAIL:
303 break;
304
305 default:
306 bfa_sm_fault(tskim->bfa, event);
307 }
308}
309
310
311
312/**
313 * bfa_tskim_private
314 */
315
316static void
317__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
318{
319 struct bfa_tskim_s *tskim = cbarg;
320
321 if (!complete) {
322 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
323 return;
324 }
325
326 bfa_stats(tskim->itnim, tm_success);
327 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
328}
329
330static void
331__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
332{
333 struct bfa_tskim_s *tskim = cbarg;
334
335 if (!complete) {
336 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
337 return;
338 }
339
340 bfa_stats(tskim->itnim, tm_failures);
341 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
342 BFI_TSKIM_STS_FAILED);
343}
344
345static bfa_boolean_t
346bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
347{
348 switch (tskim->tm_cmnd) {
349 case FCP_TM_TARGET_RESET:
350 return BFA_TRUE;
351
352 case FCP_TM_ABORT_TASK_SET:
353 case FCP_TM_CLEAR_TASK_SET:
354 case FCP_TM_LUN_RESET:
355 case FCP_TM_CLEAR_ACA:
356 return (tskim->lun == lun);
357
358 default:
359 bfa_assert(0);
360 }
361
362 return BFA_FALSE;
363}
364
365/**
366 * Gather affected IO requests and task management commands.
367 */
368static void
369bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
370{
371 struct bfa_itnim_s *itnim = tskim->itnim;
372 struct bfa_ioim_s *ioim;
373 struct list_head *qe, *qen;
374
375 INIT_LIST_HEAD(&tskim->io_q);
376
377 /**
378 * Gather any active IO requests first.
379 */
380 list_for_each_safe(qe, qen, &itnim->io_q) {
381 ioim = (struct bfa_ioim_s *) qe;
382 if (bfa_tskim_match_scope
383 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
384 list_del(&ioim->qe);
385 list_add_tail(&ioim->qe, &tskim->io_q);
386 }
387 }
388
389 /**
390 * Failback any pending IO requests immediately.
391 */
392 list_for_each_safe(qe, qen, &itnim->pending_q) {
393 ioim = (struct bfa_ioim_s *) qe;
394 if (bfa_tskim_match_scope
395 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
396 list_del(&ioim->qe);
397 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
398 bfa_ioim_tov(ioim);
399 }
400 }
401}
402
403/**
404 * IO cleanup completion
405 */
406static void
407bfa_tskim_cleanp_comp(void *tskim_cbarg)
408{
409 struct bfa_tskim_s *tskim = tskim_cbarg;
410
411 bfa_stats(tskim->itnim, tm_io_comps);
412 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
413}
414
415/**
416 * Gather affected IO requests and task management commands.
417 */
418static void
419bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
420{
421 struct bfa_ioim_s *ioim;
422 struct list_head *qe, *qen;
423
424 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
425
426 list_for_each_safe(qe, qen, &tskim->io_q) {
427 ioim = (struct bfa_ioim_s *) qe;
428 bfa_wc_up(&tskim->wc);
429 bfa_ioim_cleanup_tm(ioim, tskim);
430 }
431
432 bfa_wc_wait(&tskim->wc);
433}
434
435/**
436 * Send task management request to firmware.
437 */
438static bfa_boolean_t
439bfa_tskim_send(struct bfa_tskim_s *tskim)
440{
441 struct bfa_itnim_s *itnim = tskim->itnim;
442 struct bfi_tskim_req_s *m;
443
444 /**
445 * check for room in queue to send request now
446 */
447 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
448 if (!m)
449 return BFA_FALSE;
450
451 /**
452 * build i/o request message next
453 */
454 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
455 bfa_lpuid(tskim->bfa));
456
457 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
458 m->itn_fhdl = tskim->itnim->rport->fw_handle;
459 m->t_secs = tskim->tsecs;
460 m->lun = tskim->lun;
461 m->tm_flags = tskim->tm_cmnd;
462
463 /**
464 * queue I/O message to firmware
465 */
466 bfa_reqq_produce(tskim->bfa, itnim->reqq);
467 return BFA_TRUE;
468}
469
470/**
471 * Send abort request to cleanup an active TM to firmware.
472 */
473static bfa_boolean_t
474bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
475{
476 struct bfa_itnim_s *itnim = tskim->itnim;
477 struct bfi_tskim_abortreq_s *m;
478
479 /**
480 * check for room in queue to send request now
481 */
482 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
483 if (!m)
484 return BFA_FALSE;
485
486 /**
487 * build i/o request message next
488 */
489 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
490 bfa_lpuid(tskim->bfa));
491
492 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
493
494 /**
495 * queue I/O message to firmware
496 */
497 bfa_reqq_produce(tskim->bfa, itnim->reqq);
498 return BFA_TRUE;
499}
500
501/**
502 * Call to resume task management cmnd waiting for room in request queue.
503 */
504static void
505bfa_tskim_qresume(void *cbarg)
506{
507 struct bfa_tskim_s *tskim = cbarg;
508
509 bfa_fcpim_stats(tskim->fcpim, qresumes);
510 bfa_stats(tskim->itnim, tm_qresumes);
511 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
512}
513
514/**
515 * Cleanup IOs associated with a task mangement command on IOC failures.
516 */
517static void
518bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
519{
520 struct bfa_ioim_s *ioim;
521 struct list_head *qe, *qen;
522
523 list_for_each_safe(qe, qen, &tskim->io_q) {
524 ioim = (struct bfa_ioim_s *) qe;
525 bfa_ioim_iocdisable(ioim);
526 }
527}
528
529
530
531/**
532 * bfa_tskim_friend
533 */
534
535/**
536 * Notification on completions from related ioim.
537 */
538void
539bfa_tskim_iodone(struct bfa_tskim_s *tskim)
540{
541 bfa_wc_down(&tskim->wc);
542}
543
544/**
545 * Handle IOC h/w failure notification from itnim.
546 */
547void
548bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
549{
550 tskim->notify = BFA_FALSE;
551 bfa_stats(tskim->itnim, tm_iocdowns);
552 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
553}
554
555/**
556 * Cleanup TM command and associated IOs as part of ITNIM offline.
557 */
558void
559bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
560{
561 tskim->notify = BFA_TRUE;
562 bfa_stats(tskim->itnim, tm_cleanups);
563 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
564}
565
566/**
567 * Memory allocation and initialization.
568 */
569void
570bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
571{
572 struct bfa_tskim_s *tskim;
573 u16 i;
574
575 INIT_LIST_HEAD(&fcpim->tskim_free_q);
576
577 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
578 fcpim->tskim_arr = tskim;
579
580 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
581 /*
582 * initialize TSKIM
583 */
584 bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
585 tskim->tsk_tag = i;
586 tskim->bfa = fcpim->bfa;
587 tskim->fcpim = fcpim;
588 tskim->notify = BFA_FALSE;
589 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
590 tskim);
591 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
592
593 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
594 }
595
596 bfa_meminfo_kva(minfo) = (u8 *) tskim;
597}
598
599void
600bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
601{
602 /**
603 * @todo
604 */
605}
606
607void
608bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
609{
610 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
611 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
612 struct bfa_tskim_s *tskim;
613 u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
614
615 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
616 bfa_assert(tskim->tsk_tag == tsk_tag);
617
618 tskim->tsk_status = rsp->tsk_status;
619
620 /**
621 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
622 * requests. All other statuses are for normal completions.
623 */
624 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
625 bfa_stats(tskim->itnim, tm_cleanup_comps);
626 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
627 } else {
628 bfa_stats(tskim->itnim, tm_fw_rsps);
629 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
630 }
631}
632
633
634
635/**
636 * bfa_tskim_api
637 */
638
639
640struct bfa_tskim_s *
641bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
642{
643 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
644 struct bfa_tskim_s *tskim;
645
646 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
647
648 if (!tskim)
649 bfa_fcpim_stats(fcpim, no_tskims);
650 else
651 tskim->dtsk = dtsk;
652
653 return tskim;
654}
655
656void
657bfa_tskim_free(struct bfa_tskim_s *tskim)
658{
659 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
660 list_del(&tskim->qe);
661 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
662}
663
664/**
665 * Start a task management command.
666 *
667 * @param[in] tskim BFA task management command instance
668 * @param[in] itnim i-t nexus for the task management command
669 * @param[in] lun lun, if applicable
670 * @param[in] tm_cmnd Task management command code.
671 * @param[in] t_secs Timeout in seconds
672 *
673 * @return None.
674 */
675void
676bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
677 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
678{
679 tskim->itnim = itnim;
680 tskim->lun = lun;
681 tskim->tm_cmnd = tm_cmnd;
682 tskim->tsecs = tsecs;
683 tskim->notify = BFA_FALSE;
684 bfa_stats(itnim, tm_cmnds);
685
686 list_add_tail(&tskim->qe, &itnim->tsk_q);
687 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
688}
689
690
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
deleted file mode 100644
index b9a9a686ef6a..000000000000
--- a/drivers/scsi/bfa/bfa_uf.c
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_uf.c BFA unsolicited frame receive implementation
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <bfi/bfi_uf.h>
25#include <cs/bfa_debug.h>
26
27BFA_TRC_FILE(HAL, UF);
28BFA_MODULE(uf);
29
30/*
31 *****************************************************************************
32 * Internal functions
33 *****************************************************************************
34 */
/*
 * Callback trampoline for a received unsolicited frame: when @complete is
 * true, deliver the UF to the handler registered via
 * bfa_uf_recv_register().
 */
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_uf_s *uf = cbarg;
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);

	if (complete)
		ufm->ufrecv(ufm->cbarg, uf);
}
44
/*
 * Carve the DMA-able region for the UF posted buffers out of the meminfo
 * cursor: record its kernel virtual and physical base addresses, advance
 * both cursors past the (alignment-rounded) region, and zero it.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* Total size of all UF buffers, rounded up to the DMA alignment. */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
60
61static void
62claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
63{
64 struct bfi_uf_buf_post_s *uf_bp_msg;
65 struct bfi_sge_s *sge;
66 union bfi_addr_u sga_zero = { {0} };
67 u16 i;
68 u16 buf_len;
69
70 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
71 uf_bp_msg = ufm->uf_buf_posts;
72
73 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
74 i++, uf_bp_msg++) {
75 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
76
77 uf_bp_msg->buf_tag = i;
78 buf_len = sizeof(struct bfa_uf_buf_s);
79 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
80 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
81 bfa_lpuid(ufm->bfa));
82
83 sge = uf_bp_msg->sge;
84 sge[0].sg_len = buf_len;
85 sge[0].flags = BFI_SGE_DATA_LAST;
86 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
87 bfa_sge_to_be(sge);
88
89 sge[1].sg_len = buf_len;
90 sge[1].flags = BFI_SGE_PGDLEN;
91 sge[1].sga = sga_zero;
92 bfa_sge_to_be(&sge[1]);
93 }
94
95 /**
96 * advance pointer beyond consumed memory
97 */
98 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
99}
100
/*
 * Claim kva memory for the UF descriptor array and initialize each entry,
 * pointing it at its posted buffer (claimed earlier by claim_uf_pbs())
 * and queuing it on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		/* Each UF owns the posted buffer with the same index/tag. */
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/**
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
130
/*
 * Claim all memory for the UF module.  Order matters: claim_uf_pbs() must
 * run first because claim_ufs() and claim_uf_post_msgs() both use the
 * buffer addresses (uf_pbs_kva / ufm_pbs_pa) it records.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
138
/*
 * Report the UF module's memory requirements for the configured number of
 * UF buffers: *dm_len accumulates the DMA-able need, *ndm_len the
 * non-DMA (kernel virtual) need.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
							BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
156
/*
 * Module attach: reset the UF module state, record the configured buffer
 * count, initialize the free/posted queues, and claim the memory sized by
 * bfa_uf_meminfo().
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	      struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
171
/* Module detach hook: nothing to release for the UF module. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
176
/*
 * Dequeue a UF from the free list; returns NULL when the list is empty.
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
185
/* Return a UF to the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
191
/*
 * Post one UF buffer to firmware by copying its pre-built buf-post
 * message (see claim_uf_post_msgs()) into the FCXP request queue, then
 * move the UF onto the posted queue.
 *
 * Returns BFA_STATUS_FAILED when no request-queue element is available
 * (bfa_reqq_next() returned NULL — presumably the queue is full; the
 * caller retries later via bfa_uf_post_all()).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
210
211static void
212bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
213{
214 struct bfa_uf_s *uf;
215
216 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
217 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
218 break;
219 }
220}
221
/*
 * Handle a BFI_UF_I2H_FRM_RCVD message: locate the UF and its buffer by
 * tag, fix up byte order, log the frame, and deliver it to the registered
 * receive handler (directly when bfa->fcs is set, otherwise deferred via
 * the callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	u8 *buf = &uf_buf->d[0];
	struct fchs_s *fchs;

	/* Lengths arrive in wire (big-endian) order; convert in place. */
	m->frm_len = bfa_os_ntohs(m->frm_len);
	m->xfr_len = bfa_os_ntohs(m->xfr_len);

	fchs = (struct fchs_s *) uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* A frame must at least contain a full FC header. */
	bfa_assert(uf->data_len >= sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* Header-only frame: log just the FC header. */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *) buf);
	} else {
		/* Log the header plus the first payload word. */
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *) buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
259
/* Module stop hook: nothing to do for the UF module. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
264
/*
 * IOC disable hook: reclaim every UF still posted to the (now disabled)
 * IOC back onto the free list.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Safe iteration: entries are unlinked while walking. */
	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
278
/* Module start hook: post all free UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
284
285
286
287/**
288 * bfa_uf_api
289 */
290
/**
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
306
/**
 * Free an unsolicited frame back to BFA.
 *
 * Returns the UF to the free list and immediately attempts to re-post all
 * free buffers to firmware.
 *
 * @param[in]	uf	unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
320
321
322
323/**
324 * uf_pub BFA uf module public functions
325 */
326
/*
 * Firmware message dispatcher for the UF module.  Only
 * BFI_UF_I2H_FRM_RCVD is expected; any other message id asserts.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
342
343
diff --git a/drivers/scsi/bfa/bfa_uf_priv.h b/drivers/scsi/bfa/bfa_uf_priv.h
deleted file mode 100644
index bcb490f834f3..000000000000
--- a/drivers/scsi/bfa/bfa_uf_priv.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_UF_PRIV_H__
18#define __BFA_UF_PRIV_H__
19
20#include <cs/bfa_sm.h>
21#include <bfa_svc.h>
22#include <bfi/bfi_uf.h>
23
/* Minimum number of unsolicited-frame buffers. */
#define BFA_UF_MIN	(4)

/*
 * Per-BFA unsolicited-frame module state (see bfa_uf.c for how each
 * field is claimed and used).
 */
struct bfa_uf_mod_s {
	struct bfa_s *bfa;		/* back pointer to BFA */
	struct bfa_uf_s *uf_list;	/* array of UFs */
	u16	num_ufs;		/* num unsolicited rx frames */
	struct list_head uf_free_q;	/* free UFs */
	struct list_head uf_posted_q;	/* UFs posted to IOC */
	struct bfa_uf_buf_s *uf_pbs_kva;	/* list UF bufs request pld */
	u64	uf_pbs_pa;		/* phy addr for UF bufs */
	struct bfi_uf_buf_post_s *uf_buf_posts;
					/* pre-built UF post msgs */
	bfa_cb_uf_recv_t ufrecv;	/* uf recv handler function */
	void	*cbarg;			/* uf receive handler arg */
};

/* Accessor for the UF module embedded in a bfa instance. */
#define BFA_UF_MOD(__bfa)	(&(__bfa)->modules.uf_mod)

/* Physical address of the posted buffer with tag _uftag. */
#define ufm_pbs_pa(_ufmod, _uftag)					\
	((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))

void	bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
46
47#endif /* __BFA_UF_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index ca04cc9d332f..4d8784e06e14 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,46 +18,62 @@
18/** 18/**
19 * bfad.c Linux driver PCI interface module. 19 * bfad.c Linux driver PCI interface module.
20 */ 20 */
21
22#include <linux/slab.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <linux/fs.h>
27#include <linux/pci.h>
28#include <linux/firmware.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31
25#include "bfad_drv.h" 32#include "bfad_drv.h"
26#include "bfad_im.h" 33#include "bfad_im.h"
27#include "bfad_tm.h" 34#include "bfa_fcs.h"
28#include "bfad_ipfc.h" 35#include "bfa_os_inc.h"
29#include "bfad_trcmod.h" 36#include "bfa_defs.h"
30#include <fcb/bfa_fcb_vf.h> 37#include "bfa.h"
31#include <fcb/bfa_fcb_rport.h>
32#include <fcb/bfa_fcb_port.h>
33#include <fcb/bfa_fcb.h>
34 38
35BFA_TRC_FILE(LDRV, BFAD); 39BFA_TRC_FILE(LDRV, BFAD);
36DEFINE_MUTEX(bfad_mutex); 40DEFINE_MUTEX(bfad_mutex);
37LIST_HEAD(bfad_list); 41LIST_HEAD(bfad_list);
38static int bfad_inst; 42
39int bfad_supported_fc4s; 43static int bfad_inst;
40 44static int num_sgpgs_parm;
41static char *host_name; 45int supported_fc4s;
42static char *os_name; 46char *host_name, *os_name, *os_patch;
43static char *os_patch; 47int num_rports, num_ios, num_tms;
44static int num_rports; 48int num_fcxps, num_ufbufs;
45static int num_ios; 49int reqq_size, rspq_size, num_sgpgs;
46static int num_tms; 50int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
47static int num_fcxps; 51int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
48static int num_ufbufs; 52int bfa_io_max_sge = BFAD_IO_MAX_SGE;
49static int reqq_size; 53int log_level = 3; /* WARNING log level */
50static int rspq_size; 54int ioc_auto_recover = BFA_TRUE;
51static int num_sgpgs; 55int bfa_linkup_delay = -1;
52static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; 56int fdmi_enable = BFA_TRUE;
53static int bfa_io_max_sge = BFAD_IO_MAX_SGE; 57int pcie_max_read_reqsz;
54static int log_level = BFA_LOG_WARNING;
55static int ioc_auto_recover = BFA_TRUE;
56static int ipfc_enable = BFA_FALSE;
57static int fdmi_enable = BFA_TRUE;
58int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
59int bfa_linkup_delay = -1;
60int bfa_debugfs_enable = 1; 58int bfa_debugfs_enable = 1;
59int msix_disable_cb = 0, msix_disable_ct = 0;
60
61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
63
64const char *msix_name_ct[] = {
65 "cpe0", "cpe1", "cpe2", "cpe3",
66 "rme0", "rme1", "rme2", "rme3",
67 "ctrl" };
68
69const char *msix_name_cb[] = {
70 "cpe0", "cpe1", "cpe2", "cpe3",
71 "rme0", "rme1", "rme2", "rme3",
72 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
73
74MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
75MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
76MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
61 77
62module_param(os_name, charp, S_IRUGO | S_IWUSR); 78module_param(os_name, charp, S_IRUGO | S_IWUSR);
63MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); 79MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -66,8 +82,8 @@ MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
66module_param(host_name, charp, S_IRUGO | S_IWUSR); 82module_param(host_name, charp, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); 83MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
68module_param(num_rports, int, S_IRUGO | S_IWUSR); 84module_param(num_rports, int, S_IRUGO | S_IWUSR);
69MODULE_PARM_DESC(num_rports, "Max number of rports supported per port" 85MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
70 " (physical/logical), default=1024"); 86 "(physical/logical), default=1024");
71module_param(num_ios, int, S_IRUGO | S_IWUSR); 87module_param(num_ios, int, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); 88MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
73module_param(num_tms, int, S_IRUGO | S_IWUSR); 89module_param(num_tms, int, S_IRUGO | S_IWUSR);
@@ -75,120 +91,277 @@ MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
75module_param(num_fcxps, int, S_IRUGO | S_IWUSR); 91module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); 92MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
77module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); 93module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
78MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame buffers," 94MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
79 " default=64"); 95 "buffers, default=64");
80module_param(reqq_size, int, S_IRUGO | S_IWUSR); 96module_param(reqq_size, int, S_IRUGO | S_IWUSR);
81MODULE_PARM_DESC(reqq_size, "Max number of request queue elements," 97MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
82 " default=256"); 98 "default=256");
83module_param(rspq_size, int, S_IRUGO | S_IWUSR); 99module_param(rspq_size, int, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(rspq_size, "Max number of response queue elements," 100MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
85 " default=64"); 101 "default=64");
86module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); 102module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); 103MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
88module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); 104module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
89MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs," 105MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
90 " Range[>0]"); 106 "Range[>0]");
91module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); 107module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32," 108MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
93 " Range[>0]");
94module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); 109module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
95MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); 110MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
96module_param(log_level, int, S_IRUGO | S_IWUSR); 111module_param(log_level, int, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(log_level, "Driver log level, default=3," 112MODULE_PARM_DESC(log_level, "Driver log level, default=3, "
98 " Range[Critical:1|Error:2|Warning:3|Info:4]"); 113 "Range[Critical:1|Error:2|Warning:3|Info:4]");
99module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 114module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1," 115MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
101 " Range[off:0|on:1]"); 116 "Range[off:0|on:1]");
102module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
103MODULE_PARM_DESC(ipfc_enable, "Enable IPoFC, default=0, Range[off:0|on:1]");
104module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 117module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for boot" 118MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
106 " port. Otherwise Range[>0]"); 119 "boot port. Otherwise 10 secs in RHEL4 & 0 for "
120 "[RHEL5, SLES10, ESX40] Range[>0]");
121module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
122MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
123 "for Brocade-415/425/815/825 cards, default=0, "
124 " Range[false:0|true:1]");
125module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
126MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
127 "if possible for Brocade-1010/1020/804/1007/902/1741 "
128 "cards, default=0, Range[false:0|true:1]");
107module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); 129module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1," 130MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
109 " Range[false:0|true:1]"); 131 "Range[false:0|true:1]");
132module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
133MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
134 "(use system setting), Range[128|256|512|1024|2048|4096]");
110module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); 135module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
111MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," 136MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
112 " Range[false:0|true:1]"); 137 " Range[false:0|true:1]");
113 138
114/* 139static void
115 * Stores the module parm num_sgpgs value; 140bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
116 * used to reset for bfad next instance. 141static void
142bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
143static void
144bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
145static void
146bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
147static void
148bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
149static void
150bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
151static void
152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
153
154/**
155 * Beginning state for the driver instance, awaiting the pci_probe event
117 */ 156 */
118static int num_sgpgs_parm; 157static void
158bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
159{
160 bfa_trc(bfad, event);
161
162 switch (event) {
163 case BFAD_E_CREATE:
164 bfa_sm_set_state(bfad, bfad_sm_created);
165 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
166 "%s", "bfad_worker");
167 if (IS_ERR(bfad->bfad_tsk)) {
168 printk(KERN_INFO "bfad[%d]: Kernel thread "
169 "creation failed!\n", bfad->inst_no);
170 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
171 }
172 bfa_sm_send_event(bfad, BFAD_E_INIT);
173 break;
174
175 case BFAD_E_STOP:
176 /* Ignore stop; already in uninit */
177 break;
178
179 default:
180 bfa_sm_fault(bfad, event);
181 }
182}
119 183
120static bfa_status_t 184/**
121bfad_fc4_probe(struct bfad_s *bfad) 185 * Driver Instance is created, awaiting event INIT to initialize the bfad
186 */
187static void
188bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
122{ 189{
123 int rc; 190 unsigned long flags;
124 191
125 rc = bfad_im_probe(bfad); 192 bfa_trc(bfad, event);
126 if (rc != BFA_STATUS_OK)
127 goto ext;
128 193
129 bfad_tm_probe(bfad); 194 switch (event) {
195 case BFAD_E_INIT:
196 bfa_sm_set_state(bfad, bfad_sm_initializing);
130 197
131 if (ipfc_enable) 198 init_completion(&bfad->comp);
132 bfad_ipfc_probe(bfad);
133 199
134 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 200 /* Enable Interrupt and wait bfa_init completion */
135ext: 201 if (bfad_setup_intr(bfad)) {
136 return rc; 202 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
203 bfad->inst_no);
204 bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
205 break;
206 }
207
208 spin_lock_irqsave(&bfad->bfad_lock, flags);
209 bfa_init(&bfad->bfa);
210 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
211
212 /* Set up interrupt handler for each vectors */
213 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
214 bfad_install_msix_handler(bfad)) {
215 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
216 __func__, bfad->inst_no);
217 }
218
219 bfad_init_timer(bfad);
220
221 wait_for_completion(&bfad->comp);
222
223 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
224 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
225 } else {
226 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
227 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
228 }
229
230 break;
231
232 case BFAD_E_KTHREAD_CREATE_FAILED:
233 bfa_sm_set_state(bfad, bfad_sm_uninit);
234 break;
235
236 default:
237 bfa_sm_fault(bfad, event);
238 }
137} 239}
138 240
139static void 241static void
140bfad_fc4_probe_undo(struct bfad_s *bfad) 242bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
141{ 243{
142 bfad_im_probe_undo(bfad); 244 int retval;
143 bfad_tm_probe_undo(bfad); 245 unsigned long flags;
144 if (ipfc_enable) 246
145 bfad_ipfc_probe_undo(bfad); 247 bfa_trc(bfad, event);
146 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 248
249 switch (event) {
250 case BFAD_E_INIT_SUCCESS:
251 kthread_stop(bfad->bfad_tsk);
252 spin_lock_irqsave(&bfad->bfad_lock, flags);
253 bfad->bfad_tsk = NULL;
254 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
255
256 retval = bfad_start_ops(bfad);
257 if (retval != BFA_STATUS_OK)
258 break;
259 bfa_sm_set_state(bfad, bfad_sm_operational);
260 break;
261
262 case BFAD_E_INTR_INIT_FAILED:
263 bfa_sm_set_state(bfad, bfad_sm_uninit);
264 kthread_stop(bfad->bfad_tsk);
265 spin_lock_irqsave(&bfad->bfad_lock, flags);
266 bfad->bfad_tsk = NULL;
267 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
268 break;
269
270 case BFAD_E_INIT_FAILED:
271 bfa_sm_set_state(bfad, bfad_sm_failed);
272 break;
273 default:
274 bfa_sm_fault(bfad, event);
275 }
147} 276}
148 277
149static void 278static void
150bfad_fc4_probe_post(struct bfad_s *bfad) 279bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
151{ 280{
152 if (bfad->im) 281 int retval;
153 bfad_im_probe_post(bfad->im);
154 282
155 bfad_tm_probe_post(bfad); 283 bfa_trc(bfad, event);
156 if (ipfc_enable) 284
157 bfad_ipfc_probe_post(bfad); 285 switch (event) {
286 case BFAD_E_INIT_SUCCESS:
287 retval = bfad_start_ops(bfad);
288 if (retval != BFA_STATUS_OK)
289 break;
290 bfa_sm_set_state(bfad, bfad_sm_operational);
291 break;
292
293 case BFAD_E_STOP:
294 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
295 bfad_uncfg_pport(bfad);
296 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
297 bfad_im_probe_undo(bfad);
298 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
299 }
300 bfad_stop(bfad);
301 break;
302
303 case BFAD_E_EXIT_COMP:
304 bfa_sm_set_state(bfad, bfad_sm_uninit);
305 bfad_remove_intr(bfad);
306 del_timer_sync(&bfad->hal_tmo);
307 break;
308
309 default:
310 bfa_sm_fault(bfad, event);
311 }
158} 312}
159 313
160static bfa_status_t 314static void
161bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 315bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
162{ 316{
163 int rc = BFA_STATUS_FAILED; 317 bfa_trc(bfad, event);
164 318
165 if (roles & BFA_PORT_ROLE_FCP_IM) 319 switch (event) {
166 rc = bfad_im_port_new(bfad, port); 320 case BFAD_E_STOP:
167 if (rc != BFA_STATUS_OK) 321 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
168 goto ext; 322 bfad_fcs_stop(bfad);
323 break;
169 324
170 if (roles & BFA_PORT_ROLE_FCP_TM) 325 default:
171 rc = bfad_tm_port_new(bfad, port); 326 bfa_sm_fault(bfad, event);
172 if (rc != BFA_STATUS_OK) 327 }
173 goto ext; 328}
174 329
175 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 330static void
176 rc = bfad_ipfc_port_new(bfad, port, port->pvb_type); 331bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
177ext: 332{
178 return rc; 333 bfa_trc(bfad, event);
334
335 switch (event) {
336 case BFAD_E_FCS_EXIT_COMP:
337 bfa_sm_set_state(bfad, bfad_sm_stopping);
338 bfad_stop(bfad);
339 break;
340
341 default:
342 bfa_sm_fault(bfad, event);
343 }
179} 344}
180 345
181static void 346static void
182bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 347bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
183{ 348{
184 if (roles & BFA_PORT_ROLE_FCP_IM) 349 bfa_trc(bfad, event);
185 bfad_im_port_delete(bfad, port);
186 350
187 if (roles & BFA_PORT_ROLE_FCP_TM) 351 switch (event) {
188 bfad_tm_port_delete(bfad, port); 352 case BFAD_E_EXIT_COMP:
353 bfa_sm_set_state(bfad, bfad_sm_uninit);
354 bfad_remove_intr(bfad);
355 del_timer_sync(&bfad->hal_tmo);
356 bfad_im_probe_undo(bfad);
357 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
358 bfad_uncfg_pport(bfad);
359 break;
189 360
190 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 361 default:
191 bfad_ipfc_port_delete(bfad, port); 362 bfa_sm_fault(bfad, event);
363 break;
364 }
192} 365}
193 366
194/** 367/**
@@ -209,12 +382,13 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
209void 382void
210bfa_cb_init(void *drv, bfa_status_t init_status) 383bfa_cb_init(void *drv, bfa_status_t init_status)
211{ 384{
212 struct bfad_s *bfad = drv; 385 struct bfad_s *bfad = drv;
213 386
214 if (init_status == BFA_STATUS_OK) { 387 if (init_status == BFA_STATUS_OK) {
215 bfad->bfad_flags |= BFAD_HAL_INIT_DONE; 388 bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
216 389
217 /* If BFAD_HAL_INIT_FAIL flag is set: 390 /*
391 * If BFAD_HAL_INIT_FAIL flag is set:
218 * Wake up the kernel thread to start 392 * Wake up the kernel thread to start
219 * the bfad operations after HAL init done 393 * the bfad operations after HAL init done
220 */ 394 */
@@ -227,26 +401,16 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
227 complete(&bfad->comp); 401 complete(&bfad->comp);
228} 402}
229 403
230
231
232/** 404/**
233 * BFA_FCS callbacks 405 * BFA_FCS callbacks
234 */ 406 */
235static struct bfad_port_s *
236bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
237 struct bfad_vport_s *vp_drv)
238{
239 return (vp_drv) ? (&(vp_drv)->drv_port)
240 : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport));
241}
242
243struct bfad_port_s * 407struct bfad_port_s *
244bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port, 408bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
245 enum bfa_port_role roles, struct bfad_vf_s *vf_drv, 409 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
246 struct bfad_vport_s *vp_drv) 410 struct bfad_vport_s *vp_drv)
247{ 411{
248 bfa_status_t rc; 412 bfa_status_t rc;
249 struct bfad_port_s *port_drv; 413 struct bfad_port_s *port_drv;
250 414
251 if (!vp_drv && !vf_drv) { 415 if (!vp_drv && !vf_drv) {
252 port_drv = &bfad->pport; 416 port_drv = &bfad->pport;
@@ -264,71 +428,32 @@ bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port,
264 428
265 port_drv->fcs_port = port; 429 port_drv->fcs_port = port;
266 port_drv->roles = roles; 430 port_drv->roles = roles;
267 rc = bfad_fc4_port_new(bfad, port_drv, roles); 431
268 if (rc != BFA_STATUS_OK) { 432 if (roles & BFA_LPORT_ROLE_FCP_IM) {
269 bfad_fc4_port_delete(bfad, port_drv, roles); 433 rc = bfad_im_port_new(bfad, port_drv);
270 port_drv = NULL; 434 if (rc != BFA_STATUS_OK) {
435 bfad_im_port_delete(bfad, port_drv);
436 port_drv = NULL;
437 }
271 } 438 }
272 439
273 return port_drv; 440 return port_drv;
274} 441}
275 442
276void 443void
277bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles, 444bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
278 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 445 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
279{ 446{
280 struct bfad_port_s *port_drv; 447 struct bfad_port_s *port_drv;
281 448
282 /* 449 /* this will be only called from rmmod context */
283 * this will be only called from rmmod context
284 */
285 if (vp_drv && !vp_drv->comp_del) { 450 if (vp_drv && !vp_drv->comp_del) {
286 port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); 451 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
452 ((vf_drv) ? (&(vf_drv)->base_port) :
453 (&(bfad)->pport));
287 bfa_trc(bfad, roles); 454 bfa_trc(bfad, roles);
288 bfad_fc4_port_delete(bfad, port_drv, roles); 455 if (roles & BFA_LPORT_ROLE_FCP_IM)
289 } 456 bfad_im_port_delete(bfad, port_drv);
290}
291
292void
293bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
294 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
295{
296 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
297
298 if (roles & BFA_PORT_ROLE_FCP_IM)
299 bfad_im_port_online(bfad, port_drv);
300
301 if (roles & BFA_PORT_ROLE_FCP_TM)
302 bfad_tm_port_online(bfad, port_drv);
303
304 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
305 bfad_ipfc_port_online(bfad, port_drv);
306
307 bfad->bfad_flags |= BFAD_PORT_ONLINE;
308}
309
310void
311bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
312 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
313{
314 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
315
316 if (roles & BFA_PORT_ROLE_FCP_IM)
317 bfad_im_port_offline(bfad, port_drv);
318
319 if (roles & BFA_PORT_ROLE_FCP_TM)
320 bfad_tm_port_offline(bfad, port_drv);
321
322 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
323 bfad_ipfc_port_offline(bfad, port_drv);
324}
325
326void
327bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
328{
329 if (vport_drv->comp_del) {
330 complete(vport_drv->comp_del);
331 return;
332 } 457 }
333} 458}
334 459
@@ -339,7 +464,7 @@ bfa_status_t
339bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, 464bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
340 struct bfad_rport_s **rport_drv) 465 struct bfad_rport_s **rport_drv)
341{ 466{
342 bfa_status_t rc = BFA_STATUS_OK; 467 bfa_status_t rc = BFA_STATUS_OK;
343 468
344 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); 469 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
345 if (*rport_drv == NULL) { 470 if (*rport_drv == NULL) {
@@ -354,35 +479,43 @@ ext:
354} 479}
355 480
356/** 481/**
357 * @brief
358 * FCS PBC VPORT Create 482 * FCS PBC VPORT Create
359 */ 483 */
360void 484void
361bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) 485bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
362{ 486{
363 487
364 struct bfad_pcfg_s *pcfg; 488 struct bfa_lport_cfg_s port_cfg = {0};
489 struct bfad_vport_s *vport;
490 int rc;
365 491
366 pcfg = kzalloc(sizeof(struct bfad_pcfg_s), GFP_ATOMIC); 492 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
367 if (!pcfg) { 493 if (!vport) {
368 bfa_trc(bfad, 0); 494 bfa_trc(bfad, 0);
369 return; 495 return;
370 } 496 }
371 497
372 pcfg->port_cfg.roles = BFA_PORT_ROLE_FCP_IM; 498 vport->drv_port.bfad = bfad;
373 pcfg->port_cfg.pwwn = pbc_vport.vp_pwwn; 499 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
374 pcfg->port_cfg.nwwn = pbc_vport.vp_nwwn; 500 port_cfg.pwwn = pbc_vport.vp_pwwn;
375 pcfg->port_cfg.preboot_vp = BFA_TRUE; 501 port_cfg.nwwn = pbc_vport.vp_nwwn;
502 port_cfg.preboot_vp = BFA_TRUE;
503
504 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
505 &port_cfg, vport);
376 506
377 list_add_tail(&pcfg->list_entry, &bfad->pbc_pcfg_list); 507 if (rc != BFA_STATUS_OK) {
508 bfa_trc(bfad, 0);
509 return;
510 }
378 511
379 return; 512 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
380} 513}
381 514
382void 515void
383bfad_hal_mem_release(struct bfad_s *bfad) 516bfad_hal_mem_release(struct bfad_s *bfad)
384{ 517{
385 int i; 518 int i;
386 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 519 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
387 struct bfa_mem_elem_s *meminfo_elem; 520 struct bfa_mem_elem_s *meminfo_elem;
388 521
@@ -395,9 +528,9 @@ bfad_hal_mem_release(struct bfad_s *bfad)
395 break; 528 break;
396 case BFA_MEM_TYPE_DMA: 529 case BFA_MEM_TYPE_DMA:
397 dma_free_coherent(&bfad->pcidev->dev, 530 dma_free_coherent(&bfad->pcidev->dev,
398 meminfo_elem->mem_len, 531 meminfo_elem->mem_len,
399 meminfo_elem->kva, 532 meminfo_elem->kva,
400 (dma_addr_t) meminfo_elem->dma); 533 (dma_addr_t) meminfo_elem->dma);
401 break; 534 break;
402 default: 535 default:
403 bfa_assert(0); 536 bfa_assert(0);
@@ -434,27 +567,27 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
434 * otherwise, the default values will be shown as 0 in sysfs 567 * otherwise, the default values will be shown as 0 in sysfs
435 */ 568 */
436 num_rports = bfa_cfg->fwcfg.num_rports; 569 num_rports = bfa_cfg->fwcfg.num_rports;
437 num_ios = bfa_cfg->fwcfg.num_ioim_reqs; 570 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
438 num_tms = bfa_cfg->fwcfg.num_tskim_reqs; 571 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
439 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; 572 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
440 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; 573 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
441 reqq_size = bfa_cfg->drvcfg.num_reqq_elems; 574 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
442 rspq_size = bfa_cfg->drvcfg.num_rspq_elems; 575 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
443 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; 576 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
444} 577}
445 578
446bfa_status_t 579bfa_status_t
447bfad_hal_mem_alloc(struct bfad_s *bfad) 580bfad_hal_mem_alloc(struct bfad_s *bfad)
448{ 581{
582 int i;
449 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 583 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
450 struct bfa_mem_elem_s *meminfo_elem; 584 struct bfa_mem_elem_s *meminfo_elem;
451 bfa_status_t rc = BFA_STATUS_OK; 585 dma_addr_t phys_addr;
452 dma_addr_t phys_addr; 586 void *kva;
453 int retry_count = 0; 587 bfa_status_t rc = BFA_STATUS_OK;
454 int reset_value = 1; 588 int retry_count = 0;
455 int min_num_sgpgs = 512; 589 int reset_value = 1;
456 void *kva; 590 int min_num_sgpgs = 512;
457 int i;
458 591
459 bfa_cfg_get_default(&bfad->ioc_cfg); 592 bfa_cfg_get_default(&bfad->ioc_cfg);
460 593
@@ -478,8 +611,7 @@ retry:
478 break; 611 break;
479 case BFA_MEM_TYPE_DMA: 612 case BFA_MEM_TYPE_DMA:
480 kva = dma_alloc_coherent(&bfad->pcidev->dev, 613 kva = dma_alloc_coherent(&bfad->pcidev->dev,
481 meminfo_elem->mem_len, 614 meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
482 &phys_addr, GFP_KERNEL);
483 if (kva == NULL) { 615 if (kva == NULL) {
484 bfad_hal_mem_release(bfad); 616 bfad_hal_mem_release(bfad);
485 /* 617 /*
@@ -487,14 +619,14 @@ retry:
487 * num_sgpages try with half the value. 619 * num_sgpages try with half the value.
488 */ 620 */
489 if (num_sgpgs > min_num_sgpgs) { 621 if (num_sgpgs > min_num_sgpgs) {
490 printk(KERN_INFO "bfad[%d]: memory" 622 printk(KERN_INFO
491 " allocation failed with" 623 "bfad[%d]: memory allocation failed"
492 " num_sgpgs: %d\n", 624 " with num_sgpgs: %d\n",
493 bfad->inst_no, num_sgpgs); 625 bfad->inst_no, num_sgpgs);
494 nextLowerInt(&num_sgpgs); 626 nextLowerInt(&num_sgpgs);
495 printk(KERN_INFO "bfad[%d]: trying to" 627 printk(KERN_INFO
496 " allocate memory with" 628 "bfad[%d]: trying to allocate memory"
497 " num_sgpgs: %d\n", 629 " with num_sgpgs: %d\n",
498 bfad->inst_no, num_sgpgs); 630 bfad->inst_no, num_sgpgs);
499 retry_count++; 631 retry_count++;
500 goto retry; 632 goto retry;
@@ -536,11 +668,11 @@ ext:
536 */ 668 */
537bfa_status_t 669bfa_status_t
538bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 670bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
539 struct bfa_port_cfg_s *port_cfg, struct device *dev) 671 struct bfa_lport_cfg_s *port_cfg, struct device *dev)
540{ 672{
541 struct bfad_vport_s *vport; 673 struct bfad_vport_s *vport;
542 int rc = BFA_STATUS_OK; 674 int rc = BFA_STATUS_OK;
543 unsigned long flags; 675 unsigned long flags;
544 struct completion fcomp; 676 struct completion fcomp;
545 677
546 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 678 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
@@ -551,18 +683,14 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
551 683
552 vport->drv_port.bfad = bfad; 684 vport->drv_port.bfad = bfad;
553 spin_lock_irqsave(&bfad->bfad_lock, flags); 685 spin_lock_irqsave(&bfad->bfad_lock, flags);
554 if (port_cfg->preboot_vp == BFA_TRUE) 686 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
555 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, 687 port_cfg, vport);
556 &bfad->bfa_fcs, vf_id, port_cfg, vport);
557 else
558 rc = bfa_fcs_vport_create(&vport->fcs_vport,
559 &bfad->bfa_fcs, vf_id, port_cfg, vport);
560 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 688 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
561 689
562 if (rc != BFA_STATUS_OK) 690 if (rc != BFA_STATUS_OK)
563 goto ext_free_vport; 691 goto ext_free_vport;
564 692
565 if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) { 693 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
566 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, 694 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
567 dev); 695 dev);
568 if (rc != BFA_STATUS_OK) 696 if (rc != BFA_STATUS_OK)
@@ -593,10 +721,10 @@ ext:
593 */ 721 */
594bfa_status_t 722bfa_status_t
595bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 723bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
596 struct bfa_port_cfg_s *port_cfg) 724 struct bfa_lport_cfg_s *port_cfg)
597{ 725{
598 struct bfad_vf_s *vf; 726 struct bfad_vf_s *vf;
599 int rc = BFA_STATUS_OK; 727 int rc = BFA_STATUS_OK;
600 728
601 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); 729 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
602 if (!vf) { 730 if (!vf) {
@@ -615,9 +743,9 @@ ext:
615void 743void
616bfad_bfa_tmo(unsigned long data) 744bfad_bfa_tmo(unsigned long data)
617{ 745{
618 struct bfad_s *bfad = (struct bfad_s *)data; 746 struct bfad_s *bfad = (struct bfad_s *) data;
619 unsigned long flags; 747 unsigned long flags;
620 struct list_head doneq; 748 struct list_head doneq;
621 749
622 spin_lock_irqsave(&bfad->bfad_lock, flags); 750 spin_lock_irqsave(&bfad->bfad_lock, flags);
623 751
@@ -633,7 +761,8 @@ bfad_bfa_tmo(unsigned long data)
633 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 761 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
634 } 762 }
635 763
636 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 764 mod_timer(&bfad->hal_tmo,
765 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
637} 766}
638 767
639void 768void
@@ -643,16 +772,17 @@ bfad_init_timer(struct bfad_s *bfad)
643 bfad->hal_tmo.function = bfad_bfa_tmo; 772 bfad->hal_tmo.function = bfad_bfa_tmo;
644 bfad->hal_tmo.data = (unsigned long)bfad; 773 bfad->hal_tmo.data = (unsigned long)bfad;
645 774
646 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 775 mod_timer(&bfad->hal_tmo,
776 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
647} 777}
648 778
649int 779int
650bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) 780bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
651{ 781{
652 int rc = -ENODEV; 782 int rc = -ENODEV;
653 783
654 if (pci_enable_device(pdev)) { 784 if (pci_enable_device(pdev)) {
655 BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev); 785 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
656 goto out; 786 goto out;
657 } 787 }
658 788
@@ -664,14 +794,14 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
664 794
665 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 795 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
666 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 796 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
667 BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev); 797 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
668 goto out_release_region; 798 goto out_release_region;
669 } 799 }
670 800
671 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 801 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
672 802
673 if (bfad->pci_bar0_kva == NULL) { 803 if (bfad->pci_bar0_kva == NULL) {
674 BFA_PRINTF(BFA_ERR, "Fail to map bar0\n"); 804 printk(KERN_ERR "Fail to map bar0\n");
675 goto out_release_region; 805 goto out_release_region;
676 } 806 }
677 807
@@ -688,6 +818,54 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
688 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); 818 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
689 819
690 bfad->pcidev = pdev; 820 bfad->pcidev = pdev;
821
822 /* Adjust PCIe Maximum Read Request Size */
823 if (pcie_max_read_reqsz > 0) {
824 int pcie_cap_reg;
825 u16 pcie_dev_ctl;
826 u16 mask = 0xffff;
827
828 switch (pcie_max_read_reqsz) {
829 case 128:
830 mask = 0x0;
831 break;
832 case 256:
833 mask = 0x1000;
834 break;
835 case 512:
836 mask = 0x2000;
837 break;
838 case 1024:
839 mask = 0x3000;
840 break;
841 case 2048:
842 mask = 0x4000;
843 break;
844 case 4096:
845 mask = 0x5000;
846 break;
847 default:
848 break;
849 }
850
851 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
852 if (mask != 0xffff && pcie_cap_reg) {
853 pcie_cap_reg += 0x08;
854 pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
855 if ((pcie_dev_ctl & 0x7000) != mask) {
856 printk(KERN_WARNING "BFA[%s]: "
857 "pcie_max_read_request_size is %d, "
858 "reset to %d\n", bfad->pci_name,
859 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
860 pcie_max_read_reqsz);
861
862 pcie_dev_ctl &= ~0x7000;
863 pci_write_config_word(pdev, pcie_cap_reg,
864 pcie_dev_ctl | mask);
865 }
866 }
867 }
868
691 return 0; 869 return 0;
692 870
693out_release_region: 871out_release_region:
@@ -710,25 +888,22 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
710void 888void
711bfad_fcs_port_cfg(struct bfad_s *bfad) 889bfad_fcs_port_cfg(struct bfad_s *bfad)
712{ 890{
713 struct bfa_port_cfg_s port_cfg; 891 struct bfa_lport_cfg_s port_cfg;
714 struct bfa_pport_attr_s attr; 892 struct bfa_port_attr_s attr;
715 char symname[BFA_SYMNAME_MAXLEN]; 893 char symname[BFA_SYMNAME_MAXLEN];
716 894
717 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 895 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
718 memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 896 memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
719 bfa_fcport_get_attr(&bfad->bfa, &attr); 897 bfa_fcport_get_attr(&bfad->bfa, &attr);
720 port_cfg.nwwn = attr.nwwn; 898 port_cfg.nwwn = attr.nwwn;
721 port_cfg.pwwn = attr.pwwn; 899 port_cfg.pwwn = attr.pwwn;
722
723 bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg);
724} 900}
725 901
726bfa_status_t 902bfa_status_t
727bfad_drv_init(struct bfad_s *bfad) 903bfad_drv_init(struct bfad_s *bfad)
728{ 904{
729 bfa_status_t rc; 905 bfa_status_t rc;
730 unsigned long flags; 906 unsigned long flags;
731 struct bfa_fcs_driver_info_s driver_info;
732 907
733 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 908 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
734 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; 909 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -740,15 +915,12 @@ bfad_drv_init(struct bfad_s *bfad)
740 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", 915 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
741 bfad->inst_no); 916 bfad->inst_no);
742 printk(KERN_WARNING 917 printk(KERN_WARNING
743 "Not enough memory to attach all Brocade HBA ports," 918 "Not enough memory to attach all Brocade HBA ports, %s",
744 " System may need more memory.\n"); 919 "System may need more memory.\n");
745 goto out_hal_mem_alloc_failure; 920 goto out_hal_mem_alloc_failure;
746 } 921 }
747 922
748 bfa_init_log(&bfad->bfa, bfad->logmod);
749 bfa_init_trc(&bfad->bfa, bfad->trcmod); 923 bfa_init_trc(&bfad->bfa, bfad->trcmod);
750 bfa_init_aen(&bfad->bfa, bfad->aen);
751 memset(bfad->file_map, 0, sizeof(bfad->file_map));
752 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 924 bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
753 bfa_plog_init(&bfad->plog_buf); 925 bfa_plog_init(&bfad->plog_buf);
754 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 926 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -757,77 +929,17 @@ bfad_drv_init(struct bfad_s *bfad)
757 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, 929 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
758 &bfad->hal_pcidev); 930 &bfad->hal_pcidev);
759 931
760 init_completion(&bfad->comp); 932 /* FCS INIT */
761
762 /*
763 * Enable Interrupt and wait bfa_init completion
764 */
765 if (bfad_setup_intr(bfad)) {
766 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
767 bfad->inst_no);
768 goto out_setup_intr_failure;
769 }
770
771 spin_lock_irqsave(&bfad->bfad_lock, flags); 933 spin_lock_irqsave(&bfad->bfad_lock, flags);
772 bfa_init(&bfad->bfa);
773 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
774
775 /*
776 * Set up interrupt handler for each vectors
777 */
778 if ((bfad->bfad_flags & BFAD_MSIX_ON)
779 && bfad_install_msix_handler(bfad)) {
780 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
781 __func__, bfad->inst_no);
782 }
783
784 bfad_init_timer(bfad);
785
786 wait_for_completion(&bfad->comp);
787
788 memset(&driver_info, 0, sizeof(driver_info));
789 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
790 sizeof(driver_info.version) - 1);
791 __kernel_param_lock();
792 if (host_name)
793 strncpy(driver_info.host_machine_name, host_name,
794 sizeof(driver_info.host_machine_name) - 1);
795 if (os_name)
796 strncpy(driver_info.host_os_name, os_name,
797 sizeof(driver_info.host_os_name) - 1);
798 if (os_patch)
799 strncpy(driver_info.host_os_patch, os_patch,
800 sizeof(driver_info.host_os_patch) - 1);
801 __kernel_param_unlock();
802
803 strncpy(driver_info.os_device_name, bfad->pci_name,
804 sizeof(driver_info.os_device_name - 1));
805
806 /*
807 * FCS INIT
808 */
809 spin_lock_irqsave(&bfad->bfad_lock, flags);
810 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
811 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 934 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
812 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
813 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 935 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
814
815 /* Do FCS init only when HAL init is done */
816 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
817 bfa_fcs_init(&bfad->bfa_fcs);
818 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
819 }
820
821 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
822 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); 936 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
823 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
824 938
825 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 939 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
940
826 return BFA_STATUS_OK; 941 return BFA_STATUS_OK;
827 942
828out_setup_intr_failure:
829 bfa_detach(&bfad->bfa);
830 bfad_hal_mem_release(bfad);
831out_hal_mem_alloc_failure: 943out_hal_mem_alloc_failure:
832 return BFA_STATUS_FAILED; 944 return BFA_STATUS_FAILED;
833} 945}
@@ -855,7 +967,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
855void 967void
856bfad_drv_start(struct bfad_s *bfad) 968bfad_drv_start(struct bfad_s *bfad)
857{ 969{
858 unsigned long flags; 970 unsigned long flags;
859 971
860 spin_lock_irqsave(&bfad->bfad_lock, flags); 972 spin_lock_irqsave(&bfad->bfad_lock, flags);
861 bfa_start(&bfad->bfa); 973 bfa_start(&bfad->bfa);
@@ -863,13 +975,14 @@ bfad_drv_start(struct bfad_s *bfad)
863 bfad->bfad_flags |= BFAD_HAL_START_DONE; 975 bfad->bfad_flags |= BFAD_HAL_START_DONE;
864 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 976 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
865 977
866 bfad_fc4_probe_post(bfad); 978 if (bfad->im)
979 flush_workqueue(bfad->im->drv_workq);
867} 980}
868 981
869void 982void
870bfad_drv_stop(struct bfad_s *bfad) 983bfad_fcs_stop(struct bfad_s *bfad)
871{ 984{
872 unsigned long flags; 985 unsigned long flags;
873 986
874 spin_lock_irqsave(&bfad->bfad_lock, flags); 987 spin_lock_irqsave(&bfad->bfad_lock, flags);
875 init_completion(&bfad->comp); 988 init_completion(&bfad->comp);
@@ -878,24 +991,32 @@ bfad_drv_stop(struct bfad_s *bfad)
878 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 991 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
879 wait_for_completion(&bfad->comp); 992 wait_for_completion(&bfad->comp);
880 993
994 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
995}
996
997void
998bfad_stop(struct bfad_s *bfad)
999{
1000 unsigned long flags;
1001
881 spin_lock_irqsave(&bfad->bfad_lock, flags); 1002 spin_lock_irqsave(&bfad->bfad_lock, flags);
882 init_completion(&bfad->comp); 1003 init_completion(&bfad->comp);
883 bfa_stop(&bfad->bfa); 1004 bfa_stop(&bfad->bfa);
884 bfad->bfad_flags &= ~BFAD_HAL_START_DONE; 1005 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
885 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1006 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
886 wait_for_completion(&bfad->comp); 1007 wait_for_completion(&bfad->comp);
1008
1009 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
887} 1010}
888 1011
889bfa_status_t 1012bfa_status_t
890bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role) 1013bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
891{ 1014{
892 int rc = BFA_STATUS_OK; 1015 int rc = BFA_STATUS_OK;
893 1016
894 /* 1017 /* Allocate scsi_host for the physical port */
895 * Allocate scsi_host for the physical port 1018 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
896 */ 1019 (role & BFA_LPORT_ROLE_FCP_IM)) {
897 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
898 && (role & BFA_PORT_ROLE_FCP_IM)) {
899 if (bfad->pport.im_port == NULL) { 1020 if (bfad->pport.im_port == NULL) {
900 rc = BFA_STATUS_FAILED; 1021 rc = BFA_STATUS_FAILED;
901 goto out; 1022 goto out;
@@ -906,7 +1027,7 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
906 if (rc != BFA_STATUS_OK) 1027 if (rc != BFA_STATUS_OK)
907 goto out; 1028 goto out;
908 1029
909 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM; 1030 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
910 } 1031 }
911 1032
912 /* Setup the debugfs node for this scsi_host */ 1033 /* Setup the debugfs node for this scsi_host */
@@ -922,74 +1043,102 @@ out:
922void 1043void
923bfad_uncfg_pport(struct bfad_s *bfad) 1044bfad_uncfg_pport(struct bfad_s *bfad)
924{ 1045{
925 /* Remove the debugfs node for this scsi_host */ 1046 /* Remove the debugfs node for this scsi_host */
926 kfree(bfad->regdata); 1047 kfree(bfad->regdata);
927 bfad_debugfs_exit(&bfad->pport); 1048 bfad_debugfs_exit(&bfad->pport);
928 1049
929 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) { 1050 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
930 bfad_ipfc_port_delete(bfad, &bfad->pport); 1051 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
931 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
932 }
933
934 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
935 && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) {
936 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 1052 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
937 bfad_im_port_clean(bfad->pport.im_port); 1053 bfad_im_port_clean(bfad->pport.im_port);
938 kfree(bfad->pport.im_port); 1054 kfree(bfad->pport.im_port);
939 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM; 1055 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
940 } 1056 }
941 1057
942 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; 1058 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
943} 1059}
944 1060
945void
946bfad_drv_log_level_set(struct bfad_s *bfad)
947{
948 if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX)
949 bfa_log_set_level_all(&bfad->log_data, log_level);
950}
951
952bfa_status_t 1061bfa_status_t
953bfad_start_ops(struct bfad_s *bfad) 1062bfad_start_ops(struct bfad_s *bfad) {
954{ 1063
955 int retval; 1064 int retval;
956 struct bfad_pcfg_s *pcfg, *pcfg_new; 1065 unsigned long flags;
1066 struct bfad_vport_s *vport, *vport_new;
1067 struct bfa_fcs_driver_info_s driver_info;
1068
1069 /* Fill the driver_info info to fcs*/
1070 memset(&driver_info, 0, sizeof(driver_info));
1071 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1072 sizeof(driver_info.version) - 1);
1073 if (host_name)
1074 strncpy(driver_info.host_machine_name, host_name,
1075 sizeof(driver_info.host_machine_name) - 1);
1076 if (os_name)
1077 strncpy(driver_info.host_os_name, os_name,
1078 sizeof(driver_info.host_os_name) - 1);
1079 if (os_patch)
1080 strncpy(driver_info.host_os_patch, os_patch,
1081 sizeof(driver_info.host_os_patch) - 1);
1082
1083 strncpy(driver_info.os_device_name, bfad->pci_name,
1084 sizeof(driver_info.os_device_name - 1));
1085
1086 /* FCS INIT */
1087 spin_lock_irqsave(&bfad->bfad_lock, flags);
1088 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1089 bfa_fcs_init(&bfad->bfa_fcs);
1090 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
957 1091
958 /* PPORT FCS config */ 1092 /* PPORT FCS config */
959 bfad_fcs_port_cfg(bfad); 1093 bfad_fcs_port_cfg(bfad);
960 1094
961 retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); 1095 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
962 if (retval != BFA_STATUS_OK)
963 goto out_cfg_pport_failure;
964
965 /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
966 retval = bfad_fc4_probe(bfad);
967 if (retval != BFA_STATUS_OK) { 1096 if (retval != BFA_STATUS_OK) {
968 printk(KERN_WARNING "bfad_fc4_probe failed\n"); 1097 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
969 goto out_fc4_probe_failure; 1098 bfa_sm_set_state(bfad, bfad_sm_failed);
1099 bfad_stop(bfad);
1100 return BFA_STATUS_FAILED;
970 } 1101 }
971 1102
1103 /* BFAD level FC4 IM specific resource allocation */
1104 retval = bfad_im_probe(bfad);
1105 if (retval != BFA_STATUS_OK) {
1106 printk(KERN_WARNING "bfad_im_probe failed\n");
1107 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1108 bfa_sm_set_state(bfad, bfad_sm_failed);
1109 bfad_im_probe_undo(bfad);
1110 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1111 bfad_uncfg_pport(bfad);
1112 bfad_stop(bfad);
1113 return BFA_STATUS_FAILED;
1114 } else
1115 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1116
972 bfad_drv_start(bfad); 1117 bfad_drv_start(bfad);
973 1118
974 /* pbc vport creation */ 1119 /* Complete pbc vport create */
975 list_for_each_entry_safe(pcfg, pcfg_new, &bfad->pbc_pcfg_list, 1120 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
976 list_entry) { 1121 list_entry) {
977 struct fc_vport_identifiers vid; 1122 struct fc_vport_identifiers vid;
978 struct fc_vport *fc_vport; 1123 struct fc_vport *fc_vport;
1124 char pwwn_buf[BFA_STRING_32];
979 1125
980 memset(&vid, 0, sizeof(vid)); 1126 memset(&vid, 0, sizeof(vid));
981 vid.roles = FC_PORT_ROLE_FCP_INITIATOR; 1127 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
982 vid.vport_type = FC_PORTTYPE_NPIV; 1128 vid.vport_type = FC_PORTTYPE_NPIV;
983 vid.disable = false; 1129 vid.disable = false;
984 vid.node_name = wwn_to_u64((u8 *)&pcfg->port_cfg.nwwn); 1130 vid.node_name = wwn_to_u64((u8 *)
985 vid.port_name = wwn_to_u64((u8 *)&pcfg->port_cfg.pwwn); 1131 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1132 vid.port_name = wwn_to_u64((u8 *)
1133 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
986 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); 1134 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
987 if (!fc_vport) 1135 if (!fc_vport) {
1136 wwn2str(pwwn_buf, vid.port_name);
988 printk(KERN_WARNING "bfad%d: failed to create pbc vport" 1137 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
989 " %llx\n", bfad->inst_no, vid.port_name); 1138 " %s\n", bfad->inst_no, pwwn_buf);
990 list_del(&pcfg->list_entry); 1139 }
991 kfree(pcfg); 1140 list_del(&vport->list_entry);
992 1141 kfree(vport);
993 } 1142 }
994 1143
995 /* 1144 /*
@@ -998,24 +1147,15 @@ bfad_start_ops(struct bfad_s *bfad)
998 * passed in module param value as the bfa_linkup_delay. 1147 * passed in module param value as the bfa_linkup_delay.
999 */ 1148 */
1000 if (bfa_linkup_delay < 0) { 1149 if (bfa_linkup_delay < 0) {
1001
1002 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 1150 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
1003 bfad_os_rport_online_wait(bfad); 1151 bfad_os_rport_online_wait(bfad);
1004 bfa_linkup_delay = -1; 1152 bfa_linkup_delay = -1;
1005 1153 } else
1006 } else {
1007 bfad_os_rport_online_wait(bfad); 1154 bfad_os_rport_online_wait(bfad);
1008 }
1009 1155
1010 bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); 1156 BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n");
1011 1157
1012 return BFA_STATUS_OK; 1158 return BFA_STATUS_OK;
1013
1014out_fc4_probe_failure:
1015 bfad_fc4_probe_undo(bfad);
1016 bfad_uncfg_pport(bfad);
1017out_cfg_pport_failure:
1018 return BFA_STATUS_FAILED;
1019} 1159}
1020 1160
1021int 1161int
@@ -1028,18 +1168,8 @@ bfad_worker(void *ptr)
1028 1168
1029 while (!kthread_should_stop()) { 1169 while (!kthread_should_stop()) {
1030 1170
1031 /* Check if the FCS init is done from bfad_drv_init; 1171 /* Send event BFAD_E_INIT_SUCCESS */
1032 * if not done do FCS init and set the flag. 1172 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1033 */
1034 if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
1035 spin_lock_irqsave(&bfad->bfad_lock, flags);
1036 bfa_fcs_init(&bfad->bfa_fcs);
1037 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
1038 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1039 }
1040
1041 /* Start the bfad operations after HAL init done */
1042 bfad_start_ops(bfad);
1043 1173
1044 spin_lock_irqsave(&bfad->bfad_lock, flags); 1174 spin_lock_irqsave(&bfad->bfad_lock, flags);
1045 bfad->bfad_tsk = NULL; 1175 bfad->bfad_tsk = NULL;
@@ -1051,9 +1181,198 @@ bfad_worker(void *ptr)
1051 return 0; 1181 return 0;
1052} 1182}
1053 1183
1054 /* 1184/**
1055 * PCI_entry PCI driver entries * { 1185 * BFA driver interrupt functions
1056 */ 1186 */
1187irqreturn_t
1188bfad_intx(int irq, void *dev_id)
1189{
1190 struct bfad_s *bfad = dev_id;
1191 struct list_head doneq;
1192 unsigned long flags;
1193 bfa_boolean_t rc;
1194
1195 spin_lock_irqsave(&bfad->bfad_lock, flags);
1196 rc = bfa_intx(&bfad->bfa);
1197 if (!rc) {
1198 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1199 return IRQ_NONE;
1200 }
1201
1202 bfa_comp_deq(&bfad->bfa, &doneq);
1203 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1204
1205 if (!list_empty(&doneq)) {
1206 bfa_comp_process(&bfad->bfa, &doneq);
1207
1208 spin_lock_irqsave(&bfad->bfad_lock, flags);
1209 bfa_comp_free(&bfad->bfa, &doneq);
1210 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1211 bfa_trc_fp(bfad, irq);
1212 }
1213
1214 return IRQ_HANDLED;
1215
1216}
1217
1218static irqreturn_t
1219bfad_msix(int irq, void *dev_id)
1220{
1221 struct bfad_msix_s *vec = dev_id;
1222 struct bfad_s *bfad = vec->bfad;
1223 struct list_head doneq;
1224 unsigned long flags;
1225
1226 spin_lock_irqsave(&bfad->bfad_lock, flags);
1227
1228 bfa_msix(&bfad->bfa, vec->msix.entry);
1229 bfa_comp_deq(&bfad->bfa, &doneq);
1230 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1231
1232 if (!list_empty(&doneq)) {
1233 bfa_comp_process(&bfad->bfa, &doneq);
1234
1235 spin_lock_irqsave(&bfad->bfad_lock, flags);
1236 bfa_comp_free(&bfad->bfa, &doneq);
1237 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1238 }
1239
1240 return IRQ_HANDLED;
1241}
1242
1243/**
1244 * Initialize the MSIX entry table.
1245 */
1246static void
1247bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1248 int mask, int max_bit)
1249{
1250 int i;
1251 int match = 0x00000001;
1252
1253 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1254 if (mask & match) {
1255 bfad->msix_tab[bfad->nvec].msix.entry = i;
1256 bfad->msix_tab[bfad->nvec].bfad = bfad;
1257 msix_entries[bfad->nvec].entry = i;
1258 bfad->nvec++;
1259 }
1260
1261 match <<= 1;
1262 }
1263
1264}
1265
1266int
1267bfad_install_msix_handler(struct bfad_s *bfad)
1268{
1269 int i, error = 0;
1270
1271 for (i = 0; i < bfad->nvec; i++) {
1272 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1273 bfad->pci_name,
1274 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
1275 msix_name_ct[i] : msix_name_cb[i]));
1276
1277 error = request_irq(bfad->msix_tab[i].msix.vector,
1278 (irq_handler_t) bfad_msix, 0,
1279 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1280 bfa_trc(bfad, i);
1281 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1282 if (error) {
1283 int j;
1284
1285 for (j = 0; j < i; j++)
1286 free_irq(bfad->msix_tab[j].msix.vector,
1287 &bfad->msix_tab[j]);
1288
1289 return 1;
1290 }
1291 }
1292
1293 return 0;
1294}
1295
1296/**
1297 * Setup MSIX based interrupt.
1298 */
1299int
1300bfad_setup_intr(struct bfad_s *bfad)
1301{
1302 int error = 0;
1303 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1304 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1305 struct pci_dev *pdev = bfad->pcidev;
1306
1307 /* Call BFA to get the msix map for this PCI function. */
1308 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1309
1310 /* Set up the msix entry table */
1311 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1312
1313 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
1314 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
1315
1316 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1317 if (error) {
1318 /*
1319 * Only error number of vector is available.
1320 * We don't have a mechanism to map multiple
1321 * interrupts into one vector, so even if we
1322 * can try to request less vectors, we don't
1323 * know how to associate interrupt events to
1324 * vectors. Linux doesn't dupicate vectors
1325 * in the MSIX table for this case.
1326 */
1327
1328 printk(KERN_WARNING "bfad%d: "
1329 "pci_enable_msix failed (%d),"
1330 " use line based.\n", bfad->inst_no, error);
1331
1332 goto line_based;
1333 }
1334
1335 /* Save the vectors */
1336 for (i = 0; i < bfad->nvec; i++) {
1337 bfa_trc(bfad, msix_entries[i].vector);
1338 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1339 }
1340
1341 bfa_msix_init(&bfad->bfa, bfad->nvec);
1342
1343 bfad->bfad_flags |= BFAD_MSIX_ON;
1344
1345 return error;
1346 }
1347
1348line_based:
1349 error = 0;
1350 if (request_irq
1351 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1352 BFAD_DRIVER_NAME, bfad) != 0) {
1353 /* Enable interrupt handler failed */
1354 return 1;
1355 }
1356
1357 return error;
1358}
1359
1360void
1361bfad_remove_intr(struct bfad_s *bfad)
1362{
1363 int i;
1364
1365 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1366 for (i = 0; i < bfad->nvec; i++)
1367 free_irq(bfad->msix_tab[i].msix.vector,
1368 &bfad->msix_tab[i]);
1369
1370 pci_disable_msix(bfad->pcidev);
1371 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1372 } else {
1373 free_irq(bfad->pcidev->irq, bfad);
1374 }
1375}
1057 1376
1058/** 1377/**
1059 * PCI probe entry. 1378 * PCI probe entry.
@@ -1061,18 +1380,14 @@ bfad_worker(void *ptr)
1061int 1380int
1062bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1381bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1063{ 1382{
1064 struct bfad_s *bfad; 1383 struct bfad_s *bfad;
1065 int error = -ENODEV, retval; 1384 int error = -ENODEV, retval;
1066 1385
1067 /* 1386 /* For single port cards - only claim function 0 */
1068 * For single port cards - only claim function 0 1387 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1069 */ 1388 (PCI_FUNC(pdev->devfn) != 0))
1070 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P)
1071 && (PCI_FUNC(pdev->devfn) != 0))
1072 return -ENODEV; 1389 return -ENODEV;
1073 1390
1074 BFA_TRACE(BFA_INFO, "bfad_pci_probe entry");
1075
1076 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); 1391 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1077 if (!bfad) { 1392 if (!bfad) {
1078 error = -ENOMEM; 1393 error = -ENOMEM;
@@ -1086,21 +1401,11 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1086 goto out_alloc_trace_failure; 1401 goto out_alloc_trace_failure;
1087 } 1402 }
1088 1403
1089 /* 1404 /* TRACE INIT */
1090 * LOG/TRACE INIT
1091 */
1092 bfa_trc_init(bfad->trcmod); 1405 bfa_trc_init(bfad->trcmod);
1093 bfa_trc(bfad, bfad_inst); 1406 bfa_trc(bfad, bfad_inst);
1094 1407
1095 bfad->logmod = &bfad->log_data;
1096 bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
1097
1098 bfad_drv_log_level_set(bfad);
1099
1100 bfad->aen = &bfad->aen_buf;
1101
1102 if (!(bfad_load_fwimg(pdev))) { 1408 if (!(bfad_load_fwimg(pdev))) {
1103 printk(KERN_WARNING "bfad_load_fwimg failure!\n");
1104 kfree(bfad->trcmod); 1409 kfree(bfad->trcmod);
1105 goto out_alloc_trace_failure; 1410 goto out_alloc_trace_failure;
1106 } 1411 }
@@ -1117,46 +1422,31 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1117 list_add_tail(&bfad->list_entry, &bfad_list); 1422 list_add_tail(&bfad->list_entry, &bfad_list);
1118 mutex_unlock(&bfad_mutex); 1423 mutex_unlock(&bfad_mutex);
1119 1424
1425 /* Initializing the state machine: State set to uninit */
1426 bfa_sm_set_state(bfad, bfad_sm_uninit);
1427
1120 spin_lock_init(&bfad->bfad_lock); 1428 spin_lock_init(&bfad->bfad_lock);
1121 pci_set_drvdata(pdev, bfad); 1429 pci_set_drvdata(pdev, bfad);
1122 1430
1123 bfad->ref_count = 0; 1431 bfad->ref_count = 0;
1124 bfad->pport.bfad = bfad; 1432 bfad->pport.bfad = bfad;
1125 INIT_LIST_HEAD(&bfad->pbc_pcfg_list); 1433 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1126
1127 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
1128 "bfad_worker");
1129 if (IS_ERR(bfad->bfad_tsk)) {
1130 printk(KERN_INFO "bfad[%d]: Kernel thread"
1131 " creation failed!\n",
1132 bfad->inst_no);
1133 goto out_kthread_create_failure;
1134 }
1135 1434
1136 retval = bfad_drv_init(bfad); 1435 retval = bfad_drv_init(bfad);
1137 if (retval != BFA_STATUS_OK) 1436 if (retval != BFA_STATUS_OK)
1138 goto out_drv_init_failure; 1437 goto out_drv_init_failure;
1139 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1140 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
1141 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
1142 goto ok;
1143 }
1144 1438
1145 retval = bfad_start_ops(bfad); 1439 bfa_sm_send_event(bfad, BFAD_E_CREATE);
1146 if (retval != BFA_STATUS_OK)
1147 goto out_start_ops_failure;
1148 1440
1149 kthread_stop(bfad->bfad_tsk); 1441 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1150 bfad->bfad_tsk = NULL; 1442 goto out_bfad_sm_failure;
1151 1443
1152ok:
1153 return 0; 1444 return 0;
1154 1445
1155out_start_ops_failure: 1446out_bfad_sm_failure:
1156 bfad_drv_uninit(bfad); 1447 bfa_detach(&bfad->bfa);
1448 bfad_hal_mem_release(bfad);
1157out_drv_init_failure: 1449out_drv_init_failure:
1158 kthread_stop(bfad->bfad_tsk);
1159out_kthread_create_failure:
1160 mutex_lock(&bfad_mutex); 1450 mutex_lock(&bfad_mutex);
1161 bfad_inst--; 1451 bfad_inst--;
1162 list_del(&bfad->list_entry); 1452 list_del(&bfad->list_entry);
@@ -1176,62 +1466,29 @@ out:
1176void 1466void
1177bfad_pci_remove(struct pci_dev *pdev) 1467bfad_pci_remove(struct pci_dev *pdev)
1178{ 1468{
1179 struct bfad_s *bfad = pci_get_drvdata(pdev); 1469 struct bfad_s *bfad = pci_get_drvdata(pdev);
1180 unsigned long flags; 1470 unsigned long flags;
1181 1471
1182 bfa_trc(bfad, bfad->inst_no); 1472 bfa_trc(bfad, bfad->inst_no);
1183 1473
1184 spin_lock_irqsave(&bfad->bfad_lock, flags); 1474 spin_lock_irqsave(&bfad->bfad_lock, flags);
1185 if (bfad->bfad_tsk != NULL) 1475 if (bfad->bfad_tsk != NULL) {
1186 kthread_stop(bfad->bfad_tsk);
1187 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1188
1189 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
1190 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1191
1192 spin_lock_irqsave(&bfad->bfad_lock, flags);
1193 init_completion(&bfad->comp);
1194 bfa_stop(&bfad->bfa);
1195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1476 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1196 wait_for_completion(&bfad->comp); 1477 kthread_stop(bfad->bfad_tsk);
1197 1478 } else {
1198 bfad_remove_intr(bfad);
1199 del_timer_sync(&bfad->hal_tmo);
1200 goto hal_detach;
1201 } else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) {
1202 goto remove_sysfs;
1203 }
1204
1205 if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
1206 bfad_drv_stop(bfad);
1207 } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
1208 /* Invoking bfa_stop() before bfa_detach
1209 * when HAL and DRV init are success
1210 * but HAL start did not occur.
1211 */
1212 spin_lock_irqsave(&bfad->bfad_lock, flags);
1213 init_completion(&bfad->comp);
1214 bfa_stop(&bfad->bfa);
1215 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1216 wait_for_completion(&bfad->comp);
1217 } 1480 }
1218 1481
1219 bfad_remove_intr(bfad); 1482 /* Send Event BFAD_E_STOP */
1220 del_timer_sync(&bfad->hal_tmo); 1483 bfa_sm_send_event(bfad, BFAD_E_STOP);
1221 1484
1222 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) 1485 /* Driver detach and dealloc mem */
1223 bfad_fc4_probe_undo(bfad);
1224
1225 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
1226 bfad_uncfg_pport(bfad);
1227
1228hal_detach:
1229 spin_lock_irqsave(&bfad->bfad_lock, flags); 1486 spin_lock_irqsave(&bfad->bfad_lock, flags);
1230 bfa_detach(&bfad->bfa); 1487 bfa_detach(&bfad->bfa);
1231 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1488 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1232 bfad_hal_mem_release(bfad); 1489 bfad_hal_mem_release(bfad);
1233remove_sysfs:
1234 1490
1491 /* Cleaning the BFAD instance */
1235 mutex_lock(&bfad_mutex); 1492 mutex_lock(&bfad_mutex);
1236 bfad_inst--; 1493 bfad_inst--;
1237 list_del(&bfad->list_entry); 1494 list_del(&bfad->list_entry);
@@ -1242,35 +1499,34 @@ remove_sysfs:
1242 kfree(bfad); 1499 kfree(bfad);
1243} 1500}
1244 1501
1245 1502struct pci_device_id bfad_id_table[] = {
1246static struct pci_device_id bfad_id_table[] = {
1247 { 1503 {
1248 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1504 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1249 .device = BFA_PCI_DEVICE_ID_FC_8G2P, 1505 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1250 .subvendor = PCI_ANY_ID, 1506 .subvendor = PCI_ANY_ID,
1251 .subdevice = PCI_ANY_ID, 1507 .subdevice = PCI_ANY_ID,
1252 }, 1508 },
1253 { 1509 {
1254 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1510 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1255 .device = BFA_PCI_DEVICE_ID_FC_8G1P, 1511 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1256 .subvendor = PCI_ANY_ID, 1512 .subvendor = PCI_ANY_ID,
1257 .subdevice = PCI_ANY_ID, 1513 .subdevice = PCI_ANY_ID,
1258 }, 1514 },
1259 { 1515 {
1260 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1516 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1261 .device = BFA_PCI_DEVICE_ID_CT, 1517 .device = BFA_PCI_DEVICE_ID_CT,
1262 .subvendor = PCI_ANY_ID, 1518 .subvendor = PCI_ANY_ID,
1263 .subdevice = PCI_ANY_ID, 1519 .subdevice = PCI_ANY_ID,
1264 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1520 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1265 .class_mask = ~0, 1521 .class_mask = ~0,
1266 }, 1522 },
1267 { 1523 {
1268 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1524 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1269 .device = BFA_PCI_DEVICE_ID_CT_FC, 1525 .device = BFA_PCI_DEVICE_ID_CT_FC,
1270 .subvendor = PCI_ANY_ID, 1526 .subvendor = PCI_ANY_ID,
1271 .subdevice = PCI_ANY_ID, 1527 .subdevice = PCI_ANY_ID,
1272 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1528 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1273 .class_mask = ~0, 1529 .class_mask = ~0,
1274 }, 1530 },
1275 1531
1276 {0, 0}, 1532 {0, 0},
@@ -1286,89 +1542,104 @@ static struct pci_driver bfad_pci_driver = {
1286}; 1542};
1287 1543
1288/** 1544/**
1289 * Linux driver module functions
1290 */
1291bfa_status_t
1292bfad_fc4_module_init(void)
1293{
1294 int rc;
1295
1296 rc = bfad_im_module_init();
1297 if (rc != BFA_STATUS_OK)
1298 goto ext;
1299
1300 bfad_tm_module_init();
1301 if (ipfc_enable)
1302 bfad_ipfc_module_init();
1303ext:
1304 return rc;
1305}
1306
1307void
1308bfad_fc4_module_exit(void)
1309{
1310 if (ipfc_enable)
1311 bfad_ipfc_module_exit();
1312 bfad_tm_module_exit();
1313 bfad_im_module_exit();
1314}
1315
1316/**
1317 * Driver module init. 1545 * Driver module init.
1318 */ 1546 */
1319static int __init 1547static int __init
1320bfad_init(void) 1548bfad_init(void)
1321{ 1549{
1322 int error = 0; 1550 int error = 0;
1323 1551
1324 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", 1552 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1325 BFAD_DRIVER_VERSION); 1553 BFAD_DRIVER_VERSION);
1326 1554
1327 if (num_sgpgs > 0) 1555 if (num_sgpgs > 0)
1328 num_sgpgs_parm = num_sgpgs; 1556 num_sgpgs_parm = num_sgpgs;
1329 1557
1330 error = bfad_fc4_module_init(); 1558 error = bfad_im_module_init();
1331 if (error) { 1559 if (error) {
1332 error = -ENOMEM; 1560 error = -ENOMEM;
1333 printk(KERN_WARNING "bfad_fc4_module_init failure\n"); 1561 printk(KERN_WARNING "bfad_im_module_init failure\n");
1334 goto ext; 1562 goto ext;
1335 } 1563 }
1336 1564
1337 if (!strcmp(FCPI_NAME, " fcpim")) 1565 if (strcmp(FCPI_NAME, " fcpim") == 0)
1338 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM; 1566 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1339 if (!strcmp(FCPT_NAME, " fcptm"))
1340 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM;
1341 if (!strcmp(IPFC_NAME, " ipfc"))
1342 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC;
1343 1567
1344 bfa_ioc_auto_recover(ioc_auto_recover); 1568 bfa_ioc_auto_recover(ioc_auto_recover);
1345 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1569 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1346 error = pci_register_driver(&bfad_pci_driver);
1347 1570
1571 error = pci_register_driver(&bfad_pci_driver);
1348 if (error) { 1572 if (error) {
1349 printk(KERN_WARNING "bfad pci_register_driver failure\n"); 1573 printk(KERN_WARNING "pci_register_driver failure\n");
1350 goto ext; 1574 goto ext;
1351 } 1575 }
1352 1576
1353 return 0; 1577 return 0;
1354 1578
1355ext: 1579ext:
1356 bfad_fc4_module_exit(); 1580 bfad_im_module_exit();
1357 return error; 1581 return error;
1358} 1582}
1359 1583
1360/** 1584/**
1361 * Driver module exit. 1585 * Driver module exit.
1362 */ 1586 */
1363static void __exit 1587static void __exit
1364bfad_exit(void) 1588bfad_exit(void)
1365{ 1589{
1366 pci_unregister_driver(&bfad_pci_driver); 1590 pci_unregister_driver(&bfad_pci_driver);
1367 bfad_fc4_module_exit(); 1591 bfad_im_module_exit();
1368 bfad_free_fwimg(); 1592 bfad_free_fwimg();
1369} 1593}
1370 1594
1371#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME 1595/* Firmware handling */
1596u32 *
1597bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1598 u32 *bfi_image_size, char *fw_name)
1599{
1600 const struct firmware *fw;
1601
1602 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1603 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1604 goto error;
1605 }
1606
1607 *bfi_image = vmalloc(fw->size);
1608 if (NULL == *bfi_image) {
1609 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1610 "size=%x!\n", (u32) fw->size);
1611 goto error;
1612 }
1613
1614 memcpy(*bfi_image, fw->data, fw->size);
1615 *bfi_image_size = fw->size/sizeof(u32);
1616
1617 return *bfi_image;
1618
1619error:
1620 return NULL;
1621}
1622
1623u32 *
1624bfad_get_firmware_buf(struct pci_dev *pdev)
1625{
1626 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1627 if (bfi_image_ct_fc_size == 0)
1628 bfad_read_firmware(pdev, &bfi_image_ct_fc,
1629 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
1630 return bfi_image_ct_fc;
1631 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
1632 if (bfi_image_ct_cna_size == 0)
1633 bfad_read_firmware(pdev, &bfi_image_ct_cna,
1634 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
1635 return bfi_image_ct_cna;
1636 } else {
1637 if (bfi_image_cb_fc_size == 0)
1638 bfad_read_firmware(pdev, &bfi_image_cb_fc,
1639 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
1640 return bfi_image_cb_fc;
1641 }
1642}
1372 1643
1373module_init(bfad_init); 1644module_init(bfad_init);
1374module_exit(bfad_exit); 1645module_exit(bfad_exit);
@@ -1376,5 +1647,3 @@ MODULE_LICENSE("GPL");
1376MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); 1647MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1377MODULE_AUTHOR("Brocade Communications Systems, Inc."); 1648MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1378MODULE_VERSION(BFAD_DRIVER_VERSION); 1649MODULE_VERSION(BFAD_DRIVER_VERSION);
1379
1380
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 0818eb07ef88..d8843720eac1 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,15 +19,8 @@
19 * bfa_attr.c Linux driver configuration interface module. 19 * bfa_attr.c Linux driver configuration interface module.
20 */ 20 */
21 21
22#include <linux/slab.h>
23#include "bfad_drv.h" 22#include "bfad_drv.h"
24#include "bfad_im.h" 23#include "bfad_im.h"
25#include "bfad_trcmod.h"
26#include "bfad_attr.h"
27
28/**
29 * FC_transport_template FC transport template
30 */
31 24
32/** 25/**
33 * FC transport template entry, get SCSI target port ID. 26 * FC transport template entry, get SCSI target port ID.
@@ -42,7 +35,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
42 u32 fc_id = -1; 35 u32 fc_id = -1;
43 unsigned long flags; 36 unsigned long flags;
44 37
45 shost = bfad_os_starget_to_shost(starget); 38 shost = dev_to_shost(starget->dev.parent);
46 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 39 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
47 bfad = im_port->bfad; 40 bfad = im_port->bfad;
48 spin_lock_irqsave(&bfad->bfad_lock, flags); 41 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -68,7 +61,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
68 u64 node_name = 0; 61 u64 node_name = 0;
69 unsigned long flags; 62 unsigned long flags;
70 63
71 shost = bfad_os_starget_to_shost(starget); 64 shost = dev_to_shost(starget->dev.parent);
72 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 65 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
73 bfad = im_port->bfad; 66 bfad = im_port->bfad;
74 spin_lock_irqsave(&bfad->bfad_lock, flags); 67 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -94,7 +87,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
94 u64 port_name = 0; 87 u64 port_name = 0;
95 unsigned long flags; 88 unsigned long flags;
96 89
97 shost = bfad_os_starget_to_shost(starget); 90 shost = dev_to_shost(starget->dev.parent);
98 im_port = (struct bfad_im_port_s *) shost->hostdata[0]; 91 im_port = (struct bfad_im_port_s *) shost->hostdata[0];
99 bfad = im_port->bfad; 92 bfad = im_port->bfad;
100 spin_lock_irqsave(&bfad->bfad_lock, flags); 93 spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -118,17 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
118 struct bfad_port_s *port = im_port->port; 111 struct bfad_port_s *port = im_port->port;
119 112
120 fc_host_port_id(shost) = 113 fc_host_port_id(shost) =
121 bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port)); 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
122}
123
124
125
126
127
128struct Scsi_Host *
129bfad_os_starget_to_shost(struct scsi_target *starget)
130{
131 return dev_to_shost(starget->dev.parent);
132} 115}
133 116
134/** 117/**
@@ -140,21 +123,21 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
140 struct bfad_im_port_s *im_port = 123 struct bfad_im_port_s *im_port =
141 (struct bfad_im_port_s *) shost->hostdata[0]; 124 (struct bfad_im_port_s *) shost->hostdata[0];
142 struct bfad_s *bfad = im_port->bfad; 125 struct bfad_s *bfad = im_port->bfad;
143 struct bfa_pport_attr_s attr; 126 struct bfa_lport_attr_s port_attr;
144 127
145 bfa_fcport_get_attr(&bfad->bfa, &attr); 128 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
146 129
147 switch (attr.port_type) { 130 switch (port_attr.port_type) {
148 case BFA_PPORT_TYPE_NPORT: 131 case BFA_PORT_TYPE_NPORT:
149 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 132 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
150 break; 133 break;
151 case BFA_PPORT_TYPE_NLPORT: 134 case BFA_PORT_TYPE_NLPORT:
152 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 135 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
153 break; 136 break;
154 case BFA_PPORT_TYPE_P2P: 137 case BFA_PORT_TYPE_P2P:
155 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 138 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
156 break; 139 break;
157 case BFA_PPORT_TYPE_LPORT: 140 case BFA_PORT_TYPE_LPORT:
158 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 141 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
159 break; 142 break;
160 default: 143 default:
@@ -172,25 +155,28 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
172 struct bfad_im_port_s *im_port = 155 struct bfad_im_port_s *im_port =
173 (struct bfad_im_port_s *) shost->hostdata[0]; 156 (struct bfad_im_port_s *) shost->hostdata[0];
174 struct bfad_s *bfad = im_port->bfad; 157 struct bfad_s *bfad = im_port->bfad;
175 struct bfa_pport_attr_s attr; 158 struct bfa_port_attr_s attr;
176 159
177 bfa_fcport_get_attr(&bfad->bfa, &attr); 160 bfa_fcport_get_attr(&bfad->bfa, &attr);
178 161
179 switch (attr.port_state) { 162 switch (attr.port_state) {
180 case BFA_PPORT_ST_LINKDOWN: 163 case BFA_PORT_ST_LINKDOWN:
181 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 164 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
182 break; 165 break;
183 case BFA_PPORT_ST_LINKUP: 166 case BFA_PORT_ST_LINKUP:
184 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 167 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
185 break; 168 break;
186 case BFA_PPORT_ST_UNINIT: 169 case BFA_PORT_ST_DISABLED:
187 case BFA_PPORT_ST_ENABLING_QWAIT: 170 case BFA_PORT_ST_STOPPED:
188 case BFA_PPORT_ST_ENABLING: 171 case BFA_PORT_ST_IOCDOWN:
189 case BFA_PPORT_ST_DISABLING_QWAIT: 172 case BFA_PORT_ST_IOCDIS:
190 case BFA_PPORT_ST_DISABLING: 173 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
191 case BFA_PPORT_ST_DISABLED: 174 break;
192 case BFA_PPORT_ST_STOPPED: 175 case BFA_PORT_ST_UNINIT:
193 case BFA_PPORT_ST_IOCDOWN: 176 case BFA_PORT_ST_ENABLING_QWAIT:
177 case BFA_PORT_ST_ENABLING:
178 case BFA_PORT_ST_DISABLING_QWAIT:
179 case BFA_PORT_ST_DISABLING:
194 default: 180 default:
195 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 181 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
196 break; 182 break;
@@ -210,13 +196,9 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
210 memset(fc_host_active_fc4s(shost), 0, 196 memset(fc_host_active_fc4s(shost), 0,
211 sizeof(fc_host_active_fc4s(shost))); 197 sizeof(fc_host_active_fc4s(shost)));
212 198
213 if (port->supported_fc4s & 199 if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
214 (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
215 fc_host_active_fc4s(shost)[2] = 1; 200 fc_host_active_fc4s(shost)[2] = 1;
216 201
217 if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
218 fc_host_active_fc4s(shost)[3] = 0x20;
219
220 fc_host_active_fc4s(shost)[7] = 1; 202 fc_host_active_fc4s(shost)[7] = 1;
221} 203}
222 204
@@ -229,29 +211,29 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
229 struct bfad_im_port_s *im_port = 211 struct bfad_im_port_s *im_port =
230 (struct bfad_im_port_s *) shost->hostdata[0]; 212 (struct bfad_im_port_s *) shost->hostdata[0];
231 struct bfad_s *bfad = im_port->bfad; 213 struct bfad_s *bfad = im_port->bfad;
232 struct bfa_pport_attr_s attr; 214 struct bfa_port_attr_s attr;
233 unsigned long flags;
234 215
235 spin_lock_irqsave(shost->host_lock, flags);
236 bfa_fcport_get_attr(&bfad->bfa, &attr); 216 bfa_fcport_get_attr(&bfad->bfa, &attr);
237 switch (attr.speed) { 217 switch (attr.speed) {
238 case BFA_PPORT_SPEED_8GBPS: 218 case BFA_PORT_SPEED_10GBPS:
219 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
220 break;
221 case BFA_PORT_SPEED_8GBPS:
239 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 222 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
240 break; 223 break;
241 case BFA_PPORT_SPEED_4GBPS: 224 case BFA_PORT_SPEED_4GBPS:
242 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 225 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
243 break; 226 break;
244 case BFA_PPORT_SPEED_2GBPS: 227 case BFA_PORT_SPEED_2GBPS:
245 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 228 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
246 break; 229 break;
247 case BFA_PPORT_SPEED_1GBPS: 230 case BFA_PORT_SPEED_1GBPS:
248 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 231 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
249 break; 232 break;
250 default: 233 default:
251 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 234 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
252 break; 235 break;
253 } 236 }
254 spin_unlock_irqrestore(shost->host_lock, flags);
255} 237}
256 238
257/** 239/**
@@ -265,7 +247,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
265 struct bfad_port_s *port = im_port->port; 247 struct bfad_port_s *port = im_port->port;
266 wwn_t fabric_nwwn = 0; 248 wwn_t fabric_nwwn = 0;
267 249
268 fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port); 250 fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
269 251
270 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); 252 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
271 253
@@ -281,23 +263,44 @@ bfad_im_get_stats(struct Scsi_Host *shost)
281 (struct bfad_im_port_s *) shost->hostdata[0]; 263 (struct bfad_im_port_s *) shost->hostdata[0];
282 struct bfad_s *bfad = im_port->bfad; 264 struct bfad_s *bfad = im_port->bfad;
283 struct bfad_hal_comp fcomp; 265 struct bfad_hal_comp fcomp;
266 union bfa_port_stats_u *fcstats;
284 struct fc_host_statistics *hstats; 267 struct fc_host_statistics *hstats;
285 bfa_status_t rc; 268 bfa_status_t rc;
286 unsigned long flags; 269 unsigned long flags;
287 270
271 fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
272 if (fcstats == NULL)
273 return NULL;
274
288 hstats = &bfad->link_stats; 275 hstats = &bfad->link_stats;
289 init_completion(&fcomp.comp); 276 init_completion(&fcomp.comp);
290 spin_lock_irqsave(&bfad->bfad_lock, flags); 277 spin_lock_irqsave(&bfad->bfad_lock, flags);
291 memset(hstats, 0, sizeof(struct fc_host_statistics)); 278 memset(hstats, 0, sizeof(struct fc_host_statistics));
292 rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), 279 rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
293 (union bfa_pport_stats_u *) hstats, 280 fcstats, bfad_hcb_comp, &fcomp);
294 bfad_hcb_comp, &fcomp);
295 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 281 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
296 if (rc != BFA_STATUS_OK) 282 if (rc != BFA_STATUS_OK)
297 return NULL; 283 return NULL;
298 284
299 wait_for_completion(&fcomp.comp); 285 wait_for_completion(&fcomp.comp);
300 286
287 /* Fill the fc_host_statistics structure */
288 hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
289 hstats->tx_frames = fcstats->fc.tx_frames;
290 hstats->tx_words = fcstats->fc.tx_words;
291 hstats->rx_frames = fcstats->fc.rx_frames;
292 hstats->rx_words = fcstats->fc.rx_words;
293 hstats->lip_count = fcstats->fc.lip_count;
294 hstats->nos_count = fcstats->fc.nos_count;
295 hstats->error_frames = fcstats->fc.error_frames;
296 hstats->dumped_frames = fcstats->fc.dropped_frames;
297 hstats->link_failure_count = fcstats->fc.link_failures;
298 hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
299 hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
300 hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
301 hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
302
303 kfree(fcstats);
301 return hstats; 304 return hstats;
302} 305}
303 306
@@ -317,7 +320,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
317 init_completion(&fcomp.comp); 320 init_completion(&fcomp.comp);
318 spin_lock_irqsave(&bfad->bfad_lock, flags); 321 spin_lock_irqsave(&bfad->bfad_lock, flags);
319 rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, 322 rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
320 &fcomp); 323 &fcomp);
321 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 324 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
322 325
323 if (rc != BFA_STATUS_OK) 326 if (rc != BFA_STATUS_OK)
@@ -372,8 +375,8 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
372 struct bfad_im_port_s *im_port = 375 struct bfad_im_port_s *im_port =
373 (struct bfad_im_port_s *) shost->hostdata[0]; 376 (struct bfad_im_port_s *) shost->hostdata[0];
374 struct bfad_s *bfad = im_port->bfad; 377 struct bfad_s *bfad = im_port->bfad;
375 struct bfa_port_cfg_s port_cfg; 378 struct bfa_lport_cfg_s port_cfg;
376 struct bfad_pcfg_s *pcfg; 379 struct bfad_vport_s *vp;
377 int status = 0, rc; 380 int status = 0, rc;
378 unsigned long flags; 381 unsigned long flags;
379 382
@@ -382,12 +385,14 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
382 u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); 385 u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
383 if (strlen(vname) > 0) 386 if (strlen(vname) > 0)
384 strcpy((char *)&port_cfg.sym_name, vname); 387 strcpy((char *)&port_cfg.sym_name, vname);
385 port_cfg.roles = BFA_PORT_ROLE_FCP_IM; 388 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
386 389
387 spin_lock_irqsave(&bfad->bfad_lock, flags); 390 spin_lock_irqsave(&bfad->bfad_lock, flags);
388 list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) { 391 list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
389 if (port_cfg.pwwn == pcfg->port_cfg.pwwn) { 392 if (port_cfg.pwwn ==
390 port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp; 393 vp->fcs_vport.lport.port_cfg.pwwn) {
394 port_cfg.preboot_vp =
395 vp->fcs_vport.lport.port_cfg.preboot_vp;
391 break; 396 break;
392 } 397 }
393 } 398 }
@@ -638,7 +643,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
638 struct Scsi_Host *shost = class_to_shost(dev); 643 struct Scsi_Host *shost = class_to_shost(dev);
639 struct bfad_im_port_s *im_port = 644 struct bfad_im_port_s *im_port =
640 (struct bfad_im_port_s *) shost->hostdata[0]; 645 (struct bfad_im_port_s *) shost->hostdata[0];
641 struct bfad_s *bfad = im_port->bfad; 646 struct bfad_s *bfad = im_port->bfad;
642 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; 647 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
643 648
644 bfa_get_adapter_serial_num(&bfad->bfa, serial_num); 649 bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
@@ -652,7 +657,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
652 struct Scsi_Host *shost = class_to_shost(dev); 657 struct Scsi_Host *shost = class_to_shost(dev);
653 struct bfad_im_port_s *im_port = 658 struct bfad_im_port_s *im_port =
654 (struct bfad_im_port_s *) shost->hostdata[0]; 659 (struct bfad_im_port_s *) shost->hostdata[0];
655 struct bfad_s *bfad = im_port->bfad; 660 struct bfad_s *bfad = im_port->bfad;
656 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 661 char model[BFA_ADAPTER_MODEL_NAME_LEN];
657 662
658 bfa_get_adapter_model(&bfad->bfa, model); 663 bfa_get_adapter_model(&bfad->bfa, model);
@@ -666,10 +671,54 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
666 struct Scsi_Host *shost = class_to_shost(dev); 671 struct Scsi_Host *shost = class_to_shost(dev);
667 struct bfad_im_port_s *im_port = 672 struct bfad_im_port_s *im_port =
668 (struct bfad_im_port_s *) shost->hostdata[0]; 673 (struct bfad_im_port_s *) shost->hostdata[0];
669 struct bfad_s *bfad = im_port->bfad; 674 struct bfad_s *bfad = im_port->bfad;
675 char model[BFA_ADAPTER_MODEL_NAME_LEN];
670 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; 676 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
671 677
672 bfa_get_adapter_model(&bfad->bfa, model_descr); 678 bfa_get_adapter_model(&bfad->bfa, model);
679 if (!strcmp(model, "Brocade-425"))
680 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
681 "Brocade 4Gbps PCIe dual port FC HBA");
682 else if (!strcmp(model, "Brocade-825"))
683 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
684 "Brocade 8Gbps PCIe dual port FC HBA");
685 else if (!strcmp(model, "Brocade-42B"))
686 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
687 "HP 4Gbps PCIe dual port FC HBA");
688 else if (!strcmp(model, "Brocade-82B"))
689 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
690 "HP 8Gbps PCIe dual port FC HBA");
691 else if (!strcmp(model, "Brocade-1010"))
692 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
693 "Brocade 10Gbps single port CNA");
694 else if (!strcmp(model, "Brocade-1020"))
695 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
696 "Brocade 10Gbps dual port CNA");
697 else if (!strcmp(model, "Brocade-1007"))
698 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
699 "Brocade 10Gbps CNA");
700 else if (!strcmp(model, "Brocade-415"))
701 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
702 "Brocade 4Gbps PCIe single port FC HBA");
703 else if (!strcmp(model, "Brocade-815"))
704 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
705 "Brocade 8Gbps PCIe single port FC HBA");
706 else if (!strcmp(model, "Brocade-41B"))
707 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
708 "HP 4Gbps PCIe single port FC HBA");
709 else if (!strcmp(model, "Brocade-81B"))
710 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
711 "HP 8Gbps PCIe single port FC HBA");
712 else if (!strcmp(model, "Brocade-804"))
713 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
714 "HP Bladesystem C-class 8Gbps FC HBA");
715 else if (!strcmp(model, "Brocade-902"))
716 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
717 "Brocade 10Gbps CNA");
718 else
719 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
720 "Invalid Model");
721
673 return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); 722 return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
674} 723}
675 724
@@ -683,7 +732,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
683 struct bfad_port_s *port = im_port->port; 732 struct bfad_port_s *port = im_port->port;
684 u64 nwwn; 733 u64 nwwn;
685 734
686 nwwn = bfa_fcs_port_get_nwwn(port->fcs_port); 735 nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
687 return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); 736 return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
688} 737}
689 738
@@ -694,14 +743,14 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
694 struct Scsi_Host *shost = class_to_shost(dev); 743 struct Scsi_Host *shost = class_to_shost(dev);
695 struct bfad_im_port_s *im_port = 744 struct bfad_im_port_s *im_port =
696 (struct bfad_im_port_s *) shost->hostdata[0]; 745 (struct bfad_im_port_s *) shost->hostdata[0];
697 struct bfad_s *bfad = im_port->bfad; 746 struct bfad_s *bfad = im_port->bfad;
698 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 747 struct bfa_lport_attr_s port_attr;
699 char fw_ver[BFA_VERSION_LEN]; 748 char symname[BFA_SYMNAME_MAXLEN];
700 749
701 bfa_get_adapter_model(&bfad->bfa, model); 750 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
702 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 751 strncpy(symname, port_attr.port_cfg.sym_name.symname,
703 return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", 752 BFA_SYMNAME_MAXLEN);
704 model, fw_ver, BFAD_DRIVER_VERSION); 753 return snprintf(buf, PAGE_SIZE, "%s\n", symname);
705} 754}
706 755
707static ssize_t 756static ssize_t
@@ -711,7 +760,7 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
711 struct Scsi_Host *shost = class_to_shost(dev); 760 struct Scsi_Host *shost = class_to_shost(dev);
712 struct bfad_im_port_s *im_port = 761 struct bfad_im_port_s *im_port =
713 (struct bfad_im_port_s *) shost->hostdata[0]; 762 (struct bfad_im_port_s *) shost->hostdata[0];
714 struct bfad_s *bfad = im_port->bfad; 763 struct bfad_s *bfad = im_port->bfad;
715 char hw_ver[BFA_VERSION_LEN]; 764 char hw_ver[BFA_VERSION_LEN];
716 765
717 bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); 766 bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
@@ -732,7 +781,7 @@ bfad_im_optionrom_version_show(struct device *dev,
732 struct Scsi_Host *shost = class_to_shost(dev); 781 struct Scsi_Host *shost = class_to_shost(dev);
733 struct bfad_im_port_s *im_port = 782 struct bfad_im_port_s *im_port =
734 (struct bfad_im_port_s *) shost->hostdata[0]; 783 (struct bfad_im_port_s *) shost->hostdata[0];
735 struct bfad_s *bfad = im_port->bfad; 784 struct bfad_s *bfad = im_port->bfad;
736 char optrom_ver[BFA_VERSION_LEN]; 785 char optrom_ver[BFA_VERSION_LEN];
737 786
738 bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); 787 bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
@@ -746,7 +795,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
746 struct Scsi_Host *shost = class_to_shost(dev); 795 struct Scsi_Host *shost = class_to_shost(dev);
747 struct bfad_im_port_s *im_port = 796 struct bfad_im_port_s *im_port =
748 (struct bfad_im_port_s *) shost->hostdata[0]; 797 (struct bfad_im_port_s *) shost->hostdata[0];
749 struct bfad_s *bfad = im_port->bfad; 798 struct bfad_s *bfad = im_port->bfad;
750 char fw_ver[BFA_VERSION_LEN]; 799 char fw_ver[BFA_VERSION_LEN];
751 800
752 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 801 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
@@ -760,10 +809,10 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
760 struct Scsi_Host *shost = class_to_shost(dev); 809 struct Scsi_Host *shost = class_to_shost(dev);
761 struct bfad_im_port_s *im_port = 810 struct bfad_im_port_s *im_port =
762 (struct bfad_im_port_s *) shost->hostdata[0]; 811 (struct bfad_im_port_s *) shost->hostdata[0];
763 struct bfad_s *bfad = im_port->bfad; 812 struct bfad_s *bfad = im_port->bfad;
764 813
765 return snprintf(buf, PAGE_SIZE, "%d\n", 814 return snprintf(buf, PAGE_SIZE, "%d\n",
766 bfa_get_nports(&bfad->bfa)); 815 bfa_get_nports(&bfad->bfa));
767} 816}
768 817
769static ssize_t 818static ssize_t
@@ -788,10 +837,10 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
788 837
789 rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC); 838 rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
790 if (rports == NULL) 839 if (rports == NULL)
791 return -ENOMEM; 840 return snprintf(buf, PAGE_SIZE, "Failed\n");
792 841
793 spin_lock_irqsave(&bfad->bfad_lock, flags); 842 spin_lock_irqsave(&bfad->bfad_lock, flags);
794 bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports); 843 bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
795 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 844 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
796 kfree(rports); 845 kfree(rports);
797 846
@@ -837,19 +886,19 @@ struct device_attribute *bfad_im_host_attrs[] = {
837}; 886};
838 887
839struct device_attribute *bfad_im_vport_attrs[] = { 888struct device_attribute *bfad_im_vport_attrs[] = {
840 &dev_attr_serial_number, 889 &dev_attr_serial_number,
841 &dev_attr_model, 890 &dev_attr_model,
842 &dev_attr_model_description, 891 &dev_attr_model_description,
843 &dev_attr_node_name, 892 &dev_attr_node_name,
844 &dev_attr_symbolic_name, 893 &dev_attr_symbolic_name,
845 &dev_attr_hardware_version, 894 &dev_attr_hardware_version,
846 &dev_attr_driver_version, 895 &dev_attr_driver_version,
847 &dev_attr_option_rom_version, 896 &dev_attr_option_rom_version,
848 &dev_attr_firmware_version, 897 &dev_attr_firmware_version,
849 &dev_attr_number_of_ports, 898 &dev_attr_number_of_ports,
850 &dev_attr_driver_name, 899 &dev_attr_driver_name,
851 &dev_attr_number_of_discovered_ports, 900 &dev_attr_number_of_discovered_ports,
852 NULL, 901 NULL,
853}; 902};
854 903
855 904
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
deleted file mode 100644
index bf0102076508..000000000000
--- a/drivers/scsi/bfa/bfad_attr.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_ATTR_H__
19#define __BFAD_ATTR_H__
20
21/**
22 * FC_transport_template FC transport template
23 */
24
25struct Scsi_Host*
26bfad_os_dev_to_shost(struct scsi_target *starget);
27
28/**
29 * FC transport template entry, get SCSI target port ID.
30 */
31void
32bfad_im_get_starget_port_id(struct scsi_target *starget);
33
34/**
35 * FC transport template entry, get SCSI target nwwn.
36 */
37void
38bfad_im_get_starget_node_name(struct scsi_target *starget);
39
40/**
41 * FC transport template entry, get SCSI target pwwn.
42 */
43void
44bfad_im_get_starget_port_name(struct scsi_target *starget);
45
46/**
47 * FC transport template entry, get SCSI host port ID.
48 */
49void
50bfad_im_get_host_port_id(struct Scsi_Host *shost);
51
52struct Scsi_Host*
53bfad_os_starget_to_shost(struct scsi_target *starget);
54
55
56#endif /* __BFAD_ATTR_H__ */
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 4b82f12aad62..69ed1c4a903e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -17,8 +17,8 @@
17 17
18#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19 19
20#include <bfad_drv.h> 20#include "bfad_drv.h"
21#include <bfad_im.h> 21#include "bfad_im.h"
22 22
23/* 23/*
24 * BFA debufs interface 24 * BFA debufs interface
@@ -28,7 +28,7 @@
28 * mount -t debugfs none /sys/kernel/debug 28 * mount -t debugfs none /sys/kernel/debug
29 * 29 *
30 * BFA Hierarchy: 30 * BFA Hierarchy:
31 * - bfa/host# 31 * - bfa/host#
32 * where the host number corresponds to the one under /sys/class/scsi_host/host# 32 * where the host number corresponds to the one under /sys/class/scsi_host/host#
33 * 33 *
34 * Debugging service available per host: 34 * Debugging service available per host:
@@ -217,7 +217,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
217#define BFA_REG_ADDRSZ(__bfa) \ 217#define BFA_REG_ADDRSZ(__bfa) \
218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \ 218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ) 219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
220#define BFA_REG_ADDRMSK(__bfa) ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1)) 220#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
221 221
222static bfa_status_t 222static bfa_status_t
223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) 223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -359,7 +359,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
359 return -EINVAL; 359 return -EINVAL;
360 } 360 }
361 361
362 reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr); 362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
363 spin_lock_irqsave(&bfad->bfad_lock, flags); 363 spin_lock_irqsave(&bfad->bfad_lock, flags);
364 bfa_reg_write(reg_addr, val); 364 bfa_reg_write(reg_addr, val);
365 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 365 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 465b8b86ec9c..98420bbb4f3f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -28,30 +28,27 @@
28 28
29#include "bfa_os_inc.h" 29#include "bfa_os_inc.h"
30 30
31#include <bfa.h> 31#include "bfa_modules.h"
32#include <bfa_svc.h> 32#include "bfa_fcs.h"
33#include <fcs/bfa_fcs.h> 33#include "bfa_defs_fcs.h"
34#include <defs/bfa_defs_pci.h> 34
35#include <defs/bfa_defs_port.h> 35#include "bfa_plog.h"
36#include <defs/bfa_defs_rport.h> 36#include "bfa_cs.h"
37#include <fcs/bfa_fcs_rport.h> 37
38#include <defs/bfa_defs_vport.h> 38#define BFAD_DRIVER_NAME "bfa"
39#include <fcs/bfa_fcs_vport.h>
40
41#include <cs/bfa_plog.h>
42#include "aen/bfa_aen.h"
43#include <log/bfa_log_linux.h>
44
45#define BFAD_DRIVER_NAME "bfa"
46#ifdef BFA_DRIVER_VERSION 39#ifdef BFA_DRIVER_VERSION
47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 40#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
48#else 41#else
49#define BFAD_DRIVER_VERSION "2.2.2.1" 42#define BFAD_DRIVER_VERSION "2.3.2.0"
50#endif 43#endif
51 44
52 45#define BFAD_PROTO_NAME FCPI_NAME
53#define BFAD_IRQ_FLAGS IRQF_SHARED 46#define BFAD_IRQ_FLAGS IRQF_SHARED
54 47
48#ifndef FC_PORTSPEED_8GBIT
49#define FC_PORTSPEED_8GBIT 0x10
50#endif
51
55/* 52/*
56 * BFAD flags 53 * BFAD flags
57 */ 54 */
@@ -62,9 +59,9 @@
62#define BFAD_HAL_START_DONE 0x00000010 59#define BFAD_HAL_START_DONE 0x00000010
63#define BFAD_PORT_ONLINE 0x00000020 60#define BFAD_PORT_ONLINE 0x00000020
64#define BFAD_RPORT_ONLINE 0x00000040 61#define BFAD_RPORT_ONLINE 0x00000040
65#define BFAD_FCS_INIT_DONE 0x00000080 62#define BFAD_FCS_INIT_DONE 0x00000080
66#define BFAD_HAL_INIT_FAIL 0x00000100 63#define BFAD_HAL_INIT_FAIL 0x00000100
67#define BFAD_FC4_PROBE_DONE 0x00000200 64#define BFAD_FC4_PROBE_DONE 0x00000200
68#define BFAD_PORT_DELETE 0x00000001 65#define BFAD_PORT_DELETE 0x00000001
69 66
70/* 67/*
@@ -77,8 +74,8 @@
77/* 74/*
78 * BFAD configuration parameter default values 75 * BFAD configuration parameter default values
79 */ 76 */
80#define BFAD_LUN_QUEUE_DEPTH 32 77#define BFAD_LUN_QUEUE_DEPTH 32
81#define BFAD_IO_MAX_SGE SG_ALL 78#define BFAD_IO_MAX_SGE SG_ALL
82 79
83#define bfad_isr_t irq_handler_t 80#define bfad_isr_t irq_handler_t
84 81
@@ -87,6 +84,16 @@
87struct bfad_msix_s { 84struct bfad_msix_s {
88 struct bfad_s *bfad; 85 struct bfad_s *bfad;
89 struct msix_entry msix; 86 struct msix_entry msix;
87 char name[32];
88};
89
90/*
91 * Only append to the enums defined here to avoid any versioning
92 * needed between trace utility and driver version
93 */
94enum {
95 BFA_TRC_LDRV_BFAD = 1,
96 BFA_TRC_LDRV_IM = 2,
90}; 97};
91 98
92enum bfad_port_pvb_type { 99enum bfad_port_pvb_type {
@@ -101,17 +108,13 @@ enum bfad_port_pvb_type {
101 */ 108 */
102struct bfad_port_s { 109struct bfad_port_s {
103 struct list_head list_entry; 110 struct list_head list_entry;
104 struct bfad_s *bfad; 111 struct bfad_s *bfad;
105 struct bfa_fcs_port_s *fcs_port; 112 struct bfa_fcs_lport_s *fcs_port;
106 u32 roles; 113 u32 roles;
107 s32 flags; 114 s32 flags;
108 u32 supported_fc4s; 115 u32 supported_fc4s;
109 u8 ipfc_flags;
110 enum bfad_port_pvb_type pvb_type; 116 enum bfad_port_pvb_type pvb_type;
111 struct bfad_im_port_s *im_port; /* IM specific data */ 117 struct bfad_im_port_s *im_port; /* IM specific data */
112 struct bfad_tm_port_s *tm_port; /* TM specific data */
113 struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */
114
115 /* port debugfs specific data */ 118 /* port debugfs specific data */
116 struct dentry *port_debugfs_root; 119 struct dentry *port_debugfs_root;
117}; 120};
@@ -124,7 +127,6 @@ struct bfad_vport_s {
124 struct bfa_fcs_vport_s fcs_vport; 127 struct bfa_fcs_vport_s fcs_vport;
125 struct completion *comp_del; 128 struct completion *comp_del;
126 struct list_head list_entry; 129 struct list_head list_entry;
127 struct bfa_port_cfg_s port_cfg;
128}; 130};
129 131
130/* 132/*
@@ -137,20 +139,35 @@ struct bfad_vf_s {
137}; 139};
138 140
139struct bfad_cfg_param_s { 141struct bfad_cfg_param_s {
140 u32 rport_del_timeout; 142 u32 rport_del_timeout;
141 u32 ioc_queue_depth; 143 u32 ioc_queue_depth;
142 u32 lun_queue_depth; 144 u32 lun_queue_depth;
143 u32 io_max_sge; 145 u32 io_max_sge;
144 u32 binding_method; 146 u32 binding_method;
147};
148
149union bfad_tmp_buf {
150 /* From struct bfa_adapter_attr_s */
151 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
152 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
153 char model[BFA_ADAPTER_MODEL_NAME_LEN];
154 char fw_ver[BFA_VERSION_LEN];
155 char optrom_ver[BFA_VERSION_LEN];
156
157 /* From struct bfa_ioc_pci_attr_s */
158 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
159
160 wwn_t wwn[BFA_FCS_MAX_LPORTS];
145}; 161};
146 162
147/* 163/*
148 * BFAD (PCI function) data structure 164 * BFAD (PCI function) data structure
149 */ 165 */
150struct bfad_s { 166struct bfad_s {
167 bfa_sm_t sm; /* state machine */
151 struct list_head list_entry; 168 struct list_head list_entry;
152 struct bfa_s bfa; 169 struct bfa_s bfa;
153 struct bfa_fcs_s bfa_fcs; 170 struct bfa_fcs_s bfa_fcs;
154 struct pci_dev *pcidev; 171 struct pci_dev *pcidev;
155 const char *pci_name; 172 const char *pci_name;
156 struct bfa_pcidev_s hal_pcidev; 173 struct bfa_pcidev_s hal_pcidev;
@@ -163,41 +180,41 @@ struct bfad_s {
163 struct bfad_port_s pport; /* physical port of the BFAD */ 180 struct bfad_port_s pport; /* physical port of the BFAD */
164 struct bfa_meminfo_s meminfo; 181 struct bfa_meminfo_s meminfo;
165 struct bfa_iocfc_cfg_s ioc_cfg; 182 struct bfa_iocfc_cfg_s ioc_cfg;
166 u32 inst_no; /* BFAD instance number */ 183 u32 inst_no; /* BFAD instance number */
167 u32 bfad_flags; 184 u32 bfad_flags;
168 spinlock_t bfad_lock; 185 spinlock_t bfad_lock;
169 struct task_struct *bfad_tsk; 186 struct task_struct *bfad_tsk;
170 struct bfad_cfg_param_s cfg_data; 187 struct bfad_cfg_param_s cfg_data;
171 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; 188 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
172 int nvec; 189 int nvec;
173 char adapter_name[BFA_ADAPTER_SYM_NAME_LEN]; 190 char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
174 char port_name[BFA_ADAPTER_SYM_NAME_LEN]; 191 char port_name[BFA_ADAPTER_SYM_NAME_LEN];
175 struct timer_list hal_tmo; 192 struct timer_list hal_tmo;
176 unsigned long hs_start; 193 unsigned long hs_start;
177 struct bfad_im_s *im; /* IM specific data */ 194 struct bfad_im_s *im; /* IM specific data */
178 struct bfad_tm_s *tm; /* TM specific data */
179 struct bfad_ipfc_s *ipfc; /* IPFC specific data */
180 struct bfa_log_mod_s log_data;
181 struct bfa_trc_mod_s *trcmod; 195 struct bfa_trc_mod_s *trcmod;
182 struct bfa_log_mod_s *logmod;
183 struct bfa_aen_s *aen;
184 struct bfa_aen_s aen_buf;
185 void *file_map[BFA_AEN_MAX_APP];
186 struct bfa_plog_s plog_buf; 196 struct bfa_plog_s plog_buf;
187 int ref_count; 197 int ref_count;
188 bfa_boolean_t ipfc_enabled; 198 union bfad_tmp_buf tmp_buf;
189 struct fc_host_statistics link_stats; 199 struct fc_host_statistics link_stats;
190 struct list_head pbc_pcfg_list; 200 struct list_head pbc_vport_list;
191 atomic_t wq_reqcnt;
192 /* debugfs specific data */ 201 /* debugfs specific data */
193 char *regdata; 202 char *regdata;
194 u32 reglen; 203 u32 reglen;
195 struct dentry *bfad_dentry_files[5]; 204 struct dentry *bfad_dentry_files[5];
196}; 205};
197 206
198struct bfad_pcfg_s { 207/* BFAD state machine events */
199 struct list_head list_entry; 208enum bfad_sm_event {
200 struct bfa_port_cfg_s port_cfg; 209 BFAD_E_CREATE = 1,
210 BFAD_E_KTHREAD_CREATE_FAILED = 2,
211 BFAD_E_INIT = 3,
212 BFAD_E_INIT_SUCCESS = 4,
213 BFAD_E_INIT_FAILED = 5,
214 BFAD_E_INTR_INIT_FAILED = 6,
215 BFAD_E_FCS_EXIT_COMP = 7,
216 BFAD_E_EXIT_COMP = 8,
217 BFAD_E_STOP = 9
201}; 218};
202 219
203/* 220/*
@@ -208,30 +225,30 @@ struct bfad_rport_s {
208}; 225};
209 226
210struct bfad_buf_info { 227struct bfad_buf_info {
211 void *virt; 228 void *virt;
212 dma_addr_t phys; 229 dma_addr_t phys;
213 u32 size; 230 u32 size;
214}; 231};
215 232
216struct bfad_fcxp { 233struct bfad_fcxp {
217 struct bfad_port_s *port; 234 struct bfad_port_s *port;
218 struct bfa_rport_s *bfa_rport; 235 struct bfa_rport_s *bfa_rport;
219 bfa_status_t req_status; 236 bfa_status_t req_status;
220 u16 tag; 237 u16 tag;
221 u16 rsp_len; 238 u16 rsp_len;
222 u16 rsp_maxlen; 239 u16 rsp_maxlen;
223 u8 use_ireqbuf; 240 u8 use_ireqbuf;
224 u8 use_irspbuf; 241 u8 use_irspbuf;
225 u32 num_req_sgles; 242 u32 num_req_sgles;
226 u32 num_rsp_sgles; 243 u32 num_rsp_sgles;
227 struct fchs_s fchs; 244 struct fchs_s fchs;
228 void *reqbuf_info; 245 void *reqbuf_info;
229 void *rspbuf_info; 246 void *rspbuf_info;
230 struct bfa_sge_s *req_sge; 247 struct bfa_sge_s *req_sge;
231 struct bfa_sge_s *rsp_sge; 248 struct bfa_sge_s *rsp_sge;
232 fcxp_send_cb_t send_cbfn; 249 fcxp_send_cb_t send_cbfn;
233 void *send_cbarg; 250 void *send_cbarg;
234 void *bfa_fcxp; 251 void *bfa_fcxp;
235 struct completion comp; 252 struct completion comp;
236}; 253};
237 254
@@ -244,34 +261,48 @@ struct bfad_hal_comp {
244 * Macro to obtain the immediate lower power 261 * Macro to obtain the immediate lower power
245 * of two for the integer. 262 * of two for the integer.
246 */ 263 */
247#define nextLowerInt(x) \ 264#define nextLowerInt(x) \
248do { \ 265do { \
249 int j; \ 266 int i; \
250 (*x)--; \ 267 (*x)--; \
251 for (j = 1; j < (sizeof(int) * 8); j <<= 1) \ 268 for (i = 1; i < (sizeof(int)*8); i <<= 1) \
252 (*x) = (*x) | (*x) >> j; \ 269 (*x) = (*x) | (*x) >> i; \
253 (*x)++; \ 270 (*x)++; \
254 (*x) = (*x) >> 1; \ 271 (*x) = (*x) >> 1; \
255} while (0) 272} while (0)
256 273
257 274
258bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 275#define list_remove_head(list, entry, type, member) \
259 struct bfa_port_cfg_s *port_cfg, struct device *dev); 276do { \
260bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 277 entry = NULL; \
261 struct bfa_port_cfg_s *port_cfg); 278 if (!list_empty(list)) { \
262bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); 279 entry = list_entry((list)->next, type, member); \
263bfa_status_t bfad_drv_init(struct bfad_s *bfad); 280 list_del_init(&entry->member); \
264bfa_status_t bfad_start_ops(struct bfad_s *bfad); 281 } \
265void bfad_drv_start(struct bfad_s *bfad); 282} while (0)
266void bfad_uncfg_pport(struct bfad_s *bfad);
267void bfad_drv_stop(struct bfad_s *bfad);
268void bfad_remove_intr(struct bfad_s *bfad);
269void bfad_hal_mem_release(struct bfad_s *bfad);
270void bfad_hcb_comp(void *arg, bfa_status_t status);
271
272int bfad_setup_intr(struct bfad_s *bfad);
273void bfad_remove_intr(struct bfad_s *bfad);
274 283
284#define list_get_first(list, type, member) \
285((list_empty(list)) ? NULL : \
286 list_entry((list)->next, type, member))
287
288bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
289 struct bfa_lport_cfg_s *port_cfg,
290 struct device *dev);
291bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
292 struct bfa_lport_cfg_s *port_cfg);
293bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
294bfa_status_t bfad_drv_init(struct bfad_s *bfad);
295bfa_status_t bfad_start_ops(struct bfad_s *bfad);
296void bfad_drv_start(struct bfad_s *bfad);
297void bfad_uncfg_pport(struct bfad_s *bfad);
298void bfad_stop(struct bfad_s *bfad);
299void bfad_fcs_stop(struct bfad_s *bfad);
300void bfad_remove_intr(struct bfad_s *bfad);
301void bfad_hal_mem_release(struct bfad_s *bfad);
302void bfad_hcb_comp(void *arg, bfa_status_t status);
303
304int bfad_setup_intr(struct bfad_s *bfad);
305void bfad_remove_intr(struct bfad_s *bfad);
275void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg); 306void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
276bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad); 307bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
277void bfad_bfa_tmo(unsigned long data); 308void bfad_bfa_tmo(unsigned long data);
@@ -280,9 +311,6 @@ int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
280void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); 311void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
281void bfad_fcs_port_cfg(struct bfad_s *bfad); 312void bfad_fcs_port_cfg(struct bfad_s *bfad);
282void bfad_drv_uninit(struct bfad_s *bfad); 313void bfad_drv_uninit(struct bfad_s *bfad);
283void bfad_drv_log_level_set(struct bfad_s *bfad);
284bfa_status_t bfad_fc4_module_init(void);
285void bfad_fc4_module_exit(void);
286int bfad_worker(void *ptr); 314int bfad_worker(void *ptr);
287void bfad_debugfs_init(struct bfad_port_s *port); 315void bfad_debugfs_init(struct bfad_port_s *port);
288void bfad_debugfs_exit(struct bfad_port_s *port); 316void bfad_debugfs_exit(struct bfad_port_s *port);
@@ -294,10 +322,30 @@ int bfad_os_get_linkup_delay(struct bfad_s *bfad);
294int bfad_install_msix_handler(struct bfad_s *bfad); 322int bfad_install_msix_handler(struct bfad_s *bfad);
295 323
296extern struct idr bfad_im_port_index; 324extern struct idr bfad_im_port_index;
325extern struct pci_device_id bfad_id_table[];
297extern struct list_head bfad_list; 326extern struct list_head bfad_list;
298extern int bfa_lun_queue_depth; 327extern char *os_name;
299extern int bfad_supported_fc4s; 328extern char *os_patch;
300extern int bfa_linkup_delay; 329extern char *host_name;
330extern int num_rports;
331extern int num_ios;
332extern int num_tms;
333extern int num_fcxps;
334extern int num_ufbufs;
335extern int reqq_size;
336extern int rspq_size;
337extern int num_sgpgs;
338extern int rport_del_timeout;
339extern int bfa_lun_queue_depth;
340extern int bfa_io_max_sge;
341extern int log_level;
342extern int ioc_auto_recover;
343extern int bfa_linkup_delay;
344extern int msix_disable_cb;
345extern int msix_disable_ct;
346extern int fdmi_enable;
347extern int supported_fc4s;
348extern int pcie_max_read_reqsz;
301extern int bfa_debugfs_enable; 349extern int bfa_debugfs_enable;
302extern struct mutex bfad_mutex; 350extern struct mutex bfad_mutex;
303 351
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
deleted file mode 100644
index 1baca1a12085..000000000000
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_fwimg.c Linux driver PCI interface module.
20 */
21#include <bfa_os_inc.h>
22#include <bfad_drv.h>
23#include <bfad_im_compat.h>
24#include <defs/bfa_defs_version.h>
25#include <linux/errno.h>
26#include <linux/sched.h>
27#include <linux/init.h>
28#include <linux/fs.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31#include <linux/pci.h>
32#include <linux/firmware.h>
33#include <bfa_fwimg_priv.h>
34#include <bfa.h>
35
36u32 bfi_image_ct_fc_size;
37u32 bfi_image_ct_cna_size;
38u32 bfi_image_cb_fc_size;
39u32 *bfi_image_ct_fc;
40u32 *bfi_image_ct_cna;
41u32 *bfi_image_cb_fc;
42
43
44#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
45#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
46#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
47MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
48MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
49MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
50
51u32 *
52bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
53 u32 *bfi_image_size, char *fw_name)
54{
55 const struct firmware *fw;
56
57 if (request_firmware(&fw, fw_name, &pdev->dev)) {
58 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
59 goto error;
60 }
61
62 *bfi_image = vmalloc(fw->size);
63 if (NULL == *bfi_image) {
64 printk(KERN_ALERT "Fail to allocate buffer for fw image "
65 "size=%x!\n", (u32) fw->size);
66 goto error;
67 }
68
69 memcpy(*bfi_image, fw->data, fw->size);
70 *bfi_image_size = fw->size/sizeof(u32);
71
72 return *bfi_image;
73
74error:
75 return NULL;
76}
77
78u32 *
79bfad_get_firmware_buf(struct pci_dev *pdev)
80{
81 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
82 if (bfi_image_ct_fc_size == 0)
83 bfad_read_firmware(pdev, &bfi_image_ct_fc,
84 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
85 return bfi_image_ct_fc;
86 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
87 if (bfi_image_ct_cna_size == 0)
88 bfad_read_firmware(pdev, &bfi_image_ct_cna,
89 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
90 return bfi_image_ct_cna;
91 } else {
92 if (bfi_image_cb_fc_size == 0)
93 bfad_read_firmware(pdev, &bfi_image_cb_fc,
94 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
95 return bfi_image_cb_fc;
96 }
97}
98
99u32 *
100bfi_image_ct_fc_get_chunk(u32 off)
101{ return (u32 *)(bfi_image_ct_fc + off); }
102
103u32 *
104bfi_image_ct_cna_get_chunk(u32 off)
105{ return (u32 *)(bfi_image_ct_cna + off); }
106
107u32 *
108bfi_image_cb_fc_get_chunk(u32 off)
109{ return (u32 *)(bfi_image_cb_fc + off); }
110
111uint32_t *
112bfi_image_get_chunk(int type, uint32_t off)
113{
114 switch (type) {
115 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break;
116 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break;
117 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break;
118 default: return 0; break;
119 }
120}
121
122uint32_t
123bfi_image_get_size(int type)
124{
125 switch (type) {
126 case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break;
127 case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break;
128 case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break;
129 default: return 0; break;
130 }
131}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 6ef87f6fcdbb..d950ee44016e 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -19,12 +19,10 @@
19 * bfad_im.c Linux driver IM module. 19 * bfad_im.c Linux driver IM module.
20 */ 20 */
21 21
22#include <linux/slab.h>
23#include "bfad_drv.h" 22#include "bfad_drv.h"
24#include "bfad_im.h" 23#include "bfad_im.h"
25#include "bfad_trcmod.h" 24#include "bfa_cb_ioim.h"
26#include "bfa_cb_ioim_macros.h" 25#include "bfa_fcs.h"
27#include <fcb/bfa_fcb_fcpim.h>
28 26
29BFA_TRC_FILE(LDRV, IM); 27BFA_TRC_FILE(LDRV, IM);
30 28
@@ -33,8 +31,10 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
33struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 31struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
34static void bfad_im_itnim_work_handler(struct work_struct *work); 32static void bfad_im_itnim_work_handler(struct work_struct *work);
35static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, 33static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
36 void (*done)(struct scsi_cmnd *)); 34 void (*done)(struct scsi_cmnd *));
37static int bfad_im_slave_alloc(struct scsi_device *sdev); 35static int bfad_im_slave_alloc(struct scsi_device *sdev);
36static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
37 struct bfad_itnim_s *itnim);
38 38
39void 39void
40bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, 40bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
@@ -58,6 +58,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
58 sns_len = SCSI_SENSE_BUFFERSIZE; 58 sns_len = SCSI_SENSE_BUFFERSIZE;
59 memcpy(cmnd->sense_buffer, sns_info, sns_len); 59 memcpy(cmnd->sense_buffer, sns_info, sns_len);
60 } 60 }
61
61 if (residue > 0) { 62 if (residue > 0) {
62 bfa_trc(bfad, residue); 63 bfa_trc(bfad, residue);
63 scsi_set_resid(cmnd, residue); 64 scsi_set_resid(cmnd, residue);
@@ -76,7 +77,8 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
76 case BFI_IOIM_STS_TIMEDOUT: 77 case BFI_IOIM_STS_TIMEDOUT:
77 case BFI_IOIM_STS_PATHTOV: 78 case BFI_IOIM_STS_PATHTOV:
78 default: 79 default:
79 cmnd->result = ScsiResult(DID_ERROR, 0); 80 host_status = DID_ERROR;
81 cmnd->result = ScsiResult(host_status, 0);
80 } 82 }
81 83
82 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ 84 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -162,11 +164,6 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
162 wake_up(wq); 164 wake_up(wq);
163} 165}
164 166
165void
166bfa_cb_ioim_resfree(void *drv)
167{
168}
169
170/** 167/**
171 * Scsi_Host_template SCSI host template 168 * Scsi_Host_template SCSI host template
172 */ 169 */
@@ -179,15 +176,23 @@ bfad_im_info(struct Scsi_Host *shost)
179 static char bfa_buf[256]; 176 static char bfa_buf[256];
180 struct bfad_im_port_s *im_port = 177 struct bfad_im_port_s *im_port =
181 (struct bfad_im_port_s *) shost->hostdata[0]; 178 (struct bfad_im_port_s *) shost->hostdata[0];
182 struct bfad_s *bfad = im_port->bfad; 179 struct bfad_s *bfad = im_port->bfad;
180 struct bfa_s *bfa = &bfad->bfa;
181 struct bfa_ioc_s *ioc = &bfa->ioc;
183 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 182 char model[BFA_ADAPTER_MODEL_NAME_LEN];
184 183
185 bfa_get_adapter_model(&bfad->bfa, model); 184 bfa_get_adapter_model(bfa, model);
186 185
187 memset(bfa_buf, 0, sizeof(bfa_buf)); 186 memset(bfa_buf, 0, sizeof(bfa_buf));
188 snprintf(bfa_buf, sizeof(bfa_buf), 187 if (ioc->ctdev)
189 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", 188 snprintf(bfa_buf, sizeof(bfa_buf),
189 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
190 model, bfad->pci_name, BFAD_DRIVER_VERSION);
191 else
192 snprintf(bfa_buf, sizeof(bfa_buf),
193 "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
190 model, bfad->pci_name, BFAD_DRIVER_VERSION); 194 model, bfad->pci_name, BFAD_DRIVER_VERSION);
195
191 return bfa_buf; 196 return bfa_buf;
192} 197}
193 198
@@ -221,9 +226,9 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
221 } 226 }
222 227
223 bfa_trc(bfad, hal_io->iotag); 228 bfa_trc(bfad, hal_io->iotag);
224 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT, 229 BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n",
225 im_port->shost->host_no, cmnd, hal_io->iotag); 230 im_port->shost->host_no, cmnd, hal_io->iotag);
226 bfa_ioim_abort(hal_io); 231 (void) bfa_ioim_abort(hal_io);
227 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 232 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
228 233
229 /* Need to wait until the command get aborted */ 234 /* Need to wait until the command get aborted */
@@ -237,7 +242,8 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
237 242
238 cmnd->scsi_done(cmnd); 243 cmnd->scsi_done(cmnd);
239 bfa_trc(bfad, hal_io->iotag); 244 bfa_trc(bfad, hal_io->iotag);
240 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP, 245 BFA_LOG(KERN_INFO, bfad, log_level,
246 "scsi%d: complete abort 0x%p iotag 0x%x\n",
241 im_port->shost->host_no, cmnd, hal_io->iotag); 247 im_port->shost->host_no, cmnd, hal_io->iotag);
242 return SUCCESS; 248 return SUCCESS;
243out: 249out:
@@ -255,8 +261,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
255 261
256 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 262 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
257 if (!tskim) { 263 if (!tskim) {
258 BFA_DEV_PRINTF(bfad, BFA_ERR, 264 BFA_LOG(KERN_ERR, bfad, log_level,
259 "target reset, fail to allocate tskim\n"); 265 "target reset, fail to allocate tskim\n");
260 rc = BFA_STATUS_FAILED; 266 rc = BFA_STATUS_FAILED;
261 goto out; 267 goto out;
262 } 268 }
@@ -306,7 +312,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
306 312
307 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 313 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
308 if (!tskim) { 314 if (!tskim) {
309 BFA_DEV_PRINTF(bfad, BFA_ERR, 315 BFA_LOG(KERN_ERR, bfad, log_level,
310 "LUN reset, fail to allocate tskim"); 316 "LUN reset, fail to allocate tskim");
311 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 317 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
312 rc = FAILED; 318 rc = FAILED;
@@ -331,8 +337,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
331 337
332 task_status = cmnd->SCp.Status >> 1; 338 task_status = cmnd->SCp.Status >> 1;
333 if (task_status != BFI_TSKIM_STS_OK) { 339 if (task_status != BFI_TSKIM_STS_OK) {
334 BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n", 340 BFA_LOG(KERN_ERR, bfad, log_level,
335 task_status); 341 "LUN reset failure, status: %d\n", task_status);
336 rc = FAILED; 342 rc = FAILED;
337 } 343 }
338 344
@@ -375,7 +381,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
375 381
376 task_status = cmnd->SCp.Status >> 1; 382 task_status = cmnd->SCp.Status >> 1;
377 if (task_status != BFI_TSKIM_STS_OK) { 383 if (task_status != BFI_TSKIM_STS_OK) {
378 BFA_DEV_PRINTF(bfad, BFA_ERR, 384 BFA_LOG(KERN_ERR, bfad, log_level,
379 "target reset failure," 385 "target reset failure,"
380 " status: %d\n", task_status); 386 " status: %d\n", task_status);
381 err_cnt++; 387 err_cnt++;
@@ -438,6 +444,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
438 wwn_t wwpn; 444 wwn_t wwpn;
439 u32 fcid; 445 u32 fcid;
440 char wwpn_str[32], fcid_str[16]; 446 char wwpn_str[32], fcid_str[16];
447 struct bfad_im_s *im = itnim_drv->im;
441 448
442 /* online to free state transtion should not happen */ 449 /* online to free state transtion should not happen */
443 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE); 450 bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
@@ -454,10 +461,14 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
454 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); 461 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
455 wwn2str(wwpn_str, wwpn); 462 wwn2str(wwpn_str, wwpn);
456 fcid2str(fcid_str, fcid); 463 fcid2str(fcid_str, fcid);
457 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE, 464 BFA_LOG(KERN_INFO, bfad, log_level,
465 "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
458 port->im_port->shost->host_no, 466 port->im_port->shost->host_no,
459 fcid_str, wwpn_str); 467 fcid_str, wwpn_str);
460 bfad_os_itnim_process(itnim_drv); 468
469 /* ITNIM processing */
470 if (itnim_drv->queue_work)
471 queue_work(im->drv_workq, &itnim_drv->itnim_work);
461} 472}
462 473
463/** 474/**
@@ -468,13 +479,17 @@ void
468bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) 479bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
469{ 480{
470 struct bfad_port_s *port; 481 struct bfad_port_s *port;
482 struct bfad_im_s *im = itnim_drv->im;
471 483
472 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); 484 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
473 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); 485 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
474 itnim_drv->state = ITNIM_STATE_ONLINE; 486 itnim_drv->state = ITNIM_STATE_ONLINE;
475 itnim_drv->queue_work = 1; 487 itnim_drv->queue_work = 1;
476 itnim_drv->im_port = port->im_port; 488 itnim_drv->im_port = port->im_port;
477 bfad_os_itnim_process(itnim_drv); 489
490 /* ITNIM processing */
491 if (itnim_drv->queue_work)
492 queue_work(im->drv_workq, &itnim_drv->itnim_work);
478} 493}
479 494
480/** 495/**
@@ -486,6 +501,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
486{ 501{
487 struct bfad_port_s *port; 502 struct bfad_port_s *port;
488 struct bfad_s *bfad; 503 struct bfad_s *bfad;
504 struct bfad_im_s *im = itnim_drv->im;
489 505
490 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); 506 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
491 bfad = port->bfad; 507 bfad = port->bfad;
@@ -497,16 +513,10 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
497 itnim_drv->im_port = port->im_port; 513 itnim_drv->im_port = port->im_port;
498 itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING; 514 itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
499 itnim_drv->queue_work = 1; 515 itnim_drv->queue_work = 1;
500 bfad_os_itnim_process(itnim_drv);
501}
502 516
503/** 517 /* ITNIM processing */
504 * BFA FCS itnim timeout callback. 518 if (itnim_drv->queue_work)
505 * Context: Interrupt. bfad_lock is held 519 queue_work(im->drv_workq, &itnim_drv->itnim_work);
506 */
507void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
508{
509 itnim->state = ITNIM_STATE_TIMEOUT;
510} 520}
511 521
512/** 522/**
@@ -514,7 +524,7 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
514 */ 524 */
515int 525int
516bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, 526bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
517 struct device *dev) 527 struct device *dev)
518{ 528{
519 int error = 1; 529 int error = 1;
520 530
@@ -580,7 +590,7 @@ void
580bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 590bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
581{ 591{
582 bfa_trc(bfad, bfad->inst_no); 592 bfa_trc(bfad, bfad->inst_no);
583 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE, 593 BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n",
584 im_port->shost->host_no); 594 im_port->shost->host_no);
585 595
586 fc_remove_host(im_port->shost); 596 fc_remove_host(im_port->shost);
@@ -598,14 +608,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
598{ 608{
599 struct bfad_im_port_s *im_port = 609 struct bfad_im_port_s *im_port =
600 container_of(work, struct bfad_im_port_s, port_delete_work); 610 container_of(work, struct bfad_im_port_s, port_delete_work);
601 struct bfad_s *bfad = im_port->bfad;
602 611
603 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 612 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
604 im_port->flags |= BFAD_PORT_DELETE; 613 im_port->flags |= BFAD_PORT_DELETE;
605 fc_vport_terminate(im_port->fc_vport); 614 fc_vport_terminate(im_port->fc_vport);
606 atomic_dec(&bfad->wq_reqcnt);
607 } 615 }
608
609} 616}
610 617
611bfa_status_t 618bfa_status_t
@@ -636,11 +643,8 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
636{ 643{
637 struct bfad_im_port_s *im_port = port->im_port; 644 struct bfad_im_port_s *im_port = port->im_port;
638 645
639 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 646 queue_work(bfad->im->drv_workq,
640 atomic_inc(&bfad->wq_reqcnt);
641 queue_work(bfad->im->drv_workq,
642 &im_port->port_delete_work); 647 &im_port->port_delete_work);
643 }
644} 648}
645 649
646void 650void
@@ -663,16 +667,6 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
663 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 667 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
664} 668}
665 669
666void
667bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
668{
669}
670
671void
672bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
673{
674}
675
676bfa_status_t 670bfa_status_t
677bfad_im_probe(struct bfad_s *bfad) 671bfad_im_probe(struct bfad_s *bfad)
678{ 672{
@@ -701,27 +695,12 @@ void
701bfad_im_probe_undo(struct bfad_s *bfad) 695bfad_im_probe_undo(struct bfad_s *bfad)
702{ 696{
703 if (bfad->im) { 697 if (bfad->im) {
704 while (atomic_read(&bfad->wq_reqcnt)) {
705 printk(KERN_INFO "bfa %s: waiting workq processing,"
706 " wq_reqcnt:%x\n", bfad->pci_name,
707 atomic_read(&bfad->wq_reqcnt));
708 schedule_timeout_uninterruptible(HZ);
709 }
710 bfad_os_destroy_workq(bfad->im); 698 bfad_os_destroy_workq(bfad->im);
711 kfree(bfad->im); 699 kfree(bfad->im);
712 bfad->im = NULL; 700 bfad->im = NULL;
713 } 701 }
714} 702}
715 703
716/**
717 * Call back function to handle IO redirection state change
718 */
719void
720bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
721{
722 /* Do nothing */
723}
724
725struct Scsi_Host * 704struct Scsi_Host *
726bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) 705bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
727{ 706{
@@ -751,6 +730,7 @@ void
751bfad_os_destroy_workq(struct bfad_im_s *im) 730bfad_os_destroy_workq(struct bfad_im_s *im)
752{ 731{
753 if (im && im->drv_workq) { 732 if (im && im->drv_workq) {
733 flush_workqueue(im->drv_workq);
754 destroy_workqueue(im->drv_workq); 734 destroy_workqueue(im->drv_workq);
755 im->drv_workq = NULL; 735 im->drv_workq = NULL;
756 } 736 }
@@ -762,7 +742,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
762 struct bfad_im_s *im = bfad->im; 742 struct bfad_im_s *im = bfad->im;
763 743
764 bfa_trc(bfad, 0); 744 bfa_trc(bfad, 0);
765 snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d", 745 snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
766 bfad->inst_no); 746 bfad->inst_no);
767 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); 747 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
768 if (!im->drv_workq) 748 if (!im->drv_workq)
@@ -832,12 +812,6 @@ struct scsi_host_template bfad_im_vport_template = {
832 .max_sectors = 0xFFFF, 812 .max_sectors = 0xFFFF,
833}; 813};
834 814
835void
836bfad_im_probe_post(struct bfad_im_s *im)
837{
838 flush_workqueue(im->drv_workq);
839}
840
841bfa_status_t 815bfa_status_t
842bfad_im_module_init(void) 816bfad_im_module_init(void)
843{ 817{
@@ -861,20 +835,12 @@ bfad_im_module_exit(void)
861{ 835{
862 if (bfad_im_scsi_transport_template) 836 if (bfad_im_scsi_transport_template)
863 fc_release_transport(bfad_im_scsi_transport_template); 837 fc_release_transport(bfad_im_scsi_transport_template);
838
864 if (bfad_im_scsi_vport_transport_template) 839 if (bfad_im_scsi_vport_transport_template)
865 fc_release_transport(bfad_im_scsi_vport_transport_template); 840 fc_release_transport(bfad_im_scsi_vport_transport_template);
866} 841}
867 842
868void 843void
869bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
870{
871 struct bfad_im_s *im = itnim_drv->im;
872
873 if (itnim_drv->queue_work)
874 queue_work(im->drv_workq, &itnim_drv->itnim_work);
875}
876
877void
878bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 844bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
879{ 845{
880 struct scsi_device *tmp_sdev; 846 struct scsi_device *tmp_sdev;
@@ -916,9 +882,6 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
916 } 882 }
917} 883}
918 884
919
920
921
922struct bfad_itnim_s * 885struct bfad_itnim_s *
923bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) 886bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
924{ 887{
@@ -949,44 +912,64 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
949 return 0; 912 return 0;
950} 913}
951 914
915static u32
916bfad_im_supported_speeds(struct bfa_s *bfa)
917{
918 struct bfa_ioc_attr_s ioc_attr;
919 u32 supported_speed = 0;
920
921 bfa_get_attr(bfa, &ioc_attr);
922 if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
923 if (ioc_attr.adapter_attr.is_mezz) {
924 supported_speed |= FC_PORTSPEED_8GBIT |
925 FC_PORTSPEED_4GBIT |
926 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
927 } else {
928 supported_speed |= FC_PORTSPEED_8GBIT |
929 FC_PORTSPEED_4GBIT |
930 FC_PORTSPEED_2GBIT;
931 }
932 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
933 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
934 FC_PORTSPEED_1GBIT;
935 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
936 supported_speed |= FC_PORTSPEED_10GBIT;
937 }
938 return supported_speed;
939}
940
952void 941void
953bfad_os_fc_host_init(struct bfad_im_port_s *im_port) 942bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
954{ 943{
955 struct Scsi_Host *host = im_port->shost; 944 struct Scsi_Host *host = im_port->shost;
956 struct bfad_s *bfad = im_port->bfad; 945 struct bfad_s *bfad = im_port->bfad;
957 struct bfad_port_s *port = im_port->port; 946 struct bfad_port_s *port = im_port->port;
958 struct bfa_pport_attr_s pattr; 947 struct bfa_port_attr_s pattr;
959 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 948 struct bfa_lport_attr_s port_attr;
960 char fw_ver[BFA_VERSION_LEN]; 949 char symname[BFA_SYMNAME_MAXLEN];
961 950
962 fc_host_node_name(host) = 951 fc_host_node_name(host) =
963 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); 952 bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
964 fc_host_port_name(host) = 953 fc_host_port_name(host) =
965 bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port))); 954 bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
966 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); 955 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
967 956
968 fc_host_supported_classes(host) = FC_COS_CLASS3; 957 fc_host_supported_classes(host) = FC_COS_CLASS3;
969 958
970 memset(fc_host_supported_fc4s(host), 0, 959 memset(fc_host_supported_fc4s(host), 0,
971 sizeof(fc_host_supported_fc4s(host))); 960 sizeof(fc_host_supported_fc4s(host)));
972 if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM)) 961 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
973 /* For FCP type 0x08 */ 962 /* For FCP type 0x08 */
974 fc_host_supported_fc4s(host)[2] = 1; 963 fc_host_supported_fc4s(host)[2] = 1;
975 if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
976 /* For LLC/SNAP type 0x05 */
977 fc_host_supported_fc4s(host)[3] = 0x20;
978 /* For fibre channel services type 0x20 */ 964 /* For fibre channel services type 0x20 */
979 fc_host_supported_fc4s(host)[7] = 1; 965 fc_host_supported_fc4s(host)[7] = 1;
980 966
981 bfa_get_adapter_model(&bfad->bfa, model); 967 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
982 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 968 strncpy(symname, port_attr.port_cfg.sym_name.symname,
983 sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", 969 BFA_SYMNAME_MAXLEN);
984 model, fw_ver, BFAD_DRIVER_VERSION); 970 sprintf(fc_host_symbolic_name(host), "%s", symname);
985 971
986 fc_host_supported_speeds(host) = 0; 972 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
987 fc_host_supported_speeds(host) |=
988 FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
989 FC_PORTSPEED_1GBIT;
990 973
991 bfa_fcport_get_attr(&bfad->bfa, &pattr); 974 bfa_fcport_get_attr(&bfad->bfa, &pattr);
992 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; 975 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
@@ -1065,7 +1048,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1065 fcid2str(fcid_str, fcid); 1048 fcid2str(fcid_str, fcid);
1066 list_add_tail(&itnim->list_entry, 1049 list_add_tail(&itnim->list_entry,
1067 &im_port->itnim_mapped_list); 1050 &im_port->itnim_mapped_list);
1068 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE, 1051 BFA_LOG(KERN_INFO, bfad, log_level,
1052 "ITNIM ONLINE Target: %d:0:%d "
1053 "FCID: %s WWPN: %s\n",
1069 im_port->shost->host_no, 1054 im_port->shost->host_no,
1070 itnim->scsi_tgt_id, 1055 itnim->scsi_tgt_id,
1071 fcid_str, wwpn_str); 1056 fcid_str, wwpn_str);
@@ -1096,7 +1081,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1096 wwn2str(wwpn_str, wwpn); 1081 wwn2str(wwpn_str, wwpn);
1097 fcid2str(fcid_str, fcid); 1082 fcid2str(fcid_str, fcid);
1098 list_del(&itnim->list_entry); 1083 list_del(&itnim->list_entry);
1099 bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE, 1084 BFA_LOG(KERN_INFO, bfad, log_level,
1085 "ITNIM OFFLINE Target: %d:0:%d "
1086 "FCID: %s WWPN: %s\n",
1100 im_port->shost->host_no, 1087 im_port->shost->host_no,
1101 itnim->scsi_tgt_id, 1088 itnim->scsi_tgt_id,
1102 fcid_str, wwpn_str); 1089 fcid_str, wwpn_str);
@@ -1142,7 +1129,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1142 struct bfa_ioim_s *hal_io; 1129 struct bfa_ioim_s *hal_io;
1143 unsigned long flags; 1130 unsigned long flags;
1144 int rc; 1131 int rc;
1145 s16 sg_cnt = 0; 1132 int sg_cnt = 0;
1146 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1133 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1147 1134
1148 rc = fc_remote_port_chkready(rport); 1135 rc = fc_remote_port_chkready(rport);
@@ -1153,7 +1140,6 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1153 } 1140 }
1154 1141
1155 sg_cnt = scsi_dma_map(cmnd); 1142 sg_cnt = scsi_dma_map(cmnd);
1156
1157 if (sg_cnt < 0) 1143 if (sg_cnt < 0)
1158 return SCSI_MLQUEUE_HOST_BUSY; 1144 return SCSI_MLQUEUE_HOST_BUSY;
1159 1145
@@ -1168,6 +1154,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1168 goto out_fail_cmd; 1154 goto out_fail_cmd;
1169 } 1155 }
1170 1156
1157
1171 itnim = itnim_data->itnim; 1158 itnim = itnim_data->itnim;
1172 if (!itnim) { 1159 if (!itnim) {
1173 cmnd->result = ScsiResult(DID_IMM_RETRY, 0); 1160 cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
@@ -1206,47 +1193,49 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
1206 int rport_delay = 10; 1193 int rport_delay = 10;
1207 1194
1208 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) 1195 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
1209 && i < bfa_linkup_delay; i++) 1196 && i < bfa_linkup_delay; i++) {
1210 schedule_timeout_uninterruptible(HZ); 1197 set_current_state(TASK_UNINTERRUPTIBLE);
1198 schedule_timeout(HZ);
1199 }
1211 1200
1212 if (bfad->bfad_flags & BFAD_PORT_ONLINE) { 1201 if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
1213 rport_delay = rport_delay < bfa_linkup_delay ? 1202 rport_delay = rport_delay < bfa_linkup_delay ?
1214 rport_delay : bfa_linkup_delay; 1203 rport_delay : bfa_linkup_delay;
1215 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) 1204 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
1216 && i < rport_delay; i++) 1205 && i < rport_delay; i++) {
1217 schedule_timeout_uninterruptible(HZ); 1206 set_current_state(TASK_UNINTERRUPTIBLE);
1207 schedule_timeout(HZ);
1208 }
1218 1209
1219 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) 1210 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
1220 schedule_timeout_uninterruptible(rport_delay * HZ); 1211 set_current_state(TASK_UNINTERRUPTIBLE);
1212 schedule_timeout(rport_delay * HZ);
1213 }
1221 } 1214 }
1222} 1215}
1223 1216
1224int 1217int
1225bfad_os_get_linkup_delay(struct bfad_s *bfad) 1218bfad_os_get_linkup_delay(struct bfad_s *bfad)
1226{ 1219{
1227 1220 u8 nwwns = 0;
1228 u8 nwwns = 0; 1221 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1229 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; 1222 int linkup_delay;
1230 int ldelay;
1231 1223
1232 /* 1224 /*
1233 * Querying for the boot target port wwns 1225 * Querying for the boot target port wwns
1234 * -- read from boot information in flash. 1226 * -- read from boot information in flash.
1235 * If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30 1227 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
1236 * else => local boot machine set bfa_linkup_delay = 10 1228 * else => local boot machine set linkup_delay = 0
1237 */ 1229 */
1238 1230
1239 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); 1231 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
1240 1232
1241 if (nwwns > 0) { 1233 if (nwwns > 0)
1242 /* If boot over SAN; linkup_delay = 30sec */ 1234 /* If Boot over SAN set linkup_delay = 30sec */
1243 ldelay = 30; 1235 linkup_delay = 30;
1244 } else { 1236 else
1245 /* If local boot; linkup_delay = 10sec */ 1237 /* If local boot; no linkup_delay */
1246 ldelay = 0; 1238 linkup_delay = 0;
1247 }
1248 1239
1249 return ldelay; 1240 return linkup_delay;
1250} 1241}
1251
1252
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 973cab4d09c7..b038c0e08921 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
@@ -18,20 +18,20 @@
18#ifndef __BFAD_IM_H__ 18#ifndef __BFAD_IM_H__
19#define __BFAD_IM_H__ 19#define __BFAD_IM_H__
20 20
21#include "fcs/bfa_fcs_fcpim.h" 21#include "bfa_fcs.h"
22#include "bfad_im_compat.h"
23 22
24#define FCPI_NAME " fcpim" 23#define FCPI_NAME " fcpim"
25 24
25#ifndef KOBJ_NAME_LEN
26#define KOBJ_NAME_LEN 20
27#endif
28
26bfa_status_t bfad_im_module_init(void); 29bfa_status_t bfad_im_module_init(void);
27void bfad_im_module_exit(void); 30void bfad_im_module_exit(void);
28bfa_status_t bfad_im_probe(struct bfad_s *bfad); 31bfa_status_t bfad_im_probe(struct bfad_s *bfad);
29void bfad_im_probe_undo(struct bfad_s *bfad); 32void bfad_im_probe_undo(struct bfad_s *bfad);
30void bfad_im_probe_post(struct bfad_im_s *im);
31bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port); 33bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
32void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port); 34void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
33void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
34void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
35void bfad_im_port_clean(struct bfad_im_port_s *im_port); 35void bfad_im_port_clean(struct bfad_im_port_s *im_port);
36int bfad_im_scsi_host_alloc(struct bfad_s *bfad, 36int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port, struct device *dev); 37 struct bfad_im_port_s *im_port, struct device *dev);
@@ -44,14 +44,10 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad,
44#define BFAD_LUN_RESET_TMO 60 44#define BFAD_LUN_RESET_TMO 60
45#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 45#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
46#define BFA_QUEUE_FULL_RAMP_UP_TIME 120 46#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
47#define BFAD_KOBJ_NAME_LEN 20
48 47
49/* 48/*
50 * itnim flags 49 * itnim flags
51 */ 50 */
52#define ITNIM_MAPPED 0x00000001
53
54#define SCSI_TASK_MGMT 0x00000001
55#define IO_DONE_BIT 0 51#define IO_DONE_BIT 0
56 52
57struct bfad_itnim_data_s { 53struct bfad_itnim_data_s {
@@ -64,7 +60,7 @@ struct bfad_im_port_s {
64 struct work_struct port_delete_work; 60 struct work_struct port_delete_work;
65 int idr_id; 61 int idr_id;
66 u16 cur_scsi_id; 62 u16 cur_scsi_id;
67 u16 flags; 63 u16 flags;
68 struct list_head binding_list; 64 struct list_head binding_list;
69 struct Scsi_Host *shost; 65 struct Scsi_Host *shost;
70 struct list_head itnim_mapped_list; 66 struct list_head itnim_mapped_list;
@@ -118,14 +114,13 @@ struct bfad_fcp_binding {
118struct bfad_im_s { 114struct bfad_im_s {
119 struct bfad_s *bfad; 115 struct bfad_s *bfad;
120 struct workqueue_struct *drv_workq; 116 struct workqueue_struct *drv_workq;
121 char drv_workq_name[BFAD_KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
122}; 118};
123 119
124struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, 120struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
125 struct bfad_s *); 121 struct bfad_s *);
126bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); 122bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
127void bfad_os_destroy_workq(struct bfad_im_s *im); 123void bfad_os_destroy_workq(struct bfad_im_s *im);
128void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
129void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); 124void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
130void bfad_os_scsi_host_free(struct bfad_s *bfad, 125void bfad_os_scsi_host_free(struct bfad_s *bfad,
131 struct bfad_im_port_s *im_port); 126 struct bfad_im_port_s *im_port);
@@ -133,11 +128,6 @@ void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
133 struct scsi_device *sdev); 128 struct scsi_device *sdev);
134void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); 129void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
135struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); 130struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
136int bfad_os_scsi_add_host(struct Scsi_Host *shost,
137 struct bfad_im_port_s *im_port, struct bfad_s *bfad);
138
139void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
140 struct bfad_itnim_s *itnim);
141 131
142extern struct scsi_host_template bfad_im_scsi_host_template; 132extern struct scsi_host_template bfad_im_scsi_host_template;
143extern struct scsi_host_template bfad_im_vport_template; 133extern struct scsi_host_template bfad_im_vport_template;
@@ -146,4 +136,34 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
146extern struct scsi_transport_template *bfad_im_scsi_transport_template; 136extern struct scsi_transport_template *bfad_im_scsi_transport_template;
147extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 137extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
148 138
139extern struct device_attribute *bfad_im_host_attrs[];
140extern struct device_attribute *bfad_im_vport_attrs[];
141
142irqreturn_t bfad_intx(int irq, void *dev_id);
143
144/* Firmware releated */
145#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
146#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
147#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
148
149u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
150u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
151 u32 *bfi_image_size, char *fw_name);
152
153static inline u32 *
154bfad_load_fwimg(struct pci_dev *pdev)
155{
156 return bfad_get_firmware_buf(pdev);
157}
158
159static inline void
160bfad_free_fwimg(void)
161{
162 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
163 vfree(bfi_image_ct_fc);
164 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
165 vfree(bfi_image_ct_cna);
166 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
167 vfree(bfi_image_cb_fc);
168}
149#endif 169#endif
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
deleted file mode 100644
index 0a122abbbe89..000000000000
--- a/drivers/scsi/bfa/bfad_im_compat.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFAD_IM_COMPAT_H__
19#define __BFAD_IM_COMPAT_H__
20
21extern struct device_attribute *bfad_im_host_attrs[];
22extern struct device_attribute *bfad_im_vport_attrs[];
23
24u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
25u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
26 u32 *bfi_image_size, char *fw_name);
27
28static inline u32 *
29bfad_load_fwimg(struct pci_dev *pdev)
30{
31 return bfad_get_firmware_buf(pdev);
32}
33
34static inline void
35bfad_free_fwimg(void)
36{
37 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
38 vfree(bfi_image_ct_fc);
39 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
40 vfree(bfi_image_ct_cna);
41 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
42 vfree(bfi_image_cb_fc);
43}
44
45#endif
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
deleted file mode 100644
index 56a351584f0c..000000000000
--- a/drivers/scsi/bfa/bfad_intr.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfad_drv.h"
19#include "bfad_trcmod.h"
20
21BFA_TRC_FILE(LDRV, INTR);
22
23/**
24 * bfa_isr BFA driver interrupt functions
25 */
26static int msix_disable_cb;
27static int msix_disable_ct;
28module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
29MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
30 " cards, default=0, Range[false:0|true:1]");
31module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
33 " cards, default=0, Range[false:0|true:1]");
34/**
35 * Line based interrupt handler.
36 */
37static irqreturn_t
38bfad_intx(int irq, void *dev_id)
39{
40 struct bfad_s *bfad = dev_id;
41 struct list_head doneq;
42 unsigned long flags;
43 bfa_boolean_t rc;
44
45 spin_lock_irqsave(&bfad->bfad_lock, flags);
46 rc = bfa_intx(&bfad->bfa);
47 if (!rc) {
48 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
49 return IRQ_NONE;
50 }
51
52 bfa_comp_deq(&bfad->bfa, &doneq);
53 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
54
55 if (!list_empty(&doneq)) {
56 bfa_comp_process(&bfad->bfa, &doneq);
57
58 spin_lock_irqsave(&bfad->bfad_lock, flags);
59 bfa_comp_free(&bfad->bfa, &doneq);
60 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
61 bfa_trc_fp(bfad, irq);
62 }
63
64 return IRQ_HANDLED;
65
66}
67
68static irqreturn_t
69bfad_msix(int irq, void *dev_id)
70{
71 struct bfad_msix_s *vec = dev_id;
72 struct bfad_s *bfad = vec->bfad;
73 struct list_head doneq;
74 unsigned long flags;
75
76 spin_lock_irqsave(&bfad->bfad_lock, flags);
77
78 bfa_msix(&bfad->bfa, vec->msix.entry);
79 bfa_comp_deq(&bfad->bfa, &doneq);
80 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
81
82 if (!list_empty(&doneq)) {
83 bfa_comp_process(&bfad->bfa, &doneq);
84
85 spin_lock_irqsave(&bfad->bfad_lock, flags);
86 bfa_comp_free(&bfad->bfa, &doneq);
87 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
88 }
89
90 return IRQ_HANDLED;
91}
92
93/**
94 * Initialize the MSIX entry table.
95 */
96static void
97bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
98 int mask, int max_bit)
99{
100 int i;
101 int match = 0x00000001;
102
103 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
104 if (mask & match) {
105 bfad->msix_tab[bfad->nvec].msix.entry = i;
106 bfad->msix_tab[bfad->nvec].bfad = bfad;
107 msix_entries[bfad->nvec].entry = i;
108 bfad->nvec++;
109 }
110
111 match <<= 1;
112 }
113
114}
115
116int
117bfad_install_msix_handler(struct bfad_s *bfad)
118{
119 int i, error = 0;
120
121 for (i = 0; i < bfad->nvec; i++) {
122 error = request_irq(bfad->msix_tab[i].msix.vector,
123 (irq_handler_t) bfad_msix, 0,
124 BFAD_DRIVER_NAME, &bfad->msix_tab[i]);
125 bfa_trc(bfad, i);
126 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
127 if (error) {
128 int j;
129
130 for (j = 0; j < i; j++)
131 free_irq(bfad->msix_tab[j].msix.vector,
132 &bfad->msix_tab[j]);
133
134 return 1;
135 }
136 }
137
138 return 0;
139}
140
141/**
142 * Setup MSIX based interrupt.
143 */
144int
145bfad_setup_intr(struct bfad_s *bfad)
146{
147 int error = 0;
148 u32 mask = 0, i, num_bit = 0, max_bit = 0;
149 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
150 struct pci_dev *pdev = bfad->pcidev;
151
152 /* Call BFA to get the msix map for this PCI function. */
153 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
154
155 /* Set up the msix entry table */
156 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
157
158 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
159 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
160
161 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
162 if (error) {
163 /*
164 * Only error number of vector is available.
165 * We don't have a mechanism to map multiple
166 * interrupts into one vector, so even if we
167 * can try to request less vectors, we don't
168 * know how to associate interrupt events to
169 * vectors. Linux doesn't dupicate vectors
170 * in the MSIX table for this case.
171 */
172
173 printk(KERN_WARNING "bfad%d: "
174 "pci_enable_msix failed (%d),"
175 " use line based.\n", bfad->inst_no, error);
176
177 goto line_based;
178 }
179
180 /* Save the vectors */
181 for (i = 0; i < bfad->nvec; i++) {
182 bfa_trc(bfad, msix_entries[i].vector);
183 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
184 }
185
186 bfa_msix_init(&bfad->bfa, bfad->nvec);
187
188 bfad->bfad_flags |= BFAD_MSIX_ON;
189
190 return error;
191 }
192
193line_based:
194 error = 0;
195 if (request_irq
196 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
197 BFAD_DRIVER_NAME, bfad) != 0) {
198 /* Enable interrupt handler failed */
199 return 1;
200 }
201
202 return error;
203}
204
205void
206bfad_remove_intr(struct bfad_s *bfad)
207{
208 int i;
209
210 if (bfad->bfad_flags & BFAD_MSIX_ON) {
211 for (i = 0; i < bfad->nvec; i++)
212 free_irq(bfad->msix_tab[i].msix.vector,
213 &bfad->msix_tab[i]);
214
215 pci_disable_msix(bfad->pcidev);
216 bfad->bfad_flags &= ~BFAD_MSIX_ON;
217 } else {
218 free_irq(bfad->pcidev->irq, bfad);
219 }
220}
221
222
diff --git a/drivers/scsi/bfa/bfad_ipfc.h b/drivers/scsi/bfa/bfad_ipfc.h
deleted file mode 100644
index 718bc5227671..000000000000
--- a/drivers/scsi/bfa/bfad_ipfc.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DRV_IPFC_H__
18#define __BFA_DRV_IPFC_H__
19
20
21#define IPFC_NAME ""
22
23#define bfad_ipfc_module_init(x) do {} while (0)
24#define bfad_ipfc_module_exit(x) do {} while (0)
25#define bfad_ipfc_probe(x) do {} while (0)
26#define bfad_ipfc_probe_undo(x) do {} while (0)
27#define bfad_ipfc_port_config(x, y) BFA_STATUS_OK
28#define bfad_ipfc_port_unconfig(x, y) do {} while (0)
29
30#define bfad_ipfc_probe_post(x) do {} while (0)
31#define bfad_ipfc_port_new(x, y, z) BFA_STATUS_OK
32#define bfad_ipfc_port_delete(x, y) do {} while (0)
33#define bfad_ipfc_port_online(x, y) do {} while (0)
34#define bfad_ipfc_port_offline(x, y) do {} while (0)
35
36#define bfad_ip_get_attr(x) BFA_STATUS_FAILED
37#define bfad_ip_reset_drv_stats(x) BFA_STATUS_FAILED
38#define bfad_ip_get_drv_stats(x, y) BFA_STATUS_FAILED
39#define bfad_ip_enable_ipfc(x, y, z) BFA_STATUS_FAILED
40
41
42#endif
diff --git a/drivers/scsi/bfa/bfad_os.c b/drivers/scsi/bfa/bfad_os.c
deleted file mode 100644
index faf47b4f1a38..000000000000
--- a/drivers/scsi/bfa/bfad_os.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_os.c Linux driver OS specific calls.
20 */
21
22#include "bfa_os_inc.h"
23#include "bfad_drv.h"
24
25void
26bfa_os_gettimeofday(struct bfa_timeval_s *tv)
27{
28 struct timeval tmp_tv;
29
30 do_gettimeofday(&tmp_tv);
31 tv->tv_sec = (u32) tmp_tv.tv_sec;
32 tv->tv_usec = (u32) tmp_tv.tv_usec;
33}
34
35void
36bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
37 const char *fmt, ...)
38{
39 va_list ap;
40 #define BFA_STRING_256 256
41 char tmp[BFA_STRING_256];
42
43 va_start(ap, fmt);
44 vsprintf(tmp, fmt, ap);
45 va_end(ap);
46
47 printk(tmp);
48}
49
50
diff --git a/drivers/scsi/bfa/bfad_tm.h b/drivers/scsi/bfa/bfad_tm.h
deleted file mode 100644
index 4901b1b7df02..000000000000
--- a/drivers/scsi/bfa/bfad_tm.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * Brocade Fibre Channel HBA Linux Target Mode Driver
20 */
21
22/**
23 * tm/dummy/bfad_tm.h BFA callback dummy header file for BFA Linux target mode PCI interface module.
24 */
25
26#ifndef __BFAD_TM_H__
27#define __BFAD_TM_H__
28
29#include <defs/bfa_defs_status.h>
30
31#define FCPT_NAME ""
32
33/*
34 * Called from base Linux driver on (De)Init events
35 */
36
37/* attach tgt template with scst */
38#define bfad_tm_module_init() do {} while (0)
39
40/* detach/release tgt template */
41#define bfad_tm_module_exit() do {} while (0)
42
43#define bfad_tm_probe(x) do {} while (0)
44#define bfad_tm_probe_undo(x) do {} while (0)
45#define bfad_tm_probe_post(x) do {} while (0)
46
47/*
48 * Called by base Linux driver but triggered by BFA FCS on config events
49 */
50#define bfad_tm_port_new(x, y) BFA_STATUS_OK
51#define bfad_tm_port_delete(x, y) do {} while (0)
52
53/*
54 * Called by base Linux driver but triggered by BFA FCS on PLOGI/O events
55 */
56#define bfad_tm_port_online(x, y) do {} while (0)
57#define bfad_tm_port_offline(x, y) do {} while (0)
58
59#endif
diff --git a/drivers/scsi/bfa/bfad_trcmod.h b/drivers/scsi/bfa/bfad_trcmod.h
deleted file mode 100644
index 2827b2acd041..000000000000
--- a/drivers/scsi/bfa/bfad_trcmod.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfad_trcmod.h Linux driver trace modules
20 */
21
22
23#ifndef __BFAD_TRCMOD_H__
24#define __BFAD_TRCMOD_H__
25
26#include <cs/bfa_trc.h>
27
28/*
29 * !!! Only append to the enums defined here to avoid any versioning
30 * !!! needed between trace utility and driver version
31 */
32enum {
33 /* 2.6 Driver */
34 BFA_TRC_LDRV_BFAD = 1,
35 BFA_TRC_LDRV_BFAD_2_6 = 2,
36 BFA_TRC_LDRV_BFAD_2_6_9 = 3,
37 BFA_TRC_LDRV_BFAD_2_6_10 = 4,
38 BFA_TRC_LDRV_INTR = 5,
39 BFA_TRC_LDRV_IOCTL = 6,
40 BFA_TRC_LDRV_OS = 7,
41 BFA_TRC_LDRV_IM = 8,
42 BFA_TRC_LDRV_IM_2_6 = 9,
43 BFA_TRC_LDRV_IM_2_6_9 = 10,
44 BFA_TRC_LDRV_IM_2_6_10 = 11,
45 BFA_TRC_LDRV_TM = 12,
46 BFA_TRC_LDRV_IPFC = 13,
47 BFA_TRC_LDRV_IM_2_4 = 14,
48 BFA_TRC_LDRV_IM_VMW = 15,
49 BFA_TRC_LDRV_IM_LT_2_6_10 = 16,
50};
51
52#endif /* __BFAD_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
new file mode 100644
index 000000000000..85f2224a5733
--- /dev/null
+++ b/drivers/scsi/bfa/bfi.h
@@ -0,0 +1,579 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_H__
19#define __BFI_H__
20
21#include "bfa_defs.h"
22#include "bfa_defs_svc.h"
23
24#pragma pack(1)
25
26/**
27 * BFI FW image type
28 */
29#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37
38/**
39 * Msg header common to all msgs
40 */
41struct bfi_mhdr_s {
42 u8 msg_class; /* @ref bfi_mclass_t */
43 u8 msg_id; /* msg opcode with in the class */
44 union {
45 struct {
46 u8 rsvd;
47 u8 lpu_id; /* msg destination */
48 } h2i;
49 u16 i2htok; /* token in msgs to host */
50 } mtag;
51};
52
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
54 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \
57} while (0)
58
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
60 (_mh).msg_class = (_mc); \
61 (_mh).msg_id = (_op); \
62 (_mh).mtag.i2htok = (_i2htok); \
63} while (0)
64
65/*
66 * Message opcodes: 0-127 to firmware, 128-255 to host
67 */
68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
70
71/**
72 ****************************************************************************
73 *
74 * Scatter Gather Element and Page definition
75 *
76 ****************************************************************************
77 */
78
79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81
82/**
83 * SG Flags
84 */
85enum {
86 BFI_SGE_DATA = 0, /* data address, not last */
87 BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
88 BFI_SGE_DATA_LAST = 3, /* data address, last */
89 BFI_SGE_LINK = 2, /* link address */
90 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
91};
92
93/**
94 * DMA addresses
95 */
96union bfi_addr_u {
97 struct {
98 u32 addr_lo;
99 u32 addr_hi;
100 } a32;
101};
102
103/**
104 * Scatter Gather Element
105 */
106struct bfi_sge_s {
107#ifdef __BIGENDIAN
108 u32 flags:2,
109 rsvd:2,
110 sg_len:28;
111#else
112 u32 sg_len:28,
113 rsvd:2,
114 flags:2;
115#endif
116 union bfi_addr_u sga;
117};
118
119/**
120 * Scatter Gather Page
121 */
122#define BFI_SGPG_DATA_SGES 7
123#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
124#define BFI_SGPG_RSVD_WD_LEN 8
125struct bfi_sgpg_s {
126 struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128};
129
130/*
131 * Large Message structure - 128 Bytes size Msgs
132 */
133#define BFI_LMSG_SZ 128
134#define BFI_LMSG_PL_WSZ \
135 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
136
137struct bfi_msg_s {
138 struct bfi_mhdr_s mhdr;
139 u32 pl[BFI_LMSG_PL_WSZ];
140};
141
142/**
143 * Mailbox message structure
144 */
145#define BFI_MBMSG_SZ 7
146struct bfi_mbmsg_s {
147 struct bfi_mhdr_s mh;
148 u32 pl[BFI_MBMSG_SZ];
149};
150
151/**
152 * Message Classes
153 */
154enum bfi_mclass {
155 BFI_MC_IOC = 1, /* IO Controller (IOC) */
156 BFI_MC_FCPORT = 5, /* FC port */
157 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
158 BFI_MC_LL = 7, /* Link Layer */
159 BFI_MC_UF = 8, /* Unsolicited frame receive */
160 BFI_MC_FCXP = 9, /* FC Transport */
161 BFI_MC_LPS = 10, /* lport fc login services */
162 BFI_MC_RPORT = 11, /* Remote port */
163 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
164 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
165 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
166 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
167 BFI_MC_IOIM = 16, /* IO (Initiator mode) */
168 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
169 BFI_MC_TSKIM = 18, /* Initiator Task management */
170 BFI_MC_PORT = 21, /* Physical port */
171 BFI_MC_MAX = 32
172};
173
174#define BFI_IOC_MAX_CQS 4
175#define BFI_IOC_MAX_CQS_ASIC 8
176#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
177
178#define BFI_BOOT_TYPE_OFF 8
179#define BFI_BOOT_LOADER_OFF 12
180
181#define BFI_BOOT_TYPE_NORMAL 0
182#define BFI_BOOT_TYPE_FLASH 1
183#define BFI_BOOT_TYPE_MEMTEST 2
184
185#define BFI_BOOT_LOADER_OS 0
186#define BFI_BOOT_LOADER_BIOS 1
187#define BFI_BOOT_LOADER_UEFI 2
188
189/**
190 *----------------------------------------------------------------------
191 * IOC
192 *----------------------------------------------------------------------
193 */
194
195enum bfi_ioc_h2i_msgs {
196 BFI_IOC_H2I_ENABLE_REQ = 1,
197 BFI_IOC_H2I_DISABLE_REQ = 2,
198 BFI_IOC_H2I_GETATTR_REQ = 3,
199 BFI_IOC_H2I_DBG_SYNC = 4,
200 BFI_IOC_H2I_DBG_DUMP = 5,
201};
202
203enum bfi_ioc_i2h_msgs {
204 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
205 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
206 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
207 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
209};
210
211/**
212 * BFI_IOC_H2I_GETATTR_REQ message
213 */
214struct bfi_ioc_getattr_req_s {
215 struct bfi_mhdr_s mh;
216 union bfi_addr_u attr_addr;
217};
218
219struct bfi_ioc_attr_s {
220 wwn_t mfg_pwwn; /* Mfg port wwn */
221 wwn_t mfg_nwwn; /* Mfg node wwn */
222 mac_t mfg_mac; /* Mfg mac */
223 u16 rsvd_a;
224 wwn_t pwwn;
225 wwn_t nwwn;
226 mac_t mac; /* PBC or Mfg mac */
227 u16 rsvd_b;
228 mac_t fcoe_mac;
229 u16 rsvd_c;
230 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
231 u8 pcie_gen;
232 u8 pcie_lanes_orig;
233 u8 pcie_lanes;
234 u8 rx_bbcredit; /* receive buffer credits */
235 u32 adapter_prop; /* adapter properties */
236 u16 maxfrsize; /* max receive frame size */
237 char asic_rev;
238 u8 rsvd_d;
239 char fw_version[BFA_VERSION_LEN];
240 char optrom_version[BFA_VERSION_LEN];
241 struct bfa_mfg_vpd_s vpd;
242 u32 card_type; /* card type */
243};
244
245/**
246 * BFI_IOC_I2H_GETATTR_REPLY message
247 */
248struct bfi_ioc_getattr_reply_s {
249 struct bfi_mhdr_s mh; /* Common msg header */
250 u8 status; /* cfg reply status */
251 u8 rsvd[3];
252};
253
254/**
255 * Firmware memory page offsets
256 */
257#define BFI_IOC_SMEM_PG0_CB (0x40)
258#define BFI_IOC_SMEM_PG0_CT (0x180)
259
260/**
261 * Firmware statistic offset
262 */
263#define BFI_IOC_FWSTATS_OFF (0x6B40)
264#define BFI_IOC_FWSTATS_SZ (4096)
265
266/**
267 * Firmware trace offset
268 */
269#define BFI_IOC_TRC_OFF (0x4b00)
270#define BFI_IOC_TRC_ENTS 256
271
272#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
273#define BFI_IOC_MD5SUM_SZ 4
274struct bfi_ioc_image_hdr_s {
275 u32 signature; /* constant signature */
276 u32 rsvd_a;
277 u32 exec; /* exec vector */
278 u32 param; /* parameters */
279 u32 rsvd_b[4];
280 u32 md5sum[BFI_IOC_MD5SUM_SZ];
281};
282
283/**
284 * BFI_IOC_I2H_READY_EVENT message
285 */
286struct bfi_ioc_rdy_event_s {
287 struct bfi_mhdr_s mh; /* common msg header */
288 u8 init_status; /* init event status */
289 u8 rsvd[3];
290};
291
292struct bfi_ioc_hbeat_s {
293 struct bfi_mhdr_s mh; /* common msg header */
294 u32 hb_count; /* current heart beat count */
295};
296
297/**
298 * IOC hardware/firmware state
299 */
300enum bfi_ioc_state {
301 BFI_IOC_UNINIT = 0, /* not initialized */
302 BFI_IOC_INITING = 1, /* h/w is being initialized */
303 BFI_IOC_HWINIT = 2, /* h/w is initialized */
304 BFI_IOC_CFG = 3, /* IOC configuration in progress */
305 BFI_IOC_OP = 4, /* IOC is operational */
306 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
307 BFI_IOC_DISABLED = 6, /* IOC is disabled */
308 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
309 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
310 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
311};
312
313#define BFI_IOC_ENDIAN_SIG 0x12345678
314
315enum {
316 BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
317 BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
318 BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
319 BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
320 BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
321 BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
322 BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
323	BFI_ADAPTER_PROTO	= 0x100000,	/* prototype adapters	*/
324 BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
325 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
326};
327
328#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
329 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
330 BFI_ADAPTER_ ## __prop ## _SH)
331#define BFI_ADAPTER_SETP(__prop, __val) \
332 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
333#define BFI_ADAPTER_IS_PROTO(__adap_type) \
334 ((__adap_type) & BFI_ADAPTER_PROTO)
335#define BFI_ADAPTER_IS_TTV(__adap_type) \
336 ((__adap_type) & BFI_ADAPTER_TTV)
337#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
338 ((__adap_type) & BFI_ADAPTER_UNSUPP)
339#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
340 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
341 BFI_ADAPTER_UNSUPP))
342
343/**
344 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
345 */
346struct bfi_ioc_ctrl_req_s {
347 struct bfi_mhdr_s mh;
348 u8 ioc_class;
349 u8 rsvd[3];
350 u32 tv_sec;
351};
352#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
353#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
354
355/**
356 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
357 */
358struct bfi_ioc_ctrl_reply_s {
359 struct bfi_mhdr_s mh; /* Common msg header */
360 u8 status; /* enable/disable status */
361 u8 rsvd[3];
362};
363#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
364#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
365
366#define BFI_IOC_MSGSZ 8
367/**
368 * H2I Messages
369 */
370union bfi_ioc_h2i_msg_u {
371 struct bfi_mhdr_s mh;
372 struct bfi_ioc_ctrl_req_s enable_req;
373 struct bfi_ioc_ctrl_req_s disable_req;
374 struct bfi_ioc_getattr_req_s getattr_req;
375 u32 mboxmsg[BFI_IOC_MSGSZ];
376};
377
378/**
379 * I2H Messages
380 */
381union bfi_ioc_i2h_msg_u {
382 struct bfi_mhdr_s mh;
383 struct bfi_ioc_rdy_event_s rdy_event;
384 u32 mboxmsg[BFI_IOC_MSGSZ];
385};
386
387
388/**
389 *----------------------------------------------------------------------
390 * PBC
391 *----------------------------------------------------------------------
392 */
393
394#define BFI_PBC_MAX_BLUNS 8
395#define BFI_PBC_MAX_VPORTS 16
396
397/**
398 * PBC boot lun configuration
399 */
400struct bfi_pbc_blun_s {
401 wwn_t tgt_pwwn;
402 lun_t tgt_lun;
403};
404
405/**
406 * PBC virtual port configuration
407 */
408struct bfi_pbc_vport_s {
409 wwn_t vp_pwwn;
410 wwn_t vp_nwwn;
411};
412
413/**
414 * BFI pre-boot configuration information
415 */
416struct bfi_pbc_s {
417 u8 port_enabled;
418 u8 boot_enabled;
419 u8 nbluns;
420 u8 nvports;
421 u8 port_speed;
422 u8 rsvd_a;
423 u16 hss;
424 wwn_t pbc_pwwn;
425 wwn_t pbc_nwwn;
426 struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
427 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
428};
429
430/**
431 *----------------------------------------------------------------------
432 * MSGQ
433 *----------------------------------------------------------------------
434 */
435#define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
436#define BFI_MSGQ_EMPTY(_q) (_q->pi == _q->ci)
437#define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
438#define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
439
440/* q_depth must be power of 2 */
441#define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
442
443enum bfi_msgq_h2i_msgs_e {
444 BFI_MSGQ_H2I_INIT_REQ = 1,
445 BFI_MSGQ_H2I_DOORBELL = 2,
446 BFI_MSGQ_H2I_SHUTDOWN = 3,
447};
448
449enum bfi_msgq_i2h_msgs_e {
450 BFI_MSGQ_I2H_INIT_RSP = 1,
451 BFI_MSGQ_I2H_DOORBELL = 2,
452};
453
454
455/* Messages (commands/responses/AENs) will have the following header */
456struct bfi_msgq_mhdr_s {
457 u8 msg_class;
458 u8 msg_id;
459 u16 msg_token;
460 u16 num_entries;
461 u8 enet_id;
462 u8 rsvd[1];
463};
464
465#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
466 (_mh).msg_class = (_mc); \
467 (_mh).msg_id = (_mid); \
468 (_mh).msg_token = (_tok); \
469 (_mh).enet_id = (_enet_id); \
470} while (0)
471
472/*
473 * Mailbox for messaging interface
474 *
475*/
476#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */
477#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */
478#define BFI_MSGQ_MSG_SIZE_MAX (2048) /* TBD */
479
480struct bfi_msgq_s {
481 union bfi_addr_u addr;
482 u16 q_depth; /* Total num of entries in the queue */
483 u8 rsvd[2];
484};
485
486/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
487struct bfi_msgq_cfg_req_s {
488 struct bfi_mhdr_s mh;
489 struct bfi_msgq_s cmdq;
490 struct bfi_msgq_s rspq;
491};
492
493/* BFI_ENET_MSGQ_CFG_RSP */
494struct bfi_msgq_cfg_rsp_s {
495 struct bfi_mhdr_s mh;
496 u8 cmd_status;
497 u8 rsvd[3];
498};
499
500
501/* BFI_MSGQ_H2I_DOORBELL */
502struct bfi_msgq_h2i_db_s {
503 struct bfi_mhdr_s mh;
504 u16 cmdq_pi;
505 u16 rspq_ci;
506};
507
508/* BFI_MSGQ_I2H_DOORBELL */
509struct bfi_msgq_i2h_db_s {
510 struct bfi_mhdr_s mh;
511 u16 rspq_pi;
512 u16 cmdq_ci;
513};
514
515#pragma pack()
516
517/* BFI port specific */
518#pragma pack(1)
519
520enum bfi_port_h2i {
521 BFI_PORT_H2I_ENABLE_REQ = (1),
522 BFI_PORT_H2I_DISABLE_REQ = (2),
523 BFI_PORT_H2I_GET_STATS_REQ = (3),
524 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
525};
526
527enum bfi_port_i2h {
528 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
529 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
530 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
531 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
532};
533
534/**
535 * Generic REQ type
536 */
537struct bfi_port_generic_req_s {
538 struct bfi_mhdr_s mh; /* msg header */
539 u32 msgtag; /* msgtag for reply */
540 u32 rsvd;
541};
542
543/**
544 * Generic RSP type
545 */
546struct bfi_port_generic_rsp_s {
547 struct bfi_mhdr_s mh; /* common msg header */
548 u8 status; /* port enable status */
549 u8 rsvd[3];
550 u32 msgtag; /* msgtag for reply */
551};
552
553/**
554 * BFI_PORT_H2I_GET_STATS_REQ
555 */
556struct bfi_port_get_stats_req_s {
557 struct bfi_mhdr_s mh; /* common msg header */
558 union bfi_addr_u dma_addr;
559};
560
561union bfi_port_h2i_msg_u {
562 struct bfi_mhdr_s mh;
563 struct bfi_port_generic_req_s enable_req;
564 struct bfi_port_generic_req_s disable_req;
565 struct bfi_port_get_stats_req_s getstats_req;
566 struct bfi_port_generic_req_s clearstats_req;
567};
568
569union bfi_port_i2h_msg_u {
570 struct bfi_mhdr_s mh;
571 struct bfi_port_generic_rsp_s enable_rsp;
572 struct bfi_port_generic_rsp_s disable_rsp;
573 struct bfi_port_generic_rsp_s getstats_rsp;
574 struct bfi_port_generic_rsp_s clearstats_rsp;
575};
576
577#pragma pack()
578
579#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
index a51ee61ddb19..6f03ed382c69 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/bfi_cbreg.h
@@ -1,19 +1,3 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17 1
18/* 2/*
19 * bfi_cbreg.h crossbow host block register definitions 3 * bfi_cbreg.h crossbow host block register definitions
@@ -177,8 +161,8 @@
177#define __PSS_LMEM_INIT_EN 0x00000100 161#define __PSS_LMEM_INIT_EN 0x00000100
178#define __PSS_LPU1_RESET 0x00000002 162#define __PSS_LPU1_RESET 0x00000002
179#define __PSS_LPU0_RESET 0x00000001 163#define __PSS_LPU0_RESET 0x00000001
180#define PSS_ERR_STATUS_REG 0x00018810 164#define PSS_ERR_STATUS_REG 0x00018810
181#define __PSS_LMEM1_CORR_ERR 0x00000800 165#define __PSS_LMEM1_CORR_ERR 0x00000800
182#define __PSS_LMEM0_CORR_ERR 0x00000400 166#define __PSS_LMEM0_CORR_ERR 0x00000400
183#define __PSS_LMEM1_UNCORR_ERR 0x00000200 167#define __PSS_LMEM1_UNCORR_ERR 0x00000200
184#define __PSS_LMEM0_UNCORR_ERR 0x00000100 168#define __PSS_LMEM0_UNCORR_ERR 0x00000100
@@ -190,8 +174,9 @@
190#define __PSS_SGM_IF_ERR 0x00000004 174#define __PSS_SGM_IF_ERR 0x00000004
191#define __PSS_LPU1_RAM_ERR 0x00000002 175#define __PSS_LPU1_RAM_ERR 0x00000002
192#define __PSS_LPU0_RAM_ERR 0x00000001 176#define __PSS_LPU0_RAM_ERR 0x00000001
193#define ERR_SET_REG 0x00018818 177#define ERR_SET_REG 0x00018818
194#define __PSS_ERR_STATUS_SET 0x00000fff 178#define __PSS_ERR_STATUS_SET 0x00000fff
179
195 180
196/* 181/*
197 * These definitions are either in error/missing in spec. Its auto-generated 182 * These definitions are either in error/missing in spec. Its auto-generated
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
new file mode 100644
index 000000000000..62b86a4b0e4b
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ctreg.h
@@ -0,0 +1,627 @@
1
2/*
3 * bfi_ctreg.h catapult host block register definitions
4 *
5 * !!! Do not edit. Auto generated. !!!
6 */
7
8#ifndef __BFI_CTREG_H__
9#define __BFI_CTREG_H__
10
11
12#define HOSTFN0_LPU_MBOX0_0 0x00019200
13#define HOSTFN1_LPU_MBOX0_8 0x00019260
14#define LPU_HOSTFN0_MBOX0_0 0x00019280
15#define LPU_HOSTFN1_MBOX0_8 0x000192e0
16#define HOSTFN2_LPU_MBOX0_0 0x00019400
17#define HOSTFN3_LPU_MBOX0_8 0x00019460
18#define LPU_HOSTFN2_MBOX0_0 0x00019480
19#define LPU_HOSTFN3_MBOX0_8 0x000194e0
20#define HOSTFN0_INT_STATUS 0x00014000
21#define __HOSTFN0_HALT_OCCURRED 0x01000000
22#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
23#define __HOSTFN0_INT_STATUS_LVL_SH 20
24#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
25#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
26#define __HOSTFN0_INT_STATUS_P_SH 16
27#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
28#define __HOSTFN0_INT_STATUS_F 0x0000ffff
29#define HOSTFN0_INT_MSK 0x00014004
30#define HOST_PAGE_NUM_FN0 0x00014008
31#define __HOST_PAGE_NUM_FN 0x000001ff
32#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
33#define __MSIX_ERR_INDEX_FN 0x000001ff
34#define HOSTFN1_INT_STATUS 0x00014100
35#define __HOSTFN1_HALT_OCCURRED 0x01000000
36#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
37#define __HOSTFN1_INT_STATUS_LVL_SH 20
38#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
39#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
40#define __HOSTFN1_INT_STATUS_P_SH 16
41#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
42#define __HOSTFN1_INT_STATUS_F 0x0000ffff
43#define HOSTFN1_INT_MSK 0x00014104
44#define HOST_PAGE_NUM_FN1 0x00014108
45#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
46#define APP_PLL_425_CTL_REG 0x00014204
47#define __P_425_PLL_LOCK 0x80000000
48#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
49#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
50#define __APP_PLL_425_RESET_TIMER_SH 17
51#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
52#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
53#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
54#define __APP_PLL_425_CNTLMT0_1_SH 14
55#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
56#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
57#define __APP_PLL_425_JITLMT0_1_SH 12
58#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
59#define __APP_PLL_425_HREF 0x00000800
60#define __APP_PLL_425_HDIV 0x00000400
61#define __APP_PLL_425_P0_1_MK 0x00000300
62#define __APP_PLL_425_P0_1_SH 8
63#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
64#define __APP_PLL_425_Z0_2_MK 0x000000e0
65#define __APP_PLL_425_Z0_2_SH 5
66#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
67#define __APP_PLL_425_RSEL200500 0x00000010
68#define __APP_PLL_425_ENARST 0x00000008
69#define __APP_PLL_425_BYPASS 0x00000004
70#define __APP_PLL_425_LRESETN 0x00000002
71#define __APP_PLL_425_ENABLE 0x00000001
72#define APP_PLL_312_CTL_REG 0x00014208
73#define __P_312_PLL_LOCK 0x80000000
74#define __ENABLE_MAC_AHB_1 0x00800000
75#define __ENABLE_MAC_AHB_0 0x00400000
76#define __ENABLE_MAC_1 0x00200000
77#define __ENABLE_MAC_0 0x00100000
78#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
79#define __APP_PLL_312_RESET_TIMER_SH 17
80#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
81#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
82#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
83#define __APP_PLL_312_CNTLMT0_1_SH 14
84#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
85#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
86#define __APP_PLL_312_JITLMT0_1_SH 12
87#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
88#define __APP_PLL_312_HREF 0x00000800
89#define __APP_PLL_312_HDIV 0x00000400
90#define __APP_PLL_312_P0_1_MK 0x00000300
91#define __APP_PLL_312_P0_1_SH 8
92#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
93#define __APP_PLL_312_Z0_2_MK 0x000000e0
94#define __APP_PLL_312_Z0_2_SH 5
95#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
96#define __APP_PLL_312_RSEL200500 0x00000010
97#define __APP_PLL_312_ENARST 0x00000008
98#define __APP_PLL_312_BYPASS 0x00000004
99#define __APP_PLL_312_LRESETN 0x00000002
100#define __APP_PLL_312_ENABLE 0x00000001
101#define MBIST_CTL_REG 0x00014220
102#define __EDRAM_BISTR_START 0x00000004
103#define __MBIST_RESET 0x00000002
104#define __MBIST_START 0x00000001
105#define MBIST_STAT_REG 0x00014224
106#define __EDRAM_BISTR_STATUS 0x00000008
107#define __EDRAM_BISTR_DONE 0x00000004
108#define __MEM_BIT_STATUS 0x00000002
109#define __MBIST_DONE 0x00000001
110#define HOST_SEM0_REG 0x00014230
111#define __HOST_SEMAPHORE 0x00000001
112#define HOST_SEM1_REG 0x00014234
113#define HOST_SEM2_REG 0x00014238
114#define HOST_SEM3_REG 0x0001423c
115#define HOST_SEM0_INFO_REG 0x00014240
116#define HOST_SEM1_INFO_REG 0x00014244
117#define HOST_SEM2_INFO_REG 0x00014248
118#define HOST_SEM3_INFO_REG 0x0001424c
119#define ETH_MAC_SER_REG 0x00014288
120#define __APP_EMS_CKBUFAMPIN 0x00000020
121#define __APP_EMS_REFCLKSEL 0x00000010
122#define __APP_EMS_CMLCKSEL 0x00000008
123#define __APP_EMS_REFCKBUFEN2 0x00000004
124#define __APP_EMS_REFCKBUFEN1 0x00000002
125#define __APP_EMS_CHANNEL_SEL 0x00000001
126#define HOSTFN2_INT_STATUS 0x00014300
127#define __HOSTFN2_HALT_OCCURRED 0x01000000
128#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
129#define __HOSTFN2_INT_STATUS_LVL_SH 20
130#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
131#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
132#define __HOSTFN2_INT_STATUS_P_SH 16
133#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
134#define __HOSTFN2_INT_STATUS_F 0x0000ffff
135#define HOSTFN2_INT_MSK 0x00014304
136#define HOST_PAGE_NUM_FN2 0x00014308
137#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
138#define HOSTFN3_INT_STATUS 0x00014400
139#define __HALT_OCCURRED 0x01000000
140#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
141#define __HOSTFN3_INT_STATUS_LVL_SH 20
142#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
143#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
144#define __HOSTFN3_INT_STATUS_P_SH 16
145#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
146#define __HOSTFN3_INT_STATUS_F 0x0000ffff
147#define HOSTFN3_INT_MSK 0x00014404
148#define HOST_PAGE_NUM_FN3 0x00014408
149#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
150#define FNC_ID_REG 0x00014600
151#define __FUNCTION_NUMBER 0x00000007
152#define FNC_PERS_REG 0x00014604
153#define __F3_FUNCTION_ACTIVE 0x80000000
154#define __F3_FUNCTION_MODE 0x40000000
155#define __F3_PORT_MAP_MK 0x30000000
156#define __F3_PORT_MAP_SH 28
157#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
158#define __F3_VM_MODE 0x08000000
159#define __F3_INTX_STATUS_MK 0x07000000
160#define __F3_INTX_STATUS_SH 24
161#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
162#define __F2_FUNCTION_ACTIVE 0x00800000
163#define __F2_FUNCTION_MODE 0x00400000
164#define __F2_PORT_MAP_MK 0x00300000
165#define __F2_PORT_MAP_SH 20
166#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
167#define __F2_VM_MODE 0x00080000
168#define __F2_INTX_STATUS_MK 0x00070000
169#define __F2_INTX_STATUS_SH 16
170#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
171#define __F1_FUNCTION_ACTIVE 0x00008000
172#define __F1_FUNCTION_MODE 0x00004000
173#define __F1_PORT_MAP_MK 0x00003000
174#define __F1_PORT_MAP_SH 12
175#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
176#define __F1_VM_MODE 0x00000800
177#define __F1_INTX_STATUS_MK 0x00000700
178#define __F1_INTX_STATUS_SH 8
179#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
180#define __F0_FUNCTION_ACTIVE 0x00000080
181#define __F0_FUNCTION_MODE 0x00000040
182#define __F0_PORT_MAP_MK 0x00000030
183#define __F0_PORT_MAP_SH 4
184#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
185#define __F0_VM_MODE 0x00000008
186#define __F0_INTX_STATUS 0x00000007
187enum {
188 __F0_INTX_STATUS_MSIX = 0x0,
189 __F0_INTX_STATUS_INTA = 0x1,
190 __F0_INTX_STATUS_INTB = 0x2,
191 __F0_INTX_STATUS_INTC = 0x3,
192 __F0_INTX_STATUS_INTD = 0x4,
193};
194#define OP_MODE 0x0001460c
195#define __APP_ETH_CLK_LOWSPEED 0x00000004
196#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
197#define __GLOBAL_FCOE_MODE 0x00000001
198#define HOST_SEM4_REG 0x00014610
199#define HOST_SEM5_REG 0x00014614
200#define HOST_SEM6_REG 0x00014618
201#define HOST_SEM7_REG 0x0001461c
202#define HOST_SEM4_INFO_REG 0x00014620
203#define HOST_SEM5_INFO_REG 0x00014624
204#define HOST_SEM6_INFO_REG 0x00014628
205#define HOST_SEM7_INFO_REG 0x0001462c
206#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
207#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
208#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
209#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
210#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
211#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
212#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
213#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
214#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
215#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
216#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
217#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
218#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
219#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
220#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
221#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
222#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
223#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
224#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
225#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
226#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
227#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
228#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
229#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
230#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
231#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
232#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
233#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
234#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
235#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
236#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
237#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
238#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
239#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
240#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
241#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
242#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
243#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
244#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
245#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
246#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
247#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
248#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
249#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
250#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
/*
 * Host function 2 / LPU1 mailbox 0 command-status register and fields.
 * Layout matches every sibling HOSTFNx_LPUy_MBOX0 group in this file:
 * bits [31:1] carry the mailbox info word, bit [0] is the command-status flag.
 */
#define HOSTFN2_LPU1_MBOX0_CMD_STAT		0x00019154
#define __HOSTFN2_LPU1_MBOX0_INFO_MK		0xfffffffe
#define __HOSTFN2_LPU1_MBOX0_INFO_SH		1
#define __HOSTFN2_LPU1_MBOX0_INFO(_v)	((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
#define __HOSTFN2_LPU1_MBOX0_CMD_STATUS		0x00000001
/*
 * Historical misspelling ("MBOX0BOX0") kept as an alias so any existing
 * references keep compiling; new code should use the correctly spelled name.
 */
#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS	__HOSTFN2_LPU1_MBOX0_CMD_STATUS
256#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
257#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
258#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
259#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
260#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
261#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
262#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
263#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
264#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
265#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
266#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
267#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
268#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
269#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
270#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
271#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
272#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
273#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
274#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
275#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
276#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
277#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
278#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
279#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
280#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
281#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
282#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
283#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
284#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
285#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
286#define FW_INIT_HALT_P0 0x000191ac
287#define __FW_INIT_HALT_P 0x00000001
288#define FW_INIT_HALT_P1 0x000191bc
289#define CPE_PI_PTR_Q0 0x00038000
290#define __CPE_PI_UNUSED_MK 0xffff0000
291#define __CPE_PI_UNUSED_SH 16
292#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
293#define __CPE_PI_PTR 0x0000ffff
294#define CPE_PI_PTR_Q1 0x00038040
295#define CPE_CI_PTR_Q0 0x00038004
296#define __CPE_CI_UNUSED_MK 0xffff0000
297#define __CPE_CI_UNUSED_SH 16
298#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
299#define __CPE_CI_PTR 0x0000ffff
300#define CPE_CI_PTR_Q1 0x00038044
301#define CPE_DEPTH_Q0 0x00038008
302#define __CPE_DEPTH_UNUSED_MK 0xf8000000
303#define __CPE_DEPTH_UNUSED_SH 27
304#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
305#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
306#define __CPE_MSIX_VEC_INDEX_SH 16
307#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
308#define __CPE_DEPTH 0x0000ffff
309#define CPE_DEPTH_Q1 0x00038048
310#define CPE_QCTRL_Q0 0x0003800c
311#define __CPE_CTRL_UNUSED30_MK 0xfc000000
312#define __CPE_CTRL_UNUSED30_SH 26
313#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
314#define __CPE_FUNC_INT_CTRL_MK 0x03000000
315#define __CPE_FUNC_INT_CTRL_SH 24
316#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
317enum {
318 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
319 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
320 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
321 __CPE_FUNC_INT_CTRL_HALF = 0x3,
322};
323#define __CPE_CTRL_UNUSED20_MK 0x00f00000
324#define __CPE_CTRL_UNUSED20_SH 20
325#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
326#define __CPE_SCI_TH_MK 0x000f0000
327#define __CPE_SCI_TH_SH 16
328#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
329#define __CPE_CTRL_UNUSED10_MK 0x0000c000
330#define __CPE_CTRL_UNUSED10_SH 14
331#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
332#define __CPE_ACK_PENDING 0x00002000
333#define __CPE_CTRL_UNUSED40_MK 0x00001c00
334#define __CPE_CTRL_UNUSED40_SH 10
335#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
336#define __CPE_PCIEID_MK 0x00000300
337#define __CPE_PCIEID_SH 8
338#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
339#define __CPE_CTRL_UNUSED00_MK 0x000000fe
340#define __CPE_CTRL_UNUSED00_SH 1
341#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
342#define __CPE_ESIZE 0x00000001
343#define CPE_QCTRL_Q1 0x0003804c
344#define __CPE_CTRL_UNUSED31_MK 0xfc000000
345#define __CPE_CTRL_UNUSED31_SH 26
346#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
347#define __CPE_CTRL_UNUSED21_MK 0x00f00000
348#define __CPE_CTRL_UNUSED21_SH 20
349#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
350#define __CPE_CTRL_UNUSED11_MK 0x0000c000
351#define __CPE_CTRL_UNUSED11_SH 14
352#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
353#define __CPE_CTRL_UNUSED41_MK 0x00001c00
354#define __CPE_CTRL_UNUSED41_SH 10
355#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
356#define __CPE_CTRL_UNUSED01_MK 0x000000fe
357#define __CPE_CTRL_UNUSED01_SH 1
358#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
359#define RME_PI_PTR_Q0 0x00038020
360#define __LATENCY_TIME_STAMP_MK 0xffff0000
361#define __LATENCY_TIME_STAMP_SH 16
362#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
363#define __RME_PI_PTR 0x0000ffff
364#define RME_PI_PTR_Q1 0x00038060
365#define RME_CI_PTR_Q0 0x00038024
366#define __DELAY_TIME_STAMP_MK 0xffff0000
367#define __DELAY_TIME_STAMP_SH 16
368#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
369#define __RME_CI_PTR 0x0000ffff
370#define RME_CI_PTR_Q1 0x00038064
371#define RME_DEPTH_Q0 0x00038028
372#define __RME_DEPTH_UNUSED_MK 0xf8000000
373#define __RME_DEPTH_UNUSED_SH 27
374#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
375#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
376#define __RME_MSIX_VEC_INDEX_SH 16
377#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
378#define __RME_DEPTH 0x0000ffff
379#define RME_DEPTH_Q1 0x00038068
380#define RME_QCTRL_Q0 0x0003802c
381#define __RME_INT_LATENCY_TIMER_MK 0xff000000
382#define __RME_INT_LATENCY_TIMER_SH 24
383#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
384#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
385#define __RME_INT_DELAY_TIMER_SH 16
386#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
387#define __RME_INT_DELAY_DISABLE 0x00008000
388#define __RME_DLY_DELAY_DISABLE 0x00004000
389#define __RME_ACK_PENDING 0x00002000
390#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
391#define __RME_CTRL_UNUSED10_MK 0x00000c00
392#define __RME_CTRL_UNUSED10_SH 10
393#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
394#define __RME_PCIEID_MK 0x00000300
395#define __RME_PCIEID_SH 8
396#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
397#define __RME_CTRL_UNUSED00_MK 0x000000fe
398#define __RME_CTRL_UNUSED00_SH 1
399#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
400#define __RME_ESIZE 0x00000001
401#define RME_QCTRL_Q1 0x0003806c
402#define __RME_CTRL_UNUSED11_MK 0x00000c00
403#define __RME_CTRL_UNUSED11_SH 10
404#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
405#define __RME_CTRL_UNUSED01_MK 0x000000fe
406#define __RME_CTRL_UNUSED01_SH 1
407#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
408#define PSS_CTL_REG 0x00018800
409#define __PSS_I2C_CLK_DIV_MK 0x007f0000
410#define __PSS_I2C_CLK_DIV_SH 16
411#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
412#define __PSS_LMEM_INIT_DONE 0x00001000
413#define __PSS_LMEM_RESET 0x00000200
414#define __PSS_LMEM_INIT_EN 0x00000100
415#define __PSS_LPU1_RESET 0x00000002
416#define __PSS_LPU0_RESET 0x00000001
417#define PSS_ERR_STATUS_REG 0x00018810
418#define __PSS_LPU1_TCM_READ_ERR 0x00200000
419#define __PSS_LPU0_TCM_READ_ERR 0x00100000
420#define __PSS_LMEM5_CORR_ERR 0x00080000
421#define __PSS_LMEM4_CORR_ERR 0x00040000
422#define __PSS_LMEM3_CORR_ERR 0x00020000
423#define __PSS_LMEM2_CORR_ERR 0x00010000
424#define __PSS_LMEM1_CORR_ERR 0x00008000
425#define __PSS_LMEM0_CORR_ERR 0x00004000
426#define __PSS_LMEM5_UNCORR_ERR 0x00002000
427#define __PSS_LMEM4_UNCORR_ERR 0x00001000
428#define __PSS_LMEM3_UNCORR_ERR 0x00000800
429#define __PSS_LMEM2_UNCORR_ERR 0x00000400
430#define __PSS_LMEM1_UNCORR_ERR 0x00000200
431#define __PSS_LMEM0_UNCORR_ERR 0x00000100
432#define __PSS_BAL_PERR 0x00000080
433#define __PSS_DIP_IF_ERR 0x00000040
434#define __PSS_IOH_IF_ERR 0x00000020
435#define __PSS_TDS_IF_ERR 0x00000010
436#define __PSS_RDS_IF_ERR 0x00000008
437#define __PSS_SGM_IF_ERR 0x00000004
438#define __PSS_LPU1_RAM_ERR 0x00000002
439#define __PSS_LPU0_RAM_ERR 0x00000001
440#define ERR_SET_REG 0x00018818
441#define __PSS_ERR_STATUS_SET 0x003fffff
442#define PMM_1T_RESET_REG_P0 0x0002381c
443#define __PMM_1T_RESET_P 0x00000001
444#define PMM_1T_RESET_REG_P1 0x00023c1c
445#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
446#define __RXQ0_ADD_VECTORS_P 0x80000000
447#define __RXQ0_STOP_P 0x40000000
448#define __RXQ0_PRD_PTR_P 0x0000ffff
449#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
450#define __RXQ1_ADD_VECTORS_P 0x80000000
451#define __RXQ1_STOP_P 0x40000000
452#define __RXQ1_PRD_PTR_P 0x0000ffff
453#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
454#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
455#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
456#define __TXQ0_ADD_VECTORS_P 0x80000000
457#define __TXQ0_STOP_P 0x40000000
458#define __TXQ0_PRD_PTR_P 0x0000ffff
459#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
460#define __TXQ1_ADD_VECTORS_P 0x80000000
461#define __TXQ1_STOP_P 0x40000000
462#define __TXQ1_PRD_PTR_P 0x0000ffff
463#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
464#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
465#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
466#define __IB1_0_ACK_P 0x80000000
467#define __IB1_0_DISABLE_P 0x40000000
468#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
469#define __IB1_0_COALESCING_CFG_P_SH 16
470#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
471#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
472#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
473#define __IB1_1_ACK_P 0x80000000
474#define __IB1_1_DISABLE_P 0x40000000
475#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
476#define __IB1_1_COALESCING_CFG_P_SH 16
477#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
478#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
479#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
480#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
481#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
482#define __IB2_0_ACK_P 0x80000000
483#define __IB2_0_DISABLE_P 0x40000000
484#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
485#define __IB2_0_COALESCING_CFG_P_SH 16
486#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
487#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
488#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
489#define __IB2_1_ACK_P 0x80000000
490#define __IB2_1_DISABLE_P 0x40000000
491#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
492#define __IB2_1_COALESCING_CFG_P_SH 16
493#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
494#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
496#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
497
498
499/*
500 * These definitions are either in error or missing from the spec. They are
501 * auto-generated from hard-coded values in regparse.pl.
502 */
503#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
504#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
505#define __EMPHPRE_AT_4G_FIX 0x00000003
506#define __SFP_TXRATE_EN_FIX 0x00000100
507#define __SFP_RXRATE_EN_FIX 0x00000080
508
509
510/*
511 * These register definitions are auto-generated from hard coded values
512 * in regparse.pl.
513 */
514
515
516/*
517 * These register mapping definitions are auto-generated from mapping tables
518 * in regparse.pl.
519 */
520#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
521#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
522#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
523#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
524#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
525
526#define CPE_DEPTH_Q(__n) \
527 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
528#define CPE_QCTRL_Q(__n) \
529 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
530#define CPE_PI_PTR_Q(__n) \
531 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
532#define CPE_CI_PTR_Q(__n) \
533 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
534#define RME_DEPTH_Q(__n) \
535 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
536#define RME_QCTRL_Q(__n) \
537 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
538#define RME_PI_PTR_Q(__n) \
539 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
540#define RME_CI_PTR_Q(__n) \
541 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
542#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
543 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
544#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
545 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
546#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
547 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
548#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
549 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
550#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
551 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
552#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
553 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
554#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
555 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
556#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
557 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
558
559#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
560#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
561#define CPE_Q_MASK(__q) ((__q) & 0x3)
562#define RME_Q_MASK(__q) ((__q) & 0x3)
563
564
565/*
566 * PCI MSI-X vector defines
567 */
568enum {
569 BFA_MSIX_CPE_Q0 = 0,
570 BFA_MSIX_CPE_Q1 = 1,
571 BFA_MSIX_CPE_Q2 = 2,
572 BFA_MSIX_CPE_Q3 = 3,
573 BFA_MSIX_RME_Q0 = 4,
574 BFA_MSIX_RME_Q1 = 5,
575 BFA_MSIX_RME_Q2 = 6,
576 BFA_MSIX_RME_Q3 = 7,
577 BFA_MSIX_LPU_ERR = 8,
578 BFA_MSIX_CT_MAX = 9,
579};
580
581/*
582 * And corresponding host interrupt status bit field defines
583 */
584#define __HFN_INT_CPE_Q0 0x00000001U
585#define __HFN_INT_CPE_Q1 0x00000002U
586#define __HFN_INT_CPE_Q2 0x00000004U
587#define __HFN_INT_CPE_Q3 0x00000008U
588#define __HFN_INT_CPE_Q4 0x00000010U
589#define __HFN_INT_CPE_Q5 0x00000020U
590#define __HFN_INT_CPE_Q6 0x00000040U
591#define __HFN_INT_CPE_Q7 0x00000080U
592#define __HFN_INT_RME_Q0 0x00000100U
593#define __HFN_INT_RME_Q1 0x00000200U
594#define __HFN_INT_RME_Q2 0x00000400U
595#define __HFN_INT_RME_Q3 0x00000800U
596#define __HFN_INT_RME_Q4 0x00001000U
597#define __HFN_INT_RME_Q5 0x00002000U
598#define __HFN_INT_RME_Q6 0x00004000U
599#define __HFN_INT_RME_Q7 0x00008000U
600#define __HFN_INT_ERR_EMC 0x00010000U
601#define __HFN_INT_ERR_LPU0 0x00020000U
602#define __HFN_INT_ERR_LPU1 0x00040000U
603#define __HFN_INT_ERR_PSS 0x00080000U
604#define __HFN_INT_MBOX_LPU0 0x00100000U
605#define __HFN_INT_MBOX_LPU1 0x00200000U
606#define __HFN_INT_MBOX1_LPU0 0x00400000U
607#define __HFN_INT_MBOX1_LPU1 0x00800000U
608#define __HFN_INT_LL_HALT 0x01000000U
609#define __HFN_INT_CPE_MASK 0x000000ffU
610#define __HFN_INT_RME_MASK 0x0000ff00U
611
612
613/*
614 * catapult memory map.
615 */
616#define LL_PGN_HQM0 0x0096
617#define LL_PGN_HQM1 0x0097
618#define PSS_SMEM_PAGE_START 0x8000
619#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
620#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
621
622/*
623 * End of catapult memory map
624 */
625
626
627#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
new file mode 100644
index 000000000000..69ac85f9e938
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -0,0 +1,765 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_MS_H__
19#define __BFI_MS_H__
20
21#include "bfi.h"
22#include "bfa_fc.h"
23#include "bfa_defs_svc.h"
24
25#pragma pack(1)
26
27enum bfi_iocfc_h2i_msgs {
28 BFI_IOCFC_H2I_CFG_REQ = 1,
29 BFI_IOCFC_H2I_SET_INTR_REQ = 2,
30 BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
31};
32
33enum bfi_iocfc_i2h_msgs {
34 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
35 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
36};
37
38struct bfi_iocfc_cfg_s {
39 u8 num_cqs; /* Number of CQs to be used */
40 u8 sense_buf_len; /* SCSI sense length */
41 u16 rsvd_1;
42 u32 endian_sig; /* endian signature of host */
43
44 /**
45 * Request and response circular queue base addresses, size and
46 * shadow index pointers.
47 */
48 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
49 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
50 u16 req_cq_elems[BFI_IOC_MAX_CQS];
51 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
52 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
53 u16 rsp_cq_elems[BFI_IOC_MAX_CQS];
54
55 union bfi_addr_u stats_addr; /* DMA-able address for stats */
56 union bfi_addr_u cfgrsp_addr; /* config response dma address */
57 union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
58 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
59};
60
61/**
62 * Boot target wwn information for this port. This contains either the stored
63 * or discovered boot target port wwns for the port.
64 */
65struct bfi_iocfc_bootwwns {
66 wwn_t wwn[BFA_BOOT_BOOTLUN_MAX];
67 u8 nwwns;
68 u8 rsvd[7];
69};
70
71struct bfi_iocfc_cfgrsp_s {
72 struct bfa_iocfc_fwcfg_s fwcfg;
73 struct bfa_iocfc_intr_attr_s intr_attr;
74 struct bfi_iocfc_bootwwns bootwwns;
75 struct bfi_pbc_s pbc_cfg;
76};
77
78/**
79 * BFI_IOCFC_H2I_CFG_REQ message
80 */
81struct bfi_iocfc_cfg_req_s {
82 struct bfi_mhdr_s mh;
83 union bfi_addr_u ioc_cfg_dma_addr;
84};
85
86
87/**
88 * BFI_IOCFC_I2H_CFG_REPLY message
89 */
90struct bfi_iocfc_cfg_reply_s {
91 struct bfi_mhdr_s mh; /* Common msg header */
92 u8 cfg_success; /* cfg reply status */
93 u8 lpu_bm; /* LPUs assigned for this IOC */
94 u8 rsvd[2];
95};
96
97
98/**
99 * BFI_IOCFC_H2I_SET_INTR_REQ message
100 */
101struct bfi_iocfc_set_intr_req_s {
102 struct bfi_mhdr_s mh; /* common msg header */
103 u8 coalesce; /* enable intr coalescing */
104 u8 rsvd[3];
105 u16 delay; /* delay timer 0..1125us */
106 u16 latency; /* latency timer 0..225us */
107};
108
109
110/**
111 * BFI_IOCFC_H2I_UPDATEQ_REQ message
112 */
113struct bfi_iocfc_updateq_req_s {
114 struct bfi_mhdr_s mh; /* common msg header */
115 u32 reqq_ba; /* reqq base addr */
116 u32 rspq_ba; /* rspq base addr */
117 u32 reqq_sci; /* reqq shadow ci */
118 u32 rspq_spi; /* rspq shadow pi */
119};
120
121
122/**
123 * BFI_IOCFC_I2H_UPDATEQ_RSP message
124 */
125struct bfi_iocfc_updateq_rsp_s {
126 struct bfi_mhdr_s mh; /* common msg header */
127 u8 status; /* updateq status */
128 u8 rsvd[3];
129};
130
131
132/**
133 * H2I Messages
134 */
135union bfi_iocfc_h2i_msg_u {
136 struct bfi_mhdr_s mh;
137 struct bfi_iocfc_cfg_req_s cfg_req;
138 struct bfi_iocfc_updateq_req_s updateq_req;
139 u32 mboxmsg[BFI_IOC_MSGSZ];
140};
141
142
143/**
144 * I2H Messages
145 */
146union bfi_iocfc_i2h_msg_u {
147 struct bfi_mhdr_s mh;
148 struct bfi_iocfc_cfg_reply_s cfg_reply;
149 struct bfi_iocfc_updateq_rsp_s updateq_rsp;
150 u32 mboxmsg[BFI_IOC_MSGSZ];
151};
152
153
154enum bfi_fcport_h2i {
155 BFI_FCPORT_H2I_ENABLE_REQ = (1),
156 BFI_FCPORT_H2I_DISABLE_REQ = (2),
157 BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
158 BFI_FCPORT_H2I_STATS_GET_REQ = (4),
159 BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
160};
161
162
163enum bfi_fcport_i2h {
164 BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
165 BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
166 BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
167 BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
168 BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
169 BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
170 BFI_FCPORT_I2H_TRUNK_SCN = BFA_I2HM(7),
171 BFI_FCPORT_I2H_ENABLE_AEN = BFA_I2HM(8),
172 BFI_FCPORT_I2H_DISABLE_AEN = BFA_I2HM(9),
173};
174
175
176/**
177 * Generic REQ type
178 */
179struct bfi_fcport_req_s {
180 struct bfi_mhdr_s mh; /* msg header */
181 u32 msgtag; /* msgtag for reply */
182};
183
184/**
185 * Generic RSP type
186 */
187struct bfi_fcport_rsp_s {
188 struct bfi_mhdr_s mh; /* common msg header */
189 u8 status; /* port enable status */
190 u8 rsvd[3];
191 u32 msgtag; /* msgtag for reply */
192};
193
194/**
195 * BFI_FCPORT_H2I_ENABLE_REQ
196 */
197struct bfi_fcport_enable_req_s {
198 struct bfi_mhdr_s mh; /* msg header */
199 u32 rsvd1;
200 wwn_t nwwn; /* node wwn of physical port */
201 wwn_t pwwn; /* port wwn of physical port */
202 struct bfa_port_cfg_s port_cfg; /* port configuration */
203 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
204 u32 msgtag; /* msgtag for reply */
205 u32 rsvd2;
206};
207
208/**
209 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
210 */
211struct bfi_fcport_set_svc_params_req_s {
212 struct bfi_mhdr_s mh; /* msg header */
213 u16 tx_bbcredit; /* Tx credits */
214 u16 rsvd;
215};
216
217/**
218 * BFI_FCPORT_I2H_EVENT
219 */
220struct bfi_fcport_event_s {
221 struct bfi_mhdr_s mh; /* common msg header */
222 struct bfa_port_link_s link_state;
223};
224
225/**
226 * BFI_FCPORT_I2H_TRUNK_SCN
227 */
228struct bfi_fcport_trunk_link_s {
229 wwn_t trunk_wwn;
230 u8 fctl; /* bfa_trunk_link_fctl_t */
231 u8 state; /* bfa_trunk_link_state_t */
232 u8 speed; /* bfa_port_speed_t */
233 u8 rsvd;
234 u32 deskew;
235};
236
237#define BFI_FCPORT_MAX_LINKS 2
238struct bfi_fcport_trunk_scn_s {
239 struct bfi_mhdr_s mh;
240 u8 trunk_state; /* bfa_trunk_state_t */
241 u8 trunk_speed; /* bfa_port_speed_t */
242 u8 rsvd_a[2];
243 struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
244};
245
246/**
247 * fcport H2I message
248 */
249union bfi_fcport_h2i_msg_u {
250 struct bfi_mhdr_s *mhdr;
251 struct bfi_fcport_enable_req_s *penable;
252 struct bfi_fcport_req_s *pdisable;
253 struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
254 struct bfi_fcport_req_s *pstatsget;
255 struct bfi_fcport_req_s *pstatsclear;
256};
257
258/**
259 * fcport I2H message
260 */
261union bfi_fcport_i2h_msg_u {
262 struct bfi_msg_s *msg;
263 struct bfi_fcport_rsp_s *penable_rsp;
264 struct bfi_fcport_rsp_s *pdisable_rsp;
265 struct bfi_fcport_rsp_s *psetsvcparams_rsp;
266 struct bfi_fcport_rsp_s *pstatsget_rsp;
267 struct bfi_fcport_rsp_s *pstatsclear_rsp;
268 struct bfi_fcport_event_s *event;
269 struct bfi_fcport_trunk_scn_s *trunk_scn;
270};
271
272enum bfi_fcxp_h2i {
273 BFI_FCXP_H2I_SEND_REQ = 1,
274};
275
276enum bfi_fcxp_i2h {
277 BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
278};
279
280#define BFA_FCXP_MAX_SGES 2
281
282/**
283 * FCXP send request structure
284 */
285struct bfi_fcxp_send_req_s {
286 struct bfi_mhdr_s mh; /* Common msg header */
287 u16 fcxp_tag; /* driver request tag */
288 u16 max_frmsz; /* max send frame size */
289 u16 vf_id; /* vsan tag if applicable */
290 u16 rport_fw_hndl; /* FW Handle for the remote port */
291 u8 class; /* FC class used for req/rsp */
292 u8 rsp_timeout; /* timeout in secs, 0-no response */
293 u8 cts; /* continue sequence */
294 u8 lp_tag; /* lport tag */
295 struct fchs_s fchs; /* request FC header structure */
296 u32 req_len; /* request payload length */
297 u32 rsp_maxlen; /* max response length expected */
298 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
299 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
300};
301
302/**
303 * FCXP send response structure
304 */
305struct bfi_fcxp_send_rsp_s {
306 struct bfi_mhdr_s mh; /* Common msg header */
307 u16 fcxp_tag; /* send request tag */
308 u8 req_status; /* request status */
309 u8 rsvd;
310 u32 rsp_len; /* actual response length */
311 u32 residue_len; /* residual response length */
312 struct fchs_s fchs; /* response FC header structure */
313};
314
315enum bfi_uf_h2i {
316 BFI_UF_H2I_BUF_POST = 1,
317};
318
319enum bfi_uf_i2h {
320 BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
321};
322
323#define BFA_UF_MAX_SGES 2
324
325struct bfi_uf_buf_post_s {
326 struct bfi_mhdr_s mh; /* Common msg header */
327 u16 buf_tag; /* buffer tag */
328 u16 buf_len; /* total buffer length */
329 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */
330};
331
332struct bfi_uf_frm_rcvd_s {
333 struct bfi_mhdr_s mh; /* Common msg header */
334 u16 buf_tag; /* buffer tag */
335 u16 rsvd;
336 u16 frm_len; /* received frame length */
337 u16 xfr_len; /* transferred length */
338};
339
340enum bfi_lps_h2i_msgs {
341 BFI_LPS_H2I_LOGIN_REQ = 1,
342 BFI_LPS_H2I_LOGOUT_REQ = 2,
343};
344
345enum bfi_lps_i2h_msgs {
346 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), /* NOTE(review): these I2H values carry H2I-prefixed names; confirm callers before any rename */
347 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
348 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
349};
350
351struct bfi_lps_login_req_s {
352 struct bfi_mhdr_s mh; /* common msg header */
353 u8 lp_tag; /* lport tag */
354 u8 alpa; /* presumably arbitrated-loop AL_PA — confirm */
355 u16 pdu_size; /* login PDU size */
356 wwn_t pwwn; /* port wwn */
357 wwn_t nwwn; /* node wwn */
358 u8 fdisc; /* nonzero appears to select FDISC over FLOGI — TODO confirm */
359 u8 auth_en; /* authentication enable */
360 u8 rsvd[2];
361};
362
363struct bfi_lps_login_rsp_s {
364 struct bfi_mhdr_s mh; /* common msg header */
365 u8 lp_tag;
366 u8 status;
367 u8 lsrjt_rsn;
368 u8 lsrjt_expl;
369 wwn_t port_name;
370 wwn_t node_name;
371 u16 bb_credit;
372 u8 f_port;
373 u8 npiv_en;
374 u32 lp_pid:24;
375 u32 auth_req:8;
376 mac_t lp_mac;
377 mac_t fcf_mac;
378 u8 ext_status;
379 u8 brcd_switch; /* attached peer is brcd switch */
380};
381
382struct bfi_lps_logout_req_s {
383 struct bfi_mhdr_s mh; /* common msg header */
384 u8 lp_tag;
385 u8 rsvd[3];
386 wwn_t port_name;
387};
388
389struct bfi_lps_logout_rsp_s {
390 struct bfi_mhdr_s mh; /* common msg header */
391 u8 lp_tag;
392 u8 status;
393 u8 rsvd[2];
394};
395
396struct bfi_lps_cvl_event_s {
397 struct bfi_mhdr_s mh; /* common msg header */
398 u8 lp_tag;
399 u8 rsvd[3];
400};
401
402union bfi_lps_h2i_msg_u {
403 struct bfi_mhdr_s *msg;
404 struct bfi_lps_login_req_s *login_req;
405 struct bfi_lps_logout_req_s *logout_req;
406};
407
408union bfi_lps_i2h_msg_u {
409 struct bfi_msg_s *msg;
410 struct bfi_lps_login_rsp_s *login_rsp;
411 struct bfi_lps_logout_rsp_s *logout_rsp;
412 struct bfi_lps_cvl_event_s *cvl_event;
413};
414
415enum bfi_rport_h2i_msgs {
416 BFI_RPORT_H2I_CREATE_REQ = 1,
417 BFI_RPORT_H2I_DELETE_REQ = 2,
418 BFI_RPORT_H2I_SET_SPEED_REQ = 3,
419};
420
421enum bfi_rport_i2h_msgs {
422 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
423 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
424 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
425};
426
427struct bfi_rport_create_req_s {
428 struct bfi_mhdr_s mh; /* common msg header */
429 u16 bfa_handle; /* host rport handle */
430 u16 max_frmsz; /* max rcv pdu size */
431 u32 pid:24, /* remote port ID */
432 lp_tag:8; /* local port tag */
433 u32 local_pid:24, /* local port ID */
434 cisc:8;
435 u8 fc_class; /* supported FC classes */
436 u8 vf_en; /* virtual fabric enable */
437 u16 vf_id; /* virtual fabric ID */
438};
439
440struct bfi_rport_create_rsp_s {
441 struct bfi_mhdr_s mh; /* common msg header */
442 u8 status; /* rport creation status */
443 u8 rsvd[3];
444 u16 bfa_handle; /* host rport handle */
445 u16 fw_handle; /* firmware rport handle */
446 struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */
447};
448
449struct bfa_rport_speed_req_s { /* NOTE(review): tag uses "bfa_" while siblings use "bfi_"; bfi_rport_h2i_msg_u declares speed_req as struct bfi_rport_speed_req_s * — verify intended tag */
450 struct bfi_mhdr_s mh; /* common msg header */
451 u16 fw_handle; /* firmware rport handle */
452 u8 speed; /* rport's speed via RPSC */
453 u8 rsvd;
454};
455
456struct bfi_rport_delete_req_s {
457 struct bfi_mhdr_s mh; /* common msg header */
458 u16 fw_handle; /* firmware rport handle */
459 u16 rsvd;
460};
461
462struct bfi_rport_delete_rsp_s {
463 struct bfi_mhdr_s mh; /* common msg header */
464 u16 bfa_handle; /* host rport handle */
465 u8 status; /* rport deletion status */
466 u8 rsvd;
467};
468
469struct bfi_rport_qos_scn_s {
470 struct bfi_mhdr_s mh; /* common msg header */
471 u16 bfa_handle; /* host rport handle */
472 u16 rsvd;
473 struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */
474 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
475};
476
477union bfi_rport_h2i_msg_u {
478 struct bfi_msg_s *msg;
479 struct bfi_rport_create_req_s *create_req;
480 struct bfi_rport_delete_req_s *delete_req;
481 struct bfi_rport_speed_req_s *speed_req;
482};
483
484union bfi_rport_i2h_msg_u {
485 struct bfi_msg_s *msg;
486 struct bfi_rport_create_rsp_s *create_rsp;
487 struct bfi_rport_delete_rsp_s *delete_rsp;
488 struct bfi_rport_qos_scn_s *qos_scn_evt;
489};
490
491/*
492 * Initiator mode I-T nexus interface defines.
493 */
494
495enum bfi_itnim_h2i {
496 BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */
497 BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
498};
499
500enum bfi_itnim_i2h {
501 BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
502 BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
503 BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
504};
505
506struct bfi_itnim_create_req_s {
507 struct bfi_mhdr_s mh; /* common msg header */
508 u16 fw_handle; /* f/w handle for itnim */
509 u8 class; /* FC class for IO */
510 u8 seq_rec; /* sequence recovery support */
511 u8 msg_no; /* seq id of the msg */
512};
513
514struct bfi_itnim_create_rsp_s {
515 struct bfi_mhdr_s mh; /* common msg header */
516 u16 bfa_handle; /* bfa handle for itnim */
517 u8 status; /* fcp request status */
518 u8 seq_id; /* seq id of the msg */
519};
520
521struct bfi_itnim_delete_req_s {
522 struct bfi_mhdr_s mh; /* common msg header */
523 u16 fw_handle; /* f/w itnim handle */
524 u8 seq_id; /* seq id of the msg */
525 u8 rsvd;
526};
527
528struct bfi_itnim_delete_rsp_s {
529 struct bfi_mhdr_s mh; /* common msg header */
530 u16 bfa_handle; /* bfa handle for itnim */
531 u8 status; /* fcp request status */
532 u8 seq_id; /* seq id of the msg */
533};
534
535struct bfi_itnim_sler_event_s {
536 struct bfi_mhdr_s mh; /* common msg header */
537 u16 bfa_handle; /* bfa handle for itnim */
538 u16 rsvd;
539};
540
541union bfi_itnim_h2i_msg_u {
542 struct bfi_itnim_create_req_s *create_req;
543 struct bfi_itnim_delete_req_s *delete_req;
544 struct bfi_msg_s *msg;
545};
546
547union bfi_itnim_i2h_msg_u {
548 struct bfi_itnim_create_rsp_s *create_rsp;
549 struct bfi_itnim_delete_rsp_s *delete_rsp;
550 struct bfi_itnim_sler_event_s *sler_event;
551 struct bfi_msg_s *msg;
552};
553
554/*
555 * Initiator mode IO interface defines.
556 */
557
558enum bfi_ioim_h2i {
559 BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */
560 BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */
561};
562
563enum bfi_ioim_i2h {
564 BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */
565 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */
566};
567
568/**
569 * IO command DIF info
570 */
571struct bfi_ioim_dif_s {
572 u32 dif_info[4];
573};
574
575/**
576 * FCP IO messages overview
577 *
578 * @note
579 * - Max CDB length supported is 64 bytes.
580 * - SCSI Linked commands and SCSI bi-directional Commands not
581 * supported.
582 *
583 */
584struct bfi_ioim_req_s {
585 struct bfi_mhdr_s mh; /* Common msg header */
586 u16 io_tag; /* I/O tag */
587 u16 rport_hdl; /* itnim/rport firmware handle */
588 struct fcp_cmnd_s cmnd; /* IO request info */
589
590 /**
591 * SG elements array within the IO request must be double word
592 * aligned. This aligment is required to optimize SGM setup for the IO.
593 */
594 struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
595 u8 io_timeout;
596 u8 dif_en;
597 u8 rsvd_a[2];
598 struct bfi_ioim_dif_s dif;
599};
600
601/**
602 * This table shows various IO status codes from firmware and their
603 * meaning. Host driver can use these status codes to further process
604 * IO completions.
605 *
606 * BFI_IOIM_STS_OK : IO completed with error free SCSI &
607 * transport status.
608 * io-tag can be reused.
609 *
610 * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error.
611 * - io-tag can be reused.
612 *
613 * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
614 * host request.
615 * - io-tag cannot be reused yet.
616 *
617 * BFI_IOIM_STS_ABORTED : IO was aborted successfully
618 * internally by f/w.
619 * - io-tag cannot be reused yet.
620 *
621 * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening
622 * in the firmware and
623 * - io-tag cannot be reused yet.
624 *
625 * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO
626 * with sequence level error
627 * logic and hence host needs to retry
628 * this IO with a different IO tag
629 * - io-tag cannot be used yet.
630 *
631 * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host
632 * is required because 2 consecutive ABTS
633 * timedout and host needs logout and
634 * re-login with the target
635 * - io-tag cannot be used yet.
636 *
637 * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good,
638 * but the data tranferred is less than
639 * the fcp data length in the command.
640 * ex. SCSI INQUIRY where transferred
641 * data length and residue count in FCP
642 * response accounts for total fcp-dl
643 * - io-tag can be reused.
644 *
645 * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good,
646 * but the data transerred is more than
647 * fcp data length in the command. ex.
648 * TAPE IOs where blocks can of unequal
649 * lengths.
650 * - io-tag can be reused.
651 *
652 * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag
653 * during abort process
654 * - io-tag can be reused.
655 *
656 * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error.
657 * ex target sent more data than
658 * requested, or there was data frame
659 * loss and other reasons
660 * - io-tag cannot be used yet.
661 *
662 * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF
663 * CRC err or Ref Tag err or App tag err.
664 * - io-tag can be reused.
665 *
666 * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task
667 * Management command from the host
668 * - io-tag can be reused.
669 *
670 * BFI_IOIM_STS_UTAG : Firmware does not know about this
671 * io_tag.
672 * - io-tag can be reused.
673 */
674enum bfi_ioim_status {
675 BFI_IOIM_STS_OK = 0,
676 BFI_IOIM_STS_HOST_ABORTED = 1,
677 BFI_IOIM_STS_ABORTED = 2,
678 BFI_IOIM_STS_TIMEDOUT = 3,
679 BFI_IOIM_STS_RES_FREE = 4,
680 BFI_IOIM_STS_SQER_NEEDED = 5,
681 BFI_IOIM_STS_PROTO_ERR = 6,
682 BFI_IOIM_STS_UTAG = 7,
683 BFI_IOIM_STS_PATHTOV = 8,
684};
685
686#define BFI_IOIM_SNSLEN (256)
687/**
688 * I/O response message
689 */
690struct bfi_ioim_rsp_s {
691 struct bfi_mhdr_s mh; /* common msg header */
692 u16 io_tag; /* completed IO tag */
693 u16 bfa_rport_hndl; /* releated rport handle */
694 u8 io_status; /* IO completion status */
695 u8 reuse_io_tag; /* IO tag can be reused */
696 u16 abort_tag; /* host abort request tag */
697 u8 scsi_status; /* scsi status from target */
698 u8 sns_len; /* scsi sense length */
699 u8 resid_flags; /* IO residue flags */
700 u8 rsvd_a;
701 u32 residue; /* IO residual length in bytes */
702 u32 rsvd_b[3];
703};
704
705struct bfi_ioim_abort_req_s {
706 struct bfi_mhdr_s mh; /* Common msg header */
707 u16 io_tag; /* I/O tag */
708 u16 abort_tag; /* unique request tag */
709};
710
711/*
712 * Initiator mode task management command interface defines.
713 */
714
715enum bfi_tskim_h2i {
716 BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */
717 BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */
718};
719
720enum bfi_tskim_i2h {
721 BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
722};
723
724struct bfi_tskim_req_s {
725 struct bfi_mhdr_s mh; /* Common msg header */
726 u16 tsk_tag; /* task management tag */
727 u16 itn_fhdl; /* itn firmware handle */
728 lun_t lun; /* LU number */
729 u8 tm_flags; /* see enum fcp_tm_cmnd */
730 u8 t_secs; /* Timeout value in seconds */
731 u8 rsvd[2];
732};
733
734struct bfi_tskim_abortreq_s {
735 struct bfi_mhdr_s mh; /* Common msg header */
736 u16 tsk_tag; /* task management tag */
737 u16 rsvd;
738};
739
740enum bfi_tskim_status {
741 /*
742 * Following are FCP-4 spec defined status codes,
743 * **DO NOT CHANGE THEM **
744 */
745 BFI_TSKIM_STS_OK = 0,
746 BFI_TSKIM_STS_NOT_SUPP = 4,
747 BFI_TSKIM_STS_FAILED = 5,
748
749 /**
750 * Defined by BFA
751 */
752 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
753 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
754};
755
756struct bfi_tskim_rsp_s {
757 struct bfi_mhdr_s mh; /* Common msg header */
758 u16 tsk_tag; /* task mgmt cmnd tag */
759 u8 tsk_status; /* @ref bfi_tskim_status */
760 u8 rsvd;
761};
762
763#pragma pack()
764
765#endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/fab.c b/drivers/scsi/bfa/fab.c
deleted file mode 100644
index 7e3a4d5d7bb4..000000000000
--- a/drivers/scsi/bfa/fab.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "lport_priv.h"
23
24/**
25 * fab.c port fab implementation.
26 */
27
28/**
29 * bfa_fcs_port_fab_public port fab public functions
30 */
31
32/**
33 * Called by port to initialize fabric services of the base port.
34 */
35void
36bfa_fcs_port_fab_init(struct bfa_fcs_port_s *port)
37{
38 bfa_fcs_port_ns_init(port);
39 bfa_fcs_port_scn_init(port);
40 bfa_fcs_port_ms_init(port);
41}
42
43/**
44 * Called by port to notify transition to online state.
45 */
46void
47bfa_fcs_port_fab_online(struct bfa_fcs_port_s *port)
48{
49 bfa_fcs_port_ns_online(port);
50 bfa_fcs_port_scn_online(port);
51}
52
53/**
54 * Called by port to notify transition to offline state.
55 */
56void
57bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *port)
58{
59 bfa_fcs_port_ns_offline(port);
60 bfa_fcs_port_scn_offline(port);
61 bfa_fcs_port_ms_offline(port);
62}
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
deleted file mode 100644
index ddd4ba9317e6..000000000000
--- a/drivers/scsi/bfa/fabric.c
+++ /dev/null
@@ -1,1323 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fabric.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_lport.h"
24#include "fcs_vport.h"
25#include "fcs_trcmod.h"
26#include "fcs_fcxp.h"
27#include "fcs_auth.h"
28#include "fcs.h"
29#include "fcbuild.h"
30#include <log/bfa_log_fcs.h>
31#include <aen/bfa_aen_port.h>
32#include <bfa_svc.h>
33
34BFA_TRC_FILE(FCS, FABRIC);
35
36#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */
37#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
38
39#define bfa_fcs_fabric_set_opertype(__fabric) do { \
40 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
41 == BFA_PPORT_TOPOLOGY_P2P) \
42 (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \
43 else \
44 (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \
45} while (0)
46
47/*
48 * forward declarations
49 */
50static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
51static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
52static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
53static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
54static void bfa_fcs_fabric_delay(void *cbarg);
55static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
56static void bfa_fcs_fabric_delete_comp(void *cbarg);
57static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
58 struct fchs_s *fchs, u16 len);
59static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
60 struct fchs_s *fchs, u16 len);
61static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
62static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
63 struct bfa_fcxp_s *fcxp,
64 void *cbarg, bfa_status_t status,
65 u32 rsp_len,
66 u32 resid_len,
67 struct fchs_s *rspfchs);
68/**
69 * fcs_fabric_sm fabric state machine functions
70 */
71
72/**
73 * Fabric state machine events
74 */
75enum bfa_fcs_fabric_event {
76 BFA_FCS_FABRIC_SM_CREATE = 1, /* fabric create from driver */
77 BFA_FCS_FABRIC_SM_DELETE = 2, /* fabric delete from driver */
78 BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
79 BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
80 BFA_FCS_FABRIC_SM_CONT_OP = 5, /* continue op from flogi/auth */
81 BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* continue op from flogi/auth */
82 BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* no fabric from flogi/auth
83 */
84 BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* perform EVFP from
85 *flogi/auth */
86 BFA_FCS_FABRIC_SM_ISOLATE = 9, /* isolate from EVFP processing */
87 BFA_FCS_FABRIC_SM_NO_TAGGING = 10,/* no VFT tagging from EVFP */
88 BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
89 BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* authentication failed */
90 BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* authentication successful
91 */
92 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
93 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
94 BFA_FCS_FABRIC_SM_START = 16, /* fabric delete from driver */
95};
96
97static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
98 enum bfa_fcs_fabric_event event);
99static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
100 enum bfa_fcs_fabric_event event);
101static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
102 enum bfa_fcs_fabric_event event);
103static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
104 enum bfa_fcs_fabric_event event);
105static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
106 enum bfa_fcs_fabric_event event);
107static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
108 enum bfa_fcs_fabric_event event);
109static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
110 enum bfa_fcs_fabric_event event);
111static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
112 enum bfa_fcs_fabric_event event);
113static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
114 enum bfa_fcs_fabric_event event);
115static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
116 enum bfa_fcs_fabric_event event);
117static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
118 enum bfa_fcs_fabric_event event);
119static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
120 enum bfa_fcs_fabric_event event);
121static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
122 enum bfa_fcs_fabric_event event);
123static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
124 enum bfa_fcs_fabric_event event);
125/**
126 * Beginning state before fabric creation.
127 */
128static void
129bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
130 enum bfa_fcs_fabric_event event)
131{
132 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
133 bfa_trc(fabric->fcs, event);
134
135 switch (event) {
136 case BFA_FCS_FABRIC_SM_CREATE:
137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
138 bfa_fcs_fabric_init(fabric);
139 bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
140 break;
141
142 case BFA_FCS_FABRIC_SM_LINK_UP:
143 case BFA_FCS_FABRIC_SM_LINK_DOWN:
144 break;
145
146 default:
147 bfa_sm_fault(fabric->fcs, event);
148 }
149}
150
151/**
152 * Beginning state before fabric creation.
153 */
154static void
155bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
156 enum bfa_fcs_fabric_event event)
157{
158 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
159 bfa_trc(fabric->fcs, event);
160
161 switch (event) {
162 case BFA_FCS_FABRIC_SM_START:
163 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
164 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
165 bfa_fcs_fabric_login(fabric);
166 } else
167 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
168 break;
169
170 case BFA_FCS_FABRIC_SM_LINK_UP:
171 case BFA_FCS_FABRIC_SM_LINK_DOWN:
172 break;
173
174 case BFA_FCS_FABRIC_SM_DELETE:
175 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
176 bfa_fcs_modexit_comp(fabric->fcs);
177 break;
178
179 default:
180 bfa_sm_fault(fabric->fcs, event);
181 }
182}
183
184/**
185 * Link is down, awaiting LINK UP event from port. This is also the
186 * first state at fabric creation.
187 */
188static void
189bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
190 enum bfa_fcs_fabric_event event)
191{
192 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
193 bfa_trc(fabric->fcs, event);
194
195 switch (event) {
196 case BFA_FCS_FABRIC_SM_LINK_UP:
197 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
198 bfa_fcs_fabric_login(fabric);
199 break;
200
201 case BFA_FCS_FABRIC_SM_RETRY_OP:
202 break;
203
204 case BFA_FCS_FABRIC_SM_DELETE:
205 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
206 bfa_fcs_fabric_delete(fabric);
207 break;
208
209 default:
210 bfa_sm_fault(fabric->fcs, event);
211 }
212}
213
214/**
215 * FLOGI is in progress, awaiting FLOGI reply.
216 */
217static void
218bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
219 enum bfa_fcs_fabric_event event)
220{
221 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
222 bfa_trc(fabric->fcs, event);
223
224 switch (event) {
225 case BFA_FCS_FABRIC_SM_CONT_OP:
226
227 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
228 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
229
230 if (fabric->auth_reqd && fabric->is_auth) {
231 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
232 bfa_trc(fabric->fcs, event);
233 } else {
234 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
235 bfa_fcs_fabric_notify_online(fabric);
236 }
237 break;
238
239 case BFA_FCS_FABRIC_SM_RETRY_OP:
240 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
241 bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
242 bfa_fcs_fabric_delay, fabric,
243 BFA_FCS_FABRIC_RETRY_DELAY);
244 break;
245
246 case BFA_FCS_FABRIC_SM_LOOPBACK:
247 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
248 bfa_lps_discard(fabric->lps);
249 bfa_fcs_fabric_set_opertype(fabric);
250 break;
251
252 case BFA_FCS_FABRIC_SM_NO_FABRIC:
253 fabric->fab_type = BFA_FCS_FABRIC_N2N;
254 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
255 bfa_fcs_fabric_notify_online(fabric);
256 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
257 break;
258
259 case BFA_FCS_FABRIC_SM_LINK_DOWN:
260 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
261 bfa_lps_discard(fabric->lps);
262 break;
263
264 case BFA_FCS_FABRIC_SM_DELETE:
265 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
266 bfa_lps_discard(fabric->lps);
267 bfa_fcs_fabric_delete(fabric);
268 break;
269
270 default:
271 bfa_sm_fault(fabric->fcs, event);
272 }
273}
274
275
276static void
277bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
278 enum bfa_fcs_fabric_event event)
279{
280 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
281 bfa_trc(fabric->fcs, event);
282
283 switch (event) {
284 case BFA_FCS_FABRIC_SM_DELAYED:
285 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
286 bfa_fcs_fabric_login(fabric);
287 break;
288
289 case BFA_FCS_FABRIC_SM_LINK_DOWN:
290 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
291 bfa_timer_stop(&fabric->delay_timer);
292 break;
293
294 case BFA_FCS_FABRIC_SM_DELETE:
295 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
296 bfa_timer_stop(&fabric->delay_timer);
297 bfa_fcs_fabric_delete(fabric);
298 break;
299
300 default:
301 bfa_sm_fault(fabric->fcs, event);
302 }
303}
304
305/**
306 * Authentication is in progress, awaiting authentication results.
307 */
308static void
309bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
310 enum bfa_fcs_fabric_event event)
311{
312 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
313 bfa_trc(fabric->fcs, event);
314
315 switch (event) {
316 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
317 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
318 bfa_lps_discard(fabric->lps);
319 break;
320
321 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
322 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
323 bfa_fcs_fabric_notify_online(fabric);
324 break;
325
326 case BFA_FCS_FABRIC_SM_PERF_EVFP:
327 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
328 break;
329
330 case BFA_FCS_FABRIC_SM_LINK_DOWN:
331 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
332 bfa_lps_discard(fabric->lps);
333 break;
334
335 case BFA_FCS_FABRIC_SM_DELETE:
336 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
337 bfa_fcs_fabric_delete(fabric);
338 break;
339
340 default:
341 bfa_sm_fault(fabric->fcs, event);
342 }
343}
344
345/**
346 * Authentication failed
347 */
348static void
349bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
350 enum bfa_fcs_fabric_event event)
351{
352 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
353 bfa_trc(fabric->fcs, event);
354
355 switch (event) {
356 case BFA_FCS_FABRIC_SM_LINK_DOWN:
357 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
358 bfa_fcs_fabric_notify_offline(fabric);
359 break;
360
361 case BFA_FCS_FABRIC_SM_DELETE:
362 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
363 bfa_fcs_fabric_delete(fabric);
364 break;
365
366 default:
367 bfa_sm_fault(fabric->fcs, event);
368 }
369}
370
371/**
372 * Port is in loopback mode.
373 */
374static void
375bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
376 enum bfa_fcs_fabric_event event)
377{
378 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
379 bfa_trc(fabric->fcs, event);
380
381 switch (event) {
382 case BFA_FCS_FABRIC_SM_LINK_DOWN:
383 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
384 bfa_fcs_fabric_notify_offline(fabric);
385 break;
386
387 case BFA_FCS_FABRIC_SM_DELETE:
388 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
389 bfa_fcs_fabric_delete(fabric);
390 break;
391
392 default:
393 bfa_sm_fault(fabric->fcs, event);
394 }
395}
396
397/**
398 * There is no attached fabric - private loop or NPort-to-NPort topology.
399 */
400static void
401bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
402 enum bfa_fcs_fabric_event event)
403{
404 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
405 bfa_trc(fabric->fcs, event);
406
407 switch (event) {
408 case BFA_FCS_FABRIC_SM_LINK_DOWN:
409 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
410 bfa_lps_discard(fabric->lps);
411 bfa_fcs_fabric_notify_offline(fabric);
412 break;
413
414 case BFA_FCS_FABRIC_SM_DELETE:
415 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
416 bfa_fcs_fabric_delete(fabric);
417 break;
418
419 case BFA_FCS_FABRIC_SM_NO_FABRIC:
420 bfa_trc(fabric->fcs, fabric->bb_credit);
421 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
422 break;
423
424 default:
425 bfa_sm_fault(fabric->fcs, event);
426 }
427}
428
429/**
430 * Fabric is online - normal operating state.
431 */
432static void
433bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
434 enum bfa_fcs_fabric_event event)
435{
436 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
437 bfa_trc(fabric->fcs, event);
438
439 switch (event) {
440 case BFA_FCS_FABRIC_SM_LINK_DOWN:
441 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
442 bfa_lps_discard(fabric->lps);
443 bfa_fcs_fabric_notify_offline(fabric);
444 break;
445
446 case BFA_FCS_FABRIC_SM_DELETE:
447 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
448 bfa_fcs_fabric_delete(fabric);
449 break;
450
451 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
452 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
453 bfa_lps_discard(fabric->lps);
454 break;
455
456 case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
457 break;
458
459 default:
460 bfa_sm_fault(fabric->fcs, event);
461 }
462}
463
464/**
465 * Exchanging virtual fabric parameters.
466 */
467static void
468bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
469 enum bfa_fcs_fabric_event event)
470{
471 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
472 bfa_trc(fabric->fcs, event);
473
474 switch (event) {
475 case BFA_FCS_FABRIC_SM_CONT_OP:
476 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
477 break;
478
479 case BFA_FCS_FABRIC_SM_ISOLATE:
480 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
481 break;
482
483 default:
484 bfa_sm_fault(fabric->fcs, event);
485 }
486}
487
488/**
489 * EVFP exchange complete and VFT tagging is enabled.
490 */
491static void
492bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
493 enum bfa_fcs_fabric_event event)
494{
495 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
496 bfa_trc(fabric->fcs, event);
497}
498
499/**
500 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
501 */
502static void
503bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
504 enum bfa_fcs_fabric_event event)
505{
506 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
507 bfa_trc(fabric->fcs, event);
508
509 bfa_log(fabric->fcs->logm, BFA_LOG_FCS_FABRIC_ISOLATED,
510 fabric->bport.port_cfg.pwwn, fabric->fcs->port_vfid,
511 fabric->event_arg.swp_vfid);
512}
513
514/**
515 * Fabric is being deleted, awaiting vport delete completions.
516 */
517static void
518bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
519 enum bfa_fcs_fabric_event event)
520{
521 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
522 bfa_trc(fabric->fcs, event);
523
524 switch (event) {
525 case BFA_FCS_FABRIC_SM_DELCOMP:
526 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
527 bfa_fcs_modexit_comp(fabric->fcs);
528 break;
529
530 case BFA_FCS_FABRIC_SM_LINK_UP:
531 break;
532
533 case BFA_FCS_FABRIC_SM_LINK_DOWN:
534 bfa_fcs_fabric_notify_offline(fabric);
535 break;
536
537 default:
538 bfa_sm_fault(fabric->fcs, event);
539 }
540}
541
542
543
544/**
545 * fcs_fabric_private fabric private functions
546 */
547
548static void
549bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
550{
551 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
552
553 port_cfg->roles = BFA_PORT_ROLE_FCP_IM;
554 port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
555 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
556}
557
558/**
559 * Port Symbolic Name Creation for base port.
560 */
561void
562bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
563{
564 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
565 char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
566 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
567
568 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
569
570 /*
571 * Model name/number
572 */
573 strncpy((char *)&port_cfg->sym_name, model,
574 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
575 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
576 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
577
578 /*
579 * Driver Version
580 */
581 strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
582 BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
583 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
584 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
585
586 /*
587 * Host machine name
588 */
589 strncat((char *)&port_cfg->sym_name,
590 (char *)driver_info->host_machine_name,
591 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
592 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
593 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
594
595 /*
596 * Host OS Info :
597 * If OS Patch Info is not there, do not truncate any bytes from the
598 * OS name string and instead copy the entire OS info string (64 bytes).
599 */
600 if (driver_info->host_os_patch[0] == '\0') {
601 strncat((char *)&port_cfg->sym_name,
602 (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN);
603 strncat((char *)&port_cfg->sym_name,
604 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
605 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
606 } else {
607 strncat((char *)&port_cfg->sym_name,
608 (char *)driver_info->host_os_name,
609 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
610 strncat((char *)&port_cfg->sym_name,
611 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
612 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
613
614 /*
615 * Append host OS Patch Info
616 */
617 strncat((char *)&port_cfg->sym_name,
618 (char *)driver_info->host_os_patch,
619 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
620 }
621
622 /*
623 * null terminate
624 */
625 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
626}
627
628/**
629 * bfa lps login completion callback
630 */
631void
632bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
633{
634 struct bfa_fcs_fabric_s *fabric = uarg;
635
636 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
637 bfa_trc(fabric->fcs, status);
638
639 switch (status) {
640 case BFA_STATUS_OK:
641 fabric->stats.flogi_accepts++;
642 break;
643
644 case BFA_STATUS_INVALID_MAC:
645 /*
646 * Only for CNA
647 */
648 fabric->stats.flogi_acc_err++;
649 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
650
651 return;
652
653 case BFA_STATUS_EPROTOCOL:
654 switch (bfa_lps_get_extstatus(fabric->lps)) {
655 case BFA_EPROTO_BAD_ACCEPT:
656 fabric->stats.flogi_acc_err++;
657 break;
658
659 case BFA_EPROTO_UNKNOWN_RSP:
660 fabric->stats.flogi_unknown_rsp++;
661 break;
662
663 default:
664 break;
665 }
666 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
667
668 return;
669
670 case BFA_STATUS_FABRIC_RJT:
671 fabric->stats.flogi_rejects++;
672 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
673 return;
674
675 default:
676 fabric->stats.flogi_rsp_err++;
677 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
678 return;
679 }
680
681 fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
682 bfa_trc(fabric->fcs, fabric->bb_credit);
683
684 if (!bfa_lps_is_brcd_fabric(fabric->lps))
685 fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps);
686
687 /*
688 * Check port type. It should be 1 = F-port.
689 */
690 if (bfa_lps_is_fport(fabric->lps)) {
691 fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
692 fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
693 fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
694 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
695 } else {
696 /*
697 * Nport-2-Nport direct attached
698 */
699 fabric->bport.port_topo.pn2n.rem_port_wwn =
700 bfa_lps_get_peer_pwwn(fabric->lps);
701 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
702 }
703
704 bfa_trc(fabric->fcs, fabric->bport.pid);
705 bfa_trc(fabric->fcs, fabric->is_npiv);
706 bfa_trc(fabric->fcs, fabric->is_auth);
707}
708
709/**
710 * Allocate and send FLOGI.
711 */
712static void
713bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
714{
715 struct bfa_s *bfa = fabric->fcs->bfa;
716 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
717 u8 alpa = 0;
718
719 if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
720 alpa = bfa_fcport_get_myalpa(bfa);
721
722 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
723 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
724
725 fabric->stats.flogi_sent++;
726}
727
728static void
729bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
730{
731 struct bfa_fcs_vport_s *vport;
732 struct list_head *qe, *qen;
733
734 bfa_trc(fabric->fcs, fabric->fabric_name);
735
736 bfa_fcs_fabric_set_opertype(fabric);
737 fabric->stats.fabric_onlines++;
738
739 /**
740 * notify online event to base and then virtual ports
741 */
742 bfa_fcs_port_online(&fabric->bport);
743
744 list_for_each_safe(qe, qen, &fabric->vport_q) {
745 vport = (struct bfa_fcs_vport_s *)qe;
746 bfa_fcs_vport_online(vport);
747 }
748}
749
750static void
751bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
752{
753 struct bfa_fcs_vport_s *vport;
754 struct list_head *qe, *qen;
755
756 bfa_trc(fabric->fcs, fabric->fabric_name);
757 fabric->stats.fabric_offlines++;
758
759 /**
760 * notify offline event first to vports and then base port.
761 */
762 list_for_each_safe(qe, qen, &fabric->vport_q) {
763 vport = (struct bfa_fcs_vport_s *)qe;
764 bfa_fcs_vport_offline(vport);
765 }
766
767 bfa_fcs_port_offline(&fabric->bport);
768
769 fabric->fabric_name = 0;
770 fabric->fabric_ip_addr[0] = 0;
771}
772
773static void
774bfa_fcs_fabric_delay(void *cbarg)
775{
776 struct bfa_fcs_fabric_s *fabric = cbarg;
777
778 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
779}
780
781/**
782 * Delete all vports and wait for vport delete completions.
783 */
784static void
785bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
786{
787 struct bfa_fcs_vport_s *vport;
788 struct list_head *qe, *qen;
789
790 list_for_each_safe(qe, qen, &fabric->vport_q) {
791 vport = (struct bfa_fcs_vport_s *)qe;
792 bfa_fcs_vport_fcs_delete(vport);
793 }
794
795 bfa_fcs_port_delete(&fabric->bport);
796 bfa_wc_wait(&fabric->wc);
797}
798
799static void
800bfa_fcs_fabric_delete_comp(void *cbarg)
801{
802 struct bfa_fcs_fabric_s *fabric = cbarg;
803
804 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
805}
806
807
808
809/**
810 * fcs_fabric_public fabric public functions
811 */
812
813/**
814 * Attach time initialization
815 */
816void
817bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
818{
819 struct bfa_fcs_fabric_s *fabric;
820
821 fabric = &fcs->fabric;
822 bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
823
824 /**
825 * Initialize base fabric.
826 */
827 fabric->fcs = fcs;
828 INIT_LIST_HEAD(&fabric->vport_q);
829 INIT_LIST_HEAD(&fabric->vf_q);
830 fabric->lps = bfa_lps_alloc(fcs->bfa);
831 bfa_assert(fabric->lps);
832
833 /**
834 * Initialize fabric delete completion handler. Fabric deletion is complete
835 * when the last vport delete is complete.
836 */
837 bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
838 bfa_wc_up(&fabric->wc); /* For the base port */
839
840 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
841 bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
842}
843
844void
845bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
846{
847 bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
848 bfa_trc(fcs, 0);
849}
850
851/**
852 * Module cleanup
853 */
854void
855bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
856{
857 struct bfa_fcs_fabric_s *fabric;
858
859 bfa_trc(fcs, 0);
860
861 /**
862 * Cleanup base fabric.
863 */
864 fabric = &fcs->fabric;
865 bfa_lps_delete(fabric->lps);
866 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
867}
868
869/**
870 * Fabric module start -- kick starts FCS actions
871 */
872void
873bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
874{
875 struct bfa_fcs_fabric_s *fabric;
876
877 bfa_trc(fcs, 0);
878 fabric = &fcs->fabric;
879 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
880}
881
/**
 * Suspend fabric activity as part of driver suspend.
 *
 * Intentionally empty in this version: no action is taken on suspend.
 */
void
bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
{
}
889
890bfa_boolean_t
891bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
892{
893 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
894}
895
896bfa_boolean_t
897bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
898{
899 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
900}
901
902enum bfa_pport_type
903bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
904{
905 return fabric->oper_type;
906}
907
908/**
909 * Link up notification from BFA physical port module.
910 */
911void
912bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
913{
914 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
915 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
916}
917
918/**
919 * Link down notification from BFA physical port module.
920 */
921void
922bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
923{
924 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
925 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
926}
927
928/**
929 * A child vport is being created in the fabric.
930 *
931 * Call from vport module at vport creation. A list of base port and vports
932 * belonging to a fabric is maintained to propagate link events.
933 *
934 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
935 * param[in] vport - Vport being created.
936 *
937 * @return None (always succeeds)
938 */
939void
940bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
941 struct bfa_fcs_vport_s *vport)
942{
943 /**
944 * - add vport to fabric's vport_q
945 */
946 bfa_trc(fabric->fcs, fabric->vf_id);
947
948 list_add_tail(&vport->qe, &fabric->vport_q);
949 fabric->num_vports++;
950 bfa_wc_up(&fabric->wc);
951}
952
953/**
954 * A child vport is being deleted from fabric.
955 *
956 * Vport is being deleted.
957 */
958void
959bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
960 struct bfa_fcs_vport_s *vport)
961{
962 list_del(&vport->qe);
963 fabric->num_vports--;
964 bfa_wc_down(&fabric->wc);
965}
966
967/**
968 * Base port is deleted.
969 */
970void
971bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
972{
973 bfa_wc_down(&fabric->wc);
974}
975
976/**
977 * Check if fabric is online.
978 *
979 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
980 *
981 * @return TRUE/FALSE
982 */
983int
984bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
985{
986 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
987}
988
989
990bfa_status_t
991bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
992 struct bfa_port_cfg_s *port_cfg,
993 struct bfad_vf_s *vf_drv)
994{
995 bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
996 return BFA_STATUS_OK;
997}
998
999/**
1000 * Lookup for a vport withing a fabric given its pwwn
1001 */
1002struct bfa_fcs_vport_s *
1003bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1004{
1005 struct bfa_fcs_vport_s *vport;
1006 struct list_head *qe;
1007
1008 list_for_each(qe, &fabric->vport_q) {
1009 vport = (struct bfa_fcs_vport_s *)qe;
1010 if (bfa_fcs_port_get_pwwn(&vport->lport) == pwwn)
1011 return vport;
1012 }
1013
1014 return NULL;
1015}
1016
1017/**
1018 * In a given fabric, return the number of lports.
1019 *
1020 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
1021 *
1022* @return : 1 or more.
1023 */
1024u16
1025bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
1026{
1027 return fabric->num_vports;
1028}
1029
1030/*
1031 * Get OUI of the attached switch.
1032 *
1033 * Note : Use of this function should be avoided as much as possible.
1034 * This function should be used only if there is any requirement
1035 * to check for FOS version below 6.3.
1036 * To check if the attached fabric is a brocade fabric, use
1037 * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
1038 * or above only.
1039 */
1040
1041u16
1042bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1043{
1044 wwn_t fab_nwwn;
1045 u8 *tmp;
1046 u16 oui;
1047
1048 fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
1049
1050 tmp = (uint8_t *)&fab_nwwn;
1051 oui = (tmp[3] << 8) | tmp[4];
1052
1053 return oui;
1054}
1055
1056/**
1057 * Unsolicited frame receive handling.
1058 */
1059void
1060bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1061 u16 len)
1062{
1063 u32 pid = fchs->d_id;
1064 struct bfa_fcs_vport_s *vport;
1065 struct list_head *qe;
1066 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1067 struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
1068
1069 bfa_trc(fabric->fcs, len);
1070 bfa_trc(fabric->fcs, pid);
1071
1072 /**
1073 * Look for our own FLOGI frames being looped back. This means an
1074 * external loopback cable is in place. Our own FLOGI frames are
1075 * sometimes looped back when switch port gets temporarily bypassed.
1076 */
1077 if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT))
1078 && (els_cmd->els_code == FC_ELS_FLOGI)
1079 && (flogi->port_name == bfa_fcs_port_get_pwwn(&fabric->bport))) {
1080 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
1081 return;
1082 }
1083
1084 /**
1085 * FLOGI/EVFP exchanges should be consumed by base fabric.
1086 */
1087 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
1088 bfa_trc(fabric->fcs, pid);
1089 bfa_fcs_fabric_process_uf(fabric, fchs, len);
1090 return;
1091 }
1092
1093 if (fabric->bport.pid == pid) {
1094 /**
1095 * All authentication frames should be routed to auth
1096 */
1097 bfa_trc(fabric->fcs, els_cmd->els_code);
1098 if (els_cmd->els_code == FC_ELS_AUTH) {
1099 bfa_trc(fabric->fcs, els_cmd->els_code);
1100 fabric->auth.response = (u8 *) els_cmd;
1101 return;
1102 }
1103
1104 bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
1105 bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
1106 return;
1107 }
1108
1109 /**
1110 * look for a matching local port ID
1111 */
1112 list_for_each(qe, &fabric->vport_q) {
1113 vport = (struct bfa_fcs_vport_s *)qe;
1114 if (vport->lport.pid == pid) {
1115 bfa_fcs_port_uf_recv(&vport->lport, fchs, len);
1116 return;
1117 }
1118 }
1119 bfa_trc(fabric->fcs, els_cmd->els_code);
1120 bfa_fcs_port_uf_recv(&fabric->bport, fchs, len);
1121}
1122
1123/**
1124 * Unsolicited frames to be processed by fabric.
1125 */
1126static void
1127bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1128 u16 len)
1129{
1130 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1131
1132 bfa_trc(fabric->fcs, els_cmd->els_code);
1133
1134 switch (els_cmd->els_code) {
1135 case FC_ELS_FLOGI:
1136 bfa_fcs_fabric_process_flogi(fabric, fchs, len);
1137 break;
1138
1139 default:
1140 /*
1141 * need to generate a LS_RJT
1142 */
1143 break;
1144 }
1145}
1146
1147/**
1148 * Process incoming FLOGI
1149 */
1150static void
1151bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
1152 struct fchs_s *fchs, u16 len)
1153{
1154 struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
1155 struct bfa_fcs_port_s *bport = &fabric->bport;
1156
1157 bfa_trc(fabric->fcs, fchs->s_id);
1158
1159 fabric->stats.flogi_rcvd++;
1160 /*
1161 * Check port type. It should be 0 = n-port.
1162 */
1163 if (flogi->csp.port_type) {
1164 /*
1165 * @todo: may need to send a LS_RJT
1166 */
1167 bfa_trc(fabric->fcs, flogi->port_name);
1168 fabric->stats.flogi_rejected++;
1169 return;
1170 }
1171
1172 fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
1173 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
1174 bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
1175
1176 /*
1177 * Send a Flogi Acc
1178 */
1179 bfa_fcs_fabric_send_flogi_acc(fabric);
1180 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
1181}
1182
1183static void
1184bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1185{
1186 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
1187 struct bfa_fcs_port_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
1188 struct bfa_s *bfa = fabric->fcs->bfa;
1189 struct bfa_fcxp_s *fcxp;
1190 u16 reqlen;
1191 struct fchs_s fchs;
1192
1193 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
1194 /**
1195 * Do not expect this failure -- expect remote node to retry
1196 */
1197 if (!fcxp)
1198 return;
1199
1200 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1201 bfa_os_hton3b(FC_FABRIC_PORT),
1202 n2n_port->reply_oxid, pcfg->pwwn,
1203 pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
1204 bfa_fcport_get_rx_bbcredit(bfa));
1205
1206 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
1207 BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
1208 bfa_fcs_fabric_flogiacc_comp, fabric,
1209 FC_MAX_PDUSZ, 0); /* Timeout 0 indicates no
1210 * response expected
1211 */
1212}
1213
1214/**
1215 * Flogi Acc completion callback.
1216 */
1217static void
1218bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1219 bfa_status_t status, u32 rsp_len,
1220 u32 resid_len, struct fchs_s *rspfchs)
1221{
1222 struct bfa_fcs_fabric_s *fabric = cbarg;
1223
1224 bfa_trc(fabric->fcs, status);
1225}
1226
1227/*
1228 *
1229 * @param[in] fabric - fabric
1230 * @param[in] result - 1
1231 *
1232 * @return - none
1233 */
1234void
1235bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric, enum auth_status status)
1236{
1237 bfa_trc(fabric->fcs, status);
1238
1239 if (status == FC_AUTH_STATE_SUCCESS)
1240 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_SUCCESS);
1241 else
1242 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_FAILED);
1243}
1244
1245/**
1246 * Send AEN notification
1247 */
1248static void
1249bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
1250 enum bfa_port_aen_event event)
1251{
1252 union bfa_aen_data_u aen_data;
1253 struct bfa_log_mod_s *logmod = port->fcs->logm;
1254 wwn_t pwwn = bfa_fcs_port_get_pwwn(port);
1255 wwn_t fwwn = bfa_fcs_port_get_fabric_name(port);
1256 char pwwn_ptr[BFA_STRING_32];
1257 char fwwn_ptr[BFA_STRING_32];
1258
1259 wwn2str(pwwn_ptr, pwwn);
1260 wwn2str(fwwn_ptr, fwwn);
1261
1262 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
1263 pwwn_ptr, fwwn_ptr);
1264
1265 aen_data.port.pwwn = pwwn;
1266 aen_data.port.fwwn = fwwn;
1267}
1268
1269/*
1270 *
1271 * @param[in] fabric - fabric
1272 * @param[in] wwn_t - new fabric name
1273 *
1274 * @return - none
1275 */
1276void
1277bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1278 wwn_t fabric_name)
1279{
1280 bfa_trc(fabric->fcs, fabric_name);
1281
1282 if (fabric->fabric_name == 0) {
1283 /*
1284 * With BRCD switches, we don't get Fabric Name in FLOGI.
1285 * Don't generate a fabric name change event in this case.
1286 */
1287 fabric->fabric_name = fabric_name;
1288 } else {
1289 fabric->fabric_name = fabric_name;
1290 /*
1291 * Generate a Event
1292 */
1293 bfa_fcs_fabric_aen_post(&fabric->bport,
1294 BFA_PORT_AEN_FABRIC_NAME_CHANGE);
1295 }
1296
1297}
1298
1299/**
1300 *
1301 * @param[in] fabric - fabric
1302 * @param[in] node_symname -
1303 * Caller allocated buffer to receive the symbolic name
1304 *
1305 * @return - none
1306 */
1307void
1308bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname)
1309{
1310 bfa_os_memcpy(node_symname,
1311 fcs->fabric.bport.port_cfg.sym_name.symname,
1312 BFA_SYMNAME_MAXLEN);
1313}
1314
/**
 * LPS FLOGO completion callback.  Not used by FCS; intentionally
 * empty.
 */
void
bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
{
}
1322
1323
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
deleted file mode 100644
index 981d98d542b9..000000000000
--- a/drivers/scsi/bfa/fcbuild.h
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * fcbuild.h - FC link service frame building and parsing routines
19 */
20
21#ifndef __FCBUILD_H__
22#define __FCBUILD_H__
23
24#include <bfa_os_inc.h>
25#include <protocol/fc.h>
26#include <protocol/fcp.h>
27#include <protocol/ct.h>
28#include <defs/bfa_defs_port.h>
29#include <defs/bfa_defs_pport.h>
30
31/*
32 * Utility Macros/functions
33 */
34
35#define fcif_sof_set(_ifhdr, _sof) ((_ifhdr)->sof = FC_ ## _sof)
36#define fcif_eof_set(_ifhdr, _eof) ((_ifhdr)->eof = FC_ ## _eof)
37
38#define wwn_is_equal(_wwn1, _wwn2) \
39 (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
40
41#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
42
43/*
44 * Given the fc response length, this routine will return
45 * the length of the actual payload bytes following the CT header.
46 *
47 * Assumes the input response length does not include the crc, eof, etc.
48 */
49static inline u32
50fc_get_ctresp_pyld_len(u32 resp_len)
51{
52 return resp_len - sizeof(struct ct_hdr_s);
53}
54
55/*
56 * Convert bfa speed to rpsc speed value.
57 */
58static inline enum bfa_pport_speed
59fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
60{
61 switch (speed) {
62
63 case RPSC_OP_SPEED_1G:
64 return BFA_PPORT_SPEED_1GBPS;
65
66 case RPSC_OP_SPEED_2G:
67 return BFA_PPORT_SPEED_2GBPS;
68
69 case RPSC_OP_SPEED_4G:
70 return BFA_PPORT_SPEED_4GBPS;
71
72 case RPSC_OP_SPEED_8G:
73 return BFA_PPORT_SPEED_8GBPS;
74
75 case RPSC_OP_SPEED_10G:
76 return BFA_PPORT_SPEED_10GBPS;
77
78 default:
79 return BFA_PPORT_SPEED_UNKNOWN;
80 }
81}
82
83/*
84 * Convert RPSC speed to bfa speed value.
85 */
86static inline enum fc_rpsc_op_speed_s
87fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
88{
89 switch (op_speed) {
90
91 case BFA_PPORT_SPEED_1GBPS:
92 return RPSC_OP_SPEED_1G;
93
94 case BFA_PPORT_SPEED_2GBPS:
95 return RPSC_OP_SPEED_2G;
96
97 case BFA_PPORT_SPEED_4GBPS:
98 return RPSC_OP_SPEED_4G;
99
100 case BFA_PPORT_SPEED_8GBPS:
101 return RPSC_OP_SPEED_8G;
102
103 case BFA_PPORT_SPEED_10GBPS:
104 return RPSC_OP_SPEED_10G;
105
106 default:
107 return RPSC_OP_SPEED_NOT_EST;
108 }
109}
110enum fc_parse_status {
111 FC_PARSE_OK = 0,
112 FC_PARSE_FAILURE = 1,
113 FC_PARSE_BUSY = 2,
114 FC_PARSE_LEN_INVAL,
115 FC_PARSE_ACC_INVAL,
116 FC_PARSE_PWWN_NOT_EQUAL,
117 FC_PARSE_NWWN_NOT_EQUAL,
118 FC_PARSE_RXSZ_INVAL,
119 FC_PARSE_NOT_FCP,
120 FC_PARSE_OPAFLAG_INVAL,
121 FC_PARSE_RPAFLAG_INVAL,
122 FC_PARSE_OPA_INVAL,
123 FC_PARSE_RPA_INVAL,
124
125};
126
127struct fc_templates_s {
128 struct fchs_s fc_els_req;
129 struct fchs_s fc_bls_req;
130 struct fc_logi_s plogi;
131 struct fc_rrq_s rrq;
132};
133
134void fcbuild_init(void);
135
136u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
137 u32 s_id, u16 ox_id, wwn_t port_name,
138 wwn_t node_name, u16 pdu_size, u8 set_npiv,
139 u8 set_auth, u16 local_bb_credits);
140u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi,
141 u32 s_id, u16 ox_id, wwn_t port_name,
142 wwn_t node_name, u16 pdu_size);
143u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
144 u32 s_id, u16 ox_id, wwn_t port_name,
145 wwn_t node_name, u16 pdu_size,
146 u16 local_bb_credits);
147u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
148 u32 s_id, u16 ox_id, wwn_t port_name,
149 wwn_t node_name, u16 pdu_size);
150enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
151u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
152 u16 ox_id);
153enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
154u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
155 u32 s_id, u16 ox_id, u16 rrq_oxid);
156enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
157u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
158 u16 ox_id, u8 *name);
159u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
160 u16 ox_id, enum bfa_port_role role);
161u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
162 u16 ox_id, u8 *fc4_bitmap,
163 u32 bitmap_size);
164u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
165 u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
166u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
167 u16 ox_id, wwn_t port_name);
168u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
169 u16 ox_id, u32 port_id);
170u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
171 u8 set_br_reg, u32 s_id, u16 ox_id);
172u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
173 u32 s_id, u16 ox_id,
174 wwn_t port_name, wwn_t node_name, u16 pdu_size);
175
176u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
177 u32 d_id, u32 s_id, u16 ox_id,
178 wwn_t port_name, wwn_t node_name);
179enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
180 u32 host_dap,
181 wwn_t node_name, wwn_t port_name);
182enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
183 wwn_t port_name, wwn_t node_name);
184u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
185 u32 d_id, u32 s_id, u16 ox_id,
186 wwn_t port_name, wwn_t node_name);
187u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
188 u32 d_id, u32 s_id, u16 ox_id,
189 u8 reason_code, u8 reason_code_expl);
190u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
191 u32 d_id, u32 s_id, u16 ox_id);
192u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
193 u32 s_id, u16 ox_id);
194enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
195
196u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
197 u32 s_id, u16 ox_id,
198 enum bfa_port_role role);
199u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
200 u32 d_id, u32 s_id, u16 ox_id,
201 u32 data_format);
202u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
203 u32 d_id, u32 s_id, u16 ox_id,
204 u32 data_format,
205 struct fc_rnid_common_id_data_s *common_id_data,
206 struct fc_rnid_general_topology_data_s *
207 gen_topo_data);
208u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
209 u32 d_id, u32 s_id,
210 u32 *pid_list, u16 npids);
211u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
212 u32 d_id, u32 s_id, u16 ox_id);
213u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
214 u32 d_id, u32 s_id, u16 ox_id,
215 struct fc_rpsc_speed_info_s *oper_speed);
216u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
217 u8 fc4_type);
218u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
219 u32 port_id, wwn_t port_name);
220u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
221 u32 port_id, wwn_t node_name);
222u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
223 u32 port_id, u32 cos);
224u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
225 u32 port_id, u8 port_type);
226u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
227 u32 port_id);
228u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo,
229 u32 d_id, u32 s_id, u16 ox_id,
230 wwn_t port_name);
231u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
232 u32 s_id, u16 ox_id);
233u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
234 u16 cmd_code);
235u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
236 wwn_t wwn);
237u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id,
238 wwn_t wwn);
239void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
240void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
241 u16 ox_id);
242enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
243enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
244 wwn_t port_name);
245enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
246enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
247 wwn_t port_name);
248u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc,
249 u32 d_id, u32 s_id, u16 ox_id,
250 u16 rx_id);
251int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
252u16 fc_tprlo_acc_build(struct fchs_s *fchs,
253 struct fc_tprlo_acc_s *tprlo_acc,
254 u32 d_id, u32 s_id, u16 ox_id,
255 int num_pages);
256u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
257 u32 d_id, u32 s_id, u16 ox_id,
258 int num_pages);
259u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
260u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
261 u16 ox_id, wwn_t port_name, wwn_t node_name,
262 u16 pdu_size);
263u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
264u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
265 u16 ox_id, int num_pages);
266u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
267u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
268 u16 ox_id, int num_pages,
269 enum fc_tprlo_type tprlo_type, u32 tpr_id);
270u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
271u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
272 u16 ox_id, u32 reason_code,
273 u32 reason_expl);
274u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
275 u16 ox_id, u32 port_id);
276u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
277u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
278 u32 s_id, u16 ox_id);
279#endif
diff --git a/drivers/scsi/bfa/fcptm.c b/drivers/scsi/bfa/fcptm.c
deleted file mode 100644
index 8c8b08c72e7a..000000000000
--- a/drivers/scsi/bfa/fcptm.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * This file contains dummy FCPTM routines to aid in Initiator Mode only
20 * compilation of OS driver.
21 *
22 */
23
24#include "bfa_os_inc.h"
25#include "fcs_rport.h"
26#include "fcs_fcptm.h"
27#include "fcs/bfa_fcs_rport.h"
28
29struct bfa_fcs_tin_s *
30bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport)
31{
32 return NULL;
33}
34
35void
36bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin)
37{
38}
39
40void
41bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin)
42{
43}
44
45void
46bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin)
47{
48}
49
50void
51bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
52{
53}
54
55void
56bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len)
57{
58}
59
60void
61bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin)
62{
63}
64
65void
66bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin)
67{
68}
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h
deleted file mode 100644
index 8d08230e6295..000000000000
--- a/drivers/scsi/bfa/fcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs.h FCS module functions
20 */
21
22
23#ifndef __FCS_H__
24#define __FCS_H__
25
26#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
27
28void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
29
30#endif /* __FCS_H__ */
diff --git a/drivers/scsi/bfa/fcs_auth.h b/drivers/scsi/bfa/fcs_auth.h
deleted file mode 100644
index 65d155fea3d7..000000000000
--- a/drivers/scsi/bfa/fcs_auth.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_uf.h FCS unsolicited frame receive
20 */
21
22
23#ifndef __FCS_AUTH_H__
24#define __FCS_AUTH_H__
25
26#include <fcs/bfa_fcs.h>
27#include <fcs/bfa_fcs_vport.h>
28#include <fcs/bfa_fcs_lport.h>
29
30/*
31 * fcs friend functions: only between fcs modules
32 */
33void bfa_fcs_auth_uf_recv(struct bfa_fcs_fabric_s *fabric, int len);
34void bfa_fcs_auth_start(struct bfa_fcs_fabric_s *fabric);
35void bfa_fcs_auth_stop(struct bfa_fcs_fabric_s *fabric);
36
37#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
deleted file mode 100644
index 432ab8ab8c3c..000000000000
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_lport.h FCS logical port interfaces
20 */
21
22#ifndef __FCS_FABRIC_H__
23#define __FCS_FABRIC_H__
24
25#include <fcs/bfa_fcs.h>
26#include <fcs/bfa_fcs_vport.h>
27#include <fcs/bfa_fcs_lport.h>
28
29#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
30
31/*
32* fcs friend functions: only between fcs modules
33 */
34void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
35void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
36void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
37void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
38void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
39void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
40void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
41 struct bfa_fcs_vport_s *vport);
42void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
43 struct bfa_fcs_vport_s *vport);
44int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
45struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
46 struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
47void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
48void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
49 struct fchs_s *fchs, u16 len);
50u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
51bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
52bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
53enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
54void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
55void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
56
57bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
58 struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg,
59 struct bfad_vf_s *vf_drv);
60void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric,
61 enum auth_status status);
62
63void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
64 wwn_t fabric_name);
65u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
66void bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname);
67
68#endif /* __FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
deleted file mode 100644
index 11e6e7bce9f6..000000000000
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __FCS_FCPIM_H__
18#define __FCS_FCPIM_H__
19
20#include <defs/bfa_defs_port.h>
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_rport.h>
23
24/*
25 * Following routines are from FCPIM and will be called by rport.
26 */
27struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
28void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
29void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
30void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
31bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
32
33void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
34void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
35void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
36
37void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
38 u16 len);
39#endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcptm.h b/drivers/scsi/bfa/fcs_fcptm.h
deleted file mode 100644
index ffff0829fd31..000000000000
--- a/drivers/scsi/bfa/fcs_fcptm.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_FCPTM_H__
19#define __FCS_FCPTM_H__
20
21#include <defs/bfa_defs_port.h>
22#include <fcs/bfa_fcs_lport.h>
23#include <fcs/bfa_fcs_rport.h>
24
25/*
26 * Following routines are from FCPTM and will be called by rport.
27 */
28struct bfa_fcs_tin_s *bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport);
29void bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin);
30void bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin);
31void bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin);
32void bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
33 u16 len);
34void bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin);
35void bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin);
36
37/*
38 * Modudle init/cleanup routines.
39 */
40void bfa_fcs_fcptm_modinit(struct bfa_fcs_s *fcs);
41void bfa_fcs_fcptm_modexit(struct bfa_fcs_s *fcs);
42void bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs,
43 u16 len);
44
45#endif /* __FCS_FCPTM_H__ */
diff --git a/drivers/scsi/bfa/fcs_fcxp.h b/drivers/scsi/bfa/fcs_fcxp.h
deleted file mode 100644
index 8277fe9c2b70..000000000000
--- a/drivers/scsi/bfa/fcs_fcxp.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_fcxp.h FCXP helper macros for FCS
20 */
21
22
23#ifndef __FCS_FCXP_H__
24#define __FCS_FCXP_H__
25
26#define bfa_fcs_fcxp_alloc(__fcs) \
27 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
28
29#endif /* __FCS_FCXP_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
deleted file mode 100644
index a6508c8ab184..000000000000
--- a/drivers/scsi/bfa/fcs_lport.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_lport.h FCS logical port interfaces
20 */
21
22#ifndef __FCS_LPORT_H__
23#define __FCS_LPORT_H__
24
25#define __VPORT_H__
26#include <defs/bfa_defs_port.h>
27#include <bfa_svc.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <fcs/bfa_fcs_rport.h>
30#include <fcs/bfa_fcs_vport.h>
31#include <fcs_fabric.h>
32#include <fcs_ms.h>
33#include <cs/bfa_q.h>
34#include <fcbuild.h>
35
36/*
37 * PID used in P2P/N2N ( In Big Endian)
38 */
39#define N2N_LOCAL_PID 0x010000
40#define N2N_REMOTE_PID 0x020000
41
42/*
43 * Misc Timeouts
44 */
45/*
46 * To be used when spawning a timer before retrying a failed command. Milli
47 * Secs.
48 */
49#define BFA_FCS_RETRY_TIMEOUT 2000
50
51/*
52 * Check for Port/Vport Mode/Role
53 */
54#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
55 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
56
57#define BFA_FCS_VPORT_IS_TARGET_MODE(port) \
58 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
59
60#define BFA_FCS_VPORT_IS_IPFC_MODE(port) \
61 (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IPFC)
62
63/*
64 * Is this a Well Known Address
65 */
66#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
67
68/*
69 * Pointer to elements within Port
70 */
71#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
72#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
73#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
74#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
75#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
76
77/*
78 * handler for unsolicied frames
79 */
80void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
81 u16 len);
82
83/*
84 * Following routines will be called by Fabric to indicate port
85 * online/offline to vport.
86 */
87void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
88 uint16_t vf_id, struct bfa_fcs_vport_s *vport);
89void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
90 struct bfa_port_cfg_s *port_cfg);
91void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
92void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
93void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
94bfa_boolean_t bfa_fcs_port_is_online(struct bfa_fcs_port_s *port);
95
96/*
97 * Lookup rport based on PID
98 */
99struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pid(
100 struct bfa_fcs_port_s *port, u32 pid);
101
102/*
103 * Lookup rport based on PWWN
104 */
105struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pwwn(
106 struct bfa_fcs_port_s *port, wwn_t pwwn);
107struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_nwwn(
108 struct bfa_fcs_port_s *port, wwn_t nwwn);
109void bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
110 struct bfa_fcs_rport_s *rport);
111void bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
112 struct bfa_fcs_rport_s *rport);
113
114void bfa_fcs_port_modinit(struct bfa_fcs_s *fcs);
115void bfa_fcs_port_modexit(struct bfa_fcs_s *fcs);
116void bfa_fcs_port_lip(struct bfa_fcs_port_s *port);
117
118#endif /* __FCS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_ms.h b/drivers/scsi/bfa/fcs_ms.h
deleted file mode 100644
index b6a8c12876f4..000000000000
--- a/drivers/scsi/bfa/fcs_ms.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_ms.h FCS ms interfaces
20 */
21#ifndef __FCS_MS_H__
22#define __FCS_MS_H__
23
24/* MS FCS routines */
25void bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port);
26void bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port);
27void bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port);
28void bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port);
29
30/* FDMI FCS routines */
31void bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms);
32void bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms);
33void bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms);
34
35#endif
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
deleted file mode 100644
index 408c06a7d164..000000000000
--- a/drivers/scsi/bfa/fcs_port.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_pport.h FCS physical port interfaces
20 */
21
22
23#ifndef __FCS_PPORT_H__
24#define __FCS_PPORT_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
30
31#endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
deleted file mode 100644
index e634fb7a69b8..000000000000
--- a/drivers/scsi/bfa/fcs_rport.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_rport.h FCS rport interfaces and defines
20 */
21
22#ifndef __FCS_RPORT_H__
23#define __FCS_RPORT_H__
24
25#include <fcs/bfa_fcs_rport.h>
26
27#define BFA_FCS_RPORT_MAX_RETRIES (5)
28
29void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
30 u16 len);
31void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
32
33struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_port_s *port,
34 u32 pid);
35void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
36void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
37void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
38void bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
39 struct fc_logi_s *plogi_rsp);
40void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port,
41 struct fchs_s *rx_fchs,
42 struct fc_logi_s *plogi);
43void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
44 struct fc_logi_s *plogi);
45void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
46void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id);
47void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
48void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
49void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport);
50void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
51int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
52struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port,
53 wwn_t wwn);
54
55
56/* Rport Features */
57void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
58void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
59void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
60
61#endif /* __FCS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_trcmod.h b/drivers/scsi/bfa/fcs_trcmod.h
deleted file mode 100644
index 41b5ae8d7644..000000000000
--- a/drivers/scsi/bfa/fcs_trcmod.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_trcmod.h BFA FCS trace modules
20 */
21
22#ifndef __FCS_TRCMOD_H__
23#define __FCS_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_FCS_FABRIC = 1,
33 BFA_TRC_FCS_VFAPI = 2,
34 BFA_TRC_FCS_PORT = 3,
35 BFA_TRC_FCS_VPORT = 4,
36 BFA_TRC_FCS_VP_API = 5,
37 BFA_TRC_FCS_VPS = 6,
38 BFA_TRC_FCS_RPORT = 7,
39 BFA_TRC_FCS_FCPIM = 8,
40 BFA_TRC_FCS_FCPTM = 9,
41 BFA_TRC_FCS_NS = 10,
42 BFA_TRC_FCS_SCN = 11,
43 BFA_TRC_FCS_LOOP = 12,
44 BFA_TRC_FCS_UF = 13,
45 BFA_TRC_FCS_PPORT = 14,
46 BFA_TRC_FCS_FCPIP = 15,
47 BFA_TRC_FCS_PORT_API = 16,
48 BFA_TRC_FCS_RPORT_API = 17,
49 BFA_TRC_FCS_AUTH = 18,
50 BFA_TRC_FCS_N2N = 19,
51 BFA_TRC_FCS_MS = 20,
52 BFA_TRC_FCS_FDMI = 21,
53 BFA_TRC_FCS_RPORT_FTRS = 22,
54};
55
56#endif /* __FCS_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
deleted file mode 100644
index f591072214fe..000000000000
--- a/drivers/scsi/bfa/fcs_uf.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * fcs_uf.h FCS unsolicited frame receive
20 */
21
22
23#ifndef __FCS_UF_H__
24#define __FCS_UF_H__
25
26/*
27 * fcs friend functions: only between fcs modules
28 */
29void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
30
31#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
deleted file mode 100644
index bb647a4a5dde..000000000000
--- a/drivers/scsi/bfa/fcs_vport.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCS_VPORT_H__
19#define __FCS_VPORT_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23#include <defs/bfa_defs_pci.h>
24
25void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
26void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
27void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
28void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
29void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
30
31#endif /* __FCS_VPORT_H__ */
32
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
deleted file mode 100644
index 2b50eabf4b1e..000000000000
--- a/drivers/scsi/bfa/fdmi.c
+++ /dev/null
@@ -1,1230 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_api.c BFA FCS port
20 */
21
22
23#include <bfa.h>
24#include <bfa_svc.h>
25#include "fcs_lport.h"
26#include "fcs_rport.h"
27#include "lport_priv.h"
28#include "fcs_trcmod.h"
29#include "fcs_fcxp.h"
30#include <fcs/bfa_fcs_fdmi.h>
31
32BFA_TRC_FILE(FCS, FDMI);
33
34#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
35
36/*
37 * forward declarations
38 */
39static void bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg,
40 struct bfa_fcxp_s *fcxp_alloced);
41static void bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_fdmi_rhba_response(void *fcsarg,
46 struct bfa_fcxp_s *fcxp,
47 void *cbarg,
48 bfa_status_t req_status,
49 u32 rsp_len,
50 u32 resid_len,
51 struct fchs_s *rsp_fchs);
52static void bfa_fcs_port_fdmi_rprt_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_fdmi_rpa_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_fdmi_timeout(void *arg);
67static u16 bfa_fcs_port_fdmi_build_rhba_pyld(
68 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
69static u16 bfa_fcs_port_fdmi_build_rprt_pyld(
70 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
71static u16 bfa_fcs_port_fdmi_build_rpa_pyld(
72 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
73static u16 bfa_fcs_port_fdmi_build_portattr_block(
74 struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
75static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
76 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
77static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
78 struct bfa_fcs_fdmi_port_attr_s *port_attr);
79/**
80 * fcs_fdmi_sm FCS FDMI state machine
81 */
82
83/**
84 * FDMI State Machine events
85 */
86enum port_fdmi_event {
87 FDMISM_EVENT_PORT_ONLINE = 1,
88 FDMISM_EVENT_PORT_OFFLINE = 2,
89 FDMISM_EVENT_RSP_OK = 4,
90 FDMISM_EVENT_RSP_ERROR = 5,
91 FDMISM_EVENT_TIMEOUT = 6,
92 FDMISM_EVENT_RHBA_SENT = 7,
93 FDMISM_EVENT_RPRT_SENT = 8,
94 FDMISM_EVENT_RPA_SENT = 9,
95};
96
97static void bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
98 enum port_fdmi_event event);
99static void bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
100 enum port_fdmi_event event);
101static void bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
102 enum port_fdmi_event event);
103static void bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
104 enum port_fdmi_event event);
105static void bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
106 enum port_fdmi_event event);
107static void bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
108 enum port_fdmi_event event);
109static void bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
110 enum port_fdmi_event event);
111static void bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
112 enum port_fdmi_event event);
113static void bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
114 enum port_fdmi_event event);
115static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
116 enum port_fdmi_event event);
117static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
118 enum port_fdmi_event event);
119static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
120 enum port_fdmi_event event);
121
122/**
123 * Start in offline state - awaiting MS to send start.
124 */
125static void
126bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
127 enum port_fdmi_event event)
128{
129 struct bfa_fcs_port_s *port = fdmi->ms->port;
130
131 bfa_trc(port->fcs, port->port_cfg.pwwn);
132 bfa_trc(port->fcs, event);
133
134 fdmi->retry_cnt = 0;
135
136 switch (event) {
137 case FDMISM_EVENT_PORT_ONLINE:
138 if (port->vport) {
139 /*
140 * For Vports, register a new port.
141 */
142 bfa_sm_set_state(fdmi,
143 bfa_fcs_port_fdmi_sm_sending_rprt);
144 bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
145 } else {
146 /*
147 * For a base port, we should first register the HBA
148 * atribute. The HBA attribute also contains the base
149 * port registration.
150 */
151 bfa_sm_set_state(fdmi,
152 bfa_fcs_port_fdmi_sm_sending_rhba);
153 bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
154 }
155 break;
156
157 case FDMISM_EVENT_PORT_OFFLINE:
158 break;
159
160 default:
161 bfa_sm_fault(port->fcs, event);
162 }
163}
164
165static void
166bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
167 enum port_fdmi_event event)
168{
169 struct bfa_fcs_port_s *port = fdmi->ms->port;
170
171 bfa_trc(port->fcs, port->port_cfg.pwwn);
172 bfa_trc(port->fcs, event);
173
174 switch (event) {
175 case FDMISM_EVENT_RHBA_SENT:
176 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba);
177 break;
178
179 case FDMISM_EVENT_PORT_OFFLINE:
180 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
181 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
182 &fdmi->fcxp_wqe);
183 break;
184
185 default:
186 bfa_sm_fault(port->fcs, event);
187 }
188}
189
190static void
191bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
192 enum port_fdmi_event event)
193{
194 struct bfa_fcs_port_s *port = fdmi->ms->port;
195
196 bfa_trc(port->fcs, port->port_cfg.pwwn);
197 bfa_trc(port->fcs, event);
198
199 switch (event) {
200 case FDMISM_EVENT_RSP_ERROR:
201 /*
202 * if max retries have not been reached, start timer for a
203 * delayed retry
204 */
205 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
206 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba_retry);
207 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
208 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
209 fdmi, BFA_FCS_RETRY_TIMEOUT);
210 } else {
211 /*
212 * set state to offline
213 */
214 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
215 }
216 break;
217
218 case FDMISM_EVENT_RSP_OK:
219 /*
220 * Initiate Register Port Attributes
221 */
222 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
223 fdmi->retry_cnt = 0;
224 bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
225 break;
226
227 case FDMISM_EVENT_PORT_OFFLINE:
228 bfa_fcxp_discard(fdmi->fcxp);
229 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
230 break;
231
232 default:
233 bfa_sm_fault(port->fcs, event);
234 }
235}
236
237static void
238bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
239 enum port_fdmi_event event)
240{
241 struct bfa_fcs_port_s *port = fdmi->ms->port;
242
243 bfa_trc(port->fcs, port->port_cfg.pwwn);
244 bfa_trc(port->fcs, event);
245
246 switch (event) {
247 case FDMISM_EVENT_TIMEOUT:
248 /*
249 * Retry Timer Expired. Re-send
250 */
251 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rhba);
252 bfa_fcs_port_fdmi_send_rhba(fdmi, NULL);
253 break;
254
255 case FDMISM_EVENT_PORT_OFFLINE:
256 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
257 bfa_timer_stop(&fdmi->timer);
258 break;
259
260 default:
261 bfa_sm_fault(port->fcs, event);
262 }
263}
264
265/*
266* RPRT : Register Port
267 */
268static void
269bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
270 enum port_fdmi_event event)
271{
272 struct bfa_fcs_port_s *port = fdmi->ms->port;
273
274 bfa_trc(port->fcs, port->port_cfg.pwwn);
275 bfa_trc(port->fcs, event);
276
277 switch (event) {
278 case FDMISM_EVENT_RPRT_SENT:
279 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt);
280 break;
281
282 case FDMISM_EVENT_PORT_OFFLINE:
283 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
284 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
285 &fdmi->fcxp_wqe);
286 break;
287
288 default:
289 bfa_sm_fault(port->fcs, event);
290 }
291}
292
293static void
294bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
295 enum port_fdmi_event event)
296{
297 struct bfa_fcs_port_s *port = fdmi->ms->port;
298
299 bfa_trc(port->fcs, port->port_cfg.pwwn);
300 bfa_trc(port->fcs, event);
301
302 switch (event) {
303 case FDMISM_EVENT_RSP_ERROR:
304 /*
305 * if max retries have not been reached, start timer for a
306 * delayed retry
307 */
308 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
309 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt_retry);
310 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
311 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
312 fdmi, BFA_FCS_RETRY_TIMEOUT);
313
314 } else {
315 /*
316 * set state to offline
317 */
318 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
319 fdmi->retry_cnt = 0;
320 }
321 break;
322
323 case FDMISM_EVENT_RSP_OK:
324 fdmi->retry_cnt = 0;
325 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
326 break;
327
328 case FDMISM_EVENT_PORT_OFFLINE:
329 bfa_fcxp_discard(fdmi->fcxp);
330 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
331 break;
332
333 default:
334 bfa_sm_fault(port->fcs, event);
335 }
336}
337
338static void
339bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
340 enum port_fdmi_event event)
341{
342 struct bfa_fcs_port_s *port = fdmi->ms->port;
343
344 bfa_trc(port->fcs, port->port_cfg.pwwn);
345 bfa_trc(port->fcs, event);
346
347 switch (event) {
348 case FDMISM_EVENT_TIMEOUT:
349 /*
350 * Retry Timer Expired. Re-send
351 */
352 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rprt);
353 bfa_fcs_port_fdmi_send_rprt(fdmi, NULL);
354 break;
355
356 case FDMISM_EVENT_PORT_OFFLINE:
357 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
358 bfa_timer_stop(&fdmi->timer);
359 break;
360
361 default:
362 bfa_sm_fault(port->fcs, event);
363 }
364}
365
366/*
367 * Register Port Attributes
368 */
369static void
370bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
371 enum port_fdmi_event event)
372{
373 struct bfa_fcs_port_s *port = fdmi->ms->port;
374
375 bfa_trc(port->fcs, port->port_cfg.pwwn);
376 bfa_trc(port->fcs, event);
377
378 switch (event) {
379 case FDMISM_EVENT_RPA_SENT:
380 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa);
381 break;
382
383 case FDMISM_EVENT_PORT_OFFLINE:
384 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
385 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
386 &fdmi->fcxp_wqe);
387 break;
388
389 default:
390 bfa_sm_fault(port->fcs, event);
391 }
392}
393
394static void
395bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
396 enum port_fdmi_event event)
397{
398 struct bfa_fcs_port_s *port = fdmi->ms->port;
399
400 bfa_trc(port->fcs, port->port_cfg.pwwn);
401 bfa_trc(port->fcs, event);
402
403 switch (event) {
404 case FDMISM_EVENT_RSP_ERROR:
405 /*
406 * if max retries have not been reached, start timer for a
407 * delayed retry
408 */
409 if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
410 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa_retry);
411 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
412 &fdmi->timer, bfa_fcs_port_fdmi_timeout,
413 fdmi, BFA_FCS_RETRY_TIMEOUT);
414 } else {
415 /*
416 * set state to offline
417 */
418 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
419 fdmi->retry_cnt = 0;
420 }
421 break;
422
423 case FDMISM_EVENT_RSP_OK:
424 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
425 fdmi->retry_cnt = 0;
426 break;
427
428 case FDMISM_EVENT_PORT_OFFLINE:
429 bfa_fcxp_discard(fdmi->fcxp);
430 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
431 break;
432
433 default:
434 bfa_sm_fault(port->fcs, event);
435 }
436}
437
438static void
439bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
440 enum port_fdmi_event event)
441{
442 struct bfa_fcs_port_s *port = fdmi->ms->port;
443
444 bfa_trc(port->fcs, port->port_cfg.pwwn);
445 bfa_trc(port->fcs, event);
446
447 switch (event) {
448 case FDMISM_EVENT_TIMEOUT:
449 /*
450 * Retry Timer Expired. Re-send
451 */
452 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
453 bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
454 break;
455
456 case FDMISM_EVENT_PORT_OFFLINE:
457 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
458 bfa_timer_stop(&fdmi->timer);
459 break;
460
461 default:
462 bfa_sm_fault(port->fcs, event);
463 }
464}
465
466static void
467bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
468 enum port_fdmi_event event)
469{
470 struct bfa_fcs_port_s *port = fdmi->ms->port;
471
472 bfa_trc(port->fcs, port->port_cfg.pwwn);
473 bfa_trc(port->fcs, event);
474
475 switch (event) {
476 case FDMISM_EVENT_PORT_OFFLINE:
477 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
478 break;
479
480 default:
481 bfa_sm_fault(port->fcs, event);
482 }
483}
484
485/**
486 * FDMI is disabled state.
487 */
488static void
489bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
490 enum port_fdmi_event event)
491{
492 struct bfa_fcs_port_s *port = fdmi->ms->port;
493
494 bfa_trc(port->fcs, port->port_cfg.pwwn);
495 bfa_trc(port->fcs, event);
496
497 /* No op State. It can only be enabled at Driver Init. */
498}
499
500/**
501* RHBA : Register HBA Attributes.
502 */
503static void
504bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
505{
506 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
507 struct bfa_fcs_port_s *port = fdmi->ms->port;
508 struct fchs_s fchs;
509 int len, attr_len;
510 struct bfa_fcxp_s *fcxp;
511 u8 *pyld;
512
513 bfa_trc(port->fcs, port->port_cfg.pwwn);
514
515 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
516 if (!fcxp) {
517 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
518 bfa_fcs_port_fdmi_send_rhba, fdmi);
519 return;
520 }
521 fdmi->fcxp = fcxp;
522
523 pyld = bfa_fcxp_get_reqbuf(fcxp);
524 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
525
526 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
527 FDMI_RHBA);
528
529 attr_len = bfa_fcs_port_fdmi_build_rhba_pyld(fdmi,
530 (u8 *) ((struct ct_hdr_s *) pyld + 1));
531
532 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
533 FC_CLASS_3, (len + attr_len), &fchs,
534 bfa_fcs_port_fdmi_rhba_response, (void *)fdmi,
535 FC_MAX_PDUSZ, FC_FCCT_TOV);
536
537 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
538}
539
540static u16
541bfa_fcs_port_fdmi_build_rhba_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
542 u8 *pyld)
543{
544 struct bfa_fcs_port_s *port = fdmi->ms->port;
545 struct bfa_fcs_fdmi_hba_attr_s hba_attr; /* @todo */
546 struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; /* @todo */
547 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
548 struct fdmi_attr_s *attr;
549 u8 *curr_ptr;
550 u16 len, count;
551
552 /*
553 * get hba attributes
554 */
555 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
556
557 rhba->hba_id = bfa_fcs_port_get_pwwn(port);
558 rhba->port_list.num_ports = bfa_os_htonl(1);
559 rhba->port_list.port_entry = bfa_fcs_port_get_pwwn(port);
560
561 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
562
563 count = 0;
564 len += sizeof(rhba->hba_attr_blk.attr_count);
565
566 /*
567 * fill out the invididual entries of the HBA attrib Block
568 */
569 curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
570
571 /*
572 * Node Name
573 */
574 attr = (struct fdmi_attr_s *) curr_ptr;
575 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
576 attr->len = sizeof(wwn_t);
577 memcpy(attr->value, &bfa_fcs_port_get_nwwn(port), attr->len);
578 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
579 len += attr->len;
580 count++;
581 attr->len =
582 bfa_os_htons(attr->len + sizeof(attr->type) +
583 sizeof(attr->len));
584
585 /*
586 * Manufacturer
587 */
588 attr = (struct fdmi_attr_s *) curr_ptr;
589 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
590 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
591 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
592 /* variable fields need to be 4 byte aligned */
593 attr->len = fc_roundup(attr->len, sizeof(u32));
594 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
595 len += attr->len;
596 count++;
597 attr->len =
598 bfa_os_htons(attr->len + sizeof(attr->type) +
599 sizeof(attr->len));
600
601 /*
602 * Serial Number
603 */
604 attr = (struct fdmi_attr_s *) curr_ptr;
605 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
606 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
607 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
608 /* variable fields need to be 4 byte aligned */
609 attr->len = fc_roundup(attr->len, sizeof(u32));
610 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
611 len += attr->len;
612 count++;
613 attr->len =
614 bfa_os_htons(attr->len + sizeof(attr->type) +
615 sizeof(attr->len));
616
617 /*
618 * Model
619 */
620 attr = (struct fdmi_attr_s *) curr_ptr;
621 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
622 attr->len = (u16) strlen(fcs_hba_attr->model);
623 memcpy(attr->value, fcs_hba_attr->model, attr->len);
624 /* variable fields need to be 4 byte aligned */
625 attr->len = fc_roundup(attr->len, sizeof(u32));
626 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
627 len += attr->len;
628 count++;
629 attr->len =
630 bfa_os_htons(attr->len + sizeof(attr->type) +
631 sizeof(attr->len));
632
633 /*
634 * Model Desc
635 */
636 attr = (struct fdmi_attr_s *) curr_ptr;
637 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
638 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
639 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
640 /* variable fields need to be 4 byte aligned */
641 attr->len = fc_roundup(attr->len, sizeof(u32));
642 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
643 len += attr->len;
644 count++;
645 attr->len =
646 bfa_os_htons(attr->len + sizeof(attr->type) +
647 sizeof(attr->len));
648
649 /*
650 * H/W Version
651 */
652 if (fcs_hba_attr->hw_version[0] != '\0') {
653 attr = (struct fdmi_attr_s *) curr_ptr;
654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
655 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
656 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
657 /* variable fields need to be 4 byte aligned */
658 attr->len = fc_roundup(attr->len, sizeof(u32));
659 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
660 len += attr->len;
661 count++;
662 attr->len =
663 bfa_os_htons(attr->len + sizeof(attr->type) +
664 sizeof(attr->len));
665 }
666
667 /*
668 * Driver Version
669 */
670 attr = (struct fdmi_attr_s *) curr_ptr;
671 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
672 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
673 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
674 /* variable fields need to be 4 byte aligned */
675 attr->len = fc_roundup(attr->len, sizeof(u32));
676 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
677 len += attr->len;;
678 count++;
679 attr->len =
680 bfa_os_htons(attr->len + sizeof(attr->type) +
681 sizeof(attr->len));
682
683 /*
684 * Option Rom Version
685 */
686 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
687 attr = (struct fdmi_attr_s *) curr_ptr;
688 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
689 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
690 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
691 /* variable fields need to be 4 byte aligned */
692 attr->len = fc_roundup(attr->len, sizeof(u32));
693 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
694 len += attr->len;
695 count++;
696 attr->len =
697 bfa_os_htons(attr->len + sizeof(attr->type) +
698 sizeof(attr->len));
699 }
700
701 /*
702 * f/w Version = driver version
703 */
704 attr = (struct fdmi_attr_s *) curr_ptr;
705 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
706 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
707 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
708 /* variable fields need to be 4 byte aligned */
709 attr->len = fc_roundup(attr->len, sizeof(u32));
710 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
711 len += attr->len;
712 count++;
713 attr->len =
714 bfa_os_htons(attr->len + sizeof(attr->type) +
715 sizeof(attr->len));
716
717 /*
718 * OS Name
719 */
720 if (fcs_hba_attr->os_name[0] != '\0') {
721 attr = (struct fdmi_attr_s *) curr_ptr;
722 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
723 attr->len = (u16) strlen(fcs_hba_attr->os_name);
724 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
725 /* variable fields need to be 4 byte aligned */
726 attr->len = fc_roundup(attr->len, sizeof(u32));
727 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
728 len += attr->len;
729 count++;
730 attr->len =
731 bfa_os_htons(attr->len + sizeof(attr->type) +
732 sizeof(attr->len));
733 }
734
735 /*
736 * MAX_CT_PAYLOAD
737 */
738 attr = (struct fdmi_attr_s *) curr_ptr;
739 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
740 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
741 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
742 len += attr->len;
743 count++;
744 attr->len =
745 bfa_os_htons(attr->len + sizeof(attr->type) +
746 sizeof(attr->len));
747
748 /*
749 * Update size of payload
750 */
751 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
752
753 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
754 return len;
755}
756
757static void
758bfa_fcs_port_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
759 void *cbarg, bfa_status_t req_status,
760 u32 rsp_len, u32 resid_len,
761 struct fchs_s *rsp_fchs)
762{
763 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
764 struct bfa_fcs_port_s *port = fdmi->ms->port;
765 struct ct_hdr_s *cthdr = NULL;
766
767 bfa_trc(port->fcs, port->port_cfg.pwwn);
768
769 /*
770 * Sanity Checks
771 */
772 if (req_status != BFA_STATUS_OK) {
773 bfa_trc(port->fcs, req_status);
774 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
775 return;
776 }
777
778 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
779 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
780
781 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
782 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
783 return;
784 }
785
786 bfa_trc(port->fcs, cthdr->reason_code);
787 bfa_trc(port->fcs, cthdr->exp_code);
788 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
789}
790
791/**
792* RPRT : Register Port
793 */
794static void
795bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
796{
797 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
798 struct bfa_fcs_port_s *port = fdmi->ms->port;
799 struct fchs_s fchs;
800 u16 len, attr_len;
801 struct bfa_fcxp_s *fcxp;
802 u8 *pyld;
803
804 bfa_trc(port->fcs, port->port_cfg.pwwn);
805
806 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
807 if (!fcxp) {
808 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
809 bfa_fcs_port_fdmi_send_rprt, fdmi);
810 return;
811 }
812 fdmi->fcxp = fcxp;
813
814 pyld = bfa_fcxp_get_reqbuf(fcxp);
815 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
816
817 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
818 FDMI_RPRT);
819
820 attr_len = bfa_fcs_port_fdmi_build_rprt_pyld(fdmi,
821 (u8 *) ((struct ct_hdr_s *) pyld + 1));
822
823 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
824 FC_CLASS_3, len + attr_len, &fchs,
825 bfa_fcs_port_fdmi_rprt_response, (void *)fdmi,
826 FC_MAX_PDUSZ, FC_FCCT_TOV);
827
828 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
829}
830
/**
 * This routine builds the Port Attribute Block used in RPA and RPRT
 * commands.
 *
 * Attributes are emitted as TLVs. For each one, attr->len is kept in
 * host order while copying the value, advancing curr_ptr and adding the
 * value size to 'len'; it is then rewritten in network order including
 * the type/len header. Header sizes are added to 'len' once per
 * attribute at the end, via 'count'. Returns the block length in host
 * order.
 */
static u16
bfa_fcs_port_fdmi_build_portattr_block(struct bfa_fcs_port_fdmi_s *fdmi,
			u8 *pyld)
{
	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
	struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
	struct fdmi_attr_s *attr;
	u8 *curr_ptr;
	u16 len;
	u8 count = 0;

	/*
	 * get port attributes
	 */
	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);

	len = sizeof(port_attrib->attr_count);

	/*
	 * fill out the individual entries
	 */
	curr_ptr = (u8 *) &port_attrib->port_attr;

	/*
	 * FC4 Types
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * Supported Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
	attr->len = sizeof(fcs_port_attr.supp_speed);
	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * current Port Speed
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
	attr->len = sizeof(fcs_port_attr.curr_speed);
	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * max frame size
	 */
	attr = (struct fdmi_attr_s *) curr_ptr;
	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
	attr->len = sizeof(fcs_port_attr.max_frm_size);
	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
	len += attr->len;
	++count;
	attr->len =
		bfa_os_htons(attr->len + sizeof(attr->type) +
			     sizeof(attr->len));

	/*
	 * OS Device Name (optional; skipped when empty)
	 */
	if (fcs_port_attr.os_device_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}
	/*
	 * Host Name (optional; skipped when empty)
	 */
	if (fcs_port_attr.host_name[0] != '\0') {
		attr = (struct fdmi_attr_s *) curr_ptr;
		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
		attr->len = (u16) strlen(fcs_port_attr.host_name);
		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
		/* variable fields need to be 4 byte aligned */
		attr->len = fc_roundup(attr->len, sizeof(u32));
		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
		len += attr->len;
		++count;
		attr->len =
			bfa_os_htons(attr->len + sizeof(attr->type) +
				     sizeof(attr->len));

	}

	/*
	 * Update size of payload: add the per-attribute TLV header sizes.
	 */
	port_attrib->attr_count = bfa_os_htonl(count);
	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
	return len;
}
957
958static u16
959bfa_fcs_port_fdmi_build_rprt_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
960 u8 *pyld)
961{
962 struct bfa_fcs_port_s *port = fdmi->ms->port;
963 struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
964 u16 len;
965
966 rprt->hba_id = bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
967 rprt->port_name = bfa_fcs_port_get_pwwn(port);
968
969 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
970 (u8 *) &rprt->port_attr_blk);
971
972 len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
973
974 return len;
975}
976
977static void
978bfa_fcs_port_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
979 void *cbarg, bfa_status_t req_status,
980 u32 rsp_len, u32 resid_len,
981 struct fchs_s *rsp_fchs)
982{
983 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
984 struct bfa_fcs_port_s *port = fdmi->ms->port;
985 struct ct_hdr_s *cthdr = NULL;
986
987 bfa_trc(port->fcs, port->port_cfg.pwwn);
988
989 /*
990 * Sanity Checks
991 */
992 if (req_status != BFA_STATUS_OK) {
993 bfa_trc(port->fcs, req_status);
994 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
995 return;
996 }
997
998 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
999 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1000
1001 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1002 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
1003 return;
1004 }
1005
1006 bfa_trc(port->fcs, cthdr->reason_code);
1007 bfa_trc(port->fcs, cthdr->exp_code);
1008 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1009}
1010
1011/**
1012* RPA : Register Port Attributes.
1013 */
1014static void
1015bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1016{
1017 struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg;
1018 struct bfa_fcs_port_s *port = fdmi->ms->port;
1019 struct fchs_s fchs;
1020 u16 len, attr_len;
1021 struct bfa_fcxp_s *fcxp;
1022 u8 *pyld;
1023
1024 bfa_trc(port->fcs, port->port_cfg.pwwn);
1025
1026 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
1027 if (!fcxp) {
1028 bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1029 bfa_fcs_port_fdmi_send_rpa, fdmi);
1030 return;
1031 }
1032 fdmi->fcxp = fcxp;
1033
1034 pyld = bfa_fcxp_get_reqbuf(fcxp);
1035 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
1036
1037 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port),
1038 FDMI_RPA);
1039
1040 attr_len = bfa_fcs_port_fdmi_build_rpa_pyld(fdmi,
1041 (u8 *) ((struct ct_hdr_s *) pyld + 1));
1042
1043 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1044 FC_CLASS_3, len + attr_len, &fchs,
1045 bfa_fcs_port_fdmi_rpa_response, (void *)fdmi,
1046 FC_MAX_PDUSZ, FC_FCCT_TOV);
1047
1048 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
1049}
1050
1051static u16
1052bfa_fcs_port_fdmi_build_rpa_pyld(struct bfa_fcs_port_fdmi_s *fdmi,
1053 u8 *pyld)
1054{
1055 struct bfa_fcs_port_s *port = fdmi->ms->port;
1056 struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
1057 u16 len;
1058
1059 rpa->port_name = bfa_fcs_port_get_pwwn(port);
1060
1061 len = bfa_fcs_port_fdmi_build_portattr_block(fdmi,
1062 (u8 *) &rpa->port_attr_blk);
1063
1064 len += sizeof(rpa->port_name);
1065
1066 return len;
1067}
1068
1069static void
1070bfa_fcs_port_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1071 void *cbarg, bfa_status_t req_status,
1072 u32 rsp_len, u32 resid_len,
1073 struct fchs_s *rsp_fchs)
1074{
1075 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
1076 struct bfa_fcs_port_s *port = fdmi->ms->port;
1077 struct ct_hdr_s *cthdr = NULL;
1078
1079 bfa_trc(port->fcs, port->port_cfg.pwwn);
1080
1081 /*
1082 * Sanity Checks
1083 */
1084 if (req_status != BFA_STATUS_OK) {
1085 bfa_trc(port->fcs, req_status);
1086 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1087 return;
1088 }
1089
1090 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1091 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
1092
1093 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1094 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
1095 return;
1096 }
1097
1098 bfa_trc(port->fcs, cthdr->reason_code);
1099 bfa_trc(port->fcs, cthdr->exp_code);
1100 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1101}
1102
1103static void
1104bfa_fcs_port_fdmi_timeout(void *arg)
1105{
1106 struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)arg;
1107
1108 bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
1109}
1110
1111static void
1112bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
1113 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
1114{
1115 struct bfa_fcs_port_s *port = fdmi->ms->port;
1116 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1117
1118 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
1119
1120 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
1121 hba_attr->manufacturer);
1122 bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
1123 hba_attr->serial_num);
1124 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
1125 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
1126 bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
1127 bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
1128 hba_attr->option_rom_ver);
1129 bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);
1130
1131 strncpy(hba_attr->driver_version, (char *)driver_info->version,
1132 sizeof(hba_attr->driver_version));
1133
1134 strncpy(hba_attr->os_name, driver_info->host_os_name,
1135 sizeof(hba_attr->os_name));
1136
1137 /*
1138 * If there is a patch level, append it to the os name along with a
1139 * separator
1140 */
1141 if (driver_info->host_os_patch[0] != '\0') {
1142 strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1143 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1144 strncat(hba_attr->os_name, driver_info->host_os_patch,
1145 sizeof(driver_info->host_os_patch));
1146 }
1147
1148 hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
1149
1150}
1151
1152static void
1153bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
1154 struct bfa_fcs_fdmi_port_attr_s *port_attr)
1155{
1156 struct bfa_fcs_port_s *port = fdmi->ms->port;
1157 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1158 struct bfa_pport_attr_s pport_attr;
1159
1160 bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
1161
1162 /*
1163 * get pport attributes from hal
1164 */
1165 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
1166
1167 /*
1168 * get FC4 type Bitmask
1169 */
1170 fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
1171
1172 /*
1173 * Supported Speeds
1174 */
1175 port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
1176
1177 /*
1178 * Current Speed
1179 */
1180 port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
1181
1182 /*
1183 * Max PDU Size.
1184 */
1185 port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
1186
1187 /*
1188 * OS device Name
1189 */
1190 strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
1191 sizeof(port_attr->os_device_name));
1192
1193 /*
1194 * Host name
1195 */
1196 strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
1197 sizeof(port_attr->host_name));
1198
1199}
1200
1201
1202void
1203bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
1204{
1205 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1206
1207 fdmi->ms = ms;
1208 if (ms->port->fcs->fdmi_enabled)
1209 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
1210 else
1211 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
1212}
1213
1214void
1215bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms)
1216{
1217 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1218
1219 fdmi->ms = ms;
1220 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
1221}
1222
1223void
1224bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms)
1225{
1226 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1227
1228 fdmi->ms = ms;
1229 bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
1230}
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
deleted file mode 100644
index 6abbab005db6..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_AEN_H__
18#define __BFA_AEN_H__
19
20#include "defs/bfa_defs_aen.h"
21#include "defs/bfa_defs_status.h"
22#include "cs/bfa_debug.h"
23
24#define BFA_AEN_MAX_ENTRY 512
25
26extern int bfa_aen_max_cfg_entry;
27struct bfa_aen_s {
28 void *bfad;
29 int max_entry;
30 int write_index;
31 int read_index;
32 int bfad_num;
33 int seq_num;
34 void (*aen_cb_notify)(void *bfad);
35 void (*gettimeofday)(struct bfa_timeval_s *tv);
36 struct bfa_trc_mod_s *trcmod;
37 int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
38 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
39};
40
41
42/**
43 * Public APIs
44 */
45static inline void
46bfa_aen_set_max_cfg_entry(int max_entry)
47{
48 bfa_aen_max_cfg_entry = max_entry;
49}
50
51static inline int
52bfa_aen_get_max_cfg_entry(void)
53{
54 return bfa_aen_max_cfg_entry;
55}
56
57static inline int
58bfa_aen_get_meminfo(void)
59{
60 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
61}
62
63static inline int
64bfa_aen_get_wi(struct bfa_aen_s *aen)
65{
66 return aen->write_index;
67}
68
69static inline int
70bfa_aen_get_ri(struct bfa_aen_s *aen)
71{
72 return aen->read_index;
73}
74
75static inline int
76bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id)
77{
78 bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
79 return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
80 % aen->max_entry;
81}
82
83int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
84 void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
85 void (*gettimeofday)(struct bfa_timeval_s *));
86
87void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
88 int aen_type, union bfa_aen_data_u *aen_data);
89
90bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
91 struct bfa_aen_entry_s *aen_entry,
92 int entry_req, enum bfa_aen_app app_id, int *entry_ret);
93
94int bfa_aen_get_inst(struct bfa_aen_s *aen);
95
96#endif /* __BFA_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h b/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
deleted file mode 100644
index 260d3ea1cab3..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ADAPTER Module */
19#ifndef __bfa_aen_adapter_h__
20#define __bfa_aen_adapter_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ADAPTER_ADD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_ADD)
27#define BFA_AEN_ADAPTER_REMOVE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_REMOVE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h b/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
deleted file mode 100644
index 12cd7aab5d53..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_AUDIT Module */
19#ifndef __bfa_aen_audit_h__
20#define __bfa_aen_audit_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_AUDIT_AUTH_ENABLE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_ENABLE)
27#define BFA_AEN_AUDIT_AUTH_DISABLE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_DISABLE)
29
30#endif
31
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h b/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
deleted file mode 100644
index 507d0b58d149..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ETHPORT Module */
19#ifndef __bfa_aen_ethport_h__
20#define __bfa_aen_ethport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ETHPORT_LINKUP \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKUP)
27#define BFA_AEN_ETHPORT_LINKDOWN \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKDOWN)
29#define BFA_AEN_ETHPORT_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_ENABLE)
31#define BFA_AEN_ETHPORT_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_DISABLE)
33
34#endif
35
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
deleted file mode 100644
index 4daf96faa266..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_IOC Module */
19#ifndef __bfa_aen_ioc_h__
20#define __bfa_aen_ioc_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_IOC_HBGOOD \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBGOOD)
27#define BFA_AEN_IOC_HBFAIL \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBFAIL)
29#define BFA_AEN_IOC_ENABLE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_ENABLE)
31#define BFA_AEN_IOC_DISABLE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE)
33#define BFA_AEN_IOC_FWMISMATCH \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH)
35#define BFA_AEN_IOC_FWCFG_ERROR \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWCFG_ERROR)
37#define BFA_AEN_IOC_INVALID_VENDOR \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_VENDOR)
39#define BFA_AEN_IOC_INVALID_NWWN \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_NWWN)
41#define BFA_AEN_IOC_INVALID_PWWN \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_PWWN)
43
44#endif
45
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h b/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
deleted file mode 100644
index a7d8ddcfef99..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_ITNIM Module */
19#ifndef __bfa_aen_itnim_h__
20#define __bfa_aen_itnim_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_ITNIM_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_ONLINE)
27#define BFA_AEN_ITNIM_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_OFFLINE)
29#define BFA_AEN_ITNIM_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_DISCONNECT)
31
32#endif
33
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h b/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
deleted file mode 100644
index 5a8ebb65193f..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_LPORT Module */
19#ifndef __bfa_aen_lport_h__
20#define __bfa_aen_lport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_LPORT_NEW \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW)
27#define BFA_AEN_LPORT_DELETE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE)
29#define BFA_AEN_LPORT_ONLINE \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_ONLINE)
31#define BFA_AEN_LPORT_OFFLINE \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_OFFLINE)
33#define BFA_AEN_LPORT_DISCONNECT \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DISCONNECT)
35#define BFA_AEN_LPORT_NEW_PROP \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_PROP)
37#define BFA_AEN_LPORT_DELETE_PROP \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_PROP)
39#define BFA_AEN_LPORT_NEW_STANDARD \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_STANDARD)
41#define BFA_AEN_LPORT_DELETE_STANDARD \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_STANDARD)
43#define BFA_AEN_LPORT_NPIV_DUP_WWN \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_DUP_WWN)
45#define BFA_AEN_LPORT_NPIV_FABRIC_MAX \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_FABRIC_MAX)
47#define BFA_AEN_LPORT_NPIV_UNKNOWN \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_UNKNOWN)
49
50#endif
51
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_port.h b/drivers/scsi/bfa/include/aen/bfa_aen_port.h
deleted file mode 100644
index 9add905a622d..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_port.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_PORT Module */
19#ifndef __bfa_aen_port_h__
20#define __bfa_aen_port_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_PORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ONLINE)
27#define BFA_AEN_PORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_OFFLINE)
29#define BFA_AEN_PORT_RLIR \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_RLIR)
31#define BFA_AEN_PORT_SFP_INSERT \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_INSERT)
33#define BFA_AEN_PORT_SFP_REMOVE \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_REMOVE)
35#define BFA_AEN_PORT_SFP_POM \
36 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_POM)
37#define BFA_AEN_PORT_ENABLE \
38 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ENABLE)
39#define BFA_AEN_PORT_DISABLE \
40 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISABLE)
41#define BFA_AEN_PORT_AUTH_ON \
42 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_ON)
43#define BFA_AEN_PORT_AUTH_OFF \
44 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_OFF)
45#define BFA_AEN_PORT_DISCONNECT \
46 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISCONNECT)
47#define BFA_AEN_PORT_QOS_NEG \
48 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_QOS_NEG)
49#define BFA_AEN_PORT_FABRIC_NAME_CHANGE \
50 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_FABRIC_NAME_CHANGE)
51#define BFA_AEN_PORT_SFP_ACCESS_ERROR \
52 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_ACCESS_ERROR)
53#define BFA_AEN_PORT_SFP_UNSUPPORT \
54 BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_UNSUPPORT)
55
56#endif
57
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h b/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
deleted file mode 100644
index 7e4be1fd5e15..000000000000
--- a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for BFA_AEN_CAT_RPORT Module */
19#ifndef __bfa_aen_rport_h__
20#define __bfa_aen_rport_h__
21
22#include <cs/bfa_log.h>
23#include <defs/bfa_defs_aen.h>
24
25#define BFA_AEN_RPORT_ONLINE \
26 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_ONLINE)
27#define BFA_AEN_RPORT_OFFLINE \
28 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_OFFLINE)
29#define BFA_AEN_RPORT_DISCONNECT \
30 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_DISCONNECT)
31#define BFA_AEN_RPORT_QOS_PRIO \
32 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_PRIO)
33#define BFA_AEN_RPORT_QOS_FLOWID \
34 BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_FLOWID)
35
36#endif
37
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
deleted file mode 100644
index d52b32f5695c..000000000000
--- a/drivers/scsi/bfa/include/bfa.h
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_H__
18#define __BFA_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_debug.h>
22#include <cs/bfa_q.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25#include <cs/bfa_plog.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_ioc.h>
28#include <defs/bfa_defs_iocfc.h>
29#include <aen/bfa_aen.h>
30#include <bfi/bfi.h>
31
32struct bfa_s;
33#include <bfa_intr_priv.h>
34
35struct bfa_pcidev_s;
36
37/**
38 * PCI devices supported by the current BFA
39 */
40struct bfa_pciid_s {
41 u16 device_id;
42 u16 vendor_id;
43};
44
45extern char bfa_version[];
46
47/**
48 * BFA Power Mgmt Commands
49 */
50enum bfa_pm_cmd {
51 BFA_PM_CTL_D0 = 0,
52 BFA_PM_CTL_D1 = 1,
53 BFA_PM_CTL_D2 = 2,
54 BFA_PM_CTL_D3 = 3,
55};
56
57/**
58 * BFA memory resources
59 */
60enum bfa_mem_type {
61 BFA_MEM_TYPE_KVA = 1, /*! Kernel Virtual Memory *(non-dma-able) */
62 BFA_MEM_TYPE_DMA = 2, /*! DMA-able memory */
63 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
64};
65
66struct bfa_mem_elem_s {
67 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
68 u32 mem_len; /* Total Length in Bytes */
69 u8 *kva; /* kernel virtual address */
70 u64 dma; /* dma address if DMA memory */
71 u8 *kva_curp; /* kva allocation cursor */
72 u64 dma_curp; /* dma allocation cursor */
73};
74
75struct bfa_meminfo_s {
76 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
77};
78#define bfa_meminfo_kva(_m) \
79 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
80#define bfa_meminfo_dma_virt(_m) \
81 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
82#define bfa_meminfo_dma_phys(_m) \
83 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
84
85/**
86 * Generic Scatter Gather Element used by driver
87 */
88struct bfa_sge_s {
89 u32 sg_len;
90 void *sg_addr;
91};
92
93#define bfa_sge_to_be(__sge) do { \
94 ((u32 *)(__sge))[0] = bfa_os_htonl(((u32 *)(__sge))[0]); \
95 ((u32 *)(__sge))[1] = bfa_os_htonl(((u32 *)(__sge))[1]); \
96 ((u32 *)(__sge))[2] = bfa_os_htonl(((u32 *)(__sge))[2]); \
97} while (0)
98
99
100/*
101 * bfa stats interfaces
102 */
103#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
104
105#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
107#define bfa_ioc_clear_stats(__bfa) \
108 bfa_ioc_clr_stats(&(__bfa)->ioc)
109#define bfa_get_nports(__bfa) \
110 bfa_ioc_get_nports(&(__bfa)->ioc)
111#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
112 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
113#define bfa_get_adapter_model(__bfa, __model) \
114 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
115#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
116 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
117#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
118 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
119#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
120 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
121#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
122 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
123#define bfa_get_ioc_state(__bfa) \
124 bfa_ioc_get_state(&(__bfa)->ioc)
125#define bfa_get_type(__bfa) \
126 bfa_ioc_get_type(&(__bfa)->ioc)
127#define bfa_get_mac(__bfa) \
128 bfa_ioc_get_mac(&(__bfa)->ioc)
129#define bfa_get_mfg_mac(__bfa) \
130 bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
131#define bfa_get_fw_clock_res(__bfa) \
132 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
133
134/*
135 * bfa API functions
136 */
137void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
138void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
139void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
140void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
141 struct bfa_meminfo_s *meminfo);
142void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
143 struct bfa_meminfo_s *meminfo,
144 struct bfa_pcidev_s *pcidev);
145void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
146void bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod);
147void bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen);
148void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
149void bfa_detach(struct bfa_s *bfa);
150void bfa_init(struct bfa_s *bfa);
151void bfa_start(struct bfa_s *bfa);
152void bfa_stop(struct bfa_s *bfa);
153void bfa_attach_fcs(struct bfa_s *bfa);
154void bfa_cb_init(void *bfad, bfa_status_t status);
155void bfa_cb_stop(void *bfad, bfa_status_t status);
156void bfa_cb_updateq(void *bfad, bfa_status_t status);
157
158bfa_boolean_t bfa_intx(struct bfa_s *bfa);
159void bfa_isr_enable(struct bfa_s *bfa);
160void bfa_isr_disable(struct bfa_s *bfa);
161void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
162 u32 *num_vecs, u32 *max_vec_bit);
163#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec))
164
165void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
166void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
167void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
168
169typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
170void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
171bfa_status_t bfa_iocfc_get_stats(struct bfa_s *bfa,
172 struct bfa_iocfc_stats_s *stats,
173 bfa_cb_ioc_t cbfn, void *cbarg);
174bfa_status_t bfa_iocfc_clear_stats(struct bfa_s *bfa,
175 bfa_cb_ioc_t cbfn, void *cbarg);
176void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
177
178void bfa_adapter_get_attr(struct bfa_s *bfa,
179 struct bfa_adapter_attr_s *ad_attr);
180u64 bfa_adapter_get_id(struct bfa_s *bfa);
181
182bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
183 struct bfa_iocfc_intr_attr_s *attr);
184
185void bfa_iocfc_enable(struct bfa_s *bfa);
186void bfa_iocfc_disable(struct bfa_s *bfa);
187void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
188void bfa_chip_reset(struct bfa_s *bfa);
189void bfa_cb_ioc_disable(void *bfad);
190void bfa_timer_tick(struct bfa_s *bfa);
191#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
192 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
193
194/*
195 * BFA debug API functions
196 */
197bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
198bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
199void bfa_debug_fwsave_clear(struct bfa_s *bfa);
200
201#include "bfa_priv.h"
202
203#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_fcpim.h b/drivers/scsi/bfa/include/bfa_fcpim.h
deleted file mode 100644
index 4bc9453081df..000000000000
--- a/drivers/scsi/bfa/include/bfa_fcpim.h
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPIM_H__
19#define __BFA_FCPIM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcpim.h>
24#include <defs/bfa_defs_fcpim.h>
25
26/*
27 * forward declarations
28 */
29struct bfa_itnim_s;
30struct bfa_ioim_s;
31struct bfa_tskim_s;
32struct bfad_ioim_s;
33struct bfad_tskim_s;
34
35/*
36 * bfa fcpim module API functions
37 */
38void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
39u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
40void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
41u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
42bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
43 struct bfa_fcpim_stats_s *modstats);
44bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
45void bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state);
46void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
47void bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect);
48
49#define bfa_fcpim_ioredirect_enabled(__bfa) \
50 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
51
52#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
53{ \
54 struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
55 __fcpim->reqq++; \
56 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
57 *(__qid) = __fcpim->reqq; \
58}
59
60#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
61 *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
62
63
64/*
65 * bfa itnim API functions
66 */
67struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
68 struct bfa_rport_s *rport, void *itnim);
69void bfa_itnim_delete(struct bfa_itnim_s *itnim);
70void bfa_itnim_online(struct bfa_itnim_s *itnim,
71 bfa_boolean_t seq_rec);
72void bfa_itnim_offline(struct bfa_itnim_s *itnim);
73void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
74 struct bfa_itnim_hal_stats_s *stats);
75void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
76
77#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
78
79/**
80 * BFA completion callback for bfa_itnim_online().
81 *
82 * @param[in] itnim FCS or driver itnim instance
83 *
84 * return None
85 */
86void bfa_cb_itnim_online(void *itnim);
87
88/**
89 * BFA completion callback for bfa_itnim_offline().
90 *
91 * @param[in] itnim FCS or driver itnim instance
92 *
93 * return None
94 */
95void bfa_cb_itnim_offline(void *itnim);
96void bfa_cb_itnim_tov_begin(void *itnim);
97void bfa_cb_itnim_tov(void *itnim);
98
99/**
100 * BFA notification to FCS/driver for second level error recovery.
101 *
102 * Atleast one I/O request has timedout and target is unresponsive to
103 * repeated abort requests. Second level error recovery should be initiated
104 * by starting implicit logout and recovery procedures.
105 *
106 * @param[in] itnim FCS or driver itnim instance
107 *
108 * return None
109 */
110void bfa_cb_itnim_sler(void *itnim);
111
112/*
113 * bfa ioim API functions
114 */
115struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
116 struct bfad_ioim_s *dio,
117 struct bfa_itnim_s *itnim,
118 u16 nsgles);
119
120void bfa_ioim_free(struct bfa_ioim_s *ioim);
121void bfa_ioim_start(struct bfa_ioim_s *ioim);
122void bfa_ioim_abort(struct bfa_ioim_s *ioim);
123void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
124 bfa_boolean_t iotov);
125
126
127/**
128 * I/O completion notification.
129 *
130 * @param[in] dio driver IO structure
131 * @param[in] io_status IO completion status
132 * @param[in] scsi_status SCSI status returned by target
133 * @param[in] sns_len SCSI sense length, 0 if none
134 * @param[in] sns_info SCSI sense data, if any
135 * @param[in] residue Residual length
136 *
137 * @return None
138 */
139void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
140 enum bfi_ioim_status io_status,
141 u8 scsi_status, int sns_len,
142 u8 *sns_info, s32 residue);
143
144/**
145 * I/O good completion notification.
146 *
147 * @param[in] dio driver IO structure
148 *
149 * @return None
150 */
151void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
152
153/**
154 * I/O abort completion notification
155 *
156 * @param[in] dio driver IO that was aborted
157 *
158 * @return None
159 */
160void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
161void bfa_cb_ioim_resfree(void *hcb_bfad);
162
163void bfa_cb_ioim_resfree(void *hcb_bfad);
164
165/*
166 * bfa tskim API functions
167 */
168struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
169 struct bfad_tskim_s *dtsk);
170void bfa_tskim_free(struct bfa_tskim_s *tskim);
171void bfa_tskim_start(struct bfa_tskim_s *tskim,
172 struct bfa_itnim_s *itnim, lun_t lun,
173 enum fcp_tm_cmnd tm, u8 t_secs);
174void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
175 enum bfi_tskim_status tsk_status);
176
177#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_fcptm.h b/drivers/scsi/bfa/include/bfa_fcptm.h
deleted file mode 100644
index 5f5ffe0bb1bb..000000000000
--- a/drivers/scsi/bfa/include/bfa_fcptm.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCPTM_H__
19#define __BFA_FCPTM_H__
20
21#include <bfa.h>
22#include <bfa_svc.h>
23#include <bfi/bfi_fcptm.h>
24
25/*
26 * forward declarations
27 */
28struct bfa_tin_s;
29struct bfa_iotm_s;
30struct bfa_tsktm_s;
31
32/*
33 * bfa fcptm module API functions
34 */
35void bfa_fcptm_path_tov_set(struct bfa_s *bfa, u16 path_tov);
36u16 bfa_fcptm_path_tov_get(struct bfa_s *bfa);
37void bfa_fcptm_qdepth_set(struct bfa_s *bfa, u16 q_depth);
38u16 bfa_fcptm_qdepth_get(struct bfa_s *bfa);
39
40/*
41 * bfa tin API functions
42 */
43void bfa_tin_get_stats(struct bfa_tin_s *tin, struct bfa_tin_stats_s *stats);
44void bfa_tin_clear_stats(struct bfa_tin_s *tin);
45
46#endif /* __BFA_FCPTM_H__ */
47
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
deleted file mode 100644
index 7840943d73b0..000000000000
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ /dev/null
@@ -1,338 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_SVC_H__
18#define __BFA_SVC_H__
19
20/*
21 * forward declarations
22 */
23struct bfa_fcxp_s;
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_pport.h>
27#include <defs/bfa_defs_rport.h>
28#include <defs/bfa_defs_qos.h>
29#include <defs/bfa_defs_fcport.h>
30#include <cs/bfa_sm.h>
31#include <bfa.h>
32
33/**
34 * BFA rport information.
35 */
36struct bfa_rport_info_s {
37 u16 max_frmsz; /* max rcv pdu size */
38 u32 pid:24, /* remote port ID */
39 lp_tag:8; /* tag */
40 u32 local_pid:24, /* local port ID */
41 cisc:8; /* CIRO supported */
42 u8 fc_class; /* supported FC classes. enum fc_cos */
43 u8 vf_en; /* virtual fabric enable */
44 u16 vf_id; /* virtual fabric ID */
45 enum bfa_pport_speed speed; /* Rport's current speed */
46};
47
48/**
49 * BFA rport data structure
50 */
51struct bfa_rport_s {
52 struct list_head qe; /* queue element */
53 bfa_sm_t sm; /* state machine */
54 struct bfa_s *bfa; /* backpointer to BFA */
55 void *rport_drv; /* fcs/driver rport object */
56 u16 fw_handle; /* firmware rport handle */
57 u16 rport_tag; /* BFA rport tag */
58 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
59 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
60 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
61 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
62 struct bfa_rport_qos_attr_s qos_attr;
63 union a {
64 bfa_status_t status; /* f/w status */
65 void *fw_msg; /* QoS scn event */
66 } event_arg;
67};
68#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
69
70/**
71 * Send completion callback.
72 */
73typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
74 void *cbarg, enum bfa_status req_status,
75 u32 rsp_len, u32 resid_len,
76 struct fchs_s *rsp_fchs);
77
78/**
79 * BFA fcxp allocation (asynchronous)
80 */
81typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
82
83struct bfa_fcxp_wqe_s {
84 struct list_head qe;
85 bfa_fcxp_alloc_cbfn_t alloc_cbfn;
86 void *alloc_cbarg;
87};
88
89typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
90typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
91
92#define BFA_UF_BUFSZ (2 * 1024 + 256)
93
94/**
95 * @todo private
96 */
97struct bfa_uf_buf_s {
98 u8 d[BFA_UF_BUFSZ];
99};
100
101
102struct bfa_uf_s {
103 struct list_head qe; /* queue element */
104 struct bfa_s *bfa; /* bfa instance */
105 u16 uf_tag; /* identifying tag fw msgs */
106 u16 vf_id;
107 u16 src_rport_handle;
108 u16 rsvd;
109 u8 *data_ptr;
110 u16 data_len; /* actual receive length */
111 u16 pb_len; /* posted buffer length */
112 void *buf_kva; /* buffer virtual address */
113 u64 buf_pa; /* buffer physical address */
114 struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
115 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
116};
117
118typedef void (*bfa_cb_pport_t) (void *cbarg, enum bfa_status status);
119
120/**
121 * bfa lport login/logout service interface
122 */
123struct bfa_lps_s {
124 struct list_head qe; /* queue element */
125 struct bfa_s *bfa; /* parent bfa instance */
126 bfa_sm_t sm; /* finite state machine */
127 u8 lp_tag; /* lport tag */
128 u8 reqq; /* lport request queue */
129 u8 alpa; /* ALPA for loop topologies */
130 u32 lp_pid; /* lport port ID */
131 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */
132 bfa_boolean_t auth_en; /* enable authentication */
133 bfa_boolean_t auth_req; /* authentication required */
134 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
135 bfa_boolean_t fport; /* attached peer is F_PORT */
136 bfa_boolean_t brcd_switch;/* attached peer is brcd switch */
137 bfa_status_t status; /* login status */
138 u16 pdusz; /* max receive PDU size */
139 u16 pr_bbcred; /* BB_CREDIT from peer */
140 u8 lsrjt_rsn; /* LSRJT reason */
141 u8 lsrjt_expl; /* LSRJT explanation */
142 wwn_t pwwn; /* port wwn of lport */
143 wwn_t nwwn; /* node wwn of lport */
144 wwn_t pr_pwwn; /* port wwn of lport peer */
145 wwn_t pr_nwwn; /* node wwn of lport peer */
146 mac_t lp_mac; /* fpma/spma MAC for lport */
147 mac_t fcf_mac; /* FCF MAC of lport */
148 struct bfa_reqq_wait_s wqe; /* request wait queue element */
149 void *uarg; /* user callback arg */
150 struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
151 struct bfi_lps_login_rsp_s *loginrsp;
152 bfa_eproto_status_t ext_status;
153};
154
155#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
156
157/*
158 * bfa pport API functions
159 */
160bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
161bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
162bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
163 enum bfa_pport_speed speed);
164enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
165bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
166 enum bfa_pport_topology topo);
167enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
168bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
169bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
170u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
171bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
172bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
173u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
174u32 bfa_fcport_mypid(struct bfa_s *bfa);
175u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
176bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
177bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
178bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
179void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
180wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
181void bfa_fcport_event_register(struct bfa_s *bfa,
182 void (*event_cbfn) (void *cbarg,
183 bfa_pport_event_t event), void *event_cbarg);
184bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
185void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
186void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
187bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
188 enum bfa_pport_speed speed);
189enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
190
191void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
192void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
193void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
194 bfa_boolean_t link_e2e_beacon);
195void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
196void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
197 struct bfa_qos_attr_s *qos_attr);
198void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
199 struct bfa_qos_vc_attr_s *qos_vc_attr);
200bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
201 union bfa_fcport_stats_u *stats,
202 bfa_cb_pport_t cbfn, void *cbarg);
203bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
204 void *cbarg);
205bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
206 union bfa_fcport_stats_u *stats,
207 bfa_cb_pport_t cbfn, void *cbarg);
208bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
209 void *cbarg);
210
211bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
212bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
213bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
214 union bfa_fcport_stats_u *stats,
215 bfa_cb_pport_t cbfn, void *cbarg);
216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
217 void *cbarg);
218bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
219
220/*
221 * bfa rport API functions
222 */
223struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
224void bfa_rport_delete(struct bfa_rport_s *rport);
225void bfa_rport_online(struct bfa_rport_s *rport,
226 struct bfa_rport_info_s *rport_info);
227void bfa_rport_offline(struct bfa_rport_s *rport);
228void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed);
229void bfa_rport_get_stats(struct bfa_rport_s *rport,
230 struct bfa_rport_hal_stats_s *stats);
231void bfa_rport_clear_stats(struct bfa_rport_s *rport);
232void bfa_cb_rport_online(void *rport);
233void bfa_cb_rport_offline(void *rport);
234void bfa_cb_rport_qos_scn_flowid(void *rport,
235 struct bfa_rport_qos_attr_s old_qos_attr,
236 struct bfa_rport_qos_attr_s new_qos_attr);
237void bfa_cb_rport_qos_scn_prio(void *rport,
238 struct bfa_rport_qos_attr_s old_qos_attr,
239 struct bfa_rport_qos_attr_s new_qos_attr);
240void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
241 struct bfa_rport_qos_attr_s *qos_attr);
242
243/*
244 * bfa fcxp API functions
245 */
246struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
247 int nreq_sgles, int nrsp_sgles,
248 bfa_fcxp_get_sgaddr_t get_req_sga,
249 bfa_fcxp_get_sglen_t get_req_sglen,
250 bfa_fcxp_get_sgaddr_t get_rsp_sga,
251 bfa_fcxp_get_sglen_t get_rsp_sglen);
252void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
253 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *cbarg);
254void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
255 struct bfa_fcxp_wqe_s *wqe);
256void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
257
258void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
259void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
260
261void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
262
263void bfa_fcxp_send(struct bfa_fcxp_s *fcxp,
264 struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag,
265 bfa_boolean_t cts, enum fc_cos cos,
266 u32 reqlen, struct fchs_s *fchs,
267 bfa_cb_fcxp_send_t cbfn,
268 void *cbarg,
269 u32 rsp_maxlen, u8 rsp_timeout);
270bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
271u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
272u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
273
274static inline void *
275bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
276{
277 return uf->data_ptr;
278}
279
280static inline u16
281bfa_uf_get_frmlen(struct bfa_uf_s *uf)
282{
283 return uf->data_len;
284}
285
286/**
287 * Callback prototype for unsolicited frame receive handler.
288 *
289 * @param[in] cbarg callback arg for receive handler
290 * @param[in] uf unsolicited frame descriptor
291 *
292 * @return None
293 */
294typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
295
296/*
297 * bfa uf API functions
298 */
299void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
300 void *cbarg);
301void bfa_uf_free(struct bfa_uf_s *uf);
302
303/**
304 * bfa lport service api
305 */
306
307u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
308struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
309void bfa_lps_delete(struct bfa_lps_s *lps);
310void bfa_lps_discard(struct bfa_lps_s *lps);
311void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
312 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en);
313void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
314 wwn_t nwwn);
315void bfa_lps_flogo(struct bfa_lps_s *lps);
316void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
317u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
318bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
319bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
320bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
321bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
322bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
323u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
324u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
325u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
326wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
327wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
328u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
329u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
330mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
331void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
332void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
333void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
334void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
335void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
336
337#endif /* __BFA_SVC_H__ */
338
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
deleted file mode 100644
index f71087448222..000000000000
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TIMER_H__
18#define __BFA_TIMER_H__
19
20#include <bfa_os_inc.h>
21#include <cs/bfa_q.h>
22
23struct bfa_s;
24
25typedef void (*bfa_timer_cbfn_t)(void *);
26
27/**
28 * BFA timer data structure
29 */
30struct bfa_timer_s {
31 struct list_head qe;
32 bfa_timer_cbfn_t timercb;
33 void *arg;
34 int timeout; /**< in millisecs. */
35};
36
37/**
38 * Timer module structure
39 */
40struct bfa_timer_mod_s {
41 struct list_head timer_q;
42};
43
44#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
45
46void bfa_timer_beat(struct bfa_timer_mod_s *mod);
47void bfa_timer_init(struct bfa_timer_mod_s *mod);
48void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
49 bfa_timer_cbfn_t timercb, void *arg,
50 unsigned int timeout);
51void bfa_timer_stop(struct bfa_timer_s *timer);
52
53#endif /* __BFA_TIMER_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
deleted file mode 100644
index a550e80cabd2..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_H__
19#define __BFI_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_status.h>
23
24#pragma pack(1)
25
26/**
27 * Msg header common to all msgs
28 */
29struct bfi_mhdr_s {
30 u8 msg_class; /* @ref bfi_mclass_t */
31 u8 msg_id; /* msg opcode with in the class */
32 union {
33 struct {
34 u8 rsvd;
35 u8 lpu_id; /* msg destination */
36 } h2i;
37 u16 i2htok; /* token in msgs to host */
38 } mtag;
39};
40
41#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
42 (_mh).msg_class = (_mc); \
43 (_mh).msg_id = (_op); \
44 (_mh).mtag.h2i.lpu_id = (_lpuid); \
45} while (0)
46
47#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
48 (_mh).msg_class = (_mc); \
49 (_mh).msg_id = (_op); \
50 (_mh).mtag.i2htok = (_i2htok); \
51} while (0)
52
53/*
54 * Message opcodes: 0-127 to firmware, 128-255 to host
55 */
56#define BFI_I2H_OPCODE_BASE 128
57#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
58
59/**
60 ****************************************************************************
61 *
62 * Scatter Gather Element and Page definition
63 *
64 ****************************************************************************
65 */
66
67#define BFI_SGE_INLINE 1
68#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
69
70/**
71 * SG Flags
72 */
73enum {
74 BFI_SGE_DATA = 0, /* data address, not last */
75 BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
76 BFI_SGE_DATA_LAST = 3, /* data address, last */
77 BFI_SGE_LINK = 2, /* link address */
78 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
79};
80
81/**
82 * DMA addresses
83 */
84union bfi_addr_u {
85 struct {
86 u32 addr_lo;
87 u32 addr_hi;
88 } a32;
89};
90
91/**
92 * Scatter Gather Element
93 */
94struct bfi_sge_s {
95#ifdef __BIGENDIAN
96 u32 flags:2,
97 rsvd:2,
98 sg_len:28;
99#else
100 u32 sg_len:28,
101 rsvd:2,
102 flags:2;
103#endif
104 union bfi_addr_u sga;
105};
106
107/**
108 * Scatter Gather Page
109 */
110#define BFI_SGPG_DATA_SGES 7
111#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
112#define BFI_SGPG_RSVD_WD_LEN 8
113struct bfi_sgpg_s {
114 struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
115 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
116};
117
118/*
119 * Large Message structure - 128 Bytes size Msgs
120 */
121#define BFI_LMSG_SZ 128
122#define BFI_LMSG_PL_WSZ \
123 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
124
125struct bfi_msg_s {
126 struct bfi_mhdr_s mhdr;
127 u32 pl[BFI_LMSG_PL_WSZ];
128};
129
130/**
131 * Mailbox message structure
132 */
133#define BFI_MBMSG_SZ 7
134struct bfi_mbmsg_s {
135 struct bfi_mhdr_s mh;
136 u32 pl[BFI_MBMSG_SZ];
137};
138
139/**
140 * Message Classes
141 */
142enum bfi_mclass {
143 BFI_MC_IOC = 1, /* IO Controller (IOC) */
144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
145 BFI_MC_FLASH = 3, /* Flash message class */
146 BFI_MC_CEE = 4, /* CEE */
147 BFI_MC_FCPORT = 5, /* FC port */
148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
149 BFI_MC_LL = 7, /* Link Layer */
150 BFI_MC_UF = 8, /* Unsolicited frame receive */
151 BFI_MC_FCXP = 9, /* FC Transport */
152 BFI_MC_LPS = 10, /* lport fc login services */
153 BFI_MC_RPORT = 11, /* Remote port */
154 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
155 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
156 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
157 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
158 BFI_MC_IOIM = 16, /* IO (Initiator mode) */
159 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
160 BFI_MC_TSKIM = 18, /* Initiator Task management */
161 BFI_MC_SBOOT = 19, /* SAN boot services */
162 BFI_MC_IPFC = 20, /* IP over FC Msgs */
163 BFI_MC_PORT = 21, /* Physical port */
164 BFI_MC_MAX = 32
165};
166
167#define BFI_IOC_MAX_CQS 4
168#define BFI_IOC_MAX_CQS_ASIC 8
169#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
170
171#pragma pack()
172
173#endif /* __BFI_H__ */
174
diff --git a/drivers/scsi/bfa/include/bfi/bfi_boot.h b/drivers/scsi/bfa/include/bfi/bfi_boot.h
deleted file mode 100644
index 5955afe7d108..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_boot.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/*
18 * bfi_boot.h
19 */
20
21#ifndef __BFI_BOOT_H__
22#define __BFI_BOOT_H__
23
24#define BFI_BOOT_TYPE_OFF 8
25#define BFI_BOOT_PARAM_OFF 12
26
27#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
28#define BFI_BOOT_TYPE_FLASH 1
29#define BFI_BOOT_TYPE_MEMTEST 2
30
31#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
32#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
33
34#endif
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cee.h b/drivers/scsi/bfa/include/bfi/bfi_cee.h
deleted file mode 100644
index 0970596583ea..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_cee.h
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17/**
18 * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
19 * All rights reserved.
20 *
21 * bfi_dcbx.h BFI Interface (Mailbox commands and related structures)
22 * between host driver and DCBX/LLDP firmware module.
23 *
24**/
25
26#ifndef __BFI_CEE_H__
27#define __BFI_CEE_H__
28
29#include <bfi/bfi.h>
30
31#pragma pack(1)
32
33
34enum bfi_cee_h2i_msgs_e {
35 BFI_CEE_H2I_GET_CFG_REQ = 1,
36 BFI_CEE_H2I_RESET_STATS = 2,
37 BFI_CEE_H2I_GET_STATS_REQ = 3,
38};
39
40
41enum bfi_cee_i2h_msgs_e {
42 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
43 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
44 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
45};
46
47
48/* Data structures */
49
50/*
51 * BFI_CEE_H2I_RESET_STATS
52 */
53struct bfi_lldp_reset_stats_s {
54 struct bfi_mhdr_s mh;
55};
56
57/*
58 * BFI_CEE_H2I_RESET_STATS
59 */
60struct bfi_cee_reset_stats_s {
61 struct bfi_mhdr_s mh;
62};
63
64/*
65 * BFI_CEE_H2I_GET_CFG_REQ
66 */
67struct bfi_cee_get_req_s {
68 struct bfi_mhdr_s mh;
69 union bfi_addr_u dma_addr;
70};
71
72
73/*
74 * BFI_CEE_I2H_GET_CFG_RSP
75 */
76struct bfi_cee_get_rsp_s {
77 struct bfi_mhdr_s mh;
78 u8 cmd_status;
79 u8 rsvd[3];
80};
81
82/*
83 * BFI_CEE_H2I_GET_STATS_REQ
84 */
85struct bfi_cee_stats_req_s {
86 struct bfi_mhdr_s mh;
87 union bfi_addr_u dma_addr;
88};
89
90
91/*
92 * BFI_CEE_I2H_GET_STATS_RSP
93 */
94struct bfi_cee_stats_rsp_s {
95 struct bfi_mhdr_s mh;
96 u8 cmd_status;
97 u8 rsvd[3];
98};
99
100
101
102union bfi_cee_h2i_msg_u {
103 struct bfi_mhdr_s mh;
104 struct bfi_cee_get_req_s get_req;
105 struct bfi_cee_stats_req_s stats_req;
106};
107
108
109union bfi_cee_i2h_msg_u {
110 struct bfi_mhdr_s mh;
111 struct bfi_cee_get_rsp_s get_rsp;
112 struct bfi_cee_stats_rsp_s stats_rsp;
113};
114
115#pragma pack()
116
117
118#endif /* __BFI_CEE_H__ */
119
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
deleted file mode 100644
index c0ef5a93b797..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ /dev/null
@@ -1,640 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * bfi_ctreg.h catapult host block register definitions
20 *
21 * !!! Do not edit. Auto generated. !!!
22 */
23
24#ifndef __BFI_CTREG_H__
25#define __BFI_CTREG_H__
26
27
28#define HOSTFN0_LPU_MBOX0_0 0x00019200
29#define HOSTFN1_LPU_MBOX0_8 0x00019260
30#define LPU_HOSTFN0_MBOX0_0 0x00019280
31#define LPU_HOSTFN1_MBOX0_8 0x000192e0
32#define HOSTFN2_LPU_MBOX0_0 0x00019400
33#define HOSTFN3_LPU_MBOX0_8 0x00019460
34#define LPU_HOSTFN2_MBOX0_0 0x00019480
35#define LPU_HOSTFN3_MBOX0_8 0x000194e0
36#define HOSTFN0_INT_STATUS 0x00014000
37#define __HOSTFN0_HALT_OCCURRED 0x01000000
38#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
39#define __HOSTFN0_INT_STATUS_LVL_SH 20
40#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
41#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
42#define __HOSTFN0_INT_STATUS_P_SH 16
43#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
44#define __HOSTFN0_INT_STATUS_F 0x0000ffff
45#define HOSTFN0_INT_MSK 0x00014004
46#define HOST_PAGE_NUM_FN0 0x00014008
47#define __HOST_PAGE_NUM_FN 0x000001ff
48#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
49#define __MSIX_ERR_INDEX_FN 0x000001ff
50#define HOSTFN1_INT_STATUS 0x00014100
51#define __HOSTFN1_HALT_OCCURRED 0x01000000
52#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
53#define __HOSTFN1_INT_STATUS_LVL_SH 20
54#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
55#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
56#define __HOSTFN1_INT_STATUS_P_SH 16
57#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
58#define __HOSTFN1_INT_STATUS_F 0x0000ffff
59#define HOSTFN1_INT_MSK 0x00014104
60#define HOST_PAGE_NUM_FN1 0x00014108
61#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
62#define APP_PLL_425_CTL_REG 0x00014204
63#define __P_425_PLL_LOCK 0x80000000
64#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
65#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
66#define __APP_PLL_425_RESET_TIMER_SH 17
67#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
68#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
69#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
70#define __APP_PLL_425_CNTLMT0_1_SH 14
71#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
72#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
73#define __APP_PLL_425_JITLMT0_1_SH 12
74#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
75#define __APP_PLL_425_HREF 0x00000800
76#define __APP_PLL_425_HDIV 0x00000400
77#define __APP_PLL_425_P0_1_MK 0x00000300
78#define __APP_PLL_425_P0_1_SH 8
79#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
80#define __APP_PLL_425_Z0_2_MK 0x000000e0
81#define __APP_PLL_425_Z0_2_SH 5
82#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
83#define __APP_PLL_425_RSEL200500 0x00000010
84#define __APP_PLL_425_ENARST 0x00000008
85#define __APP_PLL_425_BYPASS 0x00000004
86#define __APP_PLL_425_LRESETN 0x00000002
87#define __APP_PLL_425_ENABLE 0x00000001
88#define APP_PLL_312_CTL_REG 0x00014208
89#define __P_312_PLL_LOCK 0x80000000
90#define __ENABLE_MAC_AHB_1 0x00800000
91#define __ENABLE_MAC_AHB_0 0x00400000
92#define __ENABLE_MAC_1 0x00200000
93#define __ENABLE_MAC_0 0x00100000
94#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
95#define __APP_PLL_312_RESET_TIMER_SH 17
96#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
97#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
98#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
99#define __APP_PLL_312_CNTLMT0_1_SH 14
100#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
101#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
102#define __APP_PLL_312_JITLMT0_1_SH 12
103#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
104#define __APP_PLL_312_HREF 0x00000800
105#define __APP_PLL_312_HDIV 0x00000400
106#define __APP_PLL_312_P0_1_MK 0x00000300
107#define __APP_PLL_312_P0_1_SH 8
108#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
109#define __APP_PLL_312_Z0_2_MK 0x000000e0
110#define __APP_PLL_312_Z0_2_SH 5
111#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
112#define __APP_PLL_312_RSEL200500 0x00000010
113#define __APP_PLL_312_ENARST 0x00000008
114#define __APP_PLL_312_BYPASS 0x00000004
115#define __APP_PLL_312_LRESETN 0x00000002
116#define __APP_PLL_312_ENABLE 0x00000001
117#define MBIST_CTL_REG 0x00014220
118#define __EDRAM_BISTR_START 0x00000004
119#define __MBIST_RESET 0x00000002
120#define __MBIST_START 0x00000001
121#define MBIST_STAT_REG 0x00014224
122#define __EDRAM_BISTR_STATUS 0x00000008
123#define __EDRAM_BISTR_DONE 0x00000004
124#define __MEM_BIT_STATUS 0x00000002
125#define __MBIST_DONE 0x00000001
126#define HOST_SEM0_REG 0x00014230
127#define __HOST_SEMAPHORE 0x00000001
128#define HOST_SEM1_REG 0x00014234
129#define HOST_SEM2_REG 0x00014238
130#define HOST_SEM3_REG 0x0001423c
131#define HOST_SEM0_INFO_REG 0x00014240
132#define HOST_SEM1_INFO_REG 0x00014244
133#define HOST_SEM2_INFO_REG 0x00014248
134#define HOST_SEM3_INFO_REG 0x0001424c
135#define ETH_MAC_SER_REG 0x00014288
136#define __APP_EMS_CKBUFAMPIN 0x00000020
137#define __APP_EMS_REFCLKSEL 0x00000010
138#define __APP_EMS_CMLCKSEL 0x00000008
139#define __APP_EMS_REFCKBUFEN2 0x00000004
140#define __APP_EMS_REFCKBUFEN1 0x00000002
141#define __APP_EMS_CHANNEL_SEL 0x00000001
142#define HOSTFN2_INT_STATUS 0x00014300
143#define __HOSTFN2_HALT_OCCURRED 0x01000000
144#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
145#define __HOSTFN2_INT_STATUS_LVL_SH 20
146#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
147#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
148#define __HOSTFN2_INT_STATUS_P_SH 16
149#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
150#define __HOSTFN2_INT_STATUS_F 0x0000ffff
151#define HOSTFN2_INT_MSK 0x00014304
152#define HOST_PAGE_NUM_FN2 0x00014308
153#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
154#define HOSTFN3_INT_STATUS 0x00014400
155#define __HALT_OCCURRED 0x01000000
156#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
157#define __HOSTFN3_INT_STATUS_LVL_SH 20
158#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
159#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
160#define __HOSTFN3_INT_STATUS_P_SH 16
161#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
162#define __HOSTFN3_INT_STATUS_F 0x0000ffff
163#define HOSTFN3_INT_MSK 0x00014404
164#define HOST_PAGE_NUM_FN3 0x00014408
165#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
166#define FNC_ID_REG 0x00014600
167#define __FUNCTION_NUMBER 0x00000007
168#define FNC_PERS_REG 0x00014604
169#define __F3_FUNCTION_ACTIVE 0x80000000
170#define __F3_FUNCTION_MODE 0x40000000
171#define __F3_PORT_MAP_MK 0x30000000
172#define __F3_PORT_MAP_SH 28
173#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
174#define __F3_VM_MODE 0x08000000
175#define __F3_INTX_STATUS_MK 0x07000000
176#define __F3_INTX_STATUS_SH 24
177#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
178#define __F2_FUNCTION_ACTIVE 0x00800000
179#define __F2_FUNCTION_MODE 0x00400000
180#define __F2_PORT_MAP_MK 0x00300000
181#define __F2_PORT_MAP_SH 20
182#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
183#define __F2_VM_MODE 0x00080000
184#define __F2_INTX_STATUS_MK 0x00070000
185#define __F2_INTX_STATUS_SH 16
186#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
187#define __F1_FUNCTION_ACTIVE 0x00008000
188#define __F1_FUNCTION_MODE 0x00004000
189#define __F1_PORT_MAP_MK 0x00003000
190#define __F1_PORT_MAP_SH 12
191#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
192#define __F1_VM_MODE 0x00000800
193#define __F1_INTX_STATUS_MK 0x00000700
194#define __F1_INTX_STATUS_SH 8
195#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
196#define __F0_FUNCTION_ACTIVE 0x00000080
197#define __F0_FUNCTION_MODE 0x00000040
198#define __F0_PORT_MAP_MK 0x00000030
199#define __F0_PORT_MAP_SH 4
200#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
201#define __F0_VM_MODE 0x00000008
202#define __F0_INTX_STATUS 0x00000007
203enum {
204 __F0_INTX_STATUS_MSIX = 0x0,
205 __F0_INTX_STATUS_INTA = 0x1,
206 __F0_INTX_STATUS_INTB = 0x2,
207 __F0_INTX_STATUS_INTC = 0x3,
208 __F0_INTX_STATUS_INTD = 0x4,
209};
210#define OP_MODE 0x0001460c
211#define __APP_ETH_CLK_LOWSPEED 0x00000004
212#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
213#define __GLOBAL_FCOE_MODE 0x00000001
214#define HOST_SEM4_REG 0x00014610
215#define HOST_SEM5_REG 0x00014614
216#define HOST_SEM6_REG 0x00014618
217#define HOST_SEM7_REG 0x0001461c
218#define HOST_SEM4_INFO_REG 0x00014620
219#define HOST_SEM5_INFO_REG 0x00014624
220#define HOST_SEM6_INFO_REG 0x00014628
221#define HOST_SEM7_INFO_REG 0x0001462c
222#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
223#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
224#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
225#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
226#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
227#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
228#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
229#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
230#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
231#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
232#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
233#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
234#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
235#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
236#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
237#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
238#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
239#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
240#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
241#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
242#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
243#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
244#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
245#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
246#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
247#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
248#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
249#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
250#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
251#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
252#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
253#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
254#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
255#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
256#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
257#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
258#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
259#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
260#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
261#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
262#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
263#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
264#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
265#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
266#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
267#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
268#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
269#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
270#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
271#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
272#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
273#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
274#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
275#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
276#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
277#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
278#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
279#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
280#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
281#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
282#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
283#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
284#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
285#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
286#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
287#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
288#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
289#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
290#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
291#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
292#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
293#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
294#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
295#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
296#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
297#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
298#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
299#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
300#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
301#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
302#define FW_INIT_HALT_P0 0x000191ac
303#define __FW_INIT_HALT_P 0x00000001
304#define FW_INIT_HALT_P1 0x000191bc
305#define CPE_PI_PTR_Q0 0x00038000
306#define __CPE_PI_UNUSED_MK 0xffff0000
307#define __CPE_PI_UNUSED_SH 16
308#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
309#define __CPE_PI_PTR 0x0000ffff
310#define CPE_PI_PTR_Q1 0x00038040
311#define CPE_CI_PTR_Q0 0x00038004
312#define __CPE_CI_UNUSED_MK 0xffff0000
313#define __CPE_CI_UNUSED_SH 16
314#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
315#define __CPE_CI_PTR 0x0000ffff
316#define CPE_CI_PTR_Q1 0x00038044
317#define CPE_DEPTH_Q0 0x00038008
318#define __CPE_DEPTH_UNUSED_MK 0xf8000000
319#define __CPE_DEPTH_UNUSED_SH 27
320#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
321#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
322#define __CPE_MSIX_VEC_INDEX_SH 16
323#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
324#define __CPE_DEPTH 0x0000ffff
325#define CPE_DEPTH_Q1 0x00038048
326#define CPE_QCTRL_Q0 0x0003800c
327#define __CPE_CTRL_UNUSED30_MK 0xfc000000
328#define __CPE_CTRL_UNUSED30_SH 26
329#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
330#define __CPE_FUNC_INT_CTRL_MK 0x03000000
331#define __CPE_FUNC_INT_CTRL_SH 24
332#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
333enum {
334 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
335 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
336 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
337 __CPE_FUNC_INT_CTRL_HALF = 0x3,
338};
339#define __CPE_CTRL_UNUSED20_MK 0x00f00000
340#define __CPE_CTRL_UNUSED20_SH 20
341#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
342#define __CPE_SCI_TH_MK 0x000f0000
343#define __CPE_SCI_TH_SH 16
344#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
345#define __CPE_CTRL_UNUSED10_MK 0x0000c000
346#define __CPE_CTRL_UNUSED10_SH 14
347#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
348#define __CPE_ACK_PENDING 0x00002000
349#define __CPE_CTRL_UNUSED40_MK 0x00001c00
350#define __CPE_CTRL_UNUSED40_SH 10
351#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
352#define __CPE_PCIEID_MK 0x00000300
353#define __CPE_PCIEID_SH 8
354#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
355#define __CPE_CTRL_UNUSED00_MK 0x000000fe
356#define __CPE_CTRL_UNUSED00_SH 1
357#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
358#define __CPE_ESIZE 0x00000001
359#define CPE_QCTRL_Q1 0x0003804c
360#define __CPE_CTRL_UNUSED31_MK 0xfc000000
361#define __CPE_CTRL_UNUSED31_SH 26
362#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
363#define __CPE_CTRL_UNUSED21_MK 0x00f00000
364#define __CPE_CTRL_UNUSED21_SH 20
365#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
366#define __CPE_CTRL_UNUSED11_MK 0x0000c000
367#define __CPE_CTRL_UNUSED11_SH 14
368#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
369#define __CPE_CTRL_UNUSED41_MK 0x00001c00
370#define __CPE_CTRL_UNUSED41_SH 10
371#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
372#define __CPE_CTRL_UNUSED01_MK 0x000000fe
373#define __CPE_CTRL_UNUSED01_SH 1
374#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
375#define RME_PI_PTR_Q0 0x00038020
376#define __LATENCY_TIME_STAMP_MK 0xffff0000
377#define __LATENCY_TIME_STAMP_SH 16
378#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
379#define __RME_PI_PTR 0x0000ffff
380#define RME_PI_PTR_Q1 0x00038060
381#define RME_CI_PTR_Q0 0x00038024
382#define __DELAY_TIME_STAMP_MK 0xffff0000
383#define __DELAY_TIME_STAMP_SH 16
384#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
385#define __RME_CI_PTR 0x0000ffff
386#define RME_CI_PTR_Q1 0x00038064
387#define RME_DEPTH_Q0 0x00038028
388#define __RME_DEPTH_UNUSED_MK 0xf8000000
389#define __RME_DEPTH_UNUSED_SH 27
390#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
391#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
392#define __RME_MSIX_VEC_INDEX_SH 16
393#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
394#define __RME_DEPTH 0x0000ffff
395#define RME_DEPTH_Q1 0x00038068
396#define RME_QCTRL_Q0 0x0003802c
397#define __RME_INT_LATENCY_TIMER_MK 0xff000000
398#define __RME_INT_LATENCY_TIMER_SH 24
399#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
400#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
401#define __RME_INT_DELAY_TIMER_SH 16
402#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
403#define __RME_INT_DELAY_DISABLE 0x00008000
404#define __RME_DLY_DELAY_DISABLE 0x00004000
405#define __RME_ACK_PENDING 0x00002000
406#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
407#define __RME_CTRL_UNUSED10_MK 0x00000c00
408#define __RME_CTRL_UNUSED10_SH 10
409#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
410#define __RME_PCIEID_MK 0x00000300
411#define __RME_PCIEID_SH 8
412#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
413#define __RME_CTRL_UNUSED00_MK 0x000000fe
414#define __RME_CTRL_UNUSED00_SH 1
415#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
416#define __RME_ESIZE 0x00000001
417#define RME_QCTRL_Q1 0x0003806c
418#define __RME_CTRL_UNUSED11_MK 0x00000c00
419#define __RME_CTRL_UNUSED11_SH 10
420#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
421#define __RME_CTRL_UNUSED01_MK 0x000000fe
422#define __RME_CTRL_UNUSED01_SH 1
423#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
424#define PSS_CTL_REG 0x00018800
425#define __PSS_I2C_CLK_DIV_MK 0x007f0000
426#define __PSS_I2C_CLK_DIV_SH 16
427#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
428#define __PSS_LMEM_INIT_DONE 0x00001000
429#define __PSS_LMEM_RESET 0x00000200
430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
458#define PMM_1T_RESET_REG_P0 0x0002381c
459#define __PMM_1T_RESET_P 0x00000001
460#define PMM_1T_RESET_REG_P1 0x00023c1c
461#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
462#define __RXQ0_ADD_VECTORS_P 0x80000000
463#define __RXQ0_STOP_P 0x40000000
464#define __RXQ0_PRD_PTR_P 0x0000ffff
465#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
466#define __RXQ1_ADD_VECTORS_P 0x80000000
467#define __RXQ1_STOP_P 0x40000000
468#define __RXQ1_PRD_PTR_P 0x0000ffff
469#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
470#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
471#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
472#define __TXQ0_ADD_VECTORS_P 0x80000000
473#define __TXQ0_STOP_P 0x40000000
474#define __TXQ0_PRD_PTR_P 0x0000ffff
475#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
476#define __TXQ1_ADD_VECTORS_P 0x80000000
477#define __TXQ1_STOP_P 0x40000000
478#define __TXQ1_PRD_PTR_P 0x0000ffff
479#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
480#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
481#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
482#define __IB1_0_ACK_P 0x80000000
483#define __IB1_0_DISABLE_P 0x40000000
484#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
485#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
486#define __IB1_1_ACK_P 0x80000000
487#define __IB1_1_DISABLE_P 0x40000000
488#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
489#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
490#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
491#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
492#define __IB2_0_ACK_P 0x80000000
493#define __IB2_0_DISABLE_P 0x40000000
494#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
496#define __IB2_1_ACK_P 0x80000000
497#define __IB2_1_DISABLE_P 0x40000000
498#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
499#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
500#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
501
502
503/*
504 * These definitions are either in error/missing in spec. Its auto-generated
505 * from hard coded values in regparse.pl.
506 */
507#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
508#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
509#define __EMPHPRE_AT_4G_FIX 0x00000003
510#define __SFP_TXRATE_EN_FIX 0x00000100
511#define __SFP_RXRATE_EN_FIX 0x00000080
512
513
514/*
515 * These register definitions are auto-generated from hard coded values
516 * in regparse.pl.
517 */
518
519
520/*
521 * These register mapping definitions are auto-generated from mapping tables
522 * in regparse.pl.
523 */
524#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
525#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
526#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
527#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
528#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
529
530#define CPE_DEPTH_Q(__n) \
531 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
532#define CPE_QCTRL_Q(__n) \
533 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
534#define CPE_PI_PTR_Q(__n) \
535 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
536#define CPE_CI_PTR_Q(__n) \
537 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
538#define RME_DEPTH_Q(__n) \
539 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
540#define RME_QCTRL_Q(__n) \
541 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
542#define RME_PI_PTR_Q(__n) \
543 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
544#define RME_CI_PTR_Q(__n) \
545 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
546#define HQM_QSET_RXQ_DRBL_P0(__n) \
547 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * (HQM_QSET1_RXQ_DRBL_P0 - \
548 HQM_QSET0_RXQ_DRBL_P0))
549#define HQM_QSET_TXQ_DRBL_P0(__n) \
550 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * (HQM_QSET1_TXQ_DRBL_P0 - \
551 HQM_QSET0_TXQ_DRBL_P0))
552#define HQM_QSET_IB_DRBL_1_P0(__n) \
553 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * (HQM_QSET1_IB_DRBL_1_P0 - \
554 HQM_QSET0_IB_DRBL_1_P0))
555#define HQM_QSET_IB_DRBL_2_P0(__n) \
556 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * (HQM_QSET1_IB_DRBL_2_P0 - \
557 HQM_QSET0_IB_DRBL_2_P0))
558#define HQM_QSET_RXQ_DRBL_P1(__n) \
559 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * (HQM_QSET1_RXQ_DRBL_P1 - \
560 HQM_QSET0_RXQ_DRBL_P1))
561#define HQM_QSET_TXQ_DRBL_P1(__n) \
562 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * (HQM_QSET1_TXQ_DRBL_P1 - \
563 HQM_QSET0_TXQ_DRBL_P1))
564#define HQM_QSET_IB_DRBL_1_P1(__n) \
565 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * (HQM_QSET1_IB_DRBL_1_P1 - \
566 HQM_QSET0_IB_DRBL_1_P1))
567#define HQM_QSET_IB_DRBL_2_P1(__n) \
568 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * (HQM_QSET1_IB_DRBL_2_P1 - \
569 HQM_QSET0_IB_DRBL_2_P1))
570
571#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
572#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
573#define CPE_Q_MASK(__q) ((__q) & 0x3)
574#define RME_Q_MASK(__q) ((__q) & 0x3)
575
576
577/*
578 * PCI MSI-X vector defines
579 */
580enum {
581 BFA_MSIX_CPE_Q0 = 0,
582 BFA_MSIX_CPE_Q1 = 1,
583 BFA_MSIX_CPE_Q2 = 2,
584 BFA_MSIX_CPE_Q3 = 3,
585 BFA_MSIX_RME_Q0 = 4,
586 BFA_MSIX_RME_Q1 = 5,
587 BFA_MSIX_RME_Q2 = 6,
588 BFA_MSIX_RME_Q3 = 7,
589 BFA_MSIX_LPU_ERR = 8,
590 BFA_MSIX_CT_MAX = 9,
591};
592
593/*
594 * And corresponding host interrupt status bit field defines
595 */
596#define __HFN_INT_CPE_Q0 0x00000001U
597#define __HFN_INT_CPE_Q1 0x00000002U
598#define __HFN_INT_CPE_Q2 0x00000004U
599#define __HFN_INT_CPE_Q3 0x00000008U
600#define __HFN_INT_CPE_Q4 0x00000010U
601#define __HFN_INT_CPE_Q5 0x00000020U
602#define __HFN_INT_CPE_Q6 0x00000040U
603#define __HFN_INT_CPE_Q7 0x00000080U
604#define __HFN_INT_RME_Q0 0x00000100U
605#define __HFN_INT_RME_Q1 0x00000200U
606#define __HFN_INT_RME_Q2 0x00000400U
607#define __HFN_INT_RME_Q3 0x00000800U
608#define __HFN_INT_RME_Q4 0x00001000U
609#define __HFN_INT_RME_Q5 0x00002000U
610#define __HFN_INT_RME_Q6 0x00004000U
611#define __HFN_INT_RME_Q7 0x00008000U
612#define __HFN_INT_ERR_EMC 0x00010000U
613#define __HFN_INT_ERR_LPU0 0x00020000U
614#define __HFN_INT_ERR_LPU1 0x00040000U
615#define __HFN_INT_ERR_PSS 0x00080000U
616#define __HFN_INT_MBOX_LPU0 0x00100000U
617#define __HFN_INT_MBOX_LPU1 0x00200000U
618#define __HFN_INT_MBOX1_LPU0 0x00400000U
619#define __HFN_INT_MBOX1_LPU1 0x00800000U
620#define __HFN_INT_LL_HALT 0x01000000U
621#define __HFN_INT_CPE_MASK 0x000000ffU
622#define __HFN_INT_RME_MASK 0x0000ff00U
623
624
625/*
626 * catapult memory map.
627 */
628#define LL_PGN_HQM0 0x0096
629#define LL_PGN_HQM1 0x0097
630#define PSS_SMEM_PAGE_START 0x8000
631#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
632#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
633
634/*
635 * End of catapult memory map
636 */
637
638
639#endif /* __BFI_CTREG_H__ */
640
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fabric.h b/drivers/scsi/bfa/include/bfi/bfi_fabric.h
deleted file mode 100644
index c0669ed41078..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fabric.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FABRIC_H__
19#define __BFI_FABRIC_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_fabric_h2i_msgs {
26 BFI_FABRIC_H2I_CREATE_REQ = 1,
27 BFI_FABRIC_H2I_DELETE_REQ = 2,
28 BFI_FABRIC_H2I_SETAUTH = 3,
29};
30
31enum bfi_fabric_i2h_msgs {
32 BFI_FABRIC_I2H_CREATE_RSP = BFA_I2HM(1),
33 BFI_FABRIC_I2H_DELETE_RSP = BFA_I2HM(2),
34 BFI_FABRIC_I2H_SETAUTH_RSP = BFA_I2HM(3),
35 BFI_FABRIC_I2H_ONLINE = BFA_I2HM(4),
36 BFI_FABRIC_I2H_OFFLINE = BFA_I2HM(5),
37};
38
39struct bfi_fabric_create_req_s {
40 bfi_mhdr_t mh; /* common msg header */
41 u8 vf_en; /* virtual fabric enable */
42 u8 rsvd;
43 u16 vf_id; /* virtual fabric ID */
44 wwn_t pwwn; /* port name */
45 wwn_t nwwn; /* node name */
46};
47
48struct bfi_fabric_create_rsp_s {
49 bfi_mhdr_t mh; /* common msg header */
50 u16 bfa_handle; /* host fabric handle */
51 u8 status; /* fabric create status */
52 u8 rsvd;
53};
54
55struct bfi_fabric_delete_req_s {
56 bfi_mhdr_t mh; /* common msg header */
57 u16 fw_handle; /* firmware fabric handle */
58 u16 rsvd;
59};
60
61struct bfi_fabric_delete_rsp_s {
62 bfi_mhdr_t mh; /* common msg header */
63 u16 bfa_handle; /* host fabric handle */
64 u8 status; /* fabric deletion status */
65 u8 rsvd;
66};
67
68#define BFI_FABRIC_AUTHSECRET_LEN 64
69struct bfi_fabric_setauth_req_s {
70 bfi_mhdr_t mh; /* common msg header */
71 u16 fw_handle; /* f/w handle of fabric */
72 u8 algorithm;
73 u8 group;
74 u8 secret[BFI_FABRIC_AUTHSECRET_LEN];
75};
76
77union bfi_fabric_h2i_msg_u {
78 bfi_msg_t *msg;
79 struct bfi_fabric_create_req_s *create_req;
80 struct bfi_fabric_delete_req_s *delete_req;
81};
82
83union bfi_fabric_i2h_msg_u {
84 bfi_msg_t *msg;
85 struct bfi_fabric_create_rsp_s *create_rsp;
86 struct bfi_fabric_delete_rsp_s *delete_rsp;
87};
88
89#pragma pack()
90
91#endif /* __BFI_FABRIC_H__ */
92
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h b/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
deleted file mode 100644
index 52c059fb4c3a..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
+++ /dev/null
@@ -1,301 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCPIM_H__
19#define __BFI_FCPIM_H__
20
21#include "bfi.h"
22#include <protocol/fcp.h>
23
24#pragma pack(1)
25
26/*
27 * Initiator mode I-T nexus interface defines.
28 */
29
30enum bfi_itnim_h2i {
31 BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */
32 BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
33};
34
35enum bfi_itnim_i2h {
36 BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
37 BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
38 BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
39};
40
41struct bfi_itnim_create_req_s {
42 struct bfi_mhdr_s mh; /* common msg header */
43 u16 fw_handle; /* f/w handle for itnim */
44 u8 class; /* FC class for IO */
45 u8 seq_rec; /* sequence recovery support */
46 u8 msg_no; /* seq id of the msg */
47};
48
49struct bfi_itnim_create_rsp_s {
50 struct bfi_mhdr_s mh; /* common msg header */
51 u16 bfa_handle; /* bfa handle for itnim */
52 u8 status; /* fcp request status */
53 u8 seq_id; /* seq id of the msg */
54};
55
56struct bfi_itnim_delete_req_s {
57 struct bfi_mhdr_s mh; /* common msg header */
58 u16 fw_handle; /* f/w itnim handle */
59 u8 seq_id; /* seq id of the msg */
60 u8 rsvd;
61};
62
63struct bfi_itnim_delete_rsp_s {
64 struct bfi_mhdr_s mh; /* common msg header */
65 u16 bfa_handle; /* bfa handle for itnim */
66 u8 status; /* fcp request status */
67 u8 seq_id; /* seq id of the msg */
68};
69
70struct bfi_itnim_sler_event_s {
71 struct bfi_mhdr_s mh; /* common msg header */
72 u16 bfa_handle; /* bfa handle for itnim */
73 u16 rsvd;
74};
75
76union bfi_itnim_h2i_msg_u {
77 struct bfi_itnim_create_req_s *create_req;
78 struct bfi_itnim_delete_req_s *delete_req;
79 struct bfi_msg_s *msg;
80};
81
82union bfi_itnim_i2h_msg_u {
83 struct bfi_itnim_create_rsp_s *create_rsp;
84 struct bfi_itnim_delete_rsp_s *delete_rsp;
85 struct bfi_itnim_sler_event_s *sler_event;
86 struct bfi_msg_s *msg;
87};
88
89/*
90 * Initiator mode IO interface defines.
91 */
92
93enum bfi_ioim_h2i {
94 BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */
95 BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */
96};
97
98enum bfi_ioim_i2h {
99 BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */
100 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),/* ABORT rsp */
101};
102
103/**
104 * IO command DIF info
105 */
106struct bfi_ioim_dif_s {
107 u32 dif_info[4];
108};
109
110/**
111 * FCP IO messages overview
112 *
113 * @note
114 * - Max CDB length supported is 64 bytes.
115 * - SCSI Linked commands and SCSI bi-directional Commands not
116 * supported.
117 *
118 */
119struct bfi_ioim_req_s {
120 struct bfi_mhdr_s mh; /* Common msg header */
121 u16 io_tag; /* I/O tag */
122 u16 rport_hdl; /* itnim/rport firmware handle */
123 struct fcp_cmnd_s cmnd; /* IO request info */
124
125 /**
126 * SG elements array within the IO request must be double word
127 * aligned. This aligment is required to optimize SGM setup for the IO.
128 */
129 struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
130 u8 io_timeout;
131 u8 dif_en;
132 u8 rsvd_a[2];
133 struct bfi_ioim_dif_s dif;
134};
135
136/**
137 * This table shows various IO status codes from firmware and their
138 * meaning. Host driver can use these status codes to further process
139 * IO completions.
140 *
141 * BFI_IOIM_STS_OK : IO completed with error free SCSI &
142 * transport status.
143 * - io-tag can be reused.
144 *
145 * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error.
146 * - io-tag can be reused.
147 *
148 * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
149 * host request.
150 * - io-tag cannot be reused yet.
151 *
152 * BFI_IOIM_STS_ABORTED : IO was aborted successfully
153 * internally by f/w.
154 * - io-tag cannot be reused yet.
155 *
156 * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening
157 * in the firmware and
158 * - io-tag cannot be reused yet.
159 *
160 * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO
161 * with sequence level error
162 * logic and hence host needs to retry
163 * this IO with a different IO tag
164 * - io-tag cannot be used yet.
165 *
166 * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host
167 * is required because 2 consecutive ABTS
168 * timedout and host needs logout and
169 * re-login with the target
170 * - io-tag cannot be used yet.
171 *
172 * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good,
173 * but the data tranferred is less than
174 * the fcp data length in the command.
175 * ex. SCSI INQUIRY where transferred
176 * data length and residue count in FCP
177 * response accounts for total fcp-dl
178 * - io-tag can be reused.
179 *
180 * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good,
181 * but the data transerred is more than
182 * fcp data length in the command. ex.
183 * TAPE IOs where blocks can of unequal
184 * lengths.
185 * - io-tag can be reused.
186 *
187 * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag
188 * during abort process
189 * - io-tag can be reused.
190 *
191 * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error.
192 * ex target sent more data than
193 * requested, or there was data frame
194 * loss and other reasons
195 * - io-tag cannot be used yet.
196 *
197 * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF
198 * CRC err or Ref Tag err or App tag err.
199 * - io-tag can be reused.
200 *
201 * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task
202 * Management command from the host
203 * - io-tag can be reused.
204 *
205 * BFI_IOIM_STS_UTAG : Firmware does not know about this
206 * io_tag.
207 * - io-tag can be reused.
208 */
209enum bfi_ioim_status {
210 BFI_IOIM_STS_OK = 0,
211 BFI_IOIM_STS_HOST_ABORTED = 1,
212 BFI_IOIM_STS_ABORTED = 2,
213 BFI_IOIM_STS_TIMEDOUT = 3,
214 BFI_IOIM_STS_RES_FREE = 4,
215 BFI_IOIM_STS_SQER_NEEDED = 5,
216 BFI_IOIM_STS_PROTO_ERR = 6,
217 BFI_IOIM_STS_UTAG = 7,
218 BFI_IOIM_STS_PATHTOV = 8,
219};
220
221#define BFI_IOIM_SNSLEN (256)
222/**
223 * I/O response message
224 */
225struct bfi_ioim_rsp_s {
226 struct bfi_mhdr_s mh; /* common msg header */
227 u16 io_tag; /* completed IO tag */
228 u16 bfa_rport_hndl; /* releated rport handle */
229 u8 io_status; /* IO completion status */
230 u8 reuse_io_tag; /* IO tag can be reused */
231 u16 abort_tag; /* host abort request tag */
232 u8 scsi_status; /* scsi status from target */
233 u8 sns_len; /* scsi sense length */
234 u8 resid_flags; /* IO residue flags */
235 u8 rsvd_a;
236 u32 residue; /* IO residual length in bytes */
237 u32 rsvd_b[3];
238};
239
240struct bfi_ioim_abort_req_s {
241 struct bfi_mhdr_s mh; /* Common msg header */
242 u16 io_tag; /* I/O tag */
243 u16 abort_tag; /* unique request tag */
244};
245
246/*
247 * Initiator mode task management command interface defines.
248 */
249
250enum bfi_tskim_h2i {
251 BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */
252 BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */
253};
254
255enum bfi_tskim_i2h {
256 BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
257};
258
259struct bfi_tskim_req_s {
260 struct bfi_mhdr_s mh; /* Common msg header */
261 u16 tsk_tag; /* task management tag */
262 u16 itn_fhdl; /* itn firmware handle */
263 lun_t lun; /* LU number */
264 u8 tm_flags; /* see fcp_tm_cmnd_t */
265 u8 t_secs; /* Timeout value in seconds */
266 u8 rsvd[2];
267};
268
269struct bfi_tskim_abortreq_s {
270 struct bfi_mhdr_s mh; /* Common msg header */
271 u16 tsk_tag; /* task management tag */
272 u16 rsvd;
273};
274
275enum bfi_tskim_status {
276 /*
277 * Following are FCP-4 spec defined status codes,
278 * **DO NOT CHANGE THEM **
279 */
280 BFI_TSKIM_STS_OK = 0,
281 BFI_TSKIM_STS_NOT_SUPP = 4,
282 BFI_TSKIM_STS_FAILED = 5,
283
284 /**
285 * Defined by BFA
286 */
287 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
288 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
289};
290
291struct bfi_tskim_rsp_s {
292 struct bfi_mhdr_s mh; /* Common msg header */
293 u16 tsk_tag; /* task mgmt cmnd tag */
294 u8 tsk_status; /* @ref bfi_tskim_status */
295 u8 rsvd;
296};
297
298#pragma pack()
299
300#endif /* __BFI_FCPIM_H__ */
301
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h b/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
deleted file mode 100644
index e0e995a32828..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_FCXP_H__
19#define __BFI_FCXP_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25enum bfi_fcxp_h2i {
26 BFI_FCXP_H2I_SEND_REQ = 1,
27};
28
29enum bfi_fcxp_i2h {
30 BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
31};
32
33#define BFA_FCXP_MAX_SGES 2
34
35/**
36 * FCXP send request structure
37 */
38struct bfi_fcxp_send_req_s {
39 struct bfi_mhdr_s mh; /* Common msg header */
40 u16 fcxp_tag; /* driver request tag */
41 u16 max_frmsz; /* max send frame size */
42 u16 vf_id; /* vsan tag if applicable */
43 u16 rport_fw_hndl; /* FW Handle for the remote port */
44 u8 class; /* FC class used for req/rsp */
45 u8 rsp_timeout; /* timeout in secs, 0-no response */
46 u8 cts; /* continue sequence */
47 u8 lp_tag; /* lport tag */
48 struct fchs_s fchs; /* request FC header structure */
49 u32 req_len; /* request payload length */
50 u32 rsp_maxlen; /* max response length expected */
51 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
52 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
53};
54
55/**
56 * FCXP send response structure
57 */
58struct bfi_fcxp_send_rsp_s {
59 struct bfi_mhdr_s mh; /* Common msg header */
60 u16 fcxp_tag; /* send request tag */
61 u8 req_status; /* request status */
62 u8 rsvd;
63 u32 rsp_len; /* actual response length */
64 u32 residue_len; /* residual response length */
65 struct fchs_s fchs; /* response FC header structure */
66};
67
68#pragma pack()
69
70#endif /* __BFI_FCXP_H__ */
71
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
deleted file mode 100644
index 450ded6e9bc2..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ /dev/null
@@ -1,208 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOC_H__
19#define __BFI_IOC_H__
20
21#include "bfi.h"
22#include <defs/bfa_defs_ioc.h>
23
24#pragma pack(1)
25
26enum bfi_ioc_h2i_msgs {
27 BFI_IOC_H2I_ENABLE_REQ = 1,
28 BFI_IOC_H2I_DISABLE_REQ = 2,
29 BFI_IOC_H2I_GETATTR_REQ = 3,
30 BFI_IOC_H2I_DBG_SYNC = 4,
31 BFI_IOC_H2I_DBG_DUMP = 5,
32};
33
34enum bfi_ioc_i2h_msgs {
35 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
36 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
37 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
38 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
39 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
40};
41
42/**
43 * BFI_IOC_H2I_GETATTR_REQ message
44 */
45struct bfi_ioc_getattr_req_s {
46 struct bfi_mhdr_s mh;
47 union bfi_addr_u attr_addr;
48};
49
50struct bfi_ioc_attr_s {
51 wwn_t mfg_pwwn; /* Mfg port wwn */
52 wwn_t mfg_nwwn; /* Mfg node wwn */
53 mac_t mfg_mac; /* Mfg mac */
54 u16 rsvd_a;
55 wwn_t pwwn;
56 wwn_t nwwn;
57 mac_t mac; /* PBC or Mfg mac */
58 u16 rsvd_b;
59 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
60 u8 pcie_gen;
61 u8 pcie_lanes_orig;
62 u8 pcie_lanes;
63 u8 rx_bbcredit; /* receive buffer credits */
64 u32 adapter_prop; /* adapter properties */
65 u16 maxfrsize; /* max receive frame size */
66 char asic_rev;
67 u8 rsvd_c;
68 char fw_version[BFA_VERSION_LEN];
69 char optrom_version[BFA_VERSION_LEN];
70 struct bfa_mfg_vpd_s vpd;
71 u32 card_type; /* card type */
72};
73
74/**
75 * BFI_IOC_I2H_GETATTR_REPLY message
76 */
77struct bfi_ioc_getattr_reply_s {
78 struct bfi_mhdr_s mh; /* Common msg header */
79 u8 status; /* cfg reply status */
80 u8 rsvd[3];
81};
82
83/**
84 * Firmware memory page offsets
85 */
86#define BFI_IOC_SMEM_PG0_CB (0x40)
87#define BFI_IOC_SMEM_PG0_CT (0x180)
88
89/**
90 * Firmware trace offset
91 */
92#define BFI_IOC_TRC_OFF (0x4b00)
93#define BFI_IOC_TRC_ENTS 256
94
95#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
96#define BFI_IOC_MD5SUM_SZ 4
97struct bfi_ioc_image_hdr_s {
98 u32 signature; /* constant signature */
99 u32 rsvd_a;
100 u32 exec; /* exec vector */
101 u32 param; /* parameters */
102 u32 rsvd_b[4];
103 u32 md5sum[BFI_IOC_MD5SUM_SZ];
104};
105
106/**
107 * BFI_IOC_I2H_READY_EVENT message
108 */
109struct bfi_ioc_rdy_event_s {
110 struct bfi_mhdr_s mh; /* common msg header */
111 u8 init_status; /* init event status */
112 u8 rsvd[3];
113};
114
115struct bfi_ioc_hbeat_s {
116 struct bfi_mhdr_s mh; /* common msg header */
117 u32 hb_count; /* current heart beat count */
118};
119
120/**
121 * IOC hardware/firmware state
122 */
123enum bfi_ioc_state {
124 BFI_IOC_UNINIT = 0, /* not initialized */
125 BFI_IOC_INITING = 1, /* h/w is being initialized */
126 BFI_IOC_HWINIT = 2, /* h/w is initialized */
127 BFI_IOC_CFG = 3, /* IOC configuration in progress */
128 BFI_IOC_OP = 4, /* IOC is operational */
129 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
130 BFI_IOC_DISABLED = 6, /* IOC is disabled */
131 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
132 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
133 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
134};
135
136#define BFI_IOC_ENDIAN_SIG 0x12345678
137
138enum {
139 BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
140 BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
141 BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
142 BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
143 BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
144 BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
145 BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
146 BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */
147 BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
148 BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
149};
150
151#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
152 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
153 BFI_ADAPTER_ ## __prop ## _SH)
154#define BFI_ADAPTER_SETP(__prop, __val) \
155 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
156#define BFI_ADAPTER_IS_PROTO(__adap_type) \
157 ((__adap_type) & BFI_ADAPTER_PROTO)
158#define BFI_ADAPTER_IS_TTV(__adap_type) \
159 ((__adap_type) & BFI_ADAPTER_TTV)
160#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
161 ((__adap_type) & BFI_ADAPTER_UNSUPP)
162#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
163 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
164 BFI_ADAPTER_UNSUPP))
165
166/**
167 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
168 */
169struct bfi_ioc_ctrl_req_s {
170 struct bfi_mhdr_s mh;
171 u8 ioc_class;
172 u8 rsvd[3];
173};
174
175/**
176 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
177 */
178struct bfi_ioc_ctrl_reply_s {
179 struct bfi_mhdr_s mh; /* Common msg header */
180 u8 status; /* enable/disable status */
181 u8 rsvd[3];
182};
183
184#define BFI_IOC_MSGSZ 8
185/**
186 * H2I Messages
187 */
188union bfi_ioc_h2i_msg_u {
189 struct bfi_mhdr_s mh;
190 struct bfi_ioc_ctrl_req_s enable_req;
191 struct bfi_ioc_ctrl_req_s disable_req;
192 struct bfi_ioc_getattr_req_s getattr_req;
193 u32 mboxmsg[BFI_IOC_MSGSZ];
194};
195
196/**
197 * I2H Messages
198 */
199union bfi_ioc_i2h_msg_u {
200 struct bfi_mhdr_s mh;
201 struct bfi_ioc_rdy_event_s rdy_event;
202 u32 mboxmsg[BFI_IOC_MSGSZ];
203};
204
205#pragma pack()
206
207#endif /* __BFI_IOC_H__ */
208
diff --git a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
deleted file mode 100644
index ccdfcc5d7e0b..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_IOCFC_H__
19#define __BFI_IOCFC_H__
20
21#include "bfi.h"
22#include <bfi/bfi_pbc.h>
23#include <defs/bfa_defs_ioc.h>
24#include <defs/bfa_defs_iocfc.h>
25#include <defs/bfa_defs_boot.h>
26
27#pragma pack(1)
28
29enum bfi_iocfc_h2i_msgs {
30 BFI_IOCFC_H2I_CFG_REQ = 1,
31 BFI_IOCFC_H2I_GET_STATS_REQ = 2,
32 BFI_IOCFC_H2I_CLEAR_STATS_REQ = 3,
33 BFI_IOCFC_H2I_SET_INTR_REQ = 4,
34 BFI_IOCFC_H2I_UPDATEQ_REQ = 5,
35};
36
37enum bfi_iocfc_i2h_msgs {
38 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
39 BFI_IOCFC_I2H_GET_STATS_RSP = BFA_I2HM(2),
40 BFI_IOCFC_I2H_CLEAR_STATS_RSP = BFA_I2HM(3),
41 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(5),
42};
43
44struct bfi_iocfc_cfg_s {
45 u8 num_cqs; /* Number of CQs to be used */
46 u8 sense_buf_len; /* SCSI sense length */
47 u8 trunk_enabled; /* port trunking enabled */
48 u8 trunk_ports; /* trunk ports bit map */
49 u32 endian_sig; /* endian signature of host */
50
51 /**
52 * Request and response circular queue base addresses, size and
53 * shadow index pointers.
54 */
55 union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
56 union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
57 u16 req_cq_elems[BFI_IOC_MAX_CQS];
58 union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
59 union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
60 u16 rsp_cq_elems[BFI_IOC_MAX_CQS];
61
62 union bfi_addr_u stats_addr; /* DMA-able address for stats */
63 union bfi_addr_u cfgrsp_addr; /* config response dma address */
64 union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
65 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
66};
67
68/**
69 * Boot target wwn information for this port. This contains either the stored
70 * or discovered boot target port wwns for the port.
71 */
72struct bfi_iocfc_bootwwns {
73 wwn_t wwn[BFA_BOOT_BOOTLUN_MAX];
74 u8 nwwns;
75 u8 rsvd[7];
76};
77
78struct bfi_iocfc_cfgrsp_s {
79 struct bfa_iocfc_fwcfg_s fwcfg;
80 struct bfa_iocfc_intr_attr_s intr_attr;
81 struct bfi_iocfc_bootwwns bootwwns;
82 struct bfi_pbc_s pbc_cfg;
83};
84
85/**
86 * BFI_IOCFC_H2I_CFG_REQ message
87 */
88struct bfi_iocfc_cfg_req_s {
89 struct bfi_mhdr_s mh;
90 union bfi_addr_u ioc_cfg_dma_addr;
91};
92
93/**
94 * BFI_IOCFC_I2H_CFG_REPLY message
95 */
96struct bfi_iocfc_cfg_reply_s {
97 struct bfi_mhdr_s mh; /* Common msg header */
98 u8 cfg_success; /* cfg reply status */
99 u8 lpu_bm; /* LPUs assigned for this IOC */
100 u8 rsvd[2];
101};
102
103/**
104 * BFI_IOCFC_H2I_GET_STATS_REQ & BFI_IOCFC_H2I_CLEAR_STATS_REQ messages
105 */
106struct bfi_iocfc_stats_req_s {
107 struct bfi_mhdr_s mh; /* msg header */
108 u32 msgtag; /* msgtag for reply */
109};
110
111/**
112 * BFI_IOCFC_I2H_GET_STATS_RSP & BFI_IOCFC_I2H_CLEAR_STATS_RSP messages
113 */
114struct bfi_iocfc_stats_rsp_s {
115 struct bfi_mhdr_s mh; /* common msg header */
116 u8 status; /* reply status */
117 u8 rsvd[3];
118 u32 msgtag; /* msgtag for reply */
119};
120
121/**
122 * BFI_IOCFC_H2I_SET_INTR_REQ message
123 */
124struct bfi_iocfc_set_intr_req_s {
125 struct bfi_mhdr_s mh; /* common msg header */
126 u8 coalesce; /* enable intr coalescing*/
127 u8 rsvd[3];
128 u16 delay; /* delay timer 0..1125us */
129 u16 latency; /* latency timer 0..225us */
130};
131
132/**
133 * BFI_IOCFC_H2I_UPDATEQ_REQ message
134 */
135struct bfi_iocfc_updateq_req_s {
136 struct bfi_mhdr_s mh; /* common msg header */
137 u32 reqq_ba; /* reqq base addr */
138 u32 rspq_ba; /* rspq base addr */
139 u32 reqq_sci; /* reqq shadow ci */
140 u32 rspq_spi; /* rspq shadow pi */
141};
142
143/**
144 * BFI_IOCFC_I2H_UPDATEQ_RSP message
145 */
146struct bfi_iocfc_updateq_rsp_s {
147 struct bfi_mhdr_s mh; /* common msg header */
148 u8 status; /* updateq status */
149 u8 rsvd[3];
150};
151
152/**
153 * H2I Messages
154 */
155union bfi_iocfc_h2i_msg_u {
156 struct bfi_mhdr_s mh;
157 struct bfi_iocfc_cfg_req_s cfg_req;
158 struct bfi_iocfc_stats_req_s stats_get;
159 struct bfi_iocfc_stats_req_s stats_clr;
160 struct bfi_iocfc_updateq_req_s updateq_req;
161 u32 mboxmsg[BFI_IOC_MSGSZ];
162};
163
164/**
165 * I2H Messages
166 */
167union bfi_iocfc_i2h_msg_u {
168 struct bfi_mhdr_s mh;
169 struct bfi_iocfc_cfg_reply_s cfg_reply;
170 struct bfi_iocfc_stats_rsp_s stats_get_rsp;
171 struct bfi_iocfc_stats_rsp_s stats_clr_rsp;
172 struct bfi_iocfc_updateq_rsp_s updateq_rsp;
173 u32 mboxmsg[BFI_IOC_MSGSZ];
174};
175
176#pragma pack()
177
178#endif /* __BFI_IOCFC_H__ */
179
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lport.h b/drivers/scsi/bfa/include/bfi/bfi_lport.h
deleted file mode 100644
index 29010614bac9..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_lport.h
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPORT_H__
19#define __BFI_LPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_lport_h2i_msgs {
26 BFI_LPORT_H2I_CREATE_REQ = 1,
27 BFI_LPORT_H2I_DELETE_REQ = 2,
28};
29
30enum bfi_lport_i2h_msgs {
31 BFI_LPORT_I2H_CREATE_RSP = BFA_I2HM(1),
32 BFI_LPORT_I2H_DELETE_RSP = BFA_I2HM(2),
33 BFI_LPORT_I2H_ONLINE = BFA_I2HM(3),
34 BFI_LPORT_I2H_OFFLINE = BFA_I2HM(4),
35};
36
37#define BFI_LPORT_MAX_SYNNAME 64
38
39enum bfi_lport_role_e {
40 BFI_LPORT_ROLE_FCPIM = 1,
41 BFI_LPORT_ROLE_FCPTM = 2,
42 BFI_LPORT_ROLE_IPFC = 4,
43};
44
45struct bfi_lport_create_req_s {
46 bfi_mhdr_t mh; /* common msg header */
47 u16 fabric_fwhdl; /* parent fabric instance */
48 u8 roles; /* lport FC-4 roles */
49 u8 rsvd;
50 wwn_t pwwn; /* port name */
51 wwn_t nwwn; /* node name */
52 u8 symname[BFI_LPORT_MAX_SYNNAME];
53};
54
55struct bfi_lport_create_rsp_s {
56 bfi_mhdr_t mh; /* common msg header */
57 u8 status; /* lport creation status */
58 u8 rsvd[3];
59};
60
61struct bfi_lport_delete_req_s {
62 bfi_mhdr_t mh; /* common msg header */
63 u16 fw_handle; /* firmware lport handle */
64 u16 rsvd;
65};
66
67struct bfi_lport_delete_rsp_s {
68 bfi_mhdr_t mh; /* common msg header */
69 u16 bfa_handle; /* host lport handle */
70 u8 status; /* lport deletion status */
71 u8 rsvd;
72};
73
74union bfi_lport_h2i_msg_u {
75 bfi_msg_t *msg;
76 struct bfi_lport_create_req_s *create_req;
77 struct bfi_lport_delete_req_s *delete_req;
78};
79
80union bfi_lport_i2h_msg_u {
81 bfi_msg_t *msg;
82 struct bfi_lport_create_rsp_s *create_rsp;
83 struct bfi_lport_delete_rsp_s *delete_rsp;
84};
85
86#pragma pack()
87
88#endif /* __BFI_LPORT_H__ */
89
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
deleted file mode 100644
index 7ed31bbb8696..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_LPS_H__
19#define __BFI_LPS_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_lps_h2i_msgs {
26 BFI_LPS_H2I_LOGIN_REQ = 1,
27 BFI_LPS_H2I_LOGOUT_REQ = 2,
28};
29
30enum bfi_lps_i2h_msgs {
31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
33 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
34};
35
36struct bfi_lps_login_req_s {
37 struct bfi_mhdr_s mh; /* common msg header */
38 u8 lp_tag;
39 u8 alpa;
40 u16 pdu_size;
41 wwn_t pwwn;
42 wwn_t nwwn;
43 u8 fdisc;
44 u8 auth_en;
45 u8 rsvd[2];
46};
47
48struct bfi_lps_login_rsp_s {
49 struct bfi_mhdr_s mh; /* common msg header */
50 u8 lp_tag;
51 u8 status;
52 u8 lsrjt_rsn;
53 u8 lsrjt_expl;
54 wwn_t port_name;
55 wwn_t node_name;
56 u16 bb_credit;
57 u8 f_port;
58 u8 npiv_en;
59 u32 lp_pid:24;
60 u32 auth_req:8;
61 mac_t lp_mac;
62 mac_t fcf_mac;
63 u8 ext_status;
64 u8 brcd_switch;/* attached peer is brcd switch */
65};
66
67struct bfi_lps_logout_req_s {
68 struct bfi_mhdr_s mh; /* common msg header */
69 u8 lp_tag;
70 u8 rsvd[3];
71 wwn_t port_name;
72};
73
74struct bfi_lps_logout_rsp_s {
75 struct bfi_mhdr_s mh; /* common msg header */
76 u8 lp_tag;
77 u8 status;
78 u8 rsvd[2];
79};
80
81struct bfi_lps_cvl_event_s {
82 struct bfi_mhdr_s mh; /* common msg header */
83 u8 lp_tag;
84 u8 rsvd[3];
85};
86
87union bfi_lps_h2i_msg_u {
88 struct bfi_mhdr_s *msg;
89 struct bfi_lps_login_req_s *login_req;
90 struct bfi_lps_logout_req_s *logout_req;
91};
92
93union bfi_lps_i2h_msg_u {
94 struct bfi_msg_s *msg;
95 struct bfi_lps_login_rsp_s *login_rsp;
96 struct bfi_lps_logout_rsp_s *logout_rsp;
97 struct bfi_lps_cvl_event_s *cvl_event;
98};
99
100#pragma pack()
101
102#endif /* __BFI_LPS_H__ */
103
104
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pbc.h b/drivers/scsi/bfa/include/bfi/bfi_pbc.h
deleted file mode 100644
index 88a4154c30c0..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_pbc.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_PBC_H__
19#define __BFI_PBC_H__
20
21#pragma pack(1)
22
23#define BFI_PBC_MAX_BLUNS 8
24#define BFI_PBC_MAX_VPORTS 16
25
26#define BFI_PBC_PORT_DISABLED 2
27/**
28 * PBC boot lun configuration
29 */
30struct bfi_pbc_blun_s {
31 wwn_t tgt_pwwn;
32 lun_t tgt_lun;
33};
34
35/**
36 * PBC virtual port configuration
37 */
38struct bfi_pbc_vport_s {
39 wwn_t vp_pwwn;
40 wwn_t vp_nwwn;
41};
42
43/**
44 * BFI pre-boot configuration information
45 */
46struct bfi_pbc_s {
47 u8 port_enabled;
48 u8 boot_enabled;
49 u8 nbluns;
50 u8 nvports;
51 u8 port_speed;
52 u8 rsvd_a;
53 u16 hss;
54 wwn_t pbc_pwwn;
55 wwn_t pbc_nwwn;
56 struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
57 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
58};
59
60#pragma pack()
61
62#endif /* __BFI_PBC_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_port.h b/drivers/scsi/bfa/include/bfi/bfi_port.h
deleted file mode 100644
index 3ec3bea110ba..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_port.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PORT_H__
18#define __BFI_PORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
25enum bfi_port_h2i {
26 BFI_PORT_H2I_ENABLE_REQ = (1),
27 BFI_PORT_H2I_DISABLE_REQ = (2),
28 BFI_PORT_H2I_GET_STATS_REQ = (3),
29 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
30};
31
32enum bfi_port_i2h {
33 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
34 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
35 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
36 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
37};
38
39/**
40 * Generic REQ type
41 */
42struct bfi_port_generic_req_s {
43 struct bfi_mhdr_s mh; /* msg header */
44 u32 msgtag; /* msgtag for reply */
45 u32 rsvd;
46};
47
48/**
49 * Generic RSP type
50 */
51struct bfi_port_generic_rsp_s {
52 struct bfi_mhdr_s mh; /* common msg header */
53 u8 status; /* port enable status */
54 u8 rsvd[3];
55 u32 msgtag; /* msgtag for reply */
56};
57
58/**
59 * @todo
60 * BFI_PORT_H2I_ENABLE_REQ
61 */
62
63/**
64 * @todo
65 * BFI_PORT_I2H_ENABLE_RSP
66 */
67
68/**
69 * BFI_PORT_H2I_DISABLE_REQ
70 */
71
72/**
73 * BFI_PORT_I2H_DISABLE_RSP
74 */
75
76/**
77 * BFI_PORT_H2I_GET_STATS_REQ
78 */
79struct bfi_port_get_stats_req_s {
80 struct bfi_mhdr_s mh; /* common msg header */
81 union bfi_addr_u dma_addr;
82};
83
84/**
85 * BFI_PORT_I2H_GET_STATS_RSP
86 */
87
88/**
89 * BFI_PORT_H2I_CLEAR_STATS_REQ
90 */
91
92/**
93 * BFI_PORT_I2H_CLEAR_STATS_RSP
94 */
95
96union bfi_port_h2i_msg_u {
97 struct bfi_mhdr_s mh;
98 struct bfi_port_generic_req_s enable_req;
99 struct bfi_port_generic_req_s disable_req;
100 struct bfi_port_get_stats_req_s getstats_req;
101 struct bfi_port_generic_req_s clearstats_req;
102};
103
104union bfi_port_i2h_msg_u {
105 struct bfi_mhdr_s mh;
106 struct bfi_port_generic_rsp_s enable_rsp;
107 struct bfi_port_generic_rsp_s disable_rsp;
108 struct bfi_port_generic_rsp_s getstats_rsp;
109 struct bfi_port_generic_rsp_s clearstats_rsp;
110};
111
112#pragma pack()
113
114#endif /* __BFI_PORT_H__ */
115
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
deleted file mode 100644
index 50dcf45c7470..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFI_PPORT_H__
18#define __BFI_PPORT_H__
19
20#include <bfi/bfi.h>
21#include <defs/bfa_defs_pport.h>
22
23#pragma pack(1)
24
25enum bfi_fcport_h2i {
26 BFI_FCPORT_H2I_ENABLE_REQ = (1),
27 BFI_FCPORT_H2I_DISABLE_REQ = (2),
28 BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
29 BFI_FCPORT_H2I_STATS_GET_REQ = (4),
30 BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
31};
32
33enum bfi_fcport_i2h {
34 BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
35 BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
36 BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
37 BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
38 BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
39 BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
40};
41
42/**
43 * Generic REQ type
44 */
45struct bfi_fcport_req_s {
46 struct bfi_mhdr_s mh; /* msg header */
47 u32 msgtag; /* msgtag for reply */
48};
49
50/**
51 * Generic RSP type
52 */
53struct bfi_fcport_rsp_s {
54 struct bfi_mhdr_s mh; /* common msg header */
55 u8 status; /* port enable status */
56 u8 rsvd[3];
57 u32 msgtag; /* msgtag for reply */
58};
59
60/**
61 * BFI_FCPORT_H2I_ENABLE_REQ
62 */
63struct bfi_fcport_enable_req_s {
64 struct bfi_mhdr_s mh; /* msg header */
65 u32 rsvd1;
66 wwn_t nwwn; /* node wwn of physical port */
67 wwn_t pwwn; /* port wwn of physical port */
68 struct bfa_pport_cfg_s port_cfg; /* port configuration */
69 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
70 u32 msgtag; /* msgtag for reply */
71 u32 rsvd2;
72};
73
74/**
75 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
76 */
77struct bfi_fcport_set_svc_params_req_s {
78 struct bfi_mhdr_s mh; /* msg header */
79 u16 tx_bbcredit; /* Tx credits */
80 u16 rsvd;
81};
82
83/**
84 * BFI_FCPORT_I2H_EVENT
85 */
86struct bfi_fcport_event_s {
87 struct bfi_mhdr_s mh; /* common msg header */
88 struct bfa_pport_link_s link_state;
89};
90
91/**
92 * fcport H2I message
93 */
94union bfi_fcport_h2i_msg_u {
95 struct bfi_mhdr_s *mhdr;
96 struct bfi_fcport_enable_req_s *penable;
97 struct bfi_fcport_req_s *pdisable;
98 struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
99 struct bfi_fcport_req_s *pstatsget;
100 struct bfi_fcport_req_s *pstatsclear;
101};
102
103/**
104 * fcport I2H message
105 */
106union bfi_fcport_i2h_msg_u {
107 struct bfi_msg_s *msg;
108 struct bfi_fcport_rsp_s *penable_rsp;
109 struct bfi_fcport_rsp_s *pdisable_rsp;
110 struct bfi_fcport_rsp_s *psetsvcparams_rsp;
111 struct bfi_fcport_rsp_s *pstatsget_rsp;
112 struct bfi_fcport_rsp_s *pstatsclear_rsp;
113 struct bfi_fcport_event_s *event;
114};
115
116#pragma pack()
117
118#endif /* __BFI_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h
deleted file mode 100644
index e1cd83b56ec6..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_rport.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_RPORT_H__
19#define __BFI_RPORT_H__
20
21#include <bfi/bfi.h>
22
23#pragma pack(1)
24
25enum bfi_rport_h2i_msgs {
26 BFI_RPORT_H2I_CREATE_REQ = 1,
27 BFI_RPORT_H2I_DELETE_REQ = 2,
28 BFI_RPORT_H2I_SET_SPEED_REQ = 3,
29};
30
31enum bfi_rport_i2h_msgs {
32 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
33 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
34 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
35};
36
37struct bfi_rport_create_req_s {
38 struct bfi_mhdr_s mh; /* common msg header */
39 u16 bfa_handle; /* host rport handle */
40 u16 max_frmsz; /* max rcv pdu size */
41 u32 pid:24, /* remote port ID */
42 lp_tag:8; /* local port tag */
43 u32 local_pid:24, /* local port ID */
44 cisc:8;
45 u8 fc_class; /* supported FC classes */
46 u8 vf_en; /* virtual fabric enable */
47 u16 vf_id; /* virtual fabric ID */
48};
49
50struct bfi_rport_create_rsp_s {
51 struct bfi_mhdr_s mh; /* common msg header */
52 u8 status; /* rport creation status */
53 u8 rsvd[3];
54 u16 bfa_handle; /* host rport handle */
55 u16 fw_handle; /* firmware rport handle */
56 struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */
57};
58
59struct bfa_rport_speed_req_s {
60 struct bfi_mhdr_s mh; /* common msg header */
61 u16 fw_handle; /* firmware rport handle */
62 u8 speed; /*! rport's speed via RPSC */
63 u8 rsvd;
64};
65
66struct bfi_rport_delete_req_s {
67 struct bfi_mhdr_s mh; /* common msg header */
68 u16 fw_handle; /* firmware rport handle */
69 u16 rsvd;
70};
71
72struct bfi_rport_delete_rsp_s {
73 struct bfi_mhdr_s mh; /* common msg header */
74 u16 bfa_handle; /* host rport handle */
75 u8 status; /* rport deletion status */
76 u8 rsvd;
77};
78
79struct bfi_rport_qos_scn_s {
80 struct bfi_mhdr_s mh; /* common msg header */
81 u16 bfa_handle; /* host rport handle */
82 u16 rsvd;
83 struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */
84 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
85};
86
87union bfi_rport_h2i_msg_u {
88 struct bfi_msg_s *msg;
89 struct bfi_rport_create_req_s *create_req;
90 struct bfi_rport_delete_req_s *delete_req;
91 struct bfi_rport_speed_req_s *speed_req;
92};
93
94union bfi_rport_i2h_msg_u {
95 struct bfi_msg_s *msg;
96 struct bfi_rport_create_rsp_s *create_rsp;
97 struct bfi_rport_delete_rsp_s *delete_rsp;
98 struct bfi_rport_qos_scn_s *qos_scn_evt;
99};
100
101#pragma pack()
102
103#endif /* __BFI_RPORT_H__ */
104
diff --git a/drivers/scsi/bfa/include/bfi/bfi_uf.h b/drivers/scsi/bfa/include/bfi/bfi_uf.h
deleted file mode 100644
index f328a9e7e622..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_uf.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFI_UF_H__
19#define __BFI_UF_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25enum bfi_uf_h2i {
26 BFI_UF_H2I_BUF_POST = 1,
27};
28
29enum bfi_uf_i2h {
30 BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
31};
32
33#define BFA_UF_MAX_SGES 2
34
35struct bfi_uf_buf_post_s {
36 struct bfi_mhdr_s mh; /* Common msg header */
37 u16 buf_tag; /* buffer tag */
38 u16 buf_len; /* total buffer length */
39 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */
40};
41
42struct bfi_uf_frm_rcvd_s {
43 struct bfi_mhdr_s mh; /* Common msg header */
44 u16 buf_tag; /* buffer tag */
45 u16 rsvd;
46 u16 frm_len; /* received frame length */
47 u16 xfr_len; /* tranferred length */
48};
49
50#pragma pack()
51
52#endif /* __BFI_UF_H__ */
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
deleted file mode 100644
index a75a1f3be315..000000000000
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_cna_trcmod.h CNA trace modules
20 */
21
22#ifndef __BFA_CNA_TRCMOD_H__
23#define __BFA_CNA_TRCMOD_H__
24
25#include <cs/bfa_trc.h>
26
27/*
28 * !!! Only append to the enums defined here to avoid any versioning
29 * !!! needed between trace utility and driver version
30 */
31enum {
32 BFA_TRC_CNA_CEE = 1,
33 BFA_TRC_CNA_PORT = 2,
34 BFA_TRC_CNA_IOC = 3,
35 BFA_TRC_CNA_DIAG = 4,
36 BFA_TRC_CNA_IOC_CB = 5,
37 BFA_TRC_CNA_IOC_CT = 6,
38};
39
40#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h b/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
deleted file mode 100644
index 77f297f68046..000000000000
--- a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_CEE_H__
19#define __BFA_CEE_H__
20
21#include <defs/bfa_defs_cee.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
26typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
27typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
28typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
29typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, bfa_status_t status);
30
31struct bfa_cee_cbfn_s {
32 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
33 void *get_attr_cbarg;
34 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
35 void *get_stats_cbarg;
36 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
37 void *reset_stats_cbarg;
38};
39
40struct bfa_cee_s {
41 void *dev;
42 bfa_boolean_t get_attr_pending;
43 bfa_boolean_t get_stats_pending;
44 bfa_boolean_t reset_stats_pending;
45 bfa_status_t get_attr_status;
46 bfa_status_t get_stats_status;
47 bfa_status_t reset_stats_status;
48 struct bfa_cee_cbfn_s cbfn;
49 struct bfa_ioc_hbfail_notify_s hbfail;
50 struct bfa_trc_mod_s *trcmod;
51 struct bfa_log_mod_s *logmod;
52 struct bfa_cee_attr_s *attr;
53 struct bfa_cee_stats_s *stats;
54 struct bfa_dma_s attr_dma;
55 struct bfa_dma_s stats_dma;
56 struct bfa_ioc_s *ioc;
57 struct bfa_mbox_cmd_s get_cfg_mb;
58 struct bfa_mbox_cmd_s get_stats_mb;
59 struct bfa_mbox_cmd_s reset_stats_mb;
60};
61
62u32 bfa_cee_meminfo(void);
63void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva,
64 u64 dma_pa);
65void bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
66 struct bfa_trc_mod_s *trcmod,
67 struct bfa_log_mod_s *logmod);
68void bfa_cee_detach(struct bfa_cee_s *cee);
69bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
70 struct bfa_cee_attr_s *attr,
71 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
72bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
73 struct bfa_cee_stats_s *stats,
74 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
75bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
76 bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
77#endif /* __BFA_CEE_H__ */
diff --git a/drivers/scsi/bfa/include/cna/port/bfa_port.h b/drivers/scsi/bfa/include/cna/port/bfa_port.h
deleted file mode 100644
index d7babaf97848..000000000000
--- a/drivers/scsi/bfa/include/cna/port/bfa_port.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_PORT_H__
19#define __BFA_PORT_H__
20
21#include <defs/bfa_defs_port.h>
22#include <bfa_ioc.h>
23#include <cs/bfa_trc.h>
24#include <cs/bfa_log.h>
25
26typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
27typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
28
29struct bfa_port_s {
30 void *dev;
31 struct bfa_ioc_s *ioc;
32 struct bfa_trc_mod_s *trcmod;
33 struct bfa_log_mod_s *logmod;
34 u32 msgtag;
35 bfa_boolean_t stats_busy;
36 struct bfa_mbox_cmd_s stats_mb;
37 bfa_port_stats_cbfn_t stats_cbfn;
38 void *stats_cbarg;
39 bfa_status_t stats_status;
40 u32 stats_reset_time;
41 union bfa_pport_stats_u *stats;
42 struct bfa_dma_s stats_dma;
43 bfa_boolean_t endis_pending;
44 struct bfa_mbox_cmd_s endis_mb;
45 bfa_port_endis_cbfn_t endis_cbfn;
46 void *endis_cbarg;
47 bfa_status_t endis_status;
48 struct bfa_ioc_hbfail_notify_s hbfail;
49};
50
51void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
52 void *dev, struct bfa_trc_mod_s *trcmod,
53 struct bfa_log_mod_s *logmod);
54void bfa_port_detach(struct bfa_port_s *port);
55void bfa_port_hbfail(void *arg);
56
57bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
58 union bfa_pport_stats_u *stats,
59 bfa_port_stats_cbfn_t cbfn, void *cbarg);
60bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
61 bfa_port_stats_cbfn_t cbfn, void *cbarg);
62bfa_status_t bfa_port_enable(struct bfa_port_s *port,
63 bfa_port_endis_cbfn_t cbfn, void *cbarg);
64bfa_status_t bfa_port_disable(struct bfa_port_s *port,
65 bfa_port_endis_cbfn_t cbfn, void *cbarg);
66u32 bfa_port_meminfo(void);
67void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva,
68 u64 dma_pa);
69
70#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h b/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
deleted file mode 100644
index 1563ee512218..000000000000
--- a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __ETHPORT_DEFS_H__
18#define __ETHPORT_DEFS_H__
19
20struct bnad_drv_stats {
21 u64 netif_queue_stop;
22 u64 netif_queue_wakeup;
23 u64 tso4;
24 u64 tso6;
25 u64 tso_err;
26 u64 tcpcsum_offload;
27 u64 udpcsum_offload;
28 u64 csum_help;
29 u64 csum_help_err;
30
31 u64 hw_stats_updates;
32 u64 netif_rx_schedule;
33 u64 netif_rx_complete;
34 u64 netif_rx_dropped;
35};
36#endif
diff --git a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h b/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
deleted file mode 100644
index eb7548030d0f..000000000000
--- a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h
+++ /dev/null
@@ -1,218 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved.
4 *
5 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License (GPL) Version 2 as
9 * published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef __PHYPORT_DEFS_H__
18#define __PHYPORT_DEFS_H__
19
20#define BNA_TXF_ID_MAX 64
21#define BNA_RXF_ID_MAX 64
22
23/*
24 * Statistics
25 */
26
27/*
28 * TxF Frame Statistics
29 */
30struct bna_stats_txf {
31 u64 ucast_octets;
32 u64 ucast;
33 u64 ucast_vlan;
34
35 u64 mcast_octets;
36 u64 mcast;
37 u64 mcast_vlan;
38
39 u64 bcast_octets;
40 u64 bcast;
41 u64 bcast_vlan;
42
43 u64 errors;
44 u64 filter_vlan; /* frames filtered due to VLAN */
45 u64 filter_mac_sa; /* frames filtered due to SA check */
46};
47
48/*
49 * RxF Frame Statistics
50 */
51struct bna_stats_rxf {
52 u64 ucast_octets;
53 u64 ucast;
54 u64 ucast_vlan;
55
56 u64 mcast_octets;
57 u64 mcast;
58 u64 mcast_vlan;
59
60 u64 bcast_octets;
61 u64 bcast;
62 u64 bcast_vlan;
63 u64 frame_drops;
64};
65
66/*
67 * FC Tx Frame Statistics
68 */
69struct bna_stats_fc_tx {
70 u64 txf_ucast_octets;
71 u64 txf_ucast;
72 u64 txf_ucast_vlan;
73
74 u64 txf_mcast_octets;
75 u64 txf_mcast;
76 u64 txf_mcast_vlan;
77
78 u64 txf_bcast_octets;
79 u64 txf_bcast;
80 u64 txf_bcast_vlan;
81
82 u64 txf_parity_errors;
83 u64 txf_timeout;
84 u64 txf_fid_parity_errors;
85};
86
87/*
88 * FC Rx Frame Statistics
89 */
90struct bna_stats_fc_rx {
91 u64 rxf_ucast_octets;
92 u64 rxf_ucast;
93 u64 rxf_ucast_vlan;
94
95 u64 rxf_mcast_octets;
96 u64 rxf_mcast;
97 u64 rxf_mcast_vlan;
98
99 u64 rxf_bcast_octets;
100 u64 rxf_bcast;
101 u64 rxf_bcast_vlan;
102};
103
104/*
105 * RAD Frame Statistics
106 */
107struct cna_stats_rad {
108 u64 rx_frames;
109 u64 rx_octets;
110 u64 rx_vlan_frames;
111
112 u64 rx_ucast;
113 u64 rx_ucast_octets;
114 u64 rx_ucast_vlan;
115
116 u64 rx_mcast;
117 u64 rx_mcast_octets;
118 u64 rx_mcast_vlan;
119
120 u64 rx_bcast;
121 u64 rx_bcast_octets;
122 u64 rx_bcast_vlan;
123
124 u64 rx_drops;
125};
126
127/*
128 * BPC Tx Registers
129 */
130struct cna_stats_bpc_tx {
131 u64 tx_pause[8];
132 u64 tx_zero_pause[8]; /* Pause cancellation */
133 u64 tx_first_pause[8]; /* Pause initiation rather
134 *than retention */
135};
136
137/*
138 * BPC Rx Registers
139 */
140struct cna_stats_bpc_rx {
141 u64 rx_pause[8];
142 u64 rx_zero_pause[8]; /* Pause cancellation */
143 u64 rx_first_pause[8]; /* Pause initiation rather
144 *than retention */
145};
146
147/*
148 * MAC Rx Statistics
149 */
150struct cna_stats_mac_rx {
151 u64 frame_64; /* both rx and tx counter */
152 u64 frame_65_127; /* both rx and tx counter */
153 u64 frame_128_255; /* both rx and tx counter */
154 u64 frame_256_511; /* both rx and tx counter */
155 u64 frame_512_1023; /* both rx and tx counter */
156 u64 frame_1024_1518; /* both rx and tx counter */
157 u64 frame_1518_1522; /* both rx and tx counter */
158 u64 rx_bytes;
159 u64 rx_packets;
160 u64 rx_fcs_error;
161 u64 rx_multicast;
162 u64 rx_broadcast;
163 u64 rx_control_frames;
164 u64 rx_pause;
165 u64 rx_unknown_opcode;
166 u64 rx_alignment_error;
167 u64 rx_frame_length_error;
168 u64 rx_code_error;
169 u64 rx_carrier_sense_error;
170 u64 rx_undersize;
171 u64 rx_oversize;
172 u64 rx_fragments;
173 u64 rx_jabber;
174 u64 rx_drop;
175};
176
177/*
178 * MAC Tx Statistics
179 */
180struct cna_stats_mac_tx {
181 u64 tx_bytes;
182 u64 tx_packets;
183 u64 tx_multicast;
184 u64 tx_broadcast;
185 u64 tx_pause;
186 u64 tx_deferral;
187 u64 tx_excessive_deferral;
188 u64 tx_single_collision;
189 u64 tx_muliple_collision;
190 u64 tx_late_collision;
191 u64 tx_excessive_collision;
192 u64 tx_total_collision;
193 u64 tx_pause_honored;
194 u64 tx_drop;
195 u64 tx_jabber;
196 u64 tx_fcs_error;
197 u64 tx_control_frame;
198 u64 tx_oversize;
199 u64 tx_undersize;
200 u64 tx_fragments;
201};
202
203/*
204 * Complete statistics
205 */
206struct bna_stats {
207 struct cna_stats_mac_rx mac_rx_stats;
208 struct cna_stats_bpc_rx bpc_rx_stats;
209 struct cna_stats_rad rad_stats;
210 struct bna_stats_fc_rx fc_rx_stats;
211 struct cna_stats_mac_tx mac_tx_stats;
212 struct cna_stats_bpc_tx bpc_tx_stats;
213 struct bna_stats_fc_tx fc_tx_stats;
214 struct bna_stats_rxf rxf_stats[BNA_TXF_ID_MAX];
215 struct bna_stats_txf txf_stats[BNA_RXF_ID_MAX];
216};
217
218#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h
deleted file mode 100644
index 650f8d0aaff9..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_checksum.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_checksum.h BFA checksum utilities
20 */
21
22#ifndef __BFA_CHECKSUM_H__
23#define __BFA_CHECKSUM_H__
24
25static inline u32
26bfa_checksum_u32(u32 *buf, int sz)
27{
28 int i, m = sz >> 2;
29 u32 sum = 0;
30
31 for (i = 0; i < m; i++)
32 sum ^= buf[i];
33
34 return sum;
35}
36
37static inline u16
38bfa_checksum_u16(u16 *buf, int sz)
39{
40 int i, m = sz >> 1;
41 u16 sum = 0;
42
43 for (i = 0; i < m; i++)
44 sum ^= buf[i];
45
46 return sum;
47}
48
49static inline u8
50bfa_checksum_u8(u8 *buf, int sz)
51{
52 int i;
53 u8 sum = 0;
54
55 for (i = 0; i < sz; i++)
56 sum ^= buf[i];
57
58 return sum;
59}
60#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_debug.h b/drivers/scsi/bfa/include/cs/bfa_debug.h
deleted file mode 100644
index 75a911ea7936..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_debug.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_debug.h BFA debug interfaces
20 */
21
22#ifndef __BFA_DEBUG_H__
23#define __BFA_DEBUG_H__
24
25#define bfa_assert(__cond) do { \
26 if (!(__cond)) \
27 bfa_panic(__LINE__, __FILE__, #__cond); \
28} while (0)
29
30#define bfa_sm_fault(__mod, __event) do { \
31 bfa_trc(__mod, (((uint32_t)0xDEAD << 16) | __event)); \
32 bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \
33} while (0)
34
35#ifndef BFA_PERF_BUILD
36#define bfa_assert_fp(__cond) bfa_assert(__cond)
37#else
38#define bfa_assert_fp(__cond)
39#endif
40
41struct bfa_log_mod_s;
42void bfa_panic(int line, char *file, char *panicstr);
43void bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event);
44
45#endif /* __BFA_DEBUG_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
deleted file mode 100644
index bc334e0a93fa..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_log.h BFA log library data structure and function definition
20 */
21
22#ifndef __BFA_LOG_H__
23#define __BFA_LOG_H__
24
25#include <bfa_os_inc.h>
26#include <defs/bfa_defs_status.h>
27#include <defs/bfa_defs_aen.h>
28
29/*
30 * BFA log module definition
31 *
32 * To create a new module id:
33 * Add a #define at the end of the list below. Select a value for your
34 * definition so that it is one (1) greater than the previous
35 * definition. Modify the definition of BFA_LOG_MODULE_ID_MAX to become
36 * your new definition.
37 * Should have no gaps in between the values because this is used in arrays.
38 * IMPORTANT: AEN_IDs must be at the begining, otherwise update bfa_defs_aen.h
39 */
40
41enum bfa_log_module_id {
42 BFA_LOG_UNUSED_ID = 0,
43
44 /* AEN defs begin */
45 BFA_LOG_AEN_MIN = BFA_LOG_UNUSED_ID,
46
47 BFA_LOG_AEN_ID_ADAPTER = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ADAPTER,/* 1 */
48 BFA_LOG_AEN_ID_PORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_PORT, /* 2 */
49 BFA_LOG_AEN_ID_LPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_LPORT, /* 3 */
50 BFA_LOG_AEN_ID_RPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_RPORT, /* 4 */
51 BFA_LOG_AEN_ID_ITNIM = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ITNIM, /* 5 */
52 BFA_LOG_AEN_ID_TIN = BFA_LOG_AEN_MIN + BFA_AEN_CAT_TIN, /* 6 */
53 BFA_LOG_AEN_ID_IPFC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IPFC, /* 7 */
54 BFA_LOG_AEN_ID_AUDIT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_AUDIT, /* 8 */
55 BFA_LOG_AEN_ID_IOC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IOC, /* 9 */
56 BFA_LOG_AEN_ID_ETHPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ETHPORT,/* 10 */
57
58 BFA_LOG_AEN_MAX = BFA_LOG_AEN_ID_ETHPORT,
59 /* AEN defs end */
60
61 BFA_LOG_MODULE_ID_MIN = BFA_LOG_AEN_MAX,
62
63 BFA_LOG_FW_ID = BFA_LOG_MODULE_ID_MIN + 1,
64 BFA_LOG_HAL_ID = BFA_LOG_MODULE_ID_MIN + 2,
65 BFA_LOG_FCS_ID = BFA_LOG_MODULE_ID_MIN + 3,
66 BFA_LOG_WDRV_ID = BFA_LOG_MODULE_ID_MIN + 4,
67 BFA_LOG_LINUX_ID = BFA_LOG_MODULE_ID_MIN + 5,
68 BFA_LOG_SOLARIS_ID = BFA_LOG_MODULE_ID_MIN + 6,
69
70 BFA_LOG_MODULE_ID_MAX = BFA_LOG_SOLARIS_ID,
71
72 /* Not part of any arrays */
73 BFA_LOG_MODULE_ID_ALL = BFA_LOG_MODULE_ID_MAX + 1,
74 BFA_LOG_AEN_ALL = BFA_LOG_MODULE_ID_MAX + 2,
75 BFA_LOG_DRV_ALL = BFA_LOG_MODULE_ID_MAX + 3,
76};
77
78/*
79 * BFA log catalog name
80 */
81#define BFA_LOG_CAT_NAME "BFA"
82
83/*
84 * bfa log severity values
85 */
86enum bfa_log_severity {
87 BFA_LOG_INVALID = 0,
88 BFA_LOG_CRITICAL = 1,
89 BFA_LOG_ERROR = 2,
90 BFA_LOG_WARNING = 3,
91 BFA_LOG_INFO = 4,
92 BFA_LOG_NONE = 5,
93 BFA_LOG_LEVEL_MAX = BFA_LOG_NONE
94};
95
96#define BFA_LOG_MODID_OFFSET 16
97
98
99struct bfa_log_msgdef_s {
100 u32 msg_id; /* message id */
101 int attributes; /* attributes */
102 int severity; /* severity level */
103 char *msg_value;
104 /* msg string */
105 char *message;
106 /* msg format string */
107 int arg_type; /* argument type */
108 int arg_num; /* number of argument */
109};
110
111/*
112 * supported argument type
113 */
114enum bfa_log_arg_type {
115 BFA_LOG_S = 0, /* string */
116 BFA_LOG_D, /* decimal */
117 BFA_LOG_I, /* integer */
118 BFA_LOG_O, /* oct number */
119 BFA_LOG_U, /* unsigned integer */
120 BFA_LOG_X, /* hex number */
121 BFA_LOG_F, /* floating */
122 BFA_LOG_C, /* character */
123 BFA_LOG_L, /* double */
124 BFA_LOG_P /* pointer */
125};
126
127#define BFA_LOG_ARG_TYPE 2
128#define BFA_LOG_ARG0 (0 * BFA_LOG_ARG_TYPE)
129#define BFA_LOG_ARG1 (1 * BFA_LOG_ARG_TYPE)
130#define BFA_LOG_ARG2 (2 * BFA_LOG_ARG_TYPE)
131#define BFA_LOG_ARG3 (3 * BFA_LOG_ARG_TYPE)
132
133#define BFA_LOG_GET_MOD_ID(msgid) ((msgid >> BFA_LOG_MODID_OFFSET) & 0xff)
134#define BFA_LOG_GET_MSG_IDX(msgid) (msgid & 0xffff)
135#define BFA_LOG_GET_MSG_ID(msgdef) ((msgdef)->msg_id)
136#define BFA_LOG_GET_MSG_FMT_STRING(msgdef) ((msgdef)->message)
137#define BFA_LOG_GET_SEVERITY(msgdef) ((msgdef)->severity)
138
139/*
140 * Event attributes
141 */
142#define BFA_LOG_ATTR_NONE 0
143#define BFA_LOG_ATTR_AUDIT 1
144#define BFA_LOG_ATTR_LOG 2
145#define BFA_LOG_ATTR_FFDC 4
146
147#define BFA_LOG_CREATE_ID(msw, lsw) \
148 (((u32)msw << BFA_LOG_MODID_OFFSET) | lsw)
149
150struct bfa_log_mod_s;
151
152/**
153 * callback function
154 */
155typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
156 const char *format, ...);
157
158
159struct bfa_log_mod_s {
160 char instance_info[BFA_STRING_32]; /* instance info */
161 int log_level[BFA_LOG_MODULE_ID_MAX + 1];
162 /* log level for modules */
163 bfa_log_cb_t cbfn; /* callback function */
164};
165
166extern int bfa_log_init(struct bfa_log_mod_s *log_mod,
167 char *instance_name, bfa_log_cb_t cbfn);
168extern int bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...);
169extern bfa_status_t bfa_log_set_level(struct bfa_log_mod_s *log_mod,
170 int mod_id, enum bfa_log_severity log_level);
171extern bfa_status_t bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
172 enum bfa_log_severity log_level);
173extern bfa_status_t bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
174 enum bfa_log_severity log_level);
175extern enum bfa_log_severity bfa_log_get_level(struct bfa_log_mod_s *log_mod,
176 int mod_id);
177extern enum bfa_log_severity bfa_log_get_msg_level(
178 struct bfa_log_mod_s *log_mod, u32 msg_id);
179/*
180 * array of messages generated from xml files
181 */
182extern struct bfa_log_msgdef_s bfa_log_msg_array[];
183
184#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_perf.h b/drivers/scsi/bfa/include/cs/bfa_perf.h
deleted file mode 100644
index 45aa5f978ff5..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_perf.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFAD_PERF_H__
18#define __BFAD_PERF_H__
19
20#ifdef BFAD_PERF_BUILD
21
22#undef bfa_trc
23#undef bfa_trc32
24#undef bfa_assert
25#undef BFA_TRC_FILE
26
27#define bfa_trc(_trcp, _data)
28#define bfa_trc32(_trcp, _data)
29#define bfa_assert(__cond)
30#define BFA_TRC_FILE(__mod, __submod)
31
32#endif
33
34#endif /* __BFAD_PERF_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_q.h b/drivers/scsi/bfa/include/cs/bfa_q.h
deleted file mode 100644
index ea895facedbc..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_q.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_q.h Circular queue definitions.
20 */
21
22#ifndef __BFA_Q_H__
23#define __BFA_Q_H__
24
25#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
26#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
27#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
28
29/*
30 * bfa_q_qe_init - to initialize a queue element
31 */
32#define bfa_q_qe_init(_qe) { \
33 bfa_q_next(_qe) = (struct list_head *) NULL; \
34 bfa_q_prev(_qe) = (struct list_head *) NULL; \
35}
36
37/*
38 * bfa_q_deq - dequeue an element from head of the queue
39 */
40#define bfa_q_deq(_q, _qe) { \
41 if (!list_empty(_q)) { \
42 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
43 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
44 (struct list_head *) (_q); \
45 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
46 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
47 } else { \
48 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
49 } \
50}
51
52/*
53 * bfa_q_deq_tail - dequeue an element from tail of the queue
54 */
55#define bfa_q_deq_tail(_q, _qe) { \
56 if (!list_empty(_q)) { \
57 *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
58 bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
59 (struct list_head *) (_q); \
60 bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe); \
61 BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
62 } else { \
63 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
64 } \
65}
66
67/*
68 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
69 * consistent across modules)
70 */
71#ifndef BFA_PERF_BUILD
72#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
73#else
74#define BFA_Q_DBG_INIT(_qe)
75#endif
76
77#define bfa_q_is_on_q(_q, _qe) \
78 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
79extern int bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe);
80
81#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
deleted file mode 100644
index 11fba9082f05..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfasm.h State machine defines
20 */
21
22#ifndef __BFA_SM_H__
23#define __BFA_SM_H__
24
25typedef void (*bfa_sm_t)(void *sm, int event);
26/**
27 * oc - object class eg. bfa_ioc
28 * st - state, eg. reset
29 * otype - object type, eg. struct bfa_ioc_s
30 * etype - object type, eg. enum ioc_event
31 */
32#define bfa_sm_state_decl(oc, st, otype, etype) \
33 static void oc ## _sm_ ## st(otype * fsm, etype event)
34
35#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
36#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
37#define bfa_sm_get_state(_sm) ((_sm)->sm)
38#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
39
40/**
41 * For converting from state machine function to state encoding.
42 */
43struct bfa_sm_table_s {
44 bfa_sm_t sm; /* state machine function */
45 int state; /* state machine encoding */
46 char *name; /* state name for display */
47};
48#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
49
50int bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm);
51
52/**
53 * State machine with entry actions.
54 */
55typedef void (*bfa_fsm_t)(void *fsm, int event);
56
57/**
58 * oc - object class eg. bfa_ioc
59 * st - state, eg. reset
60 * otype - object type, eg. struct bfa_ioc_s
61 * etype - object type, eg. enum ioc_event
62 */
63#define bfa_fsm_state_decl(oc, st, otype, etype) \
64 static void oc ## _sm_ ## st(otype * fsm, etype event); \
65 static void oc ## _sm_ ## st ## _entry(otype * fsm)
66
67#define bfa_fsm_set_state(_fsm, _state) do { \
68 (_fsm)->fsm = (bfa_fsm_t)(_state); \
69 _state ## _entry(_fsm); \
70} while (0)
71
72#define bfa_fsm_send_event(_fsm, _event) \
73 ((_fsm)->fsm((_fsm), (_event)))
74#define bfa_fsm_cmp_state(_fsm, _state) \
75 ((_fsm)->fsm == (bfa_fsm_t)(_state))
76
77#endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h
deleted file mode 100644
index 310771c888e7..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_trc.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_TRC_H__
18#define __BFA_TRC_H__
19
20#include <bfa_os_inc.h>
21
22#ifndef BFA_TRC_MAX
23#define BFA_TRC_MAX (4 * 1024)
24#endif
25
26#ifndef BFA_TRC_TS
27#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
28#endif
29
30struct bfa_trc_s {
31#ifdef __BIGENDIAN
32 u16 fileno;
33 u16 line;
34#else
35 u16 line;
36 u16 fileno;
37#endif
38 u32 timestamp;
39 union {
40 struct {
41 u32 rsvd;
42 u32 u32;
43 } u32;
44 u64 u64;
45 } data;
46};
47
48
49struct bfa_trc_mod_s {
50 u32 head;
51 u32 tail;
52 u32 ntrc;
53 u32 stopped;
54 u32 ticks;
55 u32 rsvd[3];
56 struct bfa_trc_s trc[BFA_TRC_MAX];
57};
58
59
60enum {
61 BFA_TRC_FW = 1, /* firmware modules */
62 BFA_TRC_HAL = 2, /* BFA modules */
63 BFA_TRC_FCS = 3, /* BFA FCS modules */
64 BFA_TRC_LDRV = 4, /* Linux driver modules */
65 BFA_TRC_SDRV = 5, /* Solaris driver modules */
66 BFA_TRC_VDRV = 6, /* vmware driver modules */
67 BFA_TRC_WDRV = 7, /* windows driver modules */
68 BFA_TRC_AEN = 8, /* AEN module */
69 BFA_TRC_BIOS = 9, /* bios driver modules */
70 BFA_TRC_EFI = 10, /* EFI driver modules */
71 BNA_TRC_WDRV = 11, /* BNA windows driver modules */
72 BNA_TRC_VDRV = 12, /* BNA vmware driver modules */
73 BNA_TRC_SDRV = 13, /* BNA Solaris driver modules */
74 BNA_TRC_LDRV = 14, /* BNA Linux driver modules */
75 BNA_TRC_HAL = 15, /* BNA modules */
76 BFA_TRC_CNA = 16, /* Common modules */
77 BNA_TRC_IMDRV = 17 /* BNA windows intermediate driver modules */
78};
79#define BFA_TRC_MOD_SH 10
80#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
81
82/**
83 * Define a new tracing file (module). Module should match one defined above.
84 */
85#define BFA_TRC_FILE(__mod, __submod) \
86 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
87 BFA_TRC_MOD(__mod))
88
89
90#define bfa_trc32(_trcp, _data) \
91 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
92
93
94#ifndef BFA_BOOT_BUILD
95#define bfa_trc(_trcp, _data) \
96 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
97#else
98void bfa_boot_trc(struct bfa_trc_mod_s *trcmod, u16 fileno,
99 u16 line, u32 data);
100#define bfa_trc(_trcp, _data) \
101 bfa_boot_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
102#endif
103
104
105static inline void
106bfa_trc_init(struct bfa_trc_mod_s *trcm)
107{
108 trcm->head = trcm->tail = trcm->stopped = 0;
109 trcm->ntrc = BFA_TRC_MAX;
110}
111
112
113static inline void
114bfa_trc_stop(struct bfa_trc_mod_s *trcm)
115{
116 trcm->stopped = 1;
117}
118
119#ifdef FWTRC
120extern void dc_flush(void *data);
121#else
122#define dc_flush(data)
123#endif
124
125
126static inline void
127__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
128{
129 int tail = trcm->tail;
130 struct bfa_trc_s *trc = &trcm->trc[tail];
131
132 if (trcm->stopped)
133 return;
134
135 trc->fileno = (u16) fileno;
136 trc->line = (u16) line;
137 trc->data.u64 = data;
138 trc->timestamp = BFA_TRC_TS(trcm);
139 dc_flush(trc);
140
141 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
142 if (trcm->tail == trcm->head)
143 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
144 dc_flush(trcm);
145}
146
147
148static inline void
149__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
150{
151 int tail = trcm->tail;
152 struct bfa_trc_s *trc = &trcm->trc[tail];
153
154 if (trcm->stopped)
155 return;
156
157 trc->fileno = (u16) fileno;
158 trc->line = (u16) line;
159 trc->data.u32.u32 = data;
160 trc->timestamp = BFA_TRC_TS(trcm);
161 dc_flush(trc);
162
163 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
164 if (trcm->tail == trcm->head)
165 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
166 dc_flush(trcm);
167}
168
169#ifndef BFA_PERF_BUILD
170#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
171#else
172#define bfa_trc_fp(_trcp, _data)
173#endif
174
175#endif /* __BFA_TRC_H__ */
176
diff --git a/drivers/scsi/bfa/include/cs/bfa_wc.h b/drivers/scsi/bfa/include/cs/bfa_wc.h
deleted file mode 100644
index 0460bd4fc7c4..000000000000
--- a/drivers/scsi/bfa/include/cs/bfa_wc.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_wc.h Generic wait counter.
20 */
21
22#ifndef __BFA_WC_H__
23#define __BFA_WC_H__
24
25typedef void (*bfa_wc_resume_t) (void *cbarg);
26
27struct bfa_wc_s {
28 bfa_wc_resume_t wc_resume;
29 void *wc_cbarg;
30 int wc_count;
31};
32
33static inline void
34bfa_wc_up(struct bfa_wc_s *wc)
35{
36 wc->wc_count++;
37}
38
39static inline void
40bfa_wc_down(struct bfa_wc_s *wc)
41{
42 wc->wc_count--;
43 if (wc->wc_count == 0)
44 wc->wc_resume(wc->wc_cbarg);
45}
46
47/**
48 * Initialize a waiting counter.
49 */
50static inline void
51bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
52{
53 wc->wc_resume = wc_resume;
54 wc->wc_cbarg = wc_cbarg;
55 wc->wc_count = 0;
56 bfa_wc_up(wc);
57}
58
59/**
60 * Wait for counter to reach zero
61 */
62static inline void
63bfa_wc_wait(struct bfa_wc_s *wc)
64{
65 bfa_wc_down(wc);
66}
67
68#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
deleted file mode 100644
index aea0360d67d5..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ADAPTER_H__
18#define __BFA_DEFS_ADAPTER_H__
19
20#include <protocol/types.h>
21#include <defs/bfa_defs_version.h>
22#include <defs/bfa_defs_mfg.h>
23
24/**
25 * BFA adapter level attributes.
26 */
27enum {
28 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
29 /*
30 *!< adapter serial num length
31 */
32 BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
33 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
34 BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
35 BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
36 BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
37};
38
39struct bfa_adapter_attr_s {
40 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
41 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
42 u32 card_type;
43 char model[BFA_ADAPTER_MODEL_NAME_LEN];
44 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
45 wwn_t pwwn;
46 char node_symname[FC_SYMNAME_MAX];
47 char hw_ver[BFA_VERSION_LEN];
48 char fw_ver[BFA_VERSION_LEN];
49 char optrom_ver[BFA_VERSION_LEN];
50 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
51 struct bfa_mfg_vpd_s vpd;
52 struct mac_s mac;
53
54 u8 nports;
55 u8 max_speed;
56 u8 prototype;
57 char asic_rev;
58
59 u8 pcie_gen;
60 u8 pcie_lanes_orig;
61 u8 pcie_lanes;
62 u8 cna_capable;
63 u8 is_mezz;
64};
65
66/**
67 * BFA adapter level events
68 * Arguments below are in BFAL context from Mgmt
69 * BFA_PORT_AEN_ADD: [in]: None [out]: serial_num, pwwn, nports
70 * BFA_PORT_AEN_REMOVE: [in]: pwwn [out]: serial_num, pwwn, nports
71 */
72enum bfa_adapter_aen_event {
73 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
74 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
75};
76
77struct bfa_adapter_aen_data_s {
78 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
79 u32 nports; /* Number of NPorts */
80 wwn_t pwwn; /* WWN of one of its physical port */
81};
82
83#endif /* __BFA_DEFS_ADAPTER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
deleted file mode 100644
index 35244698fcdc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AEN_H__
19#define __BFA_DEFS_AEN_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_ioc.h>
23#include <defs/bfa_defs_adapter.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_lport.h>
26#include <defs/bfa_defs_rport.h>
27#include <defs/bfa_defs_itnim.h>
28#include <defs/bfa_defs_tin.h>
29#include <defs/bfa_defs_ipfc.h>
30#include <defs/bfa_defs_audit.h>
31#include <defs/bfa_defs_ethport.h>
32
33#define BFA_AEN_MAX_APP 5
34
35enum bfa_aen_app {
36 bfa_aen_app_bcu = 0, /* No thread for bcu */
37 bfa_aen_app_hcm = 1,
38 bfa_aen_app_cim = 2,
39 bfa_aen_app_snia = 3,
40 bfa_aen_app_test = 4, /* To be removed after unit test */
41};
42
43enum bfa_aen_category {
44 BFA_AEN_CAT_ADAPTER = 1,
45 BFA_AEN_CAT_PORT = 2,
46 BFA_AEN_CAT_LPORT = 3,
47 BFA_AEN_CAT_RPORT = 4,
48 BFA_AEN_CAT_ITNIM = 5,
49 BFA_AEN_CAT_TIN = 6,
50 BFA_AEN_CAT_IPFC = 7,
51 BFA_AEN_CAT_AUDIT = 8,
52 BFA_AEN_CAT_IOC = 9,
53 BFA_AEN_CAT_ETHPORT = 10,
54 BFA_AEN_MAX_CAT = 10
55};
56
57#pragma pack(1)
58union bfa_aen_data_u {
59 struct bfa_adapter_aen_data_s adapter;
60 struct bfa_port_aen_data_s port;
61 struct bfa_lport_aen_data_s lport;
62 struct bfa_rport_aen_data_s rport;
63 struct bfa_itnim_aen_data_s itnim;
64 struct bfa_audit_aen_data_s audit;
65 struct bfa_ioc_aen_data_s ioc;
66 struct bfa_ethport_aen_data_s ethport;
67};
68
69struct bfa_aen_entry_s {
70 enum bfa_aen_category aen_category;
71 int aen_type;
72 union bfa_aen_data_u aen_data;
73 struct bfa_timeval_s aen_tv;
74 s32 seq_num;
75 s32 bfad_num;
76 s32 rsvd[1];
77};
78
79#pragma pack()
80
81#define bfa_aen_event_t int
82
83#endif /* __BFA_DEFS_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h b/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
deleted file mode 100644
index 8e3a962bf20c..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_AUDIT_H__
19#define __BFA_DEFS_AUDIT_H__
20
21#include <bfa_os_inc.h>
22
23/**
24 * BFA audit events
25 */
26enum bfa_audit_aen_event {
27 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
28 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
29};
30
31/**
32 * audit event data
33 */
34struct bfa_audit_aen_data_s {
35 wwn_t pwwn;
36};
37
38#endif /* __BFA_DEFS_AUDIT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
deleted file mode 100644
index f56ed871bb99..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_AUTH_H__
18#define __BFA_DEFS_AUTH_H__
19
20#include <defs/bfa_defs_types.h>
21
22#define PUBLIC_KEY 15409
23#define PRIVATE_KEY 19009
24#define KEY_LEN 32399
25#define BFA_AUTH_SECRET_STRING_LEN 256
26#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE
27#define BFA_AUTH_FAIL_TIMEOUT 0xFF
28
29/**
30 * Authentication status
31 */
32enum bfa_auth_status {
33 BFA_AUTH_STATUS_NONE = 0, /* no authentication */
34 BFA_AUTH_UNINIT = 1, /* state - uninit */
35 BFA_AUTH_NEG_SEND = 2, /* state - negotiate send */
36 BFA_AUTH_CHAL_WAIT = 3, /* state - challenge wait */
37 BFA_AUTH_NEG_RETRY = 4, /* state - negotiate retry */
38 BFA_AUTH_REPLY_SEND = 5, /* state - reply send */
39 BFA_AUTH_STATUS_WAIT = 6, /* state - status wait */
40 BFA_AUTH_SUCCESS = 7, /* state - success */
41 BFA_AUTH_FAILED = 8, /* state - failed */
42 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
43};
44
45enum bfa_auth_rej_code {
46 BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */
47 BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */
48};
49
50/**
51 * Authentication reject codes
52 */
53enum bfa_auth_rej_code_exp {
54 BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */
55 BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */
56 BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */
57 BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */
58 BFA_AUTH_AUTH_FAILED = 5, /* auth failed */
59 BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */
60 BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */
61 BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */
62 BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */
63 BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */
64};
65
66struct auth_proto_stats_s {
67 u32 auth_rjts;
68 u32 auth_negs;
69 u32 auth_dones;
70
71 u32 dhchap_challenges;
72 u32 dhchap_replies;
73 u32 dhchap_successes;
74};
75
76/**
77 * Authentication related statistics
78 */
79struct bfa_auth_stats_s {
80 u32 auth_failures; /* authentication failures */
81 u32 auth_successes; /* authentication successes*/
82 struct auth_proto_stats_s auth_rx_stats; /* Rx protocol stats */
83 struct auth_proto_stats_s auth_tx_stats; /* Tx protocol stats */
84};
85
86/**
87 * Authentication hash function algorithms
88 */
89enum bfa_auth_algo {
90 BFA_AUTH_ALGO_MD5 = 1, /* Message-Digest algorithm 5 */
91 BFA_AUTH_ALGO_SHA1 = 2, /* Secure Hash Algorithm 1 */
92 BFA_AUTH_ALGO_MS = 3, /* MD5, then SHA-1 */
93 BFA_AUTH_ALGO_SM = 4, /* SHA-1, then MD5 */
94};
95
96/**
97 * DH Groups
98 *
99 * Current value could be combination of one or more of the following values
100 */
101enum bfa_auth_group {
102 BFA_AUTH_GROUP_DHNULL = 0, /* DH NULL (value == 0) */
103 BFA_AUTH_GROUP_DH768 = 1, /* DH group 768 (value == 1) */
104 BFA_AUTH_GROUP_DH1024 = 2, /* DH group 1024 (value == 2) */
105 BFA_AUTH_GROUP_DH1280 = 4, /* DH group 1280 (value == 3) */
106 BFA_AUTH_GROUP_DH1536 = 8, /* DH group 1536 (value == 4) */
107
108 BFA_AUTH_GROUP_ALL = 256 /* Use default DH group order
109 * 0, 1, 2, 3, 4 */
110};
111
112/**
113 * Authentication secret sources
114 */
115enum bfa_auth_secretsource {
116 BFA_AUTH_SECSRC_LOCAL = 1, /* locally configured */
117 BFA_AUTH_SECSRC_RADIUS = 2, /* use radius server */
118 BFA_AUTH_SECSRC_TACACS = 3, /* TACACS server */
119};
120
121/**
122 * Authentication attributes
123 */
124struct bfa_auth_attr_s {
125 enum bfa_auth_status status;
126 enum bfa_auth_algo algo;
127 enum bfa_auth_group dh_grp;
128 enum bfa_auth_rej_code rjt_code;
129 enum bfa_auth_rej_code_exp rjt_code_exp;
130 u8 secret_set;
131 u8 resv[3];
132};
133
134#endif /* __BFA_DEFS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
deleted file mode 100644
index 0fca10b6ad10..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_BOOT_H__
19#define __BFA_DEFS_BOOT_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_pport.h>
24
25enum {
26 BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
27 BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
28
29};
30
31#define BOOT_CFG_REV1 1
32
33/**
34 * Boot options setting. Boot options setting determines from where
35 * to get the boot lun information
36 */
37enum bfa_boot_bootopt {
38 BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
39 BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
40 BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
41};
42
43/**
44 * Boot lun information.
45 */
46struct bfa_boot_bootlun_s {
47 wwn_t pwwn; /* port wwn of target */
48 lun_t lun; /* 64-bit lun */
49};
50
51/**
52 * BOOT boot configuraton
53 */
54struct bfa_boot_cfg_s {
55 u8 version;
56 u8 rsvd1;
57 u16 chksum;
58
59 u8 enable; /* enable/disable SAN boot */
60 u8 speed; /* boot speed settings */
61 u8 topology; /* boot topology setting */
62 u8 bootopt; /* bfa_boot_bootopt_t */
63
64 u32 nbluns; /* number of boot luns */
65
66 u32 rsvd2;
67
68 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
69 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
70};
71
72struct bfa_boot_pbc_s {
73 u8 enable; /* enable/disable SAN boot */
74 u8 speed; /* boot speed settings */
75 u8 topology; /* boot topology setting */
76 u8 rsvd1;
77 u32 nbluns; /* number of boot luns */
78 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
79};
80
81#endif /* __BFA_DEFS_BOOT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
deleted file mode 100644
index 6eaf519eccdc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_cee.h Interface declarations between host based
7 * BFAL and DCBX/LLDP module in Firmware
8 *
9 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License (GPL) Version 2 as
13 * published by the Free Software Foundation
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 */
20#ifndef __BFA_DEFS_CEE_H__
21#define __BFA_DEFS_CEE_H__
22
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_pport.h>
25#include <protocol/types.h>
26
27#pragma pack(1)
28
29#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
30
31#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
32#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
33#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
34#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
35#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
36#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
37#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
38#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
39#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
40#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
41#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
42
43
44/* LLDP string type */
45struct bfa_cee_lldp_str_s {
46 u8 sub_type;
47 u8 len;
48 u8 rsvd[2];
49 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
50};
51
52
53/* LLDP parameters */
54struct bfa_cee_lldp_cfg_s {
55 struct bfa_cee_lldp_str_s chassis_id;
56 struct bfa_cee_lldp_str_s port_id;
57 struct bfa_cee_lldp_str_s port_desc;
58 struct bfa_cee_lldp_str_s sys_name;
59 struct bfa_cee_lldp_str_s sys_desc;
60 struct bfa_cee_lldp_str_s mgmt_addr;
61 u16 time_to_interval;
62 u16 enabled_system_cap;
63};
64
65enum bfa_cee_dcbx_version_e {
66 DCBX_PROTOCOL_PRECEE = 1,
67 DCBX_PROTOCOL_CEE = 2,
68};
69
70enum bfa_cee_lls_e {
71 CEE_LLS_DOWN_NO_TLV = 0, /* LLS is down because the TLV not sent by
72 * the peer */
73 CEE_LLS_DOWN = 1, /* LLS is down as advertised by the peer */
74 CEE_LLS_UP = 2,
75};
76
77/* CEE/DCBX parameters */
78struct bfa_cee_dcbx_cfg_s {
79 u8 pgid[8];
80 u8 pg_percentage[8];
81 u8 pfc_enabled; /* bitmap of priorties with PFC enabled */
82 u8 fcoe_user_priority; /* bitmap of priorities used for FcoE
83 * traffic */
84 u8 dcbx_version; /* operating version:CEE or preCEE */
85 u8 lls_fcoe; /* FCoE Logical Link Status */
86 u8 lls_lan; /* LAN Logical Link Status */
87 u8 rsvd[3];
88};
89
90/* CEE status */
91/* Making this to tri-state for the benefit of port list command */
92enum bfa_cee_status_e {
93 CEE_UP = 0,
94 CEE_PHY_UP = 1,
95 CEE_LOOPBACK = 2,
96 CEE_PHY_DOWN = 3,
97};
98
99/* CEE Query */
100struct bfa_cee_attr_s {
101 u8 cee_status;
102 u8 error_reason;
103 struct bfa_cee_lldp_cfg_s lldp_remote;
104 struct bfa_cee_dcbx_cfg_s dcbx_remote;
105 mac_t src_mac;
106 u8 link_speed;
107 u8 nw_priority;
108 u8 filler[2];
109};
110
111
112
113
114/* LLDP/DCBX/CEE Statistics */
115
116struct bfa_cee_lldp_stats_s {
117 u32 frames_transmitted;
118 u32 frames_aged_out;
119 u32 frames_discarded;
120 u32 frames_in_error;
121 u32 frames_rcvd;
122 u32 tlvs_discarded;
123 u32 tlvs_unrecognized;
124};
125
126struct bfa_cee_dcbx_stats_s {
127 u32 subtlvs_unrecognized;
128 u32 negotiation_failed;
129 u32 remote_cfg_changed;
130 u32 tlvs_received;
131 u32 tlvs_invalid;
132 u32 seqno;
133 u32 ackno;
134 u32 recvd_seqno;
135 u32 recvd_ackno;
136};
137
138struct bfa_cee_cfg_stats_s {
139 u32 cee_status_down;
140 u32 cee_status_up;
141 u32 cee_hw_cfg_changed;
142 u32 recvd_invalid_cfg;
143};
144
145
146struct bfa_cee_stats_s {
147 struct bfa_cee_lldp_stats_s lldp_stats;
148 struct bfa_cee_dcbx_stats_s dcbx_stats;
149 struct bfa_cee_cfg_stats_s cfg_stats;
150};
151
152#pragma pack()
153
154
155#endif /* __BFA_DEFS_CEE_H__ */
156
157
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
deleted file mode 100644
index 7d00d00d3969..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_DRIVER_H__
19#define __BFA_DEFS_DRIVER_H__
20
21/**
22 * Driver statistics
23 */
24struct bfa_driver_stats_s {
25 u16 tm_io_abort;
26 u16 tm_io_abort_comp;
27 u16 tm_lun_reset;
28 u16 tm_lun_reset_comp;
29 u16 tm_target_reset;
30 u16 tm_bus_reset;
31 u16 ioc_restart; /* IOC restart count */
32 u16 rsvd;
33 u64 control_req;
34 u64 input_req;
35 u64 output_req;
36 u64 input_words;
37 u64 output_words;
38};
39
40
41#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
deleted file mode 100644
index b4fa0923aa89..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_ETHPORT_H__
19#define __BFA_DEFS_ETHPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24#include <cna/pstats/phyport_defs.h>
25#include <cna/pstats/ethport_defs.h>
26
27struct bna_tx_info_s {
28 u32 miniport_state;
29 u32 adapter_state;
30 u64 tx_count;
31 u64 tx_wi;
32 u64 tx_sg;
33 u64 tx_tcp_chksum;
34 u64 tx_udp_chksum;
35 u64 tx_ip_chksum;
36 u64 tx_lsov1;
37 u64 tx_lsov2;
38 u64 tx_max_sg_len ;
39};
40
41struct bna_rx_queue_info_s {
42 u16 q_id ;
43 u16 buf_size ;
44 u16 buf_count ;
45 u16 rsvd ;
46 u64 rx_count ;
47 u64 rx_dropped ;
48 u64 rx_unsupported ;
49 u64 rx_internal_err ;
50 u64 rss_count ;
51 u64 vlan_count ;
52 u64 rx_tcp_chksum ;
53 u64 rx_udp_chksum ;
54 u64 rx_ip_chksum ;
55 u64 rx_hds ;
56};
57
58struct bna_rx_q_set_s {
59 u16 q_set_type;
60 u32 miniport_state;
61 u32 adapter_state;
62 struct bna_rx_queue_info_s rx_queue[2];
63};
64
65struct bna_port_stats_s {
66 struct bna_tx_info_s tx_stats;
67 u16 qset_count ;
68 struct bna_rx_q_set_s rx_qset[8];
69};
70
71struct bfa_ethport_stats_s {
72 struct bna_stats_txf txf_stats[1];
73 struct bna_stats_rxf rxf_stats[1];
74 struct bnad_drv_stats drv_stats;
75};
76
77/**
78 * Ethernet port events
79 * Arguments below are in BFAL context from Mgmt
80 * BFA_PORT_AEN_ETH_LINKUP: [in]: mac [out]: mac
81 * BFA_PORT_AEN_ETH_LINKDOWN: [in]: mac [out]: mac
82 * BFA_PORT_AEN_ETH_ENABLE: [in]: mac [out]: mac
83 * BFA_PORT_AEN_ETH_DISABLE: [in]: mac [out]: mac
84 *
85 */
86enum bfa_ethport_aen_event {
87 BFA_ETHPORT_AEN_LINKUP = 1, /* Base Port Ethernet link up event */
88 BFA_ETHPORT_AEN_LINKDOWN = 2, /* Base Port Ethernet link down event */
89 BFA_ETHPORT_AEN_ENABLE = 3, /* Base Port Ethernet link enable event */
90 BFA_ETHPORT_AEN_DISABLE = 4, /* Base Port Ethernet link disable
91 * event */
92};
93
94struct bfa_ethport_aen_data_s {
95 mac_t mac; /* MAC address of the physical port */
96};
97
98
99#endif /* __BFA_DEFS_ETHPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
deleted file mode 100644
index c08f4f5026ac..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_FCPIM_H__
18#define __BFA_DEFS_FCPIM_H__
19
20struct bfa_fcpim_stats_s {
21 u32 total_ios; /* Total IO count */
22 u32 qresumes; /* IO waiting for CQ space */
23 u32 no_iotags; /* NO IO contexts */
24 u32 io_aborts; /* IO abort requests */
25 u32 no_tskims; /* NO task management contexts */
26 u32 iocomp_ok; /* IO completions with OK status */
27 u32 iocomp_underrun; /* IO underrun (good) */
28 u32 iocomp_overrun; /* IO overrun (good) */
29 u32 iocomp_aborted; /* Aborted IO requests */
30 u32 iocomp_timedout; /* IO timeouts */
31 u32 iocom_nexus_abort; /* IO selection timeouts */
32 u32 iocom_proto_err; /* IO protocol errors */
33 u32 iocom_dif_err; /* IO SBC-3 protection errors */
34 u32 iocom_tm_abort; /* IO aborted by TM requests */
35 u32 iocom_sqer_needed; /* IO retry for SQ error
36 *recovery */
37 u32 iocom_res_free; /* Delayed freeing of IO resources */
38 u32 iocomp_scsierr; /* IO with non-good SCSI status */
39 u32 iocom_hostabrts; /* Host IO abort requests */
40 u32 iocom_utags; /* IO comp with unknown tags */
41 u32 io_cleanups; /* IO implicitly aborted */
42 u32 io_tmaborts; /* IO aborted due to TM commands */
43 u32 rsvd;
44};
45#endif /*__BFA_DEFS_FCPIM_H__*/
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
deleted file mode 100644
index af86a6396439..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_fcport.h
7 *
8 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License (GPL) Version 2 as
12 * published by the Free Software Foundation
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19#ifndef __BFA_DEFS_FCPORT_H__
20#define __BFA_DEFS_FCPORT_H__
21
22#include <defs/bfa_defs_types.h>
23#include <protocol/types.h>
24
25#pragma pack(1)
26
27/**
28 * FCoE statistics
29 */
30struct bfa_fcoe_stats_s {
31 u64 secs_reset; /* Seconds since stats reset */
32 u64 cee_linkups; /* CEE link up */
33 u64 cee_linkdns; /* CEE link down */
34 u64 fip_linkups; /* FIP link up */
35 u64 fip_linkdns; /* FIP link down */
36 u64 fip_fails; /* FIP failures */
37 u64 mac_invalids; /* Invalid mac assignments */
38 u64 vlan_req; /* Vlan requests */
39 u64 vlan_notify; /* Vlan notifications */
40 u64 vlan_err; /* Vlan notification errors */
41 u64 vlan_timeouts; /* Vlan request timeouts */
42 u64 vlan_invalids; /* Vlan invalids */
43 u64 disc_req; /* Discovery requests */
44 u64 disc_rsp; /* Discovery responses */
45 u64 disc_err; /* Discovery error frames */
46 u64 disc_unsol; /* Discovery unsolicited */
47 u64 disc_timeouts; /* Discovery timeouts */
48 u64 disc_fcf_unavail; /* Discovery FCF not avail */
49 u64 linksvc_unsupp; /* FIP link service req unsupp. */
50 u64 linksvc_err; /* FIP link service req errors */
51 u64 logo_req; /* FIP logos received */
52 u64 clrvlink_req; /* Clear virtual link requests */
53 u64 op_unsupp; /* FIP operation unsupp. */
54 u64 untagged; /* FIP untagged frames */
55 u64 txf_ucast; /* Tx FCoE unicast frames */
56 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
57 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
58 u64 txf_mcast; /* Tx FCoE mutlicast frames */
59 u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */
60 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
61 u64 txf_bcast; /* Tx FCoE broadcast frames */
62 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
63 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
64 u64 txf_timeout; /* Tx timeouts */
65 u64 txf_parity_errors; /* Transmit parity err */
66 u64 txf_fid_parity_errors; /* Transmit FID parity err */
67 u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
68 u64 rxf_ucast; /* Rx FCoE unicast frames */
69 u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
70 u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
71 u64 rxf_mcast; /* Rx FCoE multicast frames */
72 u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
73 u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
74 u64 rxf_bcast; /* Rx FCoE broadcast frames */
75 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
76};
77
78/**
79 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
80 */
81union bfa_fcport_stats_u {
82 struct bfa_qos_stats_s fcqos;
83 struct bfa_fcoe_stats_s fcoe;
84};
85
86#pragma pack()
87
88#endif /* __BFA_DEFS_FCPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
deleted file mode 100644
index add0a05d941d..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOC_H__
19#define __BFA_DEFS_IOC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27enum {
28 BFA_IOC_DRIVER_LEN = 16,
29 BFA_IOC_CHIP_REV_LEN = 8,
30};
31
32/**
33 * Driver and firmware versions.
34 */
35struct bfa_ioc_driver_attr_s {
36 char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
37 char driver_ver[BFA_VERSION_LEN]; /* driver version */
38 char fw_ver[BFA_VERSION_LEN]; /* firmware version*/
39 char bios_ver[BFA_VERSION_LEN]; /* bios version */
40 char efi_ver[BFA_VERSION_LEN]; /* EFI version */
41 char ob_ver[BFA_VERSION_LEN]; /* openboot version*/
42};
43
44/**
45 * IOC PCI device attributes
46 */
47struct bfa_ioc_pci_attr_s {
48 u16 vendor_id; /* PCI vendor ID */
49 u16 device_id; /* PCI device ID */
50 u16 ssid; /* subsystem ID */
51 u16 ssvid; /* subsystem vendor ID */
52 u32 pcifn; /* PCI device function */
53 u32 rsvd; /* padding */
54 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
55};
56
57/**
58 * IOC states
59 */
60enum bfa_ioc_state {
61 BFA_IOC_RESET = 1, /* IOC is in reset state */
62 BFA_IOC_SEMWAIT = 2, /* Waiting for IOC hardware semaphore */
63 BFA_IOC_HWINIT = 3, /* IOC hardware is being initialized */
64 BFA_IOC_GETATTR = 4, /* IOC is being configured */
65 BFA_IOC_OPERATIONAL = 5, /* IOC is operational */
66 BFA_IOC_INITFAIL = 6, /* IOC hardware failure */
67 BFA_IOC_HBFAIL = 7, /* IOC heart-beat failure */
68 BFA_IOC_DISABLING = 8, /* IOC is being disabled */
69 BFA_IOC_DISABLED = 9, /* IOC is disabled */
70 BFA_IOC_FWMISMATCH = 10, /* IOC firmware different from drivers */
71};
72
73/**
74 * IOC firmware stats
75 */
76struct bfa_fw_ioc_stats_s {
77 u32 hb_count;
78 u32 cfg_reqs;
79 u32 enable_reqs;
80 u32 disable_reqs;
81 u32 stats_reqs;
82 u32 clrstats_reqs;
83 u32 unknown_reqs;
84 u32 ic_reqs; /* interrupt coalesce reqs */
85};
86
87/**
88 * IOC driver stats
89 */
90struct bfa_ioc_drv_stats_s {
91 u32 ioc_isrs;
92 u32 ioc_enables;
93 u32 ioc_disables;
94 u32 ioc_hbfails;
95 u32 ioc_boots;
96 u32 stats_tmos;
97 u32 hb_count;
98 u32 disable_reqs;
99 u32 enable_reqs;
100 u32 disable_replies;
101 u32 enable_replies;
102};
103
104/**
105 * IOC statistics
106 */
107struct bfa_ioc_stats_s {
108 struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
109 struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
110};
111
112
113enum bfa_ioc_type_e {
114 BFA_IOC_TYPE_FC = 1,
115 BFA_IOC_TYPE_FCoE = 2,
116 BFA_IOC_TYPE_LL = 3,
117};
118
119/**
120 * IOC attributes returned in queries
121 */
122struct bfa_ioc_attr_s {
123 enum bfa_ioc_type_e ioc_type;
124 enum bfa_ioc_state state; /* IOC state */
125 struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */
129 u8 rsvd[7]; /* 64bit align */
130};
131
132/**
133 * BFA IOC level events
134 */
135enum bfa_ioc_aen_event {
136 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
137 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
138 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
139 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
140 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
141 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
142 BFA_IOC_AEN_INVALID_VENDOR = 7,
143 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
144 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
145
146};
147
148/**
149 * BFA IOC level event data, now just a place holder
150 */
151struct bfa_ioc_aen_data_s {
152 wwn_t pwwn;
153 s16 ioc_type;
154 mac_t mac;
155};
156
157#endif /* __BFA_DEFS_IOC_H__ */
158
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
deleted file mode 100644
index 31e728a631ed..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ /dev/null
@@ -1,322 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IOCFC_H__
19#define __BFA_DEFS_IOCFC_H__
20
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23#include <defs/bfa_defs_version.h>
24#include <defs/bfa_defs_adapter.h>
25#include <defs/bfa_defs_pm.h>
26
27#define BFA_IOCFC_INTR_DELAY 1125
28#define BFA_IOCFC_INTR_LATENCY 225
29#define BFA_IOCFCOE_INTR_DELAY 25
30#define BFA_IOCFCOE_INTR_LATENCY 5
31
32/**
33 * Interrupt coalescing configuration.
34 */
35struct bfa_iocfc_intr_attr_s {
36 bfa_boolean_t coalesce; /* enable/disable coalescing */
37 u16 latency; /* latency in microseconds */
38 u16 delay; /* delay in microseconds */
39};
40
41/**
42 * IOC firmware configuraton
43 */
44struct bfa_iocfc_fwcfg_s {
45 u16 num_fabrics; /* number of fabrics */
46 u16 num_lports; /* number of local lports */
47 u16 num_rports; /* number of remote ports */
48 u16 num_ioim_reqs; /* number of IO reqs */
49 u16 num_tskim_reqs; /* task management requests */
50 u16 num_iotm_reqs; /* number of TM IO reqs */
51 u16 num_tsktm_reqs; /* TM task management requests*/
52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
53 u16 num_uf_bufs; /* unsolicited recv buffers */
54 u8 num_cqs;
55 u8 fw_tick_res; /*!< FW clock resolution in ms */
56 u8 rsvd[4];
57
58};
59
60struct bfa_iocfc_drvcfg_s {
61 u16 num_reqq_elems; /* number of req queue elements */
62 u16 num_rspq_elems; /* number of rsp queue elements */
63 u16 num_sgpgs; /* number of total SG pages */
64 u16 num_sboot_tgts; /* number of SAN boot targets */
65 u16 num_sboot_luns; /* number of SAN boot luns */
66 u16 ioc_recover; /* IOC recovery mode */
67 u16 min_cfg; /* minimum configuration */
68 u16 path_tov; /* device path timeout */
69 bfa_boolean_t delay_comp; /* delay completion of
70 failed inflight IOs */
71 u32 rsvd;
72};
73/**
74 * IOC configuration
75 */
76struct bfa_iocfc_cfg_s {
77 struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
78 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
79};
80
81/**
82 * IOC firmware IO stats
83 */
84struct bfa_fw_io_stats_s {
85 u32 host_abort; /* IO aborted by host driver*/
86 u32 host_cleanup; /* IO clean up by host driver */
87
88 u32 fw_io_timeout; /* IOs timedout */
89 u32 fw_frm_parse; /* frame parsed by f/w */
90 u32 fw_frm_data; /* fcp_data frame parsed by f/w */
91 u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
92 u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
93 u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
94 u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
95 u32 fw_frm_unknown; /* unknown parsed by f/w */
96 u32 fw_data_dma; /* f/w DMA'ed the data frame */
97 u32 fw_frm_drop; /* f/w drop the frame */
98
99 u32 rec_timeout; /* FW rec timed out */
100 u32 error_rec; /* FW sending rec on
101 * an error condition*/
102 u32 wait_for_si; /* FW wait for SI */
103 u32 rec_rsp_inval; /* REC rsp invalid */
104 u32 seqr_io_abort; /* target does not know cmd so abort */
105 u32 seqr_io_retry; /* SEQR failed so retry IO */
106
107 u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
108 u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
109 u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
110
111 u32 fcp_data_lost; /* fcp data lost */
112
113 u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
114 u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
115 u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
116
117 u32 io_abort_timeout; /* ABTS timedout */
118 u32 sler_initiated; /* SLER initiated */
119
120 u32 unexp_fcp_rsp; /* fcp response in wrong state */
121
122 u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
123 u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
124 u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
125 u32 fcp_rsp_resid_inval; /* invalid residue */
126 u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
127 u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
128 u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
129 u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
130 u32 fcp_conf_req; /* FCP conf requested */
131
132 u32 tgt_aborted_io; /* target initiated abort */
133
134 u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
135 u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
136 u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
137 u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
138 u32 ioh_hit_class2_event; /* IOH hit class2 */
139 u32 ioh_miss_other_event; /* IOH miss other */
140 u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
141 u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
142 * bytes xfered */
143 u32 ioh_seq_len_err_event; /* IOH seq len error */
144 u32 ioh_data_oor_event; /* Data out of range */
145 u32 ioh_ro_ooo_event; /* Relative offset out of range */
146 u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
147 u32 ioh_unexp_frame_event; /* unexpected frame recieved
148 * count */
149 u32 ioh_err_int; /* IOH error int during data-phase
150 * for scsi write
151 */
152};
153
154/**
155 * IOC port firmware stats
156 */
157
158struct bfa_fw_port_fpg_stats_s {
159 u32 intr_evt;
160 u32 intr;
161 u32 intr_excess;
162 u32 intr_cause0;
163 u32 intr_other;
164 u32 intr_other_ign;
165 u32 sig_lost;
166 u32 sig_regained;
167 u32 sync_lost;
168 u32 sync_to;
169 u32 sync_regained;
170 u32 div2_overflow;
171 u32 div2_underflow;
172 u32 efifo_overflow;
173 u32 efifo_underflow;
174 u32 idle_rx;
175 u32 lrr_rx;
176 u32 lr_rx;
177 u32 ols_rx;
178 u32 nos_rx;
179 u32 lip_rx;
180 u32 arbf0_rx;
181 u32 arb_rx;
182 u32 mrk_rx;
183 u32 const_mrk_rx;
184 u32 prim_unknown;
185};
186
187
188struct bfa_fw_port_lksm_stats_s {
189 u32 hwsm_success; /* hwsm state machine success */
190 u32 hwsm_fails; /* hwsm fails */
191 u32 hwsm_wdtov; /* hwsm timed out */
192 u32 swsm_success; /* swsm success */
193 u32 swsm_fails; /* swsm fails */
194 u32 swsm_wdtov; /* swsm timed out */
195 u32 busybufs; /* link init failed due to busybuf */
196 u32 buf_waits; /* bufwait state entries */
197 u32 link_fails; /* link failures */
198 u32 psp_errors; /* primitive sequence protocol errors */
199 u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
200 u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
201 u32 lr_tx; /* No. of times LR tx started */
202 u32 lrr_tx; /* No. of times LRR tx started */
203 u32 ols_tx; /* No. of times OLS tx started */
204 u32 nos_tx; /* No. of times NOS tx started */
205 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
206 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
207};
208
209
210struct bfa_fw_port_snsm_stats_s {
211 u32 hwsm_success; /* Successful hwsm terminations */
212 u32 hwsm_fails; /* hwsm fail count */
213 u32 hwsm_wdtov; /* hwsm timed out */
214 u32 swsm_success; /* swsm success */
215 u32 swsm_wdtov; /* swsm timed out */
216 u32 error_resets; /* error resets initiated by upsm */
217 u32 sync_lost; /* Sync loss count */
218 u32 sig_lost; /* Signal loss count */
219};
220
221
222struct bfa_fw_port_physm_stats_s {
223 u32 module_inserts; /* Module insert count */
224 u32 module_xtracts; /* Module extracts count */
225 u32 module_invalids; /* Invalid module inserted count */
226 u32 module_read_ign; /* Module validation status ignored */
227 u32 laser_faults; /* Laser fault count */
228 u32 rsvd;
229};
230
231
232struct bfa_fw_fip_stats_s {
233 u32 vlan_req; /* vlan discovery requests */
234 u32 vlan_notify; /* vlan notifications */
235 u32 vlan_err; /* vlan response error */
236 u32 vlan_timeouts; /* vlan disvoery timeouts */
237 u32 vlan_invalids; /* invalid vlan in discovery advert. */
238 u32 disc_req; /* Discovery solicit requests */
239 u32 disc_rsp; /* Discovery solicit response */
240 u32 disc_err; /* Discovery advt. parse errors */
241 u32 disc_unsol; /* Discovery unsolicited */
242 u32 disc_timeouts; /* Discovery timeouts */
243 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
244 u32 linksvc_unsupp; /* Unsupported link service req */
245 u32 linksvc_err; /* Parse error in link service req */
246 u32 logo_req; /* FIP logos received */
247 u32 clrvlink_req; /* Clear virtual link req */
248 u32 op_unsupp; /* Unsupported FIP operation */
249 u32 untagged; /* Untagged frames (ignored) */
250 u32 invalid_version; /*!< Invalid FIP version */
251};
252
253
254struct bfa_fw_lps_stats_s {
255 u32 mac_invalids; /* Invalid mac assigned */
256 u32 rsvd;
257};
258
259
260struct bfa_fw_fcoe_stats_s {
261 u32 cee_linkups; /* CEE link up count */
262 u32 cee_linkdns; /* CEE link down count */
263 u32 fip_linkups; /* FIP link up count */
264 u32 fip_linkdns; /* FIP link up count */
265 u32 fip_fails; /* FIP fail count */
266 u32 mac_invalids; /* Invalid mac assigned */
267};
268
269/**
270 * IOC firmware FCoE port stats
271 */
272struct bfa_fw_fcoe_port_stats_s {
273 struct bfa_fw_fcoe_stats_s fcoe_stats;
274 struct bfa_fw_fip_stats_s fip_stats;
275};
276
277/**
278 * IOC firmware FC port stats
279 */
280struct bfa_fw_fc_port_stats_s {
281 struct bfa_fw_port_fpg_stats_s fpg_stats;
282 struct bfa_fw_port_physm_stats_s physm_stats;
283 struct bfa_fw_port_snsm_stats_s snsm_stats;
284 struct bfa_fw_port_lksm_stats_s lksm_stats;
285};
286
287/**
288 * IOC firmware FC port stats
289 */
290union bfa_fw_port_stats_s {
291 struct bfa_fw_fc_port_stats_s fc_stats;
292 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
293};
294
295/**
296 * IOC firmware stats
297 */
298struct bfa_fw_stats_s {
299 struct bfa_fw_ioc_stats_s ioc_stats;
300 struct bfa_fw_io_stats_s io_stats;
301 union bfa_fw_port_stats_s port_stats;
302};
303
304/**
305 * IOC statistics
306 */
307struct bfa_iocfc_stats_s {
308 struct bfa_fw_stats_s fw_stats; /* firmware IOC stats */
309};
310
311/**
312 * IOC attributes returned in queries
313 */
314struct bfa_iocfc_attr_s {
315 struct bfa_iocfc_cfg_s config; /* IOCFC config */
316 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
317};
318
319#define BFA_IOCFC_PATHTOV_MAX 60
320#define BFA_IOCFC_QDEPTH_MAX 2000
321
322#endif /* __BFA_DEFS_IOC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
deleted file mode 100644
index 7cb63ea98f38..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_IPFC_H__
18#define __BFA_DEFS_IPFC_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22#include <defs/bfa_defs_types.h>
23
24/**
25 * FCS ip remote port states
26 */
27enum bfa_iprp_state {
28 BFA_IPRP_UNINIT = 0, /* PORT is not yet initialized */
29 BFA_IPRP_ONLINE = 1, /* process login is complete */
30 BFA_IPRP_OFFLINE = 2, /* iprp is offline */
31};
32
33/**
34 * FCS remote port statistics
35 */
36struct bfa_iprp_stats_s {
37 u32 offlines;
38 u32 onlines;
39 u32 rscns;
40 u32 plogis;
41 u32 logos;
42 u32 plogi_timeouts;
43 u32 plogi_rejects;
44};
45
46/**
47 * FCS iprp attribute returned in queries
48 */
49struct bfa_iprp_attr_s {
50 enum bfa_iprp_state state;
51};
52
53struct bfa_ipfc_stats_s {
54 u32 arp_sent;
55 u32 arp_recv;
56 u32 arp_reply_sent;
57 u32 arp_reply_recv;
58 u32 farp_sent;
59 u32 farp_recv;
60 u32 farp_reply_sent;
61 u32 farp_reply_recv;
62 u32 farp_reject_sent;
63 u32 farp_reject_recv;
64};
65
66struct bfa_ipfc_attr_s {
67 bfa_boolean_t enabled;
68};
69
70#endif /* __BFA_DEFS_IPFC_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
deleted file mode 100644
index d77788b3999a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_ITNIM_H__
18#define __BFA_DEFS_ITNIM_H__
19
20#include <bfa_os_inc.h>
21#include <protocol/types.h>
22
23/**
24 * FCS itnim states
25 */
26enum bfa_itnim_state {
27 BFA_ITNIM_OFFLINE = 0, /* offline */
28 BFA_ITNIM_PRLI_SEND = 1, /* prli send */
29 BFA_ITNIM_PRLI_SENT = 2, /* prli sent */
30 BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */
31 BFA_ITNIM_HCB_ONLINE = 4, /* online callback */
32 BFA_ITNIM_ONLINE = 5, /* online */
33 BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */
34 BFA_ITNIM_INITIATIOR = 7, /* initiator */
35};
36
37struct bfa_itnim_latency_s {
38 u32 min;
39 u32 max;
40 u32 count;
41 u32 clock_res;
42 u32 avg;
43 u32 rsvd;
44};
45
46struct bfa_itnim_hal_stats_s {
47 u32 onlines; /* ITN nexus onlines (PRLI done) */
48 u32 offlines; /* ITN Nexus offlines */
49 u32 creates; /* ITN create requests */
50 u32 deletes; /* ITN delete requests */
51 u32 create_comps; /* ITN create completions */
52 u32 delete_comps; /* ITN delete completions */
53 u32 sler_events; /* SLER (sequence level error
54 * recovery) events */
55 u32 ioc_disabled; /* Num IOC disables */
56 u32 cleanup_comps; /* ITN cleanup completions */
57 u32 tm_cmnds; /* task management(TM) cmnds sent */
58 u32 tm_fw_rsps; /* TM cmds firmware responses */
59 u32 tm_success; /* TM successes */
60 u32 tm_failures; /* TM failures */
61 u32 tm_io_comps; /* TM IO completions */
62 u32 tm_qresumes; /* TM queue resumes (after waiting
63 * for resources)
64 */
65 u32 tm_iocdowns; /* TM cmnds affected by IOC down */
66 u32 tm_cleanups; /* TM cleanups */
67 u32 tm_cleanup_comps;
68 /* TM cleanup completions */
69 u32 ios; /* IO requests */
70 u32 io_comps; /* IO completions */
71 u64 input_reqs; /* INPUT requests */
72 u64 output_reqs; /* OUTPUT requests */
73};
74
75/**
76 * FCS remote port statistics
77 */
78struct bfa_itnim_stats_s {
79 u32 onlines; /* num rport online */
80 u32 offlines; /* num rport offline */
81 u32 prli_sent; /* num prli sent out */
82 u32 fcxp_alloc_wait;/* num fcxp alloc waits */
83 u32 prli_rsp_err; /* num prli rsp errors */
84 u32 prli_rsp_acc; /* num prli rsp accepts */
85 u32 initiator; /* rport is an initiator */
86 u32 prli_rsp_parse_err; /* prli rsp parsing errors */
87 u32 prli_rsp_rjt; /* num prli rsp rejects */
88 u32 timeout; /* num timeouts detected */
89 u32 sler; /* num sler notification from BFA */
90 u32 rsvd;
91 struct bfa_itnim_hal_stats_s hal_stats;
92};
93
94/**
95 * FCS itnim attributes returned in queries
96 */
97struct bfa_itnim_attr_s {
98 enum bfa_itnim_state state; /* FCS itnim state */
99 u8 retry; /* data retransmision support */
100 u8 task_retry_id; /* task retry ident support */
101 u8 rec_support; /* REC supported */
102 u8 conf_comp; /* confirmed completion supp */
103 struct bfa_itnim_latency_s io_latency; /* IO latency */
104};
105
106/**
107 * BFA ITNIM events.
108 * Arguments below are in BFAL context from Mgmt
109 * BFA_ITNIM_AEN_NEW: [in]: None [out]: vf_id, lpwwn
110 * BFA_ITNIM_AEN_DELETE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
111 * [out]: vf_id, ppwwn, lpwwn, rpwwn
112 * BFA_ITNIM_AEN_ONLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
113 * [out]: vf_id, ppwwn, lpwwn, rpwwn
114 * BFA_ITNIM_AEN_OFFLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
115 * [out]: vf_id, ppwwn, lpwwn, rpwwn
116 * BFA_ITNIM_AEN_DISCONNECT:[in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets),
117 * [out]: vf_id, ppwwn, lpwwn, rpwwn
118 */
119enum bfa_itnim_aen_event {
120 BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
121 BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
122 BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
123};
124
125/**
126 * BFA ITNIM event data structure.
127 */
128struct bfa_itnim_aen_data_s {
129 u16 vf_id; /* vf_id of the IT nexus */
130 u16 rsvd[3];
131 wwn_t ppwwn; /* WWN of its physical port */
132 wwn_t lpwwn; /* WWN of logical port */
133 wwn_t rpwwn; /* WWN of remote(target) port */
134};
135
136#endif /* __BFA_DEFS_ITNIM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_led.h b/drivers/scsi/bfa/include/defs/bfa_defs_led.h
deleted file mode 100644
index 62039273264e..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_led.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LED_H__
19#define __BFA_DEFS_LED_H__
20
21#define BFA_LED_MAX_NUM 3
22
23enum bfa_led_op {
24 BFA_LED_OFF = 0,
25 BFA_LED_ON = 1,
26 BFA_LED_FLICK = 2,
27 BFA_LED_BLINK = 3,
28};
29
30enum bfa_led_color {
31 BFA_LED_GREEN = 0,
32 BFA_LED_AMBER = 1,
33};
34
35#endif /* __BFA_DEFS_LED_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
deleted file mode 100644
index 0952a139c47c..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_LPORT_H__
19#define __BFA_DEFS_LPORT_H__
20
21#include <defs/bfa_defs_types.h>
22#include <defs/bfa_defs_port.h>
23
24/**
25 * BFA AEN logical port events.
26 * Arguments below are in BFAL context from Mgmt
27 * BFA_LPORT_AEN_NEW: [in]: None [out]: vf_id, ppwwn, lpwwn, roles
28 * BFA_LPORT_AEN_DELETE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
29 * BFA_LPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
30 * BFA_LPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
31 * BFA_LPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
32 * BFA_LPORT_AEN_NEW_PROP: [in]: None [out]: vf_id, ppwwn. lpwwn, roles
33 * BFA_LPORT_AEN_DELETE_PROP: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
34 * BFA_LPORT_AEN_NEW_STANDARD: [in]: None [out]: vf_id, ppwwn. lpwwn, roles
35 * BFA_LPORT_AEN_DELETE_STANDARD: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
36 * BFA_LPORT_AEN_NPIV_DUP_WWN: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
37 * BFA_LPORT_AEN_NPIV_FABRIC_MAX: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
38 * BFA_LPORT_AEN_NPIV_UNKNOWN: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles
39 */
40enum bfa_lport_aen_event {
41 BFA_LPORT_AEN_NEW = 1, /* LPort created event */
42 BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
43 BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
44 BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
45 BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
46 BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
47 BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
48 BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
49 BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
50 BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort configured with
51 * duplicate WWN event
52 */
53 BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
54 BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code event */
55};
56
57/**
58 * BFA AEN event data structure
59 */
60struct bfa_lport_aen_data_s {
61 u16 vf_id; /* vf_id of this logical port */
62 s16 roles; /* Logical port mode,IM/TM/IP etc */
63 u32 rsvd;
64 wwn_t ppwwn; /* WWN of its physical port */
65 wwn_t lpwwn; /* WWN of this logical port */
66};
67
68#endif /* __BFA_DEFS_LPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
deleted file mode 100644
index d22fb7909643..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_MFG_H__
18#define __BFA_DEFS_MFG_H__
19
20#include <bfa_os_inc.h>
21
22/**
23 * Manufacturing block version
24 */
25#define BFA_MFG_VERSION 2
26
27/**
28 * Manufacturing block encrypted version
29 */
30#define BFA_MFG_ENC_VER 2
31
32/**
33 * Manufacturing block version 1 length
34 */
35#define BFA_MFG_VER1_LEN 128
36
37/**
38 * Manufacturing block header length
39 */
40#define BFA_MFG_HDR_LEN 4
41
42/**
43 * Checksum size
44 */
45#define BFA_MFG_CHKSUM_SIZE 16
46
47/**
48 * Manufacturing block format
49 */
50#define BFA_MFG_SERIALNUM_SIZE 11
51#define BFA_MFG_PARTNUM_SIZE 14
52#define BFA_MFG_SUPPLIER_ID_SIZE 10
53#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
54#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
55#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
56#define STRSZ(_n) (((_n) + 4) & ~3)
57
58/**
59 * Manufacturing card type
60 */
61enum {
62 BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
63 BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
64 BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
65 BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
66 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
67 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
68 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
69 BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
70 BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
71 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
72};
73
74#pragma pack(1)
75
76/**
77 * Card type to port number conversion
78 */
79#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
80
81/**
82 * Check if Mezz card
83 */
84#define bfa_mfg_is_mezz(type) (( \
85 (type) == BFA_MFG_TYPE_JAYHAWK || \
86 (type) == BFA_MFG_TYPE_WANCHESE))
87
88/**
89 * Check if card type valid
90 */
91#define bfa_mfg_is_card_type_valid(type) (( \
92 (type) == BFA_MFG_TYPE_FC8P2 || \
93 (type) == BFA_MFG_TYPE_FC8P1 || \
94 (type) == BFA_MFG_TYPE_FC4P2 || \
95 (type) == BFA_MFG_TYPE_FC4P1 || \
96 (type) == BFA_MFG_TYPE_CNA10P2 || \
97 (type) == BFA_MFG_TYPE_CNA10P1 || \
98 bfa_mfg_is_mezz(type)))
99
100/**
101 * All numerical fields are in big-endian format.
102 */
103struct bfa_mfg_block_s {
104};
105
106/**
107 * VPD data length
108 */
109#define BFA_MFG_VPD_LEN 512
110
111#define BFA_MFG_VPD_PCI_HDR_OFF 137
112#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */
113#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */
114
115/**
116 * VPD vendor tag
117 */
118enum {
119 BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
120 BFA_MFG_VPD_IBM = 1, /* vendor IBM */
121 BFA_MFG_VPD_HP = 2, /* vendor HP */
122 BFA_MFG_VPD_DELL = 3, /* vendor DELL */
123 BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
124 BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
125 BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
126 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
127};
128
129/**
130 * All numerical fields are in big-endian format.
131 */
132struct bfa_mfg_vpd_s {
133 u8 version; /* vpd data version */
134 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
135 u8 chksum; /* u8 checksum */
136 u8 vendor; /* vendor */
137 u8 len; /* vpd data length excluding header */
138 u8 rsv;
139 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
140};
141
142#pragma pack()
143
144#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
deleted file mode 100644
index ea7d89bbc0bb..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PCI_H__
19#define __BFA_DEFS_PCI_H__
20
21/**
22 * PCI device and vendor ID information
23 */
24enum {
25 BFA_PCI_VENDOR_ID_BROCADE = 0x1657,
26 BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
27 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
28 BFA_PCI_DEVICE_ID_CT = 0x14,
29 BFA_PCI_DEVICE_ID_CT_FC = 0x21,
30};
31
32#define bfa_asic_id_ct(devid) \
33 ((devid) == BFA_PCI_DEVICE_ID_CT || \
34 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
35
36/**
37 * PCI sub-system device and vendor ID information
38 */
39enum {
40 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
41};
42
43/**
44 * Maximum number of device address ranges mapped through different BAR(s)
45 */
46#define BFA_PCI_ACCESS_RANGES 1
47
48#endif /* __BFA_DEFS_PCI_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h b/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
deleted file mode 100644
index e8d6d959006e..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PM_H__
19#define __BFA_DEFS_PM_H__
20
21#include <bfa_os_inc.h>
22
23/**
24 * BFA power management device states
25 */
26enum bfa_pm_ds {
27 BFA_PM_DS_D0 = 0, /* full power mode */
28 BFA_PM_DS_D1 = 1, /* power save state 1 */
29 BFA_PM_DS_D2 = 2, /* power save state 2 */
30 BFA_PM_DS_D3 = 3, /* power off state */
31};
32
33#endif /* __BFA_DEFS_PM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h b/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
deleted file mode 100644
index d9fa278472b7..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_POM_H__
18#define __BFA_DEFS_POM_H__
19
20#include <bfa_os_inc.h>
21#include <defs/bfa_defs_types.h>
22
23/**
24 * POM health status levels for each attributes.
25 */
26enum bfa_pom_entry_health {
27 BFA_POM_HEALTH_NOINFO = 1, /* no information */
28 BFA_POM_HEALTH_NORMAL = 2, /* health is normal */
29 BFA_POM_HEALTH_WARNING = 3, /* warning level */
30 BFA_POM_HEALTH_ALARM = 4, /* alarming level */
31};
32
33/**
34 * Reading of temperature/voltage/current/power
35 */
36struct bfa_pom_entry_s {
37 enum bfa_pom_entry_health health; /* POM entry health */
38 u32 curr_value; /* current value */
39 u32 thr_warn_high; /* threshold warning high */
40 u32 thr_warn_low; /* threshold warning low */
41 u32 thr_alarm_low; /* threshold alaram low */
42 u32 thr_alarm_high; /* threshold alarm high */
43};
44
45/**
46 * POM attributes
47 */
48struct bfa_pom_attr_s {
49 struct bfa_pom_entry_s temperature; /* centigrade */
50 struct bfa_pom_entry_s voltage; /* volts */
51 struct bfa_pom_entry_s curr; /* milli amps */
52 struct bfa_pom_entry_s txpower; /* micro watts */
53 struct bfa_pom_entry_s rxpower; /* micro watts */
54};
55
56#endif /* __BFA_DEFS_POM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
deleted file mode 100644
index ebdf0d1731a4..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PORT_H__
19#define __BFA_DEFS_PORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_ioc.h>
25
26#define BFA_FCS_FABRIC_IPADDR_SZ 16
27
28/**
29 * symbolic names for base port/virtual port
30 */
31#define BFA_SYMNAME_MAXLEN 128 /* vmware/windows uses 128 bytes */
32struct bfa_port_symname_s {
33 char symname[BFA_SYMNAME_MAXLEN];
34};
35
36/**
37* Roles of FCS port:
38 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
39 * - Create multiple ports if both IM and TM functions required.
40 * - Atleast one role must be specified.
41 */
42enum bfa_port_role {
43 BFA_PORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
44 BFA_PORT_ROLE_FCP_TM = 0x02, /* FCP target role */
45 BFA_PORT_ROLE_FCP_IPFC = 0x04, /* IP over FC role */
46 BFA_PORT_ROLE_FCP_MAX = BFA_PORT_ROLE_FCP_IPFC | BFA_PORT_ROLE_FCP_IM
47};
48
49/**
50 * FCS port configuration.
51 */
52struct bfa_port_cfg_s {
53 wwn_t pwwn; /* port wwn */
54 wwn_t nwwn; /* node wwn */
55 struct bfa_port_symname_s sym_name; /* vm port symbolic name */
56 bfa_boolean_t preboot_vp; /* vport created from PBC */
57 enum bfa_port_role roles; /* FCS port roles */
58 u8 tag[16]; /* opaque tag from application */
59};
60
61/**
62 * FCS port states
63 */
64enum bfa_port_state {
65 BFA_PORT_UNINIT = 0, /* PORT is not yet initialized */
66 BFA_PORT_FDISC = 1, /* FDISC is in progress */
67 BFA_PORT_ONLINE = 2, /* login to fabric is complete */
68 BFA_PORT_OFFLINE = 3, /* No login to fabric */
69};
70
71/**
72 * FCS port type. Required for VmWare.
73 */
74enum bfa_port_type {
75 BFA_PORT_TYPE_PHYSICAL = 0,
76 BFA_PORT_TYPE_VIRTUAL,
77};
78
79/**
80 * FCS port offline reason. Required for VmWare.
81 */
82enum bfa_port_offline_reason {
83 BFA_PORT_OFFLINE_UNKNOWN = 0,
84 BFA_PORT_OFFLINE_LINKDOWN,
85 BFA_PORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
86 * fabric */
87 BFA_PORT_OFFLINE_FAB_NORESOURCES,
88 BFA_PORT_OFFLINE_FAB_LOGOUT,
89};
90
91/**
92 * FCS lport info. Required for VmWare.
93 */
94struct bfa_port_info_s {
95 u8 port_type; /* bfa_port_type_t : physical or
96 * virtual */
97 u8 port_state; /* one of bfa_port_state values */
98 u8 offline_reason; /* one of bfa_port_offline_reason_t
99 * values */
100 wwn_t port_wwn;
101 wwn_t node_wwn;
102
103 /*
104 * following 4 feilds are valid for Physical Ports only
105 */
106 u32 max_vports_supp; /* Max supported vports */
107 u32 num_vports_inuse; /* Num of in use vports */
108 u32 max_rports_supp; /* Max supported rports */
109 u32 num_rports_inuse; /* Num of doscovered rports */
110
111};
112
113/**
114 * FCS port statistics
115 */
116struct bfa_port_stats_s {
117 u32 ns_plogi_sent;
118 u32 ns_plogi_rsp_err;
119 u32 ns_plogi_acc_err;
120 u32 ns_plogi_accepts;
121 u32 ns_rejects; /* NS command rejects */
122 u32 ns_plogi_unknown_rsp;
123 u32 ns_plogi_alloc_wait;
124
125 u32 ns_retries; /* NS command retries */
126 u32 ns_timeouts; /* NS command timeouts */
127
128 u32 ns_rspnid_sent;
129 u32 ns_rspnid_accepts;
130 u32 ns_rspnid_rsp_err;
131 u32 ns_rspnid_rejects;
132 u32 ns_rspnid_alloc_wait;
133
134 u32 ns_rftid_sent;
135 u32 ns_rftid_accepts;
136 u32 ns_rftid_rsp_err;
137 u32 ns_rftid_rejects;
138 u32 ns_rftid_alloc_wait;
139
140 u32 ns_rffid_sent;
141 u32 ns_rffid_accepts;
142 u32 ns_rffid_rsp_err;
143 u32 ns_rffid_rejects;
144 u32 ns_rffid_alloc_wait;
145
146 u32 ns_gidft_sent;
147 u32 ns_gidft_accepts;
148 u32 ns_gidft_rsp_err;
149 u32 ns_gidft_rejects;
150 u32 ns_gidft_unknown_rsp;
151 u32 ns_gidft_alloc_wait;
152
153 /*
154 * Mgmt Server stats
155 */
156 u32 ms_retries; /* MS command retries */
157 u32 ms_timeouts; /* MS command timeouts */
158 u32 ms_plogi_sent;
159 u32 ms_plogi_rsp_err;
160 u32 ms_plogi_acc_err;
161 u32 ms_plogi_accepts;
162 u32 ms_rejects; /* MS command rejects */
163 u32 ms_plogi_unknown_rsp;
164 u32 ms_plogi_alloc_wait;
165
166 u32 num_rscn; /* Num of RSCN received */
167 u32 num_portid_rscn;/* Num portid format RSCN
168 * received */
169
170 u32 uf_recvs; /* unsolicited recv frames */
171 u32 uf_recv_drops; /* dropped received frames */
172
173 u32 rsvd; /* padding for 64 bit alignment */
174};
175
176/**
177 * BFA port attribute returned in queries
178 */
179struct bfa_port_attr_s {
180 enum bfa_port_state state; /* port state */
181 u32 pid; /* port ID */
182 struct bfa_port_cfg_s port_cfg; /* port configuration */
183 enum bfa_pport_type port_type; /* current topology */
184 u32 loopback; /* cable is externally looped back */
185 wwn_t fabric_name; /* attached switch's nwwn */
186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
187 * fabric's ip addr */
188 struct mac_s fpma_mac; /* Lport's FPMA Mac address */
189 u16 authfail; /* auth failed state */
190};
191
192/**
193 * BFA physical port Level events
194 * Arguments below are in BFAL context from Mgmt
195 * BFA_PORT_AEN_ONLINE: [in]: pwwn [out]: pwwn
196 * BFA_PORT_AEN_OFFLINE: [in]: pwwn [out]: pwwn
197 * BFA_PORT_AEN_RLIR: [in]: None [out]: pwwn, rlir_data, rlir_len
198 * BFA_PORT_AEN_SFP_INSERT: [in]: pwwn [out]: port_id, pwwn
199 * BFA_PORT_AEN_SFP_REMOVE: [in]: pwwn [out]: port_id, pwwn
200 * BFA_PORT_AEN_SFP_POM: [in]: pwwn [out]: level, port_id, pwwn
201 * BFA_PORT_AEN_ENABLE: [in]: pwwn [out]: pwwn
202 * BFA_PORT_AEN_DISABLE: [in]: pwwn [out]: pwwn
203 * BFA_PORT_AEN_AUTH_ON: [in]: pwwn [out]: pwwn
204 * BFA_PORT_AEN_AUTH_OFF: [in]: pwwn [out]: pwwn
205 * BFA_PORT_AEN_DISCONNECT: [in]: pwwn [out]: pwwn
206 * BFA_PORT_AEN_QOS_NEG: [in]: pwwn [out]: pwwn
207 * BFA_PORT_AEN_FABRIC_NAME_CHANGE: [in]: pwwn, [out]: pwwn, fwwn
208 *
209 */
210enum bfa_port_aen_event {
211 BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
212 BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
213 BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
214 BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
215 BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
216 BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
217 BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
218 BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
219 BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
220 BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
221 BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
222 BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
223 BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change
224 * event */
225 BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
226 BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
227};
228
229enum bfa_port_aen_sfp_pom {
230 BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
231 BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
232 BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
233 BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
234};
235
236struct bfa_port_aen_data_s {
237 wwn_t pwwn; /* WWN of the physical port */
238 wwn_t fwwn; /* WWN of the fabric port */
239 s32 phy_port_num; /*! For SFP related events */
240 s16 ioc_type;
241 s16 level; /* Only transitions will
242 * be informed */
243 struct mac_s mac; /* MAC address of the ethernet port,
244 * applicable to CNA port only */
245 s16 rsvd;
246};
247
248#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
deleted file mode 100644
index 2de675839c2f..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ /dev/null
@@ -1,393 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_PPORT_H__
19#define __BFA_DEFS_PPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/fc.h>
23#include <defs/bfa_defs_types.h>
24#include <defs/bfa_defs_qos.h>
25#include <cna/pstats/phyport_defs.h>
26
27/* Modify char* port_stt[] in bfal_port.c if a new state was added */
28enum bfa_pport_states {
29 BFA_PPORT_ST_UNINIT = 1,
30 BFA_PPORT_ST_ENABLING_QWAIT = 2,
31 BFA_PPORT_ST_ENABLING = 3,
32 BFA_PPORT_ST_LINKDOWN = 4,
33 BFA_PPORT_ST_LINKUP = 5,
34 BFA_PPORT_ST_DISABLING_QWAIT = 6,
35 BFA_PPORT_ST_DISABLING = 7,
36 BFA_PPORT_ST_DISABLED = 8,
37 BFA_PPORT_ST_STOPPED = 9,
38 BFA_PPORT_ST_IOCDOWN = 10,
39 BFA_PPORT_ST_IOCDIS = 11,
40 BFA_PPORT_ST_FWMISMATCH = 12,
41 BFA_PPORT_ST_PREBOOT_DISABLED = 13,
42 BFA_PPORT_ST_MAX_STATE,
43};
44
45/**
46 * Port speed settings. Each specific speed is a bit field. Use multiple
47 * bits to specify speeds to be selected for auto-negotiation.
48 */
49enum bfa_pport_speed {
50 BFA_PPORT_SPEED_UNKNOWN = 0,
51 BFA_PPORT_SPEED_1GBPS = 1,
52 BFA_PPORT_SPEED_2GBPS = 2,
53 BFA_PPORT_SPEED_4GBPS = 4,
54 BFA_PPORT_SPEED_8GBPS = 8,
55 BFA_PPORT_SPEED_10GBPS = 10,
56 BFA_PPORT_SPEED_AUTO =
57 (BFA_PPORT_SPEED_1GBPS | BFA_PPORT_SPEED_2GBPS |
58 BFA_PPORT_SPEED_4GBPS | BFA_PPORT_SPEED_8GBPS),
59};
60
61/**
62 * Port operational type (in sync with SNIA port type).
63 */
64enum bfa_pport_type {
65 BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */
66 BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */
67 BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */
68 BFA_PPORT_TYPE_NLPORT = 6, /* public loop */
69 BFA_PPORT_TYPE_LPORT = 20, /* private loop */
70 BFA_PPORT_TYPE_P2P = 21, /* P2P with no switched fabric */
71 BFA_PPORT_TYPE_VPORT = 22, /* NPIV - virtual port */
72};
73
74/**
75 * Port topology setting. A port's topology and fabric login status
76 * determine its operational type.
77 */
78enum bfa_pport_topology {
79 BFA_PPORT_TOPOLOGY_NONE = 0, /* No valid topology */
80 BFA_PPORT_TOPOLOGY_P2P = 1, /* P2P only */
81 BFA_PPORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
82 BFA_PPORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
83};
84
85/**
86 * Physical port loopback types.
87 */
88enum bfa_pport_opmode {
89 BFA_PPORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
90 BFA_PPORT_OPMODE_LB_INT = 0x01, /* internal loop back */
91 BFA_PPORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
92 BFA_PPORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
93 BFA_PPORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
94 BFA_PPORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
95};
96
97#define BFA_PPORT_OPMODE_LB_HARD(_mode) \
98 ((_mode == BFA_PPORT_OPMODE_LB_INT) || \
99 (_mode == BFA_PPORT_OPMODE_LB_SLW) || \
100 (_mode == BFA_PPORT_OPMODE_LB_EXT))
101
102/**
103 Port State (in sync with SNIA port state).
104 */
105enum bfa_pport_snia_state {
106 BFA_PPORT_STATE_UNKNOWN = 1, /* port is not initialized */
107 BFA_PPORT_STATE_ONLINE = 2, /* port is ONLINE */
108 BFA_PPORT_STATE_DISABLED = 3, /* port is disabled by user */
109 BFA_PPORT_STATE_BYPASSED = 4, /* port is bypassed (in LOOP) */
110 BFA_PPORT_STATE_DIAG = 5, /* port diagnostics is active */
111 BFA_PPORT_STATE_LINKDOWN = 6, /* link is down */
112 BFA_PPORT_STATE_LOOPBACK = 8, /* port is looped back */
113};
114
115/**
116 * Port link state
117 */
118enum bfa_pport_linkstate {
119 BFA_PPORT_LINKUP = 1, /* Physical port/Trunk link up */
120 BFA_PPORT_LINKDOWN = 2, /* Physical port/Trunk link down */
121 BFA_PPORT_TRUNK_LINKDOWN = 3, /* Trunk link down (new tmaster) */
122};
123
124/**
125 * Port link state event
126 */
127#define bfa_pport_event_t enum bfa_pport_linkstate
128
129/**
130 * Port link state reason code
131 */
132enum bfa_pport_linkstate_rsn {
133 BFA_PPORT_LINKSTATE_RSN_NONE = 0,
134 BFA_PPORT_LINKSTATE_RSN_DISABLED = 1,
135 BFA_PPORT_LINKSTATE_RSN_RX_NOS = 2,
136 BFA_PPORT_LINKSTATE_RSN_RX_OLS = 3,
137 BFA_PPORT_LINKSTATE_RSN_RX_LIP = 4,
138 BFA_PPORT_LINKSTATE_RSN_RX_LIPF7 = 5,
139 BFA_PPORT_LINKSTATE_RSN_SFP_REMOVED = 6,
140 BFA_PPORT_LINKSTATE_RSN_PORT_FAULT = 7,
141 BFA_PPORT_LINKSTATE_RSN_RX_LOS = 8,
142 BFA_PPORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
143 BFA_PPORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
144 BFA_PPORT_LINKSTATE_RSN_TIMEOUT = 11,
145
146
147
148 /* CEE related reason codes/errors */
149 CEE_LLDP_INFO_AGED_OUT = 20,
150 CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
151 CEE_PEER_NOT_ADVERTISE_DCBX = 22,
152 CEE_PEER_NOT_ADVERTISE_PG = 23,
153 CEE_PEER_NOT_ADVERTISE_PFC = 24,
154 CEE_PEER_NOT_ADVERTISE_FCOE = 25,
155 CEE_PG_NOT_COMPATIBLE = 26,
156 CEE_PFC_NOT_COMPATIBLE = 27,
157 CEE_FCOE_NOT_COMPATIBLE = 28,
158 CEE_BAD_PG_RCVD = 29,
159 CEE_BAD_BW_RCVD = 30,
160 CEE_BAD_PFC_RCVD = 31,
161 CEE_BAD_FCOE_PRI_RCVD = 32,
162 CEE_FCOE_PRI_PFC_OFF = 33,
163 CEE_DUP_CONTROL_TLV_RCVD = 34,
164 CEE_DUP_FEAT_TLV_RCVD = 35,
165 CEE_APPLY_NEW_CFG = 36, /* reason, not an error */
166 CEE_PROTOCOL_INIT = 37, /* reason, not an error */
167 CEE_PHY_LINK_DOWN = 38,
168 CEE_LLS_FCOE_ABSENT = 39,
169 CEE_LLS_FCOE_DOWN = 40
170};
171
172/**
173 * Default Target Rate Limiting Speed.
174 */
175#define BFA_PPORT_DEF_TRL_SPEED BFA_PPORT_SPEED_1GBPS
176
177/**
178 * Physical port configuration
179 */
180struct bfa_pport_cfg_s {
181 u8 topology; /* bfa_pport_topology */
182 u8 speed; /* enum bfa_pport_speed */
183 u8 trunked; /* trunked or not */
184 u8 qos_enabled; /* qos enabled or not */
185 u8 trunk_ports; /* bitmap of trunked ports */
186 u8 cfg_hardalpa; /* is hard alpa configured */
187 u16 maxfrsize; /* maximum frame size */
188 u8 hardalpa; /* configured hard alpa */
189 u8 rx_bbcredit; /* receive buffer credits */
190 u8 tx_bbcredit; /* transmit buffer credits */
191 u8 ratelimit; /* ratelimit enabled or not */
192 u8 trl_def_speed; /* ratelimit default speed */
193 u8 rsvd[3];
194 u16 path_tov; /* device path timeout */
195 u16 q_depth; /* SCSI Queue depth */
196};
197
198/**
199 * Port attribute values.
200 */
201struct bfa_pport_attr_s {
202 /*
203 * Static fields
204 */
205 wwn_t nwwn; /* node wwn */
206 wwn_t pwwn; /* port wwn */
207 wwn_t factorynwwn; /* factory node wwn */
208 wwn_t factorypwwn; /* factory port wwn */
209 enum fc_cos cos_supported; /* supported class of services */
210 u32 rsvd;
211 struct fc_symname_s port_symname; /* port symbolic name */
212 enum bfa_pport_speed speed_supported; /* supported speeds */
213 bfa_boolean_t pbind_enabled; /* Will be set if Persistent binding
214 * enabled. Relevant only in Windows
215 */
216
217 /*
218 * Configured values
219 */
220 struct bfa_pport_cfg_s pport_cfg; /* pport cfg */
221
222 /*
223 * Dynamic field - info from BFA
224 */
225 enum bfa_pport_states port_state; /* current port state */
226 enum bfa_pport_speed speed; /* current speed */
227 enum bfa_pport_topology topology; /* current topology */
228 bfa_boolean_t beacon; /* current beacon status */
229 bfa_boolean_t link_e2e_beacon;/* set if link beacon on */
230 bfa_boolean_t plog_enabled; /* set if portlog is enabled*/
231
232 /*
233 * Dynamic field - info from FCS
234 */
235 u32 pid; /* port ID */
236 enum bfa_pport_type port_type; /* current topology */
237 u32 loopback; /* external loopback */
238 u32 authfail; /* auth fail state */
239 u32 rsvd2; /* padding for 64 bit */
240};
241
242/**
243 * FC Port statistics.
244 */
245struct bfa_pport_fc_stats_s {
246 u64 secs_reset; /* Seconds since stats is reset */
247 u64 tx_frames; /* Tx frames */
248 u64 tx_words; /* Tx words */
249 u64 tx_lip; /* Tx LIP */
250 u64 tx_nos; /* Tx NOS */
251 u64 tx_ols; /* Tx OLS */
252 u64 tx_lr; /* Tx LR */
253 u64 tx_lrr; /* Tx LRR */
254 u64 rx_frames; /* Rx frames */
255 u64 rx_words; /* Rx words */
256 u64 lip_count; /* Rx LIP */
257 u64 nos_count; /* Rx NOS */
258 u64 ols_count; /* Rx OLS */
259 u64 lr_count; /* Rx LR */
260 u64 lrr_count; /* Rx LRR */
261 u64 invalid_crcs; /* Rx CRC err frames */
262 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
263 u64 undersized_frm; /* Rx undersized frames */
264 u64 oversized_frm; /* Rx oversized frames */
265 u64 bad_eof_frm; /* Rx frames with bad EOF */
266 u64 error_frames; /* Errored frames */
267 u64 dropped_frames; /* Dropped frames */
268 u64 link_failures; /* Link Failure (LF) count */
269 u64 loss_of_syncs; /* Loss of sync count */
270 u64 loss_of_signals;/* Loss of signal count */
271 u64 primseq_errs; /* Primitive sequence protocol err. */
272 u64 bad_os_count; /* Invalid ordered sets */
273 u64 err_enc_out; /* Encoding err nonframe_8b10b */
274 u64 err_enc; /* Encoding err frame_8b10b */
275};
276
277/**
278 * Eth Port statistics.
279 */
280struct bfa_pport_eth_stats_s {
281 u64 secs_reset; /* Seconds since stats is reset */
282 u64 frame_64; /* Frames 64 bytes */
283 u64 frame_65_127; /* Frames 65-127 bytes */
284 u64 frame_128_255; /* Frames 128-255 bytes */
285 u64 frame_256_511; /* Frames 256-511 bytes */
286 u64 frame_512_1023; /* Frames 512-1023 bytes */
287 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
288 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
289 u64 tx_bytes; /* Tx bytes */
290 u64 tx_packets; /* Tx packets */
291 u64 tx_mcast_packets; /* Tx multicast packets */
292 u64 tx_bcast_packets; /* Tx broadcast packets */
293 u64 tx_control_frame; /* Tx control frame */
294 u64 tx_drop; /* Tx drops */
295 u64 tx_jabber; /* Tx jabber */
296 u64 tx_fcs_error; /* Tx FCS error */
297 u64 tx_fragments; /* Tx fragments */
298 u64 rx_bytes; /* Rx bytes */
299 u64 rx_packets; /* Rx packets */
300 u64 rx_mcast_packets; /* Rx multicast packets */
301 u64 rx_bcast_packets; /* Rx broadcast packets */
302 u64 rx_control_frames; /* Rx control frames */
303 u64 rx_unknown_opcode; /* Rx unknown opcode */
304 u64 rx_drop; /* Rx drops */
305 u64 rx_jabber; /* Rx jabber */
306 u64 rx_fcs_error; /* Rx FCS errors */
307 u64 rx_alignment_error; /* Rx alignment errors */
308 u64 rx_frame_length_error; /* Rx frame len errors */
309 u64 rx_code_error; /* Rx code errors */
310 u64 rx_fragments; /* Rx fragments */
311 u64 rx_pause; /* Rx pause */
312 u64 rx_zero_pause; /* Rx zero pause */
313 u64 tx_pause; /* Tx pause */
314 u64 tx_zero_pause; /* Tx zero pause */
315 u64 rx_fcoe_pause; /* Rx FCoE pause */
316 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
317 u64 tx_fcoe_pause; /* Tx FCoE pause */
318 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
319};
320
321/**
322 * Port statistics.
323 */
324union bfa_pport_stats_u {
325 struct bfa_pport_fc_stats_s fc;
326 struct bfa_pport_eth_stats_s eth;
327};
328
329/**
330 * Port FCP mappings.
331 */
332struct bfa_pport_fcpmap_s {
333 char osdevname[256];
334 u32 bus;
335 u32 target;
336 u32 oslun;
337 u32 fcid;
338 wwn_t nwwn;
339 wwn_t pwwn;
340 u64 fcplun;
341 char luid[256];
342};
343
344/**
345 * Port RNI */
346struct bfa_pport_rnid_s {
347 wwn_t wwn;
348 u32 unittype;
349 u32 portid;
350 u32 attached_nodes_num;
351 u16 ip_version;
352 u16 udp_port;
353 u8 ipaddr[16];
354 u16 rsvd;
355 u16 topologydiscoveryflags;
356};
357
358struct bfa_fcport_fcf_s {
359 wwn_t name; /* FCF name */
360 wwn_t fabric_name; /* Fabric Name */
361 u8 fipenabled; /* FIP enabled or not */
362 u8 fipfailed; /* FIP failed or not */
363 u8 resv[2];
364 u8 pri; /* FCF priority */
365 u8 version; /* FIP version used */
366 u8 available; /* Available for login */
367 u8 fka_disabled; /* FKA is disabled */
368 u8 maxsz_verified; /* FCoE max size verified */
369 u8 fc_map[3]; /* FC map */
370 u16 vlan; /* FCoE vlan tag/priority */
371 u32 fka_adv_per; /* FIP ka advert. period */
372 struct mac_s mac; /* FCF mac */
373};
374
375/**
376 * Link state information
377 */
378struct bfa_pport_link_s {
379 u8 linkstate; /* Link state bfa_pport_linkstate */
380 u8 linkstate_rsn; /* bfa_pport_linkstate_rsn_t */
381 u8 topology; /* P2P/LOOP bfa_pport_topology */
382 u8 speed; /* Link speed (1/2/4/8 G) */
383 u32 linkstate_opt; /* Linkstate optional data (debug) */
384 u8 trunked; /* Trunked or not (1 or 0) */
385 u8 resvd[3];
386 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
387 union {
388 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
389 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */
390 } vc_fcf;
391};
392
393#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h b/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
deleted file mode 100644
index aadbacd1d2d7..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_QOS_H__
19#define __BFA_DEFS_QOS_H__
20
21/**
22 * QoS states
23 */
24enum bfa_qos_state {
25 BFA_QOS_ONLINE = 1, /* QoS is online */
26 BFA_QOS_OFFLINE = 2, /* QoS is offline */
27};
28
29
30/**
31 * QoS Priority levels.
32 */
33enum bfa_qos_priority {
34 BFA_QOS_UNKNOWN = 0,
35 BFA_QOS_HIGH = 1, /* QoS Priority Level High */
36 BFA_QOS_MED = 2, /* QoS Priority Level Medium */
37 BFA_QOS_LOW = 3, /* QoS Priority Level Low */
38};
39
40
41/**
42 * QoS bandwidth allocation for each priority level
43 */
44enum bfa_qos_bw_alloc {
45 BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */
46 BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */
47 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
48};
49
50/**
51 * QoS attribute returned in QoS Query
52 */
53struct bfa_qos_attr_s {
54 enum bfa_qos_state state; /* QoS current state */
55 u32 total_bb_cr; /* Total BB Credits */
56};
57
58/**
59 * These fields should be displayed only from the CLI.
60 * There will be a separate BFAL API (get_qos_vc_attr ?)
61 * to retrieve this.
62 *
63 */
64#define BFA_QOS_MAX_VC 16
65
66struct bfa_qos_vc_info_s {
67 u8 vc_credit;
68 u8 borrow_credit;
69 u8 priority;
70 u8 resvd;
71};
72
73struct bfa_qos_vc_attr_s {
74 u16 total_vc_count; /* Total VC Count */
75 u16 shared_credit;
76 u32 elp_opmode_flags;
77 struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
78 * total_vc_count */
79};
80
81/**
82 * QoS statistics
83 */
84struct bfa_qos_stats_s {
85 u32 flogi_sent; /* QoS Flogi sent */
86 u32 flogi_acc_recvd; /* QoS Flogi Acc received */
87 u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
88 u32 flogi_retries; /* QoS Flogi retries */
89
90 u32 elp_recvd; /* QoS ELP received */
91 u32 elp_accepted; /* QoS ELP Accepted */
92 u32 elp_rejected; /* QoS ELP rejected */
93 u32 elp_dropped; /* QoS ELP dropped */
94
95 u32 qos_rscn_recvd; /* QoS RSCN received */
96 u32 rsvd; /* padding for 64 bit alignment */
97};
98
99#endif /* __BFA_DEFS_QOS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h b/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
deleted file mode 100644
index e0af59d6d2f6..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h
+++ /dev/null
@@ -1,199 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_RPORT_H__
19#define __BFA_DEFS_RPORT_H__
20
21#include <bfa_os_inc.h>
22#include <protocol/types.h>
23#include <defs/bfa_defs_pport.h>
24#include <defs/bfa_defs_port.h>
25#include <defs/bfa_defs_qos.h>
26
27/**
28 * FCS remote port states
29 */
30enum bfa_rport_state {
31 BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */
32 BFA_RPORT_OFFLINE = 1, /* rport is offline */
33 BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */
34 BFA_RPORT_ONLINE = 3, /* login to rport is complete */
35 BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */
36 BFA_RPORT_NSQUERY = 5, /* nameserver query */
37 BFA_RPORT_ADISC = 6, /* ADISC authentication */
38 BFA_RPORT_LOGO = 7, /* logging out with rport */
39 BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */
40 BFA_RPORT_NSDISC = 9, /* re-discover rport */
41};
42
43/**
44 * Rport Scsi Function : Initiator/Target.
45 */
46enum bfa_rport_function {
47 BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */
48 BFA_RPORT_TARGET = 0x02, /* SCSI Target */
49};
50
51/**
52 * port/node symbolic names for rport
53 */
54#define BFA_RPORT_SYMNAME_MAXLEN 255
55struct bfa_rport_symname_s {
56 char symname[BFA_RPORT_SYMNAME_MAXLEN];
57};
58
59struct bfa_rport_hal_stats_s {
60 u32 sm_un_cr; /* uninit: create events */
61 u32 sm_un_unexp; /* uninit: exception events */
62 u32 sm_cr_on; /* created: online events */
63 u32 sm_cr_del; /* created: delete events */
64 u32 sm_cr_hwf; /* created: IOC down */
65 u32 sm_cr_unexp; /* created: exception events */
66 u32 sm_fwc_rsp; /* fw create: f/w responses */
67 u32 sm_fwc_del; /* fw create: delete events */
68 u32 sm_fwc_off; /* fw create: offline events */
69 u32 sm_fwc_hwf; /* fw create: IOC down */
70 u32 sm_fwc_unexp; /* fw create: exception events*/
71 u32 sm_on_off; /* online: offline events */
72 u32 sm_on_del; /* online: delete events */
73 u32 sm_on_hwf; /* online: IOC down events */
74 u32 sm_on_unexp; /* online: exception events */
75 u32 sm_fwd_rsp; /* fw delete: fw responses */
76 u32 sm_fwd_del; /* fw delete: delete events */
77 u32 sm_fwd_hwf; /* fw delete: IOC down events */
78 u32 sm_fwd_unexp; /* fw delete: exception events*/
79 u32 sm_off_del; /* offline: delete events */
80 u32 sm_off_on; /* offline: online events */
81 u32 sm_off_hwf; /* offline: IOC down events */
82 u32 sm_off_unexp; /* offline: exception events */
83 u32 sm_del_fwrsp; /* delete: fw responses */
84 u32 sm_del_hwf; /* delete: IOC down events */
85 u32 sm_del_unexp; /* delete: exception events */
86 u32 sm_delp_fwrsp; /* delete pend: fw responses */
87 u32 sm_delp_hwf; /* delete pend: IOC downs */
88 u32 sm_delp_unexp; /* delete pend: exceptions */
89 u32 sm_offp_fwrsp; /* off-pending: fw responses */
90 u32 sm_offp_del; /* off-pending: deletes */
91 u32 sm_offp_hwf; /* off-pending: IOC downs */
92 u32 sm_offp_unexp; /* off-pending: exceptions */
93 u32 sm_iocd_off; /* IOC down: offline events */
94 u32 sm_iocd_del; /* IOC down: delete events */
95 u32 sm_iocd_on; /* IOC down: online events */
96 u32 sm_iocd_unexp; /* IOC down: exceptions */
97 u32 rsvd;
98};
99
100/**
101 * FCS remote port statistics
102 */
103struct bfa_rport_stats_s {
104 u32 offlines; /* remote port offline count */
105 u32 onlines; /* remote port online count */
106 u32 rscns; /* RSCN affecting rport */
107 u32 plogis; /* plogis sent */
108 u32 plogi_accs; /* plogi accepts */
109 u32 plogi_timeouts; /* plogi timeouts */
110 u32 plogi_rejects; /* rcvd plogi rejects */
111 u32 plogi_failed; /* local failure */
112 u32 plogi_rcvd; /* plogis rcvd */
113 u32 prli_rcvd; /* inbound PRLIs */
114 u32 adisc_rcvd; /* ADISCs received */
115 u32 adisc_rejects; /* recvd ADISC rejects */
116 u32 adisc_sent; /* ADISC requests sent */
117 u32 adisc_accs; /* ADISC accepted by rport */
118 u32 adisc_failed; /* ADISC failed (no response) */
119 u32 adisc_rejected; /* ADISC rejected by us */
120 u32 logos; /* logos sent */
121 u32 logo_accs; /* LOGO accepts from rport */
122 u32 logo_failed; /* LOGO failures */
123 u32 logo_rejected; /* LOGO rejects from rport */
124 u32 logo_rcvd; /* LOGO from remote port */
125
126 u32 rpsc_rcvd; /* RPSC received */
127 u32 rpsc_rejects; /* recvd RPSC rejects */
128 u32 rpsc_sent; /* RPSC requests sent */
129 u32 rpsc_accs; /* RPSC accepted by rport */
130 u32 rpsc_failed; /* RPSC failed (no response) */
131 u32 rpsc_rejected; /* RPSC rejected by us */
132
133 u32 rsvd;
134 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
135};
136
137/**
138 * Rport's QoS attributes
139 */
140struct bfa_rport_qos_attr_s {
141 enum bfa_qos_priority qos_priority; /* rport's QoS priority */
142 u32 qos_flow_id; /* QoS flow Id */
143};
144
145/**
146 * FCS remote port attributes returned in queries
147 */
148struct bfa_rport_attr_s {
149 wwn_t nwwn; /* node wwn */
150 wwn_t pwwn; /* port wwn */
151 enum fc_cos cos_supported; /* supported class of services */
152 u32 pid; /* port ID */
153 u32 df_sz; /* Max payload size */
154 enum bfa_rport_state state; /* Rport State machine state */
155 enum fc_cos fc_cos; /* FC classes of services */
156 bfa_boolean_t cisc; /* CISC capable device */
157 struct bfa_rport_symname_s symname; /* Symbolic Name */
158 enum bfa_rport_function scsi_function; /* Initiator/Target */
159 struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
160 enum bfa_pport_speed curr_speed; /* operating speed got from
161 * RPSC ELS. UNKNOWN, if RPSC
162 * is not supported */
163 bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */
164 enum bfa_pport_speed assigned_speed; /* Speed assigned by the user.
165 * will be used if RPSC is not
166 * supported by the rport */
167};
168
169#define bfa_rport_aen_qos_data_t struct bfa_rport_qos_attr_s
170
171/**
172 * BFA remote port events
173 * Arguments below are in BFAL context from Mgmt
174 * BFA_RPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn
175 * BFA_RPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn
176 * BFA_RPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn
177 * BFA_RPORT_AEN_QOS_PRIO: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn, prio
178 * BFA_RPORT_AEN_QOS_FLOWID:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn, flow_id
179 */
180enum bfa_rport_aen_event {
181 BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
182 BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
183 BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
184 BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
185 BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
186};
187
188struct bfa_rport_aen_data_s {
189 u16 vf_id; /* vf_id of this logical port */
190 u16 rsvd[3];
191 wwn_t ppwwn; /* WWN of its physical port */
192 wwn_t lpwwn; /* WWN of this logical port */
193 wwn_t rpwwn; /* WWN of this remote port */
194 union {
195 bfa_rport_aen_qos_data_t qos;
196 } priv;
197};
198
199#endif /* __BFA_DEFS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
deleted file mode 100644
index 6eb4e62096fc..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ /dev/null
@@ -1,282 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_STATUS_H__
18#define __BFA_DEFS_STATUS_H__
19
20/**
21 * API status return values
22 *
23 * NOTE: The error msgs are auto generated from the comments. Only singe line
24 * comments are supported
25 */
26enum bfa_status {
27 BFA_STATUS_OK = 0, /* Success */
28 BFA_STATUS_FAILED = 1, /* Operation failed */
29 BFA_STATUS_EINVAL = 2, /* Invalid params Check input
30 * parameters */
31 BFA_STATUS_ENOMEM = 3, /* Out of resources */
32 BFA_STATUS_ENOSYS = 4, /* Function not implemented */
33 BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if
34 * persists, contact support */
35 BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
36 BFA_STATUS_ENOFCPORTS = 7, /* No FC ports resources */
37 BFA_STATUS_NOFLASH = 8, /* Flash not present */
38 BFA_STATUS_BADFLASH = 9, /* Flash is corrupted or bad */
39 BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
40 BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
41 BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted
42 * contact support */
43 BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
44 BFA_STATUS_ABORTED = 14, /* Operation aborted */
45 BFA_STATUS_NODEV = 15, /* Dev is not present */
46 BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */
47 BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */
48 BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
49 BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
50 BFA_STATUS_FCPT_LS_RJT = 20, /* Got LS_RJT for FC Pass
51 * through Req */
52 BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
53 BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported
54 * limit */
55 BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed
56 * setting */
57 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
58 BFA_STATUS_CNFG_FAILED = 25, /* Setting can not be persisted */
59 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
60 BFA_STATUS_NO_ADAPTER = 27, /* No Brocade Adapter Found */
61 BFA_STATUS_LINKDOWN = 28, /* Link is down - Check or replace
62 * SFP/cable */
63 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
64 BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
65 BFA_STATUS_NSLOGIN_FAILED = 31, /* Nameserver login failed */
66 BFA_STATUS_NO_RPORTS = 32, /* No remote ports found */
67 BFA_STATUS_NSQUERY_FAILED = 33, /* Nameserver query failed */
68 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
69 BFA_STATUS_RPORT_OFFLINE = 35, /* RPORT is not online */
70 BFA_STATUS_TGTOPEN_FAILED = 36, /* Remote SCSI target open failed */
71 BFA_STATUS_BAD_LUNS = 37, /* No valid LUNs found */
72 BFA_STATUS_IO_FAILURE = 38, /* SCSI target IO failure */
73 BFA_STATUS_NO_FABRIC = 39, /* No switched fabric present */
74 BFA_STATUS_EBADF = 40, /* Bad file descriptor */
75 BFA_STATUS_EINTR = 41, /* A signal was caught during ioctl */
76 BFA_STATUS_EIO = 42, /* I/O error */
77 BFA_STATUS_ENOTTY = 43, /* Inappropriate I/O control
78 * operation */
79 BFA_STATUS_ENXIO = 44, /* No such device or address */
80 BFA_STATUS_EFOPEN = 45, /* Failed to open file */
81 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
82 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port
83 * first */
84 BFA_STATUS_BADFRMHDR = 48, /* Bad frame header */
85 BFA_STATUS_BADFRMSZ = 49, /* Bad frame size check and replace
86 * SFP/cable */
87 BFA_STATUS_MISSINGFRM = 50, /* Missing frame check and replace
88 * SFP/cable or for Mezz card check and
89 * replace pass through module */
90 BFA_STATUS_LINKTIMEOUT = 51, /* Link timeout check and replace
91 * SFP/cable */
92 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the
93 * rport */
94 BFA_STATUS_CHECKSUM_FAIL = 53, /* checksum failure */
95 BFA_STATUS_GZME_FAILED = 54, /* Get zone member query failed */
96 BFA_STATUS_SCSISTART_REQD = 55, /* SCSI disk require START command */
97 BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
98 * contact support */
99 BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
100 BFA_STATUS_MISMATCH = 58, /* Version mismatch */
101 BFA_STATUS_IOC_ENABLED = 59, /* IOC is already enabled */
102 BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled disable
103 * adapter first */
104 BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational. Enable IOC
105 * and if it still fails,
106 * contact support */
107 BFA_STATUS_ADDR_MAP_FAILURE = 62, /* PCI base address not mapped
108 * in OS */
109 BFA_STATUS_SAME_NAME = 63, /* Name exists! use a different
110 * name */
111 BFA_STATUS_PENDING = 64, /* API completes asynchronously */
112 BFA_STATUS_8G_SPD = 65, /* Speed setting not valid for
113 * 8G HBA */
114 BFA_STATUS_4G_SPD = 66, /* Speed setting not valid for
115 * 4G HBA */
116 BFA_STATUS_AD_IS_ENABLE = 67, /* Adapter is already enabled */
117 BFA_STATUS_EINVAL_TOV = 68, /* Invalid path failover TOV */
118 BFA_STATUS_EINVAL_QDEPTH = 69, /* Invalid queue depth value */
119 BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version
120 * mismatch */
121 BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
122 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
123 BFA_STATUS_BEACON_OFF = 73, /* Port Beacon already off */
124 BFA_STATUS_LBEACON_ON = 74, /* Link End-to-End Beacon already
125 * on */
126 BFA_STATUS_LBEACON_OFF = 75, /* Link End-to-End Beacon already
127 * off */
128 BFA_STATUS_PORT_NOT_INITED = 76, /* Port not initialized */
129 BFA_STATUS_RPSC_ENABLED = 77, /* Target has a valid speed */
130 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
131 BFA_STATUS_BAD_FILE = 79, /* Not a valid Brocade Boot Code
132 * file */
133 BFA_STATUS_RLIM_EN = 80, /* Target rate limiting is already
134 * enabled */
135 BFA_STATUS_RLIM_DIS = 81, /* Target rate limiting is already
136 * disabled */
137 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
138 BFA_STATUS_ADAPTER_DISABLED = 83, /* Adapter is already disabled */
139 BFA_STATUS_BIOS_DISABLED = 84, /* Bios is already disabled */
140 BFA_STATUS_AUTH_ENABLED = 85, /* Authentication is already
141 * enabled */
142 BFA_STATUS_AUTH_DISABLED = 86, /* Authentication is already
143 * disabled */
144 BFA_STATUS_ERROR_TRL_ENABLED = 87, /* Target rate limiting is
145 * enabled */
146 BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */
147 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
148 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact
149 * support */
150 BFA_STATUS_INVALID_DEVID = 91, /* Invalid device id provided */
151 BFA_STATUS_QOS_ENABLED = 92, /* QOS is already enabled */
152 BFA_STATUS_QOS_DISABLED = 93, /* QOS is already disabled */
153 BFA_STATUS_INCORRECT_DRV_CONFIG = 94, /* Check configuration
154 * key/value pair */
155 BFA_STATUS_REG_FAIL = 95, /* Can't read windows registry */
156 BFA_STATUS_IM_INV_CODE = 96, /* Invalid IOCTL code */
157 BFA_STATUS_IM_INV_VLAN = 97, /* Invalid VLAN ID */
158 BFA_STATUS_IM_INV_ADAPT_NAME = 98, /* Invalid adapter name */
159 BFA_STATUS_IM_LOW_RESOURCES = 99, /* Memory allocation failure in
160 * driver */
161 BFA_STATUS_IM_VLANID_IS_PVID = 100, /* Given VLAN id same as PVID */
162 BFA_STATUS_IM_VLANID_EXISTS = 101, /* Given VLAN id already exists */
163 BFA_STATUS_IM_FW_UPDATE_FAIL = 102, /* Updating firmware with new
164 * VLAN ID failed */
165 BFA_STATUS_PORTLOG_ENABLED = 103, /* Port Log is already enabled */
166 BFA_STATUS_PORTLOG_DISABLED = 104, /* Port Log is already disabled */
167 BFA_STATUS_FILE_NOT_FOUND = 105, /* Specified file could not be
168 * found */
169 BFA_STATUS_QOS_FC_ONLY = 106, /* QOS can be enabled for FC mode
170 * only */
171 BFA_STATUS_RLIM_FC_ONLY = 107, /* RATELIM can be enabled for FC mode
172 * only */
173 BFA_STATUS_CT_SPD = 108, /* Invalid speed selection for Catapult. */
174 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
175 BFA_STATUS_CEE_NOT_DN = 110, /* eth port is not at down state, please
176 * bring down first */
177 BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G CNA */
178 BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */
179 BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already
180 * exists */
181 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
182 * of another team */
183 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
184 * Delete all VLANs to become
185 * part of the team */
186 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
187 * for adapters */
188 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
189 * configured for adapters */
190 BFA_STATUS_IM_MTU_MISMATCH = 118, /* Mismatching MTUs configured for
191 * adapters */
192 BFA_STATUS_IM_RSS_MISMATCH = 119, /* Mismatching RSS parameters
193 * configured for adapters */
194 BFA_STATUS_IM_HDS_MISMATCH = 120, /* Mismatching HDS parameters
195 * configured for adapters */
196 BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, /* Mismatching offload
197 * parameters configured for
198 * adapters */
199 BFA_STATUS_IM_PORT_PARAMS = 122, /* Error setting port parameters */
200 BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, /* Port is not part of team */
201 BFA_STATUS_IM_CANNOT_REM_PRI = 124, /* Primary adapter cannot be
202 * removed. Change primary before
203 * removing */
204 BFA_STATUS_IM_MAX_PORTS_REACHED = 125, /* Exceeding maximum ports
205 * per team */
206 BFA_STATUS_IM_LAST_PORT_DELETE = 126, /* Last port in team being
207 * deleted */
208 BFA_STATUS_IM_NO_DRIVER = 127, /* IM driver is not installed */
209 BFA_STATUS_IM_MAX_VLANS_REACHED = 128, /* Exceeding maximum VLANs
210 * per port */
211 BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, /* Bios speed config not
212 * allowed for CNA */
213 BFA_STATUS_NO_MINPORT_DRIVER = 130, /* Miniport driver is not
214 * loaded */
215 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
216 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
217 BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed
218 * or loaded */
219 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
220 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
221 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
222 BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port VLAN (PVID) */
223 BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port VLAN (PVID) */
224 BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */
225 BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
226 * Non-Zero Value */
227 BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network
228 * Subsystem Lock Failed.Please
229 * try after some time */
230 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
231 * handle Failed. Please try
232 * after some time */
233 BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
234 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
235 * permissions to execute the BCU
236 * application */
237 BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved VLAN name
238 * string. The name is not allowed
239 * for the normal VLAN */
240 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
241 BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthrough VLAN
242 * id */
243 BFA_STATUS_IM_BIND_FAILED = 148, /* IM Driver bind operation
244 * failed */
245 BFA_STATUS_IM_UNBIND_FAILED = 149, /* IM Driver unbind operation
246 * failed */
247 BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
248 * team */
249 BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */
250 BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't
251 * exists */
252 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
253 * allowed for the current
254 * Teaming mode */
255 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
256 * configuration */
257 BFA_STATUS_DEVID_MISSING = 155, /* Boot image is not for the adapter(s)
258 * installed */
259 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
260 BFA_STATUS_CREATE_FILE = 157, /* Failed to create temporary file */
261 BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
262 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
263 BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 160, /* Topology command not
264 * applicable to CNA */
265 BFA_STATUS_BOOT_CODE_UPDATED = 161, /* reboot -- -r is needed after
266 * boot code updated */
267 BFA_STATUS_BOOT_VERSION = 162, /* Boot code version not compatible with
268 * the driver installed */
269 BFA_STATUS_CARDTYPE_MISSING = 163, /* Boot image is not for the
270 * adapter(s) installed */
271 BFA_STATUS_INVALID_CARDTYPE = 164, /* Invalid card type provided */
272 BFA_STATUS_MAX_VAL /* Unknown error code */
273};
274#define bfa_status_t enum bfa_status
275
276enum bfa_eproto_status {
277 BFA_EPROTO_BAD_ACCEPT = 0,
278 BFA_EPROTO_UNKNOWN_RSP = 1
279};
280#define bfa_eproto_status_t enum bfa_eproto_status
281
282#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h b/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
deleted file mode 100644
index e05a2db7abed..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TIN_H__
19#define __BFA_DEFS_TIN_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23
24/**
25 * FCS tin states
26 */
27enum bfa_tin_state_e {
28 BFA_TIN_SM_OFFLINE = 0, /* tin is offline */
29 BFA_TIN_SM_WOS_LOGIN = 1, /* Waiting PRLI ACC/RJT from ULP */
30 BFA_TIN_SM_WFW_ONLINE = 2, /* Waiting ACK to PRLI ACC from FW */
31 BFA_TIN_SM_ONLINE = 3, /* tin login is complete */
32 BFA_TIN_SM_WIO_RELOGIN = 4, /* tin relogin is in progress */
33 BFA_TIN_SM_WIO_LOGOUT = 5, /* Processing of PRLO req from
34 * Initiator is in progress
35 */
36 BFA_TIN_SM_WOS_LOGOUT = 6, /* Processing of PRLO req from
37 * Initiator is in progress
38 */
39 BFA_TIN_SM_WIO_CLEAN = 7, /* Waiting for IO cleanup before tin
40 * is offline. This can be triggered
41 * by RPORT LOGO (rcvd/sent) or by
42 * PRLO (rcvd/sent)
43 */
44};
45
46struct bfa_prli_req_s {
47 struct fchs_s fchs;
48 struct fc_prli_s prli_payload;
49};
50
51struct bfa_prlo_req_s {
52 struct fchs_s fchs;
53 struct fc_prlo_s prlo_payload;
54};
55
56void bfa_tin_send_login_rsp(void *bfa_tin, u32 login_rsp,
57 struct fc_ls_rjt_s rjt_payload);
58void bfa_tin_send_logout_rsp(void *bfa_tin, u32 logout_rsp,
59 struct fc_ls_rjt_s rjt_payload);
60/**
61 * FCS target port statistics
62 */
63struct bfa_tin_stats_s {
64 u32 onlines; /* ITN nexus onlines (PRLI done) */
65 u32 offlines; /* ITN Nexus offlines */
66 u32 prli_req_parse_err; /* prli req parsing errors */
67 u32 prli_rsp_rjt; /* num prli rsp rejects sent */
68 u32 prli_rsp_acc; /* num prli rsp accepts sent */
69 u32 cleanup_comps; /* ITN cleanup completions */
70};
71
72/**
73 * FCS tin attributes returned in queries
74 */
75struct bfa_tin_attr_s {
76 enum bfa_tin_state_e state;
77 u8 seq_retry; /* Sequence retry supported */
78 u8 rsvd[3];
79};
80
81/**
82 * BFA TIN async event data structure for BFAL
83 */
84enum bfa_tin_aen_event {
85 BFA_TIN_AEN_ONLINE = 1, /* Target online */
86 BFA_TIN_AEN_OFFLINE = 2, /* Target offline */
87 BFA_TIN_AEN_DISCONNECT = 3, /* Target disconnected */
88};
89
90/**
91 * BFA TIN event data structure.
92 */
93struct bfa_tin_aen_data_s {
94 u16 vf_id; /* vf_id of the IT nexus */
95 u16 rsvd[3];
96 wwn_t lpwwn; /* WWN of logical port */
97 wwn_t rpwwn; /* WWN of remote(target) port */
98};
99
100/**
101 * Below APIs are needed from BFA driver
102 * Move these to BFA driver public header file?
103 */
104/* TIN rcvd new PRLI & gets bfad_tin_t ptr from driver this callback */
105void *bfad_tin_rcvd_login_req(void *bfad_tm_port, void *bfa_tin,
106 wwn_t rp_wwn, u32 rp_fcid,
107 struct bfa_prli_req_s prli_req);
108/* TIN rcvd new PRLO */
109void bfad_tin_rcvd_logout_req(void *bfad_tin, wwn_t rp_wwn, u32 rp_fcid,
110 struct bfa_prlo_req_s prlo_req);
111/* TIN is online and ready for IO */
112void bfad_tin_online(void *bfad_tin);
113/* TIN is offline and BFA driver can shutdown its upper stack */
114void bfad_tin_offline(void *bfad_tin);
115/* TIN does not need this BFA driver tin tag anymore, so can be freed */
116void bfad_tin_res_free(void *bfad_tin);
117
118#endif /* __BFA_DEFS_TIN_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
deleted file mode 100644
index ade763dbc8ce..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_TSENSOR_H__
19#define __BFA_DEFS_TSENSOR_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_types.h>
23
24/**
25 * Temperature sensor status values
26 */
27enum bfa_tsensor_status {
28 BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */
29 BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */
30 BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below mininum */
31 BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */
32 BFA_TSENSOR_STATUS_ABOVE_MAX = 5, /* temperature above maximum */
33};
34
35/**
36 * Temperature sensor attribute
37 */
38struct bfa_tsensor_attr_s {
39 enum bfa_tsensor_status status; /* temperature sensor status */
40 u32 value; /* current temperature in celsius */
41};
42
43#endif /* __BFA_DEFS_TSENSOR_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_types.h b/drivers/scsi/bfa/include/defs/bfa_defs_types.h
deleted file mode 100644
index 4348332b107a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_TYPES_H__
18#define __BFA_DEFS_TYPES_H__
19
20#include <bfa_os_inc.h>
21
22enum bfa_boolean {
23 BFA_FALSE = 0,
24 BFA_TRUE = 1
25};
26#define bfa_boolean_t enum bfa_boolean
27
28#define BFA_STRING_32 32
29
30#endif /* __BFA_DEFS_TYPES_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_version.h b/drivers/scsi/bfa/include/defs/bfa_defs_version.h
deleted file mode 100644
index f8902a2c9aad..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_version.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef __BFA_DEFS_VERSION_H__
18#define __BFA_DEFS_VERSION_H__
19
20#define BFA_VERSION_LEN 64
21
22#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h b/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
deleted file mode 100644
index 3235be5e9423..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VF_H__
19#define __BFA_DEFS_VF_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VF states
27 */
28enum bfa_vf_state {
29 BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
30 BFA_VF_LINK_DOWN = 1, /* link is down */
31 BFA_VF_FLOGI = 2, /* flogi is in progress */
32 BFA_VF_AUTH = 3, /* authentication in progress */
33 BFA_VF_NOFABRIC = 4, /* fabric is not present */
34 BFA_VF_ONLINE = 5, /* login to fabric is complete */
35 BFA_VF_EVFP = 6, /* EVFP is in progress */
36 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
37};
38
39/**
40 * VF statistics
41 */
42struct bfa_vf_stats_s {
43 u32 flogi_sent; /* Num FLOGIs sent */
44 u32 flogi_rsp_err; /* FLOGI response errors */
45 u32 flogi_acc_err; /* FLOGI accept errors */
46 u32 flogi_accepts; /* FLOGI accepts received */
47 u32 flogi_rejects; /* FLOGI rejects received */
48 u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
49 u32 flogi_alloc_wait; /* Allocation waits prior to
50 * sending FLOGI
51 */
52 u32 flogi_rcvd; /* FLOGIs received */
53 u32 flogi_rejected; /* Incoming FLOGIs rejected */
54 u32 fabric_onlines; /* Internal fabric online
55 * notification sent to other
56 * modules
57 */
58 u32 fabric_offlines; /* Internal fabric offline
59 * notification sent to other
60 * modules
61 */
62 u32 resvd;
63};
64
65/**
66 * VF attributes returned in queries
67 */
68struct bfa_vf_attr_s {
69 enum bfa_vf_state state; /* VF state */
70 u32 rsvd;
71 wwn_t fabric_name; /* fabric name */
72};
73
74#endif /* __BFA_DEFS_VF_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h b/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
deleted file mode 100644
index 9f021f43b3b4..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_VPORT_H__
19#define __BFA_DEFS_VPORT_H__
20
21#include <bfa_os_inc.h>
22#include <defs/bfa_defs_port.h>
23#include <protocol/types.h>
24
25/**
26 * VPORT states
27 */
28enum bfa_vport_state {
29 BFA_FCS_VPORT_UNINIT = 0,
30 BFA_FCS_VPORT_CREATED = 1,
31 BFA_FCS_VPORT_OFFLINE = 1,
32 BFA_FCS_VPORT_FDISC_SEND = 2,
33 BFA_FCS_VPORT_FDISC = 3,
34 BFA_FCS_VPORT_FDISC_RETRY = 4,
35 BFA_FCS_VPORT_ONLINE = 5,
36 BFA_FCS_VPORT_DELETING = 6,
37 BFA_FCS_VPORT_CLEANUP = 6,
38 BFA_FCS_VPORT_LOGO_SEND = 7,
39 BFA_FCS_VPORT_LOGO = 8,
40 BFA_FCS_VPORT_ERROR = 9,
41 BFA_FCS_VPORT_MAX_STATE,
42};
43
44/**
45 * vport statistics
46 */
47struct bfa_vport_stats_s {
48 struct bfa_port_stats_s port_stats; /* base class (port) stats */
49 /*
50 * TODO - remove
51 */
52
53 u32 fdisc_sent; /* num fdisc sent */
54 u32 fdisc_accepts; /* fdisc accepts */
55 u32 fdisc_retries; /* fdisc retries */
56 u32 fdisc_timeouts; /* fdisc timeouts */
57 u32 fdisc_rsp_err; /* fdisc response error */
58 u32 fdisc_acc_bad; /* bad fdisc accepts */
59 u32 fdisc_rejects; /* fdisc rejects */
60 u32 fdisc_unknown_rsp;
61 /*
62 *!< fdisc rsp unknown error
63 */
64 u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
65
66 u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
67 u32 logo_sent; /* logo sent */
68 u32 logo_accepts; /* logo accepts */
69 u32 logo_rejects; /* logo rejects */
70 u32 logo_rsp_err; /* logo rsp errors */
71 u32 logo_unknown_rsp;
72 /* logo rsp unknown errors */
73
74 u32 fab_no_npiv; /* fabric does not support npiv */
75
76 u32 fab_offline; /* offline events from fab SM */
77 u32 fab_online; /* online events from fab SM */
78 u32 fab_cleanup; /* cleanup request from fab SM */
79 u32 rsvd;
80};
81
82/**
83 * BFA vport attribute returned in queries
84 */
85struct bfa_vport_attr_s {
86 struct bfa_port_attr_s port_attr; /* base class (port) attributes */
87 enum bfa_vport_state vport_state; /* vport state */
88 u32 rsvd;
89};
90
91#endif /* __BFA_DEFS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb.h b/drivers/scsi/bfa/include/fcb/bfa_fcb.h
deleted file mode 100644
index 2963b0bc30e7..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb.h BFA FCS callback interfaces
20 */
21
22#ifndef __BFA_FCB_H__
23#define __BFA_FCB_H__
24
25/**
26 * fcb Main fcs callbacks
27 */
28
29void bfa_fcb_exit(struct bfad_s *bfad);
30
31
32
33#endif /* __BFA_FCB_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
deleted file mode 100644
index 52585d3dd891..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19* : bfad_fcpim.h - BFA FCS initiator mode remote port callbacks
20 */
21
22#ifndef __BFAD_FCB_FCPIM_H__
23#define __BFAD_FCB_FCPIM_H__
24
25struct bfad_itnim_s;
26
27/*
28 * RPIM callbacks
29 */
30
31/**
32 * Memory allocation for remote port instance. Called before PRLI is
33 * initiated to the remote target port.
34 *
35 * @param[in] bfad - driver instance
36 * @param[out] itnim - FCS remote port (IM) instance
37 * @param[out] itnim_drv - driver remote port (IM) instance
38 *
39 * @return None
40 */
41void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
42 struct bfad_itnim_s **itnim_drv);
43
44/**
45 * Free remote port (IM) instance.
46 *
47 * @param[in] bfad - driver instance
48 * @param[in] itnim_drv - driver remote port instance
49 *
50 * @return None
51 */
52void bfa_fcb_itnim_free(struct bfad_s *bfad,
53 struct bfad_itnim_s *itnim_drv);
54
55/**
56 * Notification of when login with a remote target device is complete.
57 *
58 * @param[in] itnim_drv - driver remote port instance
59 *
60 * @return None
61 */
62void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
63
64/**
65 * Notification when login with the remote device is severed.
66 *
67 * @param[in] itnim_drv - driver remote port instance
68 *
69 * @return None
70 */
71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
72
73void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
74
75#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
deleted file mode 100644
index 5fd7f986fa32..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_port.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_PORT_H__
23#define __BFA_FCB_PORT_H__
24
25#include <fcb/bfa_fcb_vport.h>
26/**
27 * fcs_port_fcb FCS port driver interfaces
28 */
29
30/*
31 * Forward declarations
32 */
33struct bfad_port_s;
34
35/*
36 * Callback functions from BFA FCS to driver
37 */
38
39/**
40 * Call from FCS to driver module when a port is instantiated. The port
41 * can be a base port or a virtual port with in the base fabric or
42 * a virtual fabric.
43 *
44 * On this callback, driver is supposed to create scsi_host, scsi_tgt or
45 * network interfaces bases on ports personality/roles.
46 *
47 * base port of base fabric: vf_drv == NULL && vp_drv == NULL
48 * vport of base fabric: vf_drv == NULL && vp_drv != NULL
49 * base port of VF: vf_drv != NULL && vp_drv == NULL
50 * vport of VF: vf_drv != NULL && vp_drv != NULL
51 *
52 * @param[in] bfad - driver instance
53 * @param[in] port - FCS port instance
54 * @param[in] roles - port roles: IM, TM, IP
55 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
56 * @param[in] vp_drv - vport driver instance, NULL if base port
57 *
58 * @return None
59 */
60struct bfad_port_s *bfa_fcb_port_new(struct bfad_s *bfad,
61 struct bfa_fcs_port_s *port,
62 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
63 struct bfad_vport_s *vp_drv);
64
65/**
66 * Call from FCS to driver module when a port is deleted. The port
67 * can be a base port or a virtual port with in the base fabric or
68 * a virtual fabric.
69 *
70 * @param[in] bfad - driver instance
71 * @param[in] roles - port roles: IM, TM, IP
72 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
73 * @param[in] vp_drv - vport driver instance, NULL if base port
74 *
75 * @return None
76 */
77void bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
78 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
79
80/**
81 * Notification when port transitions to ONLINE state.
82 *
83 * Online notification is a logical link up for the local port. This
84 * notification is sent after a successfull FLOGI, or a successful
85 * link initialization in proviate-loop or N2N topologies.
86 *
87 * @param[in] bfad - driver instance
88 * @param[in] roles - port roles: IM, TM, IP
89 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
90 * @param[in] vp_drv - vport driver instance, NULL if base port
91 *
92 * @return None
93 */
94void bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
95 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
96
97/**
98 * Notification when port transitions to OFFLINE state.
99 *
100 * Offline notification is a logical link down for the local port.
101 *
102 * @param[in] bfad - driver instance
103 * @param[in] roles - port roles: IM, TM, IP
104 * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF)
105 * @param[in] vp_drv - vport driver instance, NULL if base port
106 *
107 * @return None
108 */
109void bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
110 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv);
111
112
113#endif /* __BFA_FCB_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
deleted file mode 100644
index e0261bb6d1c1..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_rport.h BFA FCS rport driver interfaces
20 */
21
22#ifndef __BFA_FCB_RPORT_H__
23#define __BFA_FCB_RPORT_H__
24
25/**
26 * fcs_rport_fcb Remote port driver interfaces
27 */
28
29
30struct bfad_rport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_rport_add().
38 *
39 * @param[in] rport_drv - driver instance of rport
40 *
41 * @return None
42 */
43void bfa_fcb_rport_add(struct bfad_rport_s *rport_drv);
44
45/**
46 * Completion callback for bfa_fcs_rport_remove().
47 *
48 * @param[in] rport_drv - driver instance of rport
49 *
50 * @return None
51 */
52void bfa_fcb_rport_remove(struct bfad_rport_s *rport_drv);
53
54/**
55 * Call to allocate a rport instance.
56 *
57 * @param[in] bfad - driver instance
58 * @param[out] rport - BFA FCS instance of rport
59 * @param[out] rport_drv - driver instance of rport
60 *
61 * @retval BFA_STATUS_OK - successfully allocated
62 * @retval BFA_STATUS_ENOMEM - cannot allocate
63 */
64bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
65 struct bfa_fcs_rport_s **rport,
66 struct bfad_rport_s **rport_drv);
67
68/**
69 * Call to free rport memory resources.
70 *
71 * @param[in] bfad - driver instance
72 * @param[in] rport_drv - driver instance of rport
73 *
74 * @return None
75 */
76void bfa_fcb_rport_free(struct bfad_s *bfad, struct bfad_rport_s **rport_drv);
77
78
79
80#endif /* __BFA_FCB_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
deleted file mode 100644
index cfd3fac0a4e2..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vf.h BFA FCS virtual fabric driver interfaces
20 */
21
22#ifndef __BFA_FCB_VF_H__
23#define __BFA_FCB_VF_H__
24
25/**
26 * fcs_vf_fcb Virtual fabric driver intrefaces
27 */
28
29
30struct bfad_vf_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vf_stop().
38 *
39 * @param[in] vf_drv - driver instance of vf
40 *
41 * @return None
42 */
43void bfa_fcb_vf_stop(struct bfad_vf_s *vf_drv);
44
45
46
47#endif /* __BFA_FCB_VF_H__ */
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
deleted file mode 100644
index cfd6ba7c47ec..000000000000
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcb_vport.h BFA FCS virtual port driver interfaces
20 */
21
22#ifndef __BFA_FCB_VPORT_H__
23#define __BFA_FCB_VPORT_H__
24
25/**
26 * fcs_vport_fcb Virtual port driver interfaces
27 */
28
29
30struct bfad_vport_s;
31
32/*
33 * Callback functions from BFA FCS to driver
34 */
35
36/**
37 * Completion callback for bfa_fcs_vport_delete().
38 *
39 * @param[in] vport_drv - driver instance of vport
40 *
41 * @return None
42 */
43void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv);
44void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
45
46
47
48#endif /* __BFA_FCB_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
deleted file mode 100644
index 54e5b81ab2a3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_H__
19#define __BFA_FCS_H__
20
21#include <cs/bfa_debug.h>
22#include <defs/bfa_defs_status.h>
23#include <defs/bfa_defs_version.h>
24#include <bfa.h>
25#include <fcs/bfa_fcs_fabric.h>
26
27#define BFA_FCS_OS_STR_LEN 64
28
29struct bfa_fcs_stats_s {
30 struct {
31 u32 untagged; /* untagged receive frames */
32 u32 tagged; /* tagged receive frames */
33 u32 vfid_unknown; /* VF id is unknown */
34 } uf;
35};
36
37struct bfa_fcs_driver_info_s {
38 u8 version[BFA_VERSION_LEN]; /* Driver Version */
39 u8 host_machine_name[BFA_FCS_OS_STR_LEN];
40 u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
41 u8 host_os_patch[BFA_FCS_OS_STR_LEN];/* patch or service pack */
42 u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
43};
44
45struct bfa_fcs_s {
46 struct bfa_s *bfa; /* corresponding BFA bfa instance */
47 struct bfad_s *bfad; /* corresponding BDA driver instance */
48 struct bfa_log_mod_s *logm; /* driver logging module instance */
49 struct bfa_trc_mod_s *trcmod; /* tracing module */
50 struct bfa_aen_s *aen; /* aen component */
51 bfa_boolean_t vf_enabled; /* VF mode is enabled */
52 bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */
53 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
54 u16 port_vfid; /* port default VF ID */
55 struct bfa_fcs_driver_info_s driver_info;
56 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
57 struct bfa_fcs_stats_s stats; /* FCS statistics */
58 struct bfa_wc_s wc; /* waiting counter */
59};
60
61/*
62 * bfa fcs API functions
63 */
64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
65 struct bfad_s *bfad, bfa_boolean_t min_cfg);
66void bfa_fcs_init(struct bfa_fcs_s *fcs);
67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
68 struct bfa_fcs_driver_info_s *driver_info);
69void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
70void bfa_fcs_exit(struct bfa_fcs_s *fcs);
71void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
72void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
73void bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen);
74void bfa_fcs_start(struct bfa_fcs_s *fcs);
75
76#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
deleted file mode 100644
index 28c4c9ff08b3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_AUTH_H__
19#define __BFA_FCS_AUTH_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_auth.h>
25#include <defs/bfa_defs_vf.h>
26#include <cs/bfa_q.h>
27#include <cs/bfa_sm.h>
28#include <defs/bfa_defs_pport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <protocol/fc_sp.h>
31
32struct bfa_fcs_fabric_s;
33
34
35
36struct bfa_fcs_auth_s {
37 bfa_sm_t sm; /* state machine */
38 bfa_boolean_t policy; /* authentication enabled/disabled */
39 enum bfa_auth_status status; /* authentication status */
40 enum auth_rjt_codes rjt_code; /* auth reject status */
41 enum auth_rjt_code_exps rjt_code_exp; /* auth reject reason */
42 enum bfa_auth_algo algo; /* Authentication algorithm */
43 struct bfa_auth_stats_s stats; /* Statistics */
44 enum auth_dh_gid group; /* DH(diffie-hellman) Group */
45 enum bfa_auth_secretsource source; /* Secret source */
46 char secret[BFA_AUTH_SECRET_STRING_LEN];
47 /* secret string */
48 u8 secret_len;
49 /* secret string length */
50 u8 nretries;
51 /* number of retries */
52 struct bfa_fcs_fabric_s *fabric;/* pointer to fabric */
53 u8 sentcode; /* pointer to response data */
54 u8 *response; /* pointer to response data */
55 struct bfa_timer_s delay_timer; /* delay timer */
56 struct bfa_fcxp_s *fcxp; /* pointer to fcxp */
57 struct bfa_fcxp_wqe_s fcxp_wqe;
58};
59
60/**
61 * bfa fcs authentication public functions
62 */
63bfa_status_t bfa_fcs_auth_get_attr(struct bfa_fcs_s *port,
64 struct bfa_auth_attr_s *attr);
65bfa_status_t bfa_fcs_auth_set_policy(struct bfa_fcs_s *port,
66 bfa_boolean_t policy);
67enum bfa_auth_status bfa_fcs_auth_get_status(struct bfa_fcs_s *port);
68bfa_status_t bfa_fcs_auth_set_algo(struct bfa_fcs_s *port,
69 enum bfa_auth_algo algo);
70bfa_status_t bfa_fcs_auth_get_stats(struct bfa_fcs_s *port,
71 struct bfa_auth_stats_s *stats);
72bfa_status_t bfa_fcs_auth_set_dh_group(struct bfa_fcs_s *port, int group);
73bfa_status_t bfa_fcs_auth_set_secretstring(struct bfa_fcs_s *port,
74 char *secret);
75bfa_status_t bfa_fcs_auth_set_secretstring_encrypt(struct bfa_fcs_s *port,
76 u32 secret[], u32 len);
77bfa_status_t bfa_fcs_auth_set_secretsource(struct bfa_fcs_s *port,
78 enum bfa_auth_secretsource src);
79bfa_status_t bfa_fcs_auth_reset_stats(struct bfa_fcs_s *port);
80bfa_status_t bfa_fcs_auth_reinit(struct bfa_fcs_s *port);
81
82#endif /* __BFA_FCS_AUTH_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
deleted file mode 100644
index 08b79d5e46f3..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_FABRIC_H__
19#define __BFA_FCS_FABRIC_H__
20
21struct bfa_fcs_s;
22
23#include <defs/bfa_defs_status.h>
24#include <defs/bfa_defs_vf.h>
25#include <cs/bfa_q.h>
26#include <cs/bfa_sm.h>
27#include <defs/bfa_defs_pport.h>
28#include <fcs/bfa_fcs_lport.h>
29#include <protocol/fc_sp.h>
30#include <fcs/bfa_fcs_auth.h>
31
32/*
33 * forward declaration
34 */
35struct bfad_vf_s;
36
37enum bfa_fcs_fabric_type {
38 BFA_FCS_FABRIC_UNKNOWN = 0,
39 BFA_FCS_FABRIC_SWITCHED = 1,
40 BFA_FCS_FABRIC_PLOOP = 2,
41 BFA_FCS_FABRIC_N2N = 3,
42};
43
44
45struct bfa_fcs_fabric_s {
46 struct list_head qe; /* queue element */
47 bfa_sm_t sm; /* state machine */
48 struct bfa_fcs_s *fcs; /* FCS instance */
49 struct bfa_fcs_port_s bport; /* base logical port */
50 enum bfa_fcs_fabric_type fab_type; /* fabric type */
51 enum bfa_pport_type oper_type; /* current link topology */
52 u8 is_vf; /* is virtual fabric? */
53 u8 is_npiv; /* is NPIV supported ? */
54 u8 is_auth; /* is Security/Auth supported ? */
55 u16 bb_credit; /* BB credit from fabric */
56 u16 vf_id; /* virtual fabric ID */
57 u16 num_vports; /* num vports */
58 u16 rsvd;
59 struct list_head vport_q; /* queue of virtual ports */
60 struct list_head vf_q; /* queue of virtual fabrics */
61 struct bfad_vf_s *vf_drv; /* driver vf structure */
62 struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
63 wwn_t fabric_name; /* attached fabric name */
64 bfa_boolean_t auth_reqd; /* authentication required */
65 struct bfa_timer_s delay_timer; /* delay timer */
66 union {
67 u16 swp_vfid;/* switch port VF id */
68 } event_arg;
69 struct bfa_fcs_auth_s auth; /* authentication config */
70 struct bfa_wc_s wc; /* wait counter for delete */
71 struct bfa_vf_stats_s stats; /* fabric/vf stats */
72 struct bfa_lps_s *lps; /* lport login services */
73 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
74 * fabric's ip addr
75 */
76};
77
78#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
79#define bfa_fcs_fabric_is_switched(__f) \
80 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
81
82/**
83 * The design calls for a single implementation of base fabric and vf.
84 */
85#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
86
87struct bfa_vf_event_s {
88 u32 undefined;
89};
90
91/**
92 * bfa fcs vf public functions
93 */
94bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
95bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
96bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
97 u16 vf_id, struct bfa_port_cfg_s *port_cfg,
98 struct bfad_vf_s *vf_drv);
99bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
100void bfa_fcs_vf_start(bfa_fcs_vf_t *vf);
101bfa_status_t bfa_fcs_vf_stop(bfa_fcs_vf_t *vf);
102void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
103void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
104void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
105void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
106 struct bfa_vf_stats_s *vf_stats);
107void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
108void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
109bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
110struct bfad_vf_s *bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf);
111
112#endif /* __BFA_FCS_FABRIC_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
deleted file mode 100644
index 9a35ecf5cdf0..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fcpim.h BFA FCS FCP Initiator Mode interfaces/defines.
20 */
21
22#ifndef __BFA_FCS_FCPIM_H__
23#define __BFA_FCS_FCPIM_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_itnim.h>
27#include <fcs/bfa_fcs.h>
28#include <fcs/bfa_fcs_rport.h>
29#include <fcs/bfa_fcs_lport.h>
30#include <bfa_fcpim.h>
31
32/*
33 * forward declarations
34 */
35struct bfad_itnim_s;
36
37struct bfa_fcs_itnim_s {
38 bfa_sm_t sm; /* state machine */
39 struct bfa_fcs_rport_s *rport; /* parent remote rport */
40 struct bfad_itnim_s *itnim_drv; /* driver peer instance */
41 struct bfa_fcs_s *fcs; /* fcs instance */
42 struct bfa_timer_s timer; /* timer functions */
43 struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
44 u32 prli_retries; /* max prli retry attempts */
45 bfa_boolean_t seq_rec; /* seq recovery support */
46 bfa_boolean_t rec_support; /* REC supported */
47 bfa_boolean_t conf_comp; /* FCP_CONF support */
48 bfa_boolean_t task_retry_id; /* task retry id supp */
49 struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
50 struct bfa_fcxp_s *fcxp; /* FCXP in use */
51 struct bfa_itnim_stats_s stats; /* itn statistics */
52};
53
54
55static inline struct bfad_port_s *
56bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
57{
58 return itnim->rport->port->bfad_port;
59}
60
61
62static inline struct bfa_fcs_port_s *
63bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
64{
65 return itnim->rport->port;
66}
67
68
69static inline wwn_t
70bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
71{
72 return itnim->rport->nwwn;
73}
74
75
76static inline wwn_t
77bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
78{
79 return itnim->rport->pwwn;
80}
81
82
83static inline u32
84bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
85{
86 return itnim->rport->pid;
87}
88
89
90static inline u32
91bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
92{
93 return itnim->rport->maxfrsize;
94}
95
96
97static inline enum fc_cos
98bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
99{
100 return itnim->rport->fc_cos;
101}
102
103
104static inline struct bfad_itnim_s *
105bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
106{
107 return itnim->itnim_drv;
108}
109
110
111static inline struct bfa_itnim_s *
112bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
113{
114 return itnim->bfa_itnim;
115}
116
117/**
118 * bfa fcs FCP Initiator mode API functions
119 */
120void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
121 struct bfa_itnim_attr_s *attr);
122void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
123 struct bfa_itnim_stats_s *stats);
124struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port,
125 wwn_t rpwwn);
126bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
127 struct bfa_itnim_attr_s *attr);
128bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
129 struct bfa_itnim_stats_s *stats);
130bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port,
131 wwn_t rpwwn);
132#endif /* __BFA_FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
deleted file mode 100644
index 4441fffc9c82..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_fdmi.h BFA fcs fdmi module public interface
20 */
21
22#ifndef __BFA_FCS_FDMI_H__
23#define __BFA_FCS_FDMI_H__
24#include <bfa_os_inc.h>
25#include <protocol/fdmi.h>
26
27#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
28 FDMI_TRANS_SPEED_2G | \
29 FDMI_TRANS_SPEED_4G | \
30 FDMI_TRANS_SPEED_8G)
31
32/*
33* HBA Attribute Block : BFA internal representation. Note : Some variable
34* sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
35 * on this the size has been reduced to 16 bytes from the standard's 64 bytes.
36 */
37struct bfa_fcs_fdmi_hba_attr_s {
38 wwn_t node_name;
39 u8 manufacturer[64];
40 u8 serial_num[64];
41 u8 model[16];
42 u8 model_desc[256];
43 u8 hw_version[8];
44 u8 driver_version[8];
45 u8 option_rom_ver[BFA_VERSION_LEN];
46 u8 fw_version[8];
47 u8 os_name[256];
48 u32 max_ct_pyld;
49};
50
51/*
52 * Port Attribute Block
53 */
54struct bfa_fcs_fdmi_port_attr_s {
55 u8 supp_fc4_types[32]; /* supported FC4 types */
56 u32 supp_speed; /* supported speed */
57 u32 curr_speed; /* current Speed */
58 u32 max_frm_size; /* max frame size */
59 u8 os_device_name[256]; /* OS device Name */
60 u8 host_name[256]; /* host name */
61};
62
63#endif /* __BFA_FCS_FDMI_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
deleted file mode 100644
index ceaefd3060f4..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ /dev/null
@@ -1,219 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_port.h BFA fcs port module public interface
20 */
21
22#ifndef __BFA_FCS_PORT_H__
23#define __BFA_FCS_PORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_pport.h>
28#include <defs/bfa_defs_rport.h>
29#include <cs/bfa_q.h>
30#include <bfa_svc.h>
31#include <cs/bfa_wc.h>
32
33struct bfa_fcs_s;
34struct bfa_fcs_fabric_s;
35
36/*
37 * Maximum Rports supported per port (physical/logical).
38 */
39#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
40
41
42struct bfa_fcs_port_ns_s {
43 bfa_sm_t sm; /* state machine */
44 struct bfa_timer_s timer;
45 struct bfa_fcs_port_s *port; /* parent port */
46 struct bfa_fcxp_s *fcxp;
47 struct bfa_fcxp_wqe_s fcxp_wqe;
48};
49
50
51struct bfa_fcs_port_scn_s {
52 bfa_sm_t sm; /* state machine */
53 struct bfa_timer_s timer;
54 struct bfa_fcs_port_s *port; /* parent port */
55 struct bfa_fcxp_s *fcxp;
56 struct bfa_fcxp_wqe_s fcxp_wqe;
57};
58
59
60struct bfa_fcs_port_fdmi_s {
61 bfa_sm_t sm; /* state machine */
62 struct bfa_timer_s timer;
63 struct bfa_fcs_port_ms_s *ms; /* parent ms */
64 struct bfa_fcxp_s *fcxp;
65 struct bfa_fcxp_wqe_s fcxp_wqe;
66 u8 retry_cnt; /* retry count */
67 u8 rsvd[3];
68};
69
70
71struct bfa_fcs_port_ms_s {
72 bfa_sm_t sm; /* state machine */
73 struct bfa_timer_s timer;
74 struct bfa_fcs_port_s *port; /* parent port */
75 struct bfa_fcxp_s *fcxp;
76 struct bfa_fcxp_wqe_s fcxp_wqe;
77 struct bfa_fcs_port_fdmi_s fdmi; /* FDMI component of MS */
78 u8 retry_cnt; /* retry count */
79 u8 rsvd[3];
80};
81
82
83struct bfa_fcs_port_fab_s {
84 struct bfa_fcs_port_ns_s ns; /* NS component of port */
85 struct bfa_fcs_port_scn_s scn; /* scn component of port */
86 struct bfa_fcs_port_ms_s ms; /* MS component of port */
87};
88
89
90
91#define MAX_ALPA_COUNT 127
92
93struct bfa_fcs_port_loop_s {
94 u8 num_alpa; /* Num of ALPA entries in the map */
95 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional
96 *Map */
97 struct bfa_fcs_port_s *port; /* parent port */
98};
99
100
101
102struct bfa_fcs_port_n2n_s {
103 u32 rsvd;
104 u16 reply_oxid; /* ox_id from the req flogi to be
105 *used in flogi acc */
106 wwn_t rem_port_wwn; /* Attached port's wwn */
107};
108
109
110union bfa_fcs_port_topo_u {
111 struct bfa_fcs_port_fab_s pfab;
112 struct bfa_fcs_port_loop_s ploop;
113 struct bfa_fcs_port_n2n_s pn2n;
114};
115
116
117struct bfa_fcs_port_s {
118 struct list_head qe; /* used by port/vport */
119 bfa_sm_t sm; /* state machine */
120 struct bfa_fcs_fabric_s *fabric;/* parent fabric */
121 struct bfa_port_cfg_s port_cfg;/* port configuration */
122 struct bfa_timer_s link_timer; /* timer for link offline */
123 u32 pid:24; /* FC address */
124 u8 lp_tag; /* lport tag */
125 u16 num_rports; /* Num of r-ports */
126 struct list_head rport_q; /* queue of discovered r-ports */
127 struct bfa_fcs_s *fcs; /* FCS instance */
128 union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */
129 struct bfad_port_s *bfad_port; /* driver peer instance */
130 struct bfa_fcs_vport_s *vport; /* NULL for base ports */
131 struct bfa_fcxp_s *fcxp;
132 struct bfa_fcxp_wqe_s fcxp_wqe;
133 struct bfa_port_stats_s stats;
134 struct bfa_wc_s wc; /* waiting counter for events */
135};
136
137#define bfa_fcs_lport_t struct bfa_fcs_port_s
138
139/**
140 * Symbolic Name related defines
141 * Total bytes 255.
142 * Physical Port's symbolic name 128 bytes.
143 * For Vports, Vport's symbolic name is appended to the Physical port's
144 * Symbolic Name.
145 *
146 * Physical Port's symbolic name Format : (Total 128 bytes)
147 * Adapter Model number/name : 12 bytes
148 * Driver Version : 10 bytes
149 * Host Machine Name : 30 bytes
150 * Host OS Info : 48 bytes
151 * Host OS PATCH Info : 16 bytes
152 * ( remaining 12 bytes reserved to be used for separator)
153 */
154#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
155
156#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12
157#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
158#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
159#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
160#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
161
162/**
163 * Get FC port ID for a logical port.
164 */
165#define bfa_fcs_port_get_fcid(_lport) ((_lport)->pid)
166#define bfa_fcs_port_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
167#define bfa_fcs_port_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
168#define bfa_fcs_port_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
169#define bfa_fcs_port_is_initiator(_lport) \
170 ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_IM)
171#define bfa_fcs_port_is_target(_lport) \
172 ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_TM)
173#define bfa_fcs_port_get_nrports(_lport) \
174 ((_lport) ? (_lport)->num_rports : 0)
175
176static inline struct bfad_port_s *
177bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port)
178{
179 return port->bfad_port;
180}
181
182
183#define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type)
184
185
186#define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
187
188
189#define bfa_fcs_port_get_fabric_ipaddr(_lport) \
190 ((_lport)->fabric->fabric_ip_addr)
191
192/**
193 * bfa fcs port public functions
194 */
195void bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs,
196 struct bfa_port_cfg_s *port_cfg);
197struct bfa_fcs_port_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
198void bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port,
199 wwn_t rport_wwns[], int *nrports);
200
201wwn_t bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn,
202 int index, int nrports, bfa_boolean_t bwwn);
203
204struct bfa_fcs_port_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
205 u16 vf_id, wwn_t lpwwn);
206
207void bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
208 struct bfa_port_info_s *port_info);
209void bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
210 struct bfa_port_attr_s *port_attr);
211void bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
212 struct bfa_port_stats_s *port_stats);
213void bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port);
214enum bfa_pport_speed bfa_fcs_port_get_rport_max_speed(
215 struct bfa_fcs_port_s *port);
216void bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
217void bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port);
218
219#endif /* __BFA_FCS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
deleted file mode 100644
index 3027fc6c7722..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_FCS_RPORT_H__
19#define __BFA_FCS_RPORT_H__
20
21#include <defs/bfa_defs_status.h>
22#include <cs/bfa_q.h>
23#include <fcs/bfa_fcs.h>
24#include <defs/bfa_defs_rport.h>
25
26#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
27/*
28 * forward declarations
29 */
30struct bfad_rport_s;
31
32struct bfa_fcs_itnim_s;
33struct bfa_fcs_tin_s;
34struct bfa_fcs_iprp_s;
35
36/* Rport Features (RPF) */
37struct bfa_fcs_rpf_s {
38 bfa_sm_t sm; /* state machine */
39 struct bfa_fcs_rport_s *rport; /* parent rport */
40 struct bfa_timer_s timer; /* general purpose timer */
41 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
42 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
43 int rpsc_retries; /* max RPSC retry attempts */
44 enum bfa_pport_speed rpsc_speed; /* Current Speed from RPSC.
45 * O if RPSC fails */
46 enum bfa_pport_speed assigned_speed; /* Speed assigned by the user.
47 * will be used if RPSC is not
48 * supported by the rport */
49};
50
51struct bfa_fcs_rport_s {
52 struct list_head qe; /* used by port/vport */
53 struct bfa_fcs_port_s *port; /* parent FCS port */
54 struct bfa_fcs_s *fcs; /* fcs instance */
55 struct bfad_rport_s *rp_drv; /* driver peer instance */
56 u32 pid; /* port ID of rport */
57 u16 maxfrsize; /* maximum frame size */
58 u16 reply_oxid; /* OX_ID of inbound requests */
59 enum fc_cos fc_cos; /* FC classes of service supp */
60 bfa_boolean_t cisc; /* CISC capable device */
61 bfa_boolean_t prlo; /* processing prlo or LOGO */
62 wwn_t pwwn; /* port wwn of rport */
63 wwn_t nwwn; /* node wwn of rport */
64 struct bfa_rport_symname_s psym_name; /* port symbolic name */
65 bfa_sm_t sm; /* state machine */
66 struct bfa_timer_s timer; /* general purpose timer */
67 struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
68 struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
69 struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
70 struct bfa_rport_s *bfa_rport; /* BFA Rport */
71 struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
72 int plogi_retries; /* max plogi retry attempts */
73 int ns_retries; /* max NS query retry attempts */
74 struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
75 struct bfa_rport_stats_s stats; /* rport stats */
76 enum bfa_rport_function scsi_function; /* Initiator/Target */
77 struct bfa_fcs_rpf_s rpf; /* Rport features module */
78};
79
80static inline struct bfa_rport_s *
81bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
82{
83 return rport->bfa_rport;
84}
85
86/**
87 * bfa fcs rport API functions
88 */
89bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
90 struct bfa_fcs_rport_s *rport,
91 struct bfad_rport_s *rport_drv);
92bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
93void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
94 struct bfa_rport_attr_s *attr);
95void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
96 struct bfa_rport_stats_s *stats);
97void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
98struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port,
99 wwn_t rpwwn);
100struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
101 struct bfa_fcs_port_s *port, wwn_t rnwwn);
102void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
103void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
104 enum bfa_pport_speed speed);
105#endif /* __BFA_FCS_RPORT_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
deleted file mode 100644
index 0af262430860..000000000000
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.h BFA fcs vport module public interface
20 */
21
22#ifndef __BFA_FCS_VPORT_H__
23#define __BFA_FCS_VPORT_H__
24
25#include <defs/bfa_defs_status.h>
26#include <defs/bfa_defs_port.h>
27#include <defs/bfa_defs_vport.h>
28#include <fcs/bfa_fcs.h>
29#include <fcb/bfa_fcb_vport.h>
30
31struct bfa_fcs_vport_s {
32 struct list_head qe; /* queue elem */
33 bfa_sm_t sm; /* state machine */
34 bfa_fcs_lport_t lport; /* logical port */
35 struct bfa_timer_s timer; /* general purpose timer */
36 struct bfad_vport_s *vport_drv; /* Driver private */
37 struct bfa_vport_stats_s vport_stats; /* vport statistics */
38 struct bfa_lps_s *lps; /* Lport login service */
39 int fdisc_retries;
40};
41
42#define bfa_fcs_vport_get_port(vport) \
43 ((struct bfa_fcs_port_s *)(&vport->port))
44
45/**
46 * bfa fcs vport public functions
47 */
48bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
49 struct bfa_fcs_s *fcs, u16 vf_id,
50 struct bfa_port_cfg_s *port_cfg,
51 struct bfad_vport_s *vport_drv);
52bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
53 struct bfa_fcs_s *fcs, uint16_t vf_id,
54 struct bfa_port_cfg_s *port_cfg,
55 struct bfad_vport_s *vport_drv);
56bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
57bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
58bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
59void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
60 struct bfa_vport_attr_s *vport_attr);
61void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
62 struct bfa_vport_stats_s *vport_stats);
63void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
64struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
65 u16 vf_id, wwn_t vpwwn);
66
67#endif /* __BFA_FCS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_fcs.h b/drivers/scsi/bfa/include/log/bfa_log_fcs.h
deleted file mode 100644
index b6f5df8827f8..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_fcs.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for FCS Module
20 */
21#ifndef __BFA_LOG_FCS_H__
22#define __BFA_LOG_FCS_H__
23#include <cs/bfa_log.h>
24#define BFA_LOG_FCS_FABRIC_NOSWITCH \
25 (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 1)
26#define BFA_LOG_FCS_FABRIC_ISOLATED \
27 (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 2)
28#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
deleted file mode 100644
index 5f8f5e30b9e8..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for HAL Module */
19#ifndef __BFA_LOG_HAL_H__
20#define __BFA_LOG_HAL_H__
21#include <cs/bfa_log.h>
22#define BFA_LOG_HAL_ASSERT \
23 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 1)
24#define BFA_LOG_HAL_HEARTBEAT_FAILURE \
25 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 2)
26#define BFA_LOG_HAL_FCPIM_PARM_INVALID \
27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_HAL_SM_ASSERT \
29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_HAL_DRIVER_ERROR \
31 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
33 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_HAL_MBOX_ERROR \
35 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
36#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
deleted file mode 100644
index 44bc89768bda..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/* messages define for LINUX Module */
19#ifndef __BFA_LOG_LINUX_H__
20#define __BFA_LOG_LINUX_H__
21#include <cs/bfa_log.h>
22#define BFA_LOG_LINUX_DEVICE_CLAIMED \
23 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 1)
24#define BFA_LOG_LINUX_HASH_INIT_FAILED \
25 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 2)
26#define BFA_LOG_LINUX_SYSFS_FAILED \
27 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_LINUX_MEM_ALLOC_FAILED \
29 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED \
31 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_LINUX_ITNIM_FREE \
33 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_LINUX_ITNIM_ONLINE \
35 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 7)
36#define BFA_LOG_LINUX_ITNIM_OFFLINE \
37 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 8)
38#define BFA_LOG_LINUX_SCSI_HOST_FREE \
39 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 9)
40#define BFA_LOG_LINUX_SCSI_ABORT \
41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \
43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
44#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
45 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
46#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
47 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
48#define BFA_LOG_LINUX_IOC_ERROR \
49 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
50#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
51 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
52#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
54#define BFA_LOG_LINUX_DRIVER_ERROR \
55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
56#define BFA_LOG_LINUX_DRIVER_INFO \
57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
58#define BFA_LOG_LINUX_DRIVER_DIAG \
59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
60#define BFA_LOG_LINUX_DRIVER_AEN \
61 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 20)
62#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h b/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
deleted file mode 100644
index 809a95f7afe2..000000000000
--- a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * messages define for WDRV Module
20 */
21#ifndef __BFA_LOG_WDRV_H__
22#define __BFA_LOG_WDRV_H__
23#include <cs/bfa_log.h>
24#define BFA_LOG_WDRV_IOC_INIT_ERROR \
25 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 1)
26#define BFA_LOG_WDRV_IOC_INTERNAL_ERROR \
27 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 2)
28#define BFA_LOG_WDRV_IOC_START_ERROR \
29 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 3)
30#define BFA_LOG_WDRV_IOC_STOP_ERROR \
31 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 4)
32#define BFA_LOG_WDRV_INSUFFICIENT_RESOURCES \
33 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 5)
34#define BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR \
35 (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 6)
36#endif
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h
deleted file mode 100644
index b82540a230c4..000000000000
--- a/drivers/scsi/bfa/include/protocol/ct.h
+++ /dev/null
@@ -1,492 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __CT_H__
19#define __CT_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25struct ct_hdr_s{
26 u32 rev_id:8; /* Revision of the CT */
27 u32 in_id:24; /* Initiator Id */
28 u32 gs_type:8; /* Generic service Type */
29 u32 gs_sub_type:8; /* Generic service sub type */
30 u32 options:8; /* options */
31 u32 rsvrd:8; /* reserved */
32 u32 cmd_rsp_code:16;/* ct command/response code */
33 u32 max_res_size:16;/* maximum/residual size */
34 u32 frag_id:8; /* fragment ID */
35 u32 reason_code:8; /* reason code */
36 u32 exp_code:8; /* explanation code */
37 u32 vendor_unq:8; /* vendor unique */
38};
39
40/*
41 * defines for the Revision
42 */
43enum {
44 CT_GS3_REVISION = 0x01,
45};
46
47/*
48 * defines for gs_type
49 */
50enum {
51 CT_GSTYPE_KEYSERVICE = 0xF7,
52 CT_GSTYPE_ALIASSERVICE = 0xF8,
53 CT_GSTYPE_MGMTSERVICE = 0xFA,
54 CT_GSTYPE_TIMESERVICE = 0xFB,
55 CT_GSTYPE_DIRSERVICE = 0xFC,
56};
57
58/*
59 * defines for gs_sub_type for gs type directory service
60 */
61enum {
62 CT_GSSUBTYPE_NAMESERVER = 0x02,
63};
64
65/*
66 * defines for gs_sub_type for gs type management service
67 */
68enum {
69 CT_GSSUBTYPE_CFGSERVER = 0x01,
70 CT_GSSUBTYPE_UNZONED_NS = 0x02,
71 CT_GSSUBTYPE_ZONESERVER = 0x03,
72 CT_GSSUBTYPE_LOCKSERVER = 0x04,
73 CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
74};
75
76/*
77 * defines for CT response code field
78 */
79enum {
80 CT_RSP_REJECT = 0x8001,
81 CT_RSP_ACCEPT = 0x8002,
82};
83
84/*
85 * definitions for CT reason code
86 */
87enum {
88 CT_RSN_INV_CMD = 0x01,
89 CT_RSN_INV_VER = 0x02,
90 CT_RSN_LOGIC_ERR = 0x03,
91 CT_RSN_INV_SIZE = 0x04,
92 CT_RSN_LOGICAL_BUSY = 0x05,
93 CT_RSN_PROTO_ERR = 0x07,
94 CT_RSN_UNABLE_TO_PERF = 0x09,
95 CT_RSN_NOT_SUPP = 0x0B,
96 CT_RSN_SERVER_NOT_AVBL = 0x0D,
97 CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
98 CT_RSN_VENDOR_SPECIFIC = 0xFF,
99
100};
101
102/*
103 * definitions for explanations code for Name server
104 */
105enum {
106 CT_NS_EXP_NOADDITIONAL = 0x00,
107 CT_NS_EXP_ID_NOT_REG = 0x01,
108 CT_NS_EXP_PN_NOT_REG = 0x02,
109 CT_NS_EXP_NN_NOT_REG = 0x03,
110 CT_NS_EXP_CS_NOT_REG = 0x04,
111 CT_NS_EXP_IPN_NOT_REG = 0x05,
112 CT_NS_EXP_IPA_NOT_REG = 0x06,
113 CT_NS_EXP_FT_NOT_REG = 0x07,
114 CT_NS_EXP_SPN_NOT_REG = 0x08,
115 CT_NS_EXP_SNN_NOT_REG = 0x09,
116 CT_NS_EXP_PT_NOT_REG = 0x0A,
117 CT_NS_EXP_IPP_NOT_REG = 0x0B,
118 CT_NS_EXP_FPN_NOT_REG = 0x0C,
119 CT_NS_EXP_HA_NOT_REG = 0x0D,
120 CT_NS_EXP_FD_NOT_REG = 0x0E,
121 CT_NS_EXP_FF_NOT_REG = 0x0F,
122 CT_NS_EXP_ACCESSDENIED = 0x10,
123 CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
124 CT_NS_EXP_DATABASEEMPTY = 0x12,
125 CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
126 CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
127 CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
128 CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
129};
130
131/*
132 * definitions for the explanation code for all servers
133 */
134enum {
135 CT_EXP_AUTH_EXCEPTION = 0xF1,
136 CT_EXP_DB_FULL = 0xF2,
137 CT_EXP_DB_EMPTY = 0xF3,
138 CT_EXP_PROCESSING_REQ = 0xF4,
139 CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
140 CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
141};
142
143/*
144 * Command codes for Name server
145 */
146enum {
147 GS_GID_PN = 0x0121, /* Get Id on port name */
148 GS_GPN_ID = 0x0112, /* Get port name on ID */
149 GS_GNN_ID = 0x0113, /* Get node name on ID */
150 GS_GID_FT = 0x0171, /* Get Id on FC4 type */
151 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
152 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
153 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
154 GS_RPN_ID = 0x0212, /* Register port name */
155 GS_RNN_ID = 0x0213, /* Register node name */
156 GS_RCS_ID = 0x0214, /* Register class of service */
157 GS_RPT_ID = 0x021A, /* Register port type */
158 GS_GA_NXT = 0x0100, /* Get all next */
159 GS_RFF_ID = 0x021F, /* Register FC4 Feature */
160};
161
162struct fcgs_id_req_s{
163 u32 rsvd:8;
164 u32 dap:24; /* port identifier */
165};
166#define fcgs_gpnid_req_t struct fcgs_id_req_s
167#define fcgs_gnnid_req_t struct fcgs_id_req_s
168#define fcgs_gspnid_req_t struct fcgs_id_req_s
169
170struct fcgs_gidpn_req_s{
171 wwn_t port_name; /* port wwn */
172};
173
174struct fcgs_gidpn_resp_s{
175 u32 rsvd:8;
176 u32 dap:24; /* port identifier */
177};
178
179/**
180 * RFT_ID
181 */
182struct fcgs_rftid_req_s {
183 u32 rsvd:8;
184 u32 dap:24; /* port identifier */
185 u32 fc4_type[8]; /* fc4 types */
186};
187
188/**
189 * RFF_ID : Register FC4 features.
190 */
191
192#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
193#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
194
195struct fcgs_rffid_req_s{
196 u32 rsvd:8;
197 u32 dap:24; /* port identifier */
198 u32 rsvd1:16;
199 u32 fc4ftr_bits:8; /* fc4 feature bits */
200 u32 fc4_type:8; /* corresponding FC4 Type */
201};
202
203/**
204 * GID_FT Request
205 */
206struct fcgs_gidft_req_s{
207 u8 reserved;
208 u8 domain_id; /* domain, 0 - all fabric */
209 u8 area_id; /* area, 0 - whole domain */
210 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
211}; /* GID_FT Request */
212
213/**
214 * GID_FT Response
215 */
216struct fcgs_gidft_resp_s {
217 u8 last:1; /* last port identifier flag */
218 u8 reserved:7;
219 u32 pid:24; /* port identifier */
220}; /* GID_FT Response */
221
222/**
223 * RSPN_ID
224 */
225struct fcgs_rspnid_req_s{
226 u32 rsvd:8;
227 u32 dap:24; /* port identifier */
228 u8 spn_len; /* symbolic port name length */
229 u8 spn[256]; /* symbolic port name */
230};
231
232/**
233 * RPN_ID
234 */
235struct fcgs_rpnid_req_s{
236 u32 rsvd:8;
237 u32 port_id:24;
238 wwn_t port_name;
239};
240
241/**
242 * RNN_ID
243 */
244struct fcgs_rnnid_req_s{
245 u32 rsvd:8;
246 u32 port_id:24;
247 wwn_t node_name;
248};
249
250/**
251 * RCS_ID
252 */
253struct fcgs_rcsid_req_s{
254 u32 rsvd:8;
255 u32 port_id:24;
256 u32 cos;
257};
258
259/**
260 * RPT_ID
261 */
262struct fcgs_rptid_req_s{
263 u32 rsvd:8;
264 u32 port_id:24;
265 u32 port_type:8;
266 u32 rsvd1:24;
267};
268
269/**
270 * GA_NXT Request
271 */
272struct fcgs_ganxt_req_s{
273 u32 rsvd:8;
274 u32 port_id:24;
275};
276
277/**
278 * GA_NXT Response
279 */
280struct fcgs_ganxt_rsp_s{
281 u32 port_type:8; /* Port Type */
282 u32 port_id:24; /* Port Identifier */
283 wwn_t port_name; /* Port Name */
284 u8 spn_len; /* Length of Symbolic Port Name */
285 char spn[255]; /* Symbolic Port Name */
286 wwn_t node_name; /* Node Name */
287 u8 snn_len; /* Length of Symbolic Node Name */
288 char snn[255]; /* Symbolic Node Name */
289 u8 ipa[8]; /* Initial Process Associator */
290 u8 ip[16]; /* IP Address */
291 u32 cos; /* Class of Service */
292 u32 fc4types[8]; /* FC-4 TYPEs */
293 wwn_t fabric_port_name;
294 /* Fabric Port Name */
295 u32 rsvd:8; /* Reserved */
296 u32 hard_addr:24; /* Hard Address */
297};
298
299/*
300 * Fabric Config Server
301 */
302
303/*
304 * Command codes for Fabric Configuration Server
305 */
306enum {
307 GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
308 GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
309 GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
310 GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
311};
312
313/*
314 * Source or Destination Port Tags.
315 */
316enum {
317 GS_FTRACE_TAG_NPORT_ID = 1,
318 GS_FTRACE_TAG_NPORT_NAME = 2,
319};
320
321/*
322* Port Value : Could be a Port id or wwn
323 */
324union fcgs_port_val_u{
325 u32 nport_id;
326 wwn_t nport_wwn;
327};
328
329#define GS_FTRACE_MAX_HOP_COUNT 20
330#define GS_FTRACE_REVISION 1
331
332/*
333 * Ftrace Related Structures.
334 */
335
336/*
337 * STR (Switch Trace) Reject Reason Codes. From FC-SW.
338 */
339enum {
340 GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0,
341 GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
342 GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
343 GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
344 GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
345 GS_FTRACE_STR_DST_PORT_NOT_FOUND,
346 GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
347 GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
348 GS_FTRACE_STR_NO_ADDL_EXPLN,
349 GS_FTRACE_STR_FABRIC_BUSY,
350 GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
351 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
352 GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
353};
354
355/*
356 * Ftrace Request
357 */
358struct fcgs_ftrace_req_s{
359 u32 revision;
360 u16 src_port_tag; /* Source Port tag */
361 u16 src_port_len; /* Source Port len */
362 union fcgs_port_val_u src_port_val; /* Source Port value */
363 u16 dst_port_tag; /* Destination Port tag */
364 u16 dst_port_len; /* Destination Port len */
365 union fcgs_port_val_u dst_port_val; /* Destination Port value */
366 u32 token;
367 u8 vendor_id[8]; /* T10 Vendor Identifier */
368 u8 vendor_info[8]; /* Vendor specific Info */
369 u32 max_hop_cnt; /* Max Hop Count */
370};
371
372/*
373 * Path info structure
374 */
375struct fcgs_ftrace_path_info_s{
376 wwn_t switch_name; /* Switch WWN */
377 u32 domain_id;
378 wwn_t ingress_port_name; /* Ingress ports wwn */
379 u32 ingress_phys_port_num; /* Ingress ports physical port
380 * number
381 */
382 wwn_t egress_port_name; /* Ingress ports wwn */
383 u32 egress_phys_port_num; /* Ingress ports physical port
384 * number
385 */
386};
387
388/*
389 * Ftrace Acc Response
390 */
391struct fcgs_ftrace_resp_s{
392 u32 revision;
393 u32 token;
394 u8 vendor_id[8]; /* T10 Vendor Identifier */
395 u8 vendor_info[8]; /* Vendor specific Info */
396 u32 str_rej_reason_code; /* STR Reject Reason Code */
397 u32 num_path_info_entries; /* No. of path info entries */
398 /*
399 * path info entry/entries.
400 */
401 struct fcgs_ftrace_path_info_s path_info[1];
402
403};
404
405/*
406* Fabric Config Server : FCPing
407 */
408
409/*
410 * FC Ping Request
411 */
412struct fcgs_fcping_req_s{
413 u32 revision;
414 u16 port_tag;
415 u16 port_len; /* Port len */
416 union fcgs_port_val_u port_val; /* Port value */
417 u32 token;
418};
419
420/*
421 * FC Ping Response
422 */
423struct fcgs_fcping_resp_s{
424 u32 token;
425};
426
427/*
428 * Command codes for zone server query.
429 */
430enum {
431 ZS_GZME = 0x0124, /* Get zone member extended */
432};
433
434/*
435 * ZS GZME request
436 */
437#define ZS_GZME_ZNAMELEN 32
438struct zs_gzme_req_s{
439 u8 znamelen;
440 u8 rsvd[3];
441 u8 zname[ZS_GZME_ZNAMELEN];
442};
443
444enum zs_mbr_type{
445 ZS_MBR_TYPE_PWWN = 1,
446 ZS_MBR_TYPE_DOMPORT = 2,
447 ZS_MBR_TYPE_PORTID = 3,
448 ZS_MBR_TYPE_NWWN = 4,
449};
450
451struct zs_mbr_wwn_s{
452 u8 mbr_type;
453 u8 rsvd[3];
454 wwn_t wwn;
455};
456
457struct zs_query_resp_s{
458 u32 nmbrs; /* number of zone members */
459 struct zs_mbr_wwn_s mbr[1];
460};
461
462/*
463 * GMAL Command ( Get ( interconnect Element) Management Address List)
464 * To retrieve the IP Address of a Switch.
465 */
466
467#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
468#define CT_GMAL_RESP_PREFIX_HTTP "http://"
469
470/* GMAL/GFN request */
471struct fcgs_req_s {
472 wwn_t wwn; /* PWWN/NWWN */
473};
474
475#define fcgs_gmal_req_t struct fcgs_req_s
476#define fcgs_gfn_req_t struct fcgs_req_s
477
478/* Accept Response to GMAL */
479struct fcgs_gmal_resp_s {
480 u32 ms_len; /* Num of entries */
481 u8 ms_ma[256];
482};
483
484struct fc_gmal_entry_s {
485 u8 len;
486 u8 prefix[7]; /* like "http://" */
487 u8 ip_addr[248];
488};
489
490#pragma pack()
491
492#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc_sp.h b/drivers/scsi/bfa/include/protocol/fc_sp.h
deleted file mode 100644
index 55bb0b31d04b..000000000000
--- a/drivers/scsi/bfa/include/protocol/fc_sp.h
+++ /dev/null
@@ -1,224 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FC_SP_H__
19#define __FC_SP_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25enum auth_els_flags{
26 FC_AUTH_ELS_MORE_FRAGS_FLAG = 0x80, /*! bit-7. More Fragments
27 * Follow
28 */
29 FC_AUTH_ELS_CONCAT_FLAG = 0x40, /*! bit-6. Concatenation Flag */
30 FC_AUTH_ELS_SEQ_NUM_FLAG = 0x01 /*! bit-0. Sequence Number */
31};
32
33enum auth_msg_codes{
34 FC_AUTH_MC_AUTH_RJT = 0x0A, /*! Auth Reject */
35 FC_AUTH_MC_AUTH_NEG = 0x0B, /*! Auth Negotiate */
36 FC_AUTH_MC_AUTH_DONE = 0x0C, /*! Auth Done */
37
38 FC_AUTH_MC_DHCHAP_CHAL = 0x10, /*! DHCHAP Challenge */
39 FC_AUTH_MC_DHCHAP_REPLY = 0x11, /*! DHCHAP Reply */
40 FC_AUTH_MC_DHCHAP_SUCC = 0x12, /*! DHCHAP Success */
41
42 FC_AUTH_MC_FCAP_REQ = 0x13, /*! FCAP Request */
43 FC_AUTH_MC_FCAP_ACK = 0x14, /*! FCAP Acknowledge */
44 FC_AUTH_MC_FCAP_CONF = 0x15, /*! FCAP Confirm */
45
46 FC_AUTH_MC_FCPAP_INIT = 0x16, /*! FCPAP Init */
47 FC_AUTH_MC_FCPAP_ACC = 0x17, /*! FCPAP Accept */
48 FC_AUTH_MC_FCPAP_COMP = 0x18, /*! FCPAP Complete */
49
50 FC_AUTH_MC_IKE_SA_INIT = 0x22, /*! IKE SA INIT */
51 FC_AUTH_MC_IKE_SA_AUTH = 0x23, /*! IKE SA Auth */
52 FC_AUTH_MC_IKE_CREATE_CHILD_SA = 0x24, /*! IKE Create Child SA */
53 FC_AUTH_MC_IKE_INFO = 0x25, /*! IKE informational */
54};
55
56enum auth_proto_version{
57 FC_AUTH_PROTO_VER_1 = 1, /*! Protocol Version 1 */
58};
59
60enum {
61 FC_AUTH_ELS_COMMAND_CODE = 0x90,/*! Authentication ELS Command code */
62 FC_AUTH_PROTO_PARAM_LEN_SZ = 4, /*! Size of Proto Parameter Len Field */
63 FC_AUTH_PROTO_PARAM_VAL_SZ = 4, /*! Size of Proto Parameter Val Field */
64 FC_MAX_AUTH_SECRET_LEN = 256,
65 /*! Maximum secret string length */
66 FC_AUTH_NUM_USABLE_PROTO_LEN_SZ = 4,
67 /*! Size of usable protocols field */
68 FC_AUTH_RESP_VALUE_LEN_SZ = 4,
69 /*! Size of response value length */
70 FC_MAX_CHAP_KEY_LEN = 256, /*! Maximum md5 digest length */
71 FC_MAX_AUTH_RETRIES = 3, /*! Maximum number of retries */
72 FC_MD5_DIGEST_LEN = 16, /*! MD5 digest length */
73 FC_SHA1_DIGEST_LEN = 20, /*! SHA1 digest length */
74 FC_MAX_DHG_SUPPORTED = 1, /*! Maximum DH Groups supported */
75 FC_MAX_ALG_SUPPORTED = 1, /*! Maximum algorithms supported */
76 FC_MAX_PROTO_SUPPORTED = 1, /*! Maximum protocols supported */
77 FC_START_TXN_ID = 2, /*! Starting transaction ID */
78};
79
80enum auth_proto_id{
81 FC_AUTH_PROTO_DHCHAP = 0x00000001,
82 FC_AUTH_PROTO_FCAP = 0x00000002,
83 FC_AUTH_PROTO_FCPAP = 0x00000003,
84 FC_AUTH_PROTO_IKEv2 = 0x00000004,
85 FC_AUTH_PROTO_IKEv2_AUTH = 0x00000005,
86};
87
88struct auth_name_s{
89 u16 name_tag; /*! Name Tag = 1 for Authentication */
90 u16 name_len; /*! Name Length = 8 for Authentication
91 */
92 wwn_t name; /*! Name. TODO - is this PWWN */
93};
94
95
96enum auth_hash_func{
97 FC_AUTH_HASH_FUNC_MD5 = 0x00000005,
98 FC_AUTH_HASH_FUNC_SHA_1 = 0x00000006,
99};
100
101enum auth_dh_gid{
102 FC_AUTH_DH_GID_0_DHG_NULL = 0x00000000,
103 FC_AUTH_DH_GID_1_DHG_1024 = 0x00000001,
104 FC_AUTH_DH_GID_2_DHG_1280 = 0x00000002,
105 FC_AUTH_DH_GID_3_DHG_1536 = 0x00000003,
106 FC_AUTH_DH_GID_4_DHG_2048 = 0x00000004,
107 FC_AUTH_DH_GID_6_DHG_3072 = 0x00000006,
108 FC_AUTH_DH_GID_7_DHG_4096 = 0x00000007,
109 FC_AUTH_DH_GID_8_DHG_6144 = 0x00000008,
110 FC_AUTH_DH_GID_9_DHG_8192 = 0x00000009,
111};
112
113struct auth_els_msg_s {
114 u8 auth_els_code; /* Authentication ELS Code (0x90) */
115 u8 auth_els_flag; /* Authentication ELS Flags */
116 u8 auth_msg_code; /* Authentication Message Code */
117 u8 proto_version; /* Protocol Version */
118 u32 msg_len; /* Message Length */
119 u32 trans_id; /* Transaction Identifier (T_ID) */
120
121 /* Msg payload follows... */
122};
123
124
125enum auth_neg_param_tags {
126 FC_AUTH_NEG_DHCHAP_HASHLIST = 0x0001,
127 FC_AUTH_NEG_DHCHAP_DHG_ID_LIST = 0x0002,
128};
129
130
131struct dhchap_param_format_s {
132 u16 tag; /*! Parameter Tag. See
133 * auth_neg_param_tags_t
134 */
135 u16 word_cnt;
136
137 /* followed by variable length parameter value... */
138};
139
140struct auth_proto_params_s {
141 u32 proto_param_len;
142 u32 proto_id;
143
144 /*
145 * Followed by variable length Protocol specific parameters. DH-CHAP
146 * uses dhchap_param_format_t
147 */
148};
149
150struct auth_neg_msg_s {
151 struct auth_name_s auth_ini_name;
152 u32 usable_auth_protos;
153 struct auth_proto_params_s proto_params[1]; /*! (1..usable_auth_proto)
154 * protocol params
155 */
156};
157
158struct auth_dh_val_s {
159 u32 dh_val_len;
160 u32 dh_val[1];
161};
162
163struct auth_dhchap_chal_msg_s {
164 struct auth_els_msg_s hdr;
165 struct auth_name_s auth_responder_name; /* TODO VRK - is auth_name_t
166 * type OK?
167 */
168 u32 hash_id;
169 u32 dh_grp_id;
170 u32 chal_val_len;
171 char chal_val[1];
172
173 /* ...followed by variable Challenge length/value and DH length/value */
174};
175
176
177enum auth_rjt_codes {
178 FC_AUTH_RJT_CODE_AUTH_FAILURE = 0x01,
179 FC_AUTH_RJT_CODE_LOGICAL_ERR = 0x02,
180};
181
182enum auth_rjt_code_exps {
183 FC_AUTH_CEXP_AUTH_MECH_NOT_USABLE = 0x01,
184 FC_AUTH_CEXP_DH_GROUP_NOT_USABLE = 0x02,
185 FC_AUTH_CEXP_HASH_FUNC_NOT_USABLE = 0x03,
186 FC_AUTH_CEXP_AUTH_XACT_STARTED = 0x04,
187 FC_AUTH_CEXP_AUTH_FAILED = 0x05,
188 FC_AUTH_CEXP_INCORRECT_PLD = 0x06,
189 FC_AUTH_CEXP_INCORRECT_PROTO_MSG = 0x07,
190 FC_AUTH_CEXP_RESTART_AUTH_PROTO = 0x08,
191 FC_AUTH_CEXP_AUTH_CONCAT_NOT_SUPP = 0x09,
192 FC_AUTH_CEXP_PROTO_VER_NOT_SUPP = 0x0A,
193};
194
195enum auth_status {
196 FC_AUTH_STATE_INPROGRESS = 0, /*! authentication in progress */
197 FC_AUTH_STATE_FAILED = 1, /*! authentication failed */
198 FC_AUTH_STATE_SUCCESS = 2 /*! authentication successful */
199};
200
201struct auth_rjt_msg_s {
202 struct auth_els_msg_s hdr;
203 u8 reason_code;
204 u8 reason_code_exp;
205 u8 rsvd[2];
206};
207
208
209struct auth_dhchap_neg_msg_s {
210 struct auth_els_msg_s hdr;
211 struct auth_neg_msg_s nego;
212};
213
214struct auth_dhchap_reply_msg_s {
215 struct auth_els_msg_s hdr;
216
217 /*
218 * followed by response value length & Value + DH Value Length & Value
219 */
220};
221
222#pragma pack()
223
224#endif /* __FC_SP_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/fcp.h b/drivers/scsi/bfa/include/protocol/fcp.h
deleted file mode 100644
index 74ea63ce84b7..000000000000
--- a/drivers/scsi/bfa/include/protocol/fcp.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FCPPROTO_H__
19#define __FCPPROTO_H__
20
21#include <linux/bitops.h>
22#include <protocol/scsi.h>
23
24#pragma pack(1)
25
26enum {
27 FCP_RJT = 0x01000000, /* SRR reject */
28 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
29 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
30};
31
32/*
33 * SRR FC-4 LS payload
34 */
35struct fc_srr_s{
36 u32 ls_cmd;
37 u32 ox_id:16; /* ox-id */
38 u32 rx_id:16; /* rx-id */
39 u32 ro; /* relative offset */
40 u32 r_ctl:8; /* R_CTL for I.U. */
41 u32 res:24;
42};
43
44
45/*
46 * FCP_CMND definitions
47 */
48#define FCP_CMND_CDB_LEN 16
49#define FCP_CMND_LUN_LEN 8
50
51struct fcp_cmnd_s{
52 lun_t lun; /* 64-bit LU number */
53 u8 crn; /* command reference number */
54#ifdef __BIGENDIAN
55 u8 resvd:1,
56 priority:4, /* FCP-3: SAM-3 priority */
57 taskattr:3; /* scsi task attribute */
58#else
59 u8 taskattr:3, /* scsi task attribute */
60 priority:4, /* FCP-3: SAM-3 priority */
61 resvd:1;
62#endif
63 u8 tm_flags; /* task management flags */
64#ifdef __BIGENDIAN
65 u8 addl_cdb_len:6, /* additional CDB length words */
66 iodir:2; /* read/write FCP_DATA IUs */
67#else
68 u8 iodir:2, /* read/write FCP_DATA IUs */
69 addl_cdb_len:6; /* additional CDB length */
70#endif
71 struct scsi_cdb_s cdb;
72
73 /*
74 * !!! additional cdb bytes follows here!!!
75 */
76 u32 fcp_dl; /* bytes to be transferred */
77};
78
79#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
80#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
81
82/*
83 * fcp_cmnd_t.iodir field values
84 */
85enum fcp_iodir{
86 FCP_IODIR_NONE = 0,
87 FCP_IODIR_WRITE = 1,
88 FCP_IODIR_READ = 2,
89 FCP_IODIR_RW = 3,
90};
91
92/*
93 * Task attribute field
94 */
95enum {
96 FCP_TASK_ATTR_SIMPLE = 0,
97 FCP_TASK_ATTR_HOQ = 1,
98 FCP_TASK_ATTR_ORDERED = 2,
99 FCP_TASK_ATTR_ACA = 4,
100 FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */
101};
102
103/*
104 * Task management flags field - only one bit shall be set
105 */
106enum fcp_tm_cmnd{
107 FCP_TM_ABORT_TASK_SET = BIT(1),
108 FCP_TM_CLEAR_TASK_SET = BIT(2),
109 FCP_TM_LUN_RESET = BIT(4),
110 FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */
111 FCP_TM_CLEAR_ACA = BIT(6),
112};
113
114/*
115 * FCP_XFER_RDY IU defines
116 */
117struct fcp_xfer_rdy_s{
118 u32 data_ro;
119 u32 burst_len;
120 u32 reserved;
121};
122
123/*
124 * FCP_RSP residue flags
125 */
126enum fcp_residue{
127 FCP_NO_RESIDUE = 0, /* no residue */
128 FCP_RESID_OVER = 1, /* more data left that was not sent */
129 FCP_RESID_UNDER = 2, /* less data than requested */
130};
131
132enum {
133 FCP_RSPINFO_GOOD = 0,
134 FCP_RSPINFO_DATALEN_MISMATCH = 1,
135 FCP_RSPINFO_CMND_INVALID = 2,
136 FCP_RSPINFO_ROLEN_MISMATCH = 3,
137 FCP_RSPINFO_TM_NOT_SUPP = 4,
138 FCP_RSPINFO_TM_FAILED = 5,
139};
140
141struct fcp_rspinfo_s{
142 u32 res0:24;
143 u32 rsp_code:8; /* response code (as above) */
144 u32 res1;
145};
146
147struct fcp_resp_s{
148 u32 reserved[2]; /* 2 words reserved */
149 u16 reserved2;
150#ifdef __BIGENDIAN
151 u8 reserved3:3;
152 u8 fcp_conf_req:1; /* FCP_CONF is requested */
153 u8 resid_flags:2; /* underflow/overflow */
154 u8 sns_len_valid:1;/* sense len is valid */
155 u8 rsp_len_valid:1;/* response len is valid */
156#else
157 u8 rsp_len_valid:1;/* response len is valid */
158 u8 sns_len_valid:1;/* sense len is valid */
159 u8 resid_flags:2; /* underflow/overflow */
160 u8 fcp_conf_req:1; /* FCP_CONF is requested */
161 u8 reserved3:3;
162#endif
163 u8 scsi_status; /* one byte SCSI status */
164 u32 residue; /* residual data bytes */
165 u32 sns_len; /* length od sense info */
166 u32 rsp_len; /* length of response info */
167};
168
169#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
170 (__fcprsp)->sns_len : 0)
171#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
172 (__fcprsp)->rsp_len : 0)
173#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
174#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
175 fcp_rsplen(__fcprsp))
176
177struct fcp_cmnd_fr_s{
178 struct fchs_s fchs;
179 struct fcp_cmnd_s fcp;
180};
181
182#pragma pack()
183
184#endif
diff --git a/drivers/scsi/bfa/include/protocol/fdmi.h b/drivers/scsi/bfa/include/protocol/fdmi.h
deleted file mode 100644
index 6c05c268c71b..000000000000
--- a/drivers/scsi/bfa/include/protocol/fdmi.h
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __FDMI_H__
19#define __FDMI_H__
20
21#include <protocol/types.h>
22#include <protocol/fc.h>
23#include <protocol/ct.h>
24
25#pragma pack(1)
26
27/*
28 * FDMI Command Codes
29 */
30#define FDMI_GRHL 0x0100
31#define FDMI_GHAT 0x0101
32#define FDMI_GRPL 0x0102
33#define FDMI_GPAT 0x0110
34#define FDMI_RHBA 0x0200
35#define FDMI_RHAT 0x0201
36#define FDMI_RPRT 0x0210
37#define FDMI_RPA 0x0211
38#define FDMI_DHBA 0x0300
39#define FDMI_DPRT 0x0310
40
41/*
42 * FDMI reason codes
43 */
44#define FDMI_NO_ADDITIONAL_EXP 0x00
45#define FDMI_HBA_ALREADY_REG 0x10
46#define FDMI_HBA_ATTRIB_NOT_REG 0x11
47#define FDMI_HBA_ATTRIB_MULTIPLE 0x12
48#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13
49#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14
50#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15
51#define FDMI_PORT_HBA_NOT_IN_LIST 0x16
52#define FDMI_PORT_ATTRIB_NOT_REG 0x20
53#define FDMI_PORT_NOT_REG 0x21
54#define FDMI_PORT_ATTRIB_MULTIPLE 0x22
55#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23
56#define FDMI_PORT_ALREADY_REGISTEREED 0x24
57
58/*
59 * FDMI Transmission Speed Mask values
60 */
61#define FDMI_TRANS_SPEED_1G 0x00000001
62#define FDMI_TRANS_SPEED_2G 0x00000002
63#define FDMI_TRANS_SPEED_10G 0x00000004
64#define FDMI_TRANS_SPEED_4G 0x00000008
65#define FDMI_TRANS_SPEED_8G 0x00000010
66#define FDMI_TRANS_SPEED_16G 0x00000020
67#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
68
69/*
70 * FDMI HBA attribute types
71 */
72enum fdmi_hba_attribute_type {
73 FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
74 FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
75 FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
76 FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
77 FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
78 FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
79 FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
80 FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
81 FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
82 FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
83 FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
84
85 FDMI_HBA_ATTRIB_MAX_TYPE
86};
87
88/*
89 * FDMI Port attribute types
90 */
91enum fdmi_port_attribute_type {
92 FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
93 FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
94 FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
95 FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
96 FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
97 FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
98
99 FDMI_PORT_ATTR_MAX_TYPE
100};
101
102/*
103 * FDMI attribute
104 */
105struct fdmi_attr_s {
106 u16 type;
107 u16 len;
108 u8 value[1];
109};
110
111/*
112 * HBA Attribute Block
113 */
114struct fdmi_hba_attr_s {
115 u32 attr_count; /* # of attributes */
116 struct fdmi_attr_s hba_attr; /* n attributes */
117};
118
119/*
120 * Registered Port List
121 */
122struct fdmi_port_list_s {
123 u32 num_ports; /* number Of Port Entries */
124 wwn_t port_entry; /* one or more */
125};
126
127/*
128 * Port Attribute Block
129 */
130struct fdmi_port_attr_s {
131 u32 attr_count; /* # of attributes */
132 struct fdmi_attr_s port_attr; /* n attributes */
133};
134
135/*
136 * FDMI Register HBA Attributes
137 */
138struct fdmi_rhba_s {
139 wwn_t hba_id; /* HBA Identifier */
140 struct fdmi_port_list_s port_list; /* Registered Port List */
141 struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
142};
143
144/*
145 * FDMI Register Port
146 */
147struct fdmi_rprt_s {
148 wwn_t hba_id; /* HBA Identifier */
149 wwn_t port_name; /* Port wwn */
150 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
151};
152
153/*
154 * FDMI Register Port Attributes
155 */
156struct fdmi_rpa_s {
157 wwn_t port_name; /* port wwn */
158 struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
159};
160
161#pragma pack()
162
163#endif
diff --git a/drivers/scsi/bfa/include/protocol/scsi.h b/drivers/scsi/bfa/include/protocol/scsi.h
deleted file mode 100644
index b220e6b4f6e1..000000000000
--- a/drivers/scsi/bfa/include/protocol/scsi.h
+++ /dev/null
@@ -1,1648 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __SCSI_H__
19#define __SCSI_H__
20
21#include <protocol/types.h>
22
23#pragma pack(1)
24
25/*
26 * generic SCSI cdb definition
27 */
28#define SCSI_MAX_CDBLEN 16
29struct scsi_cdb_s{
30 u8 scsi_cdb[SCSI_MAX_CDBLEN];
31};
32
33/*
34 * scsi lun serial number definition
35 */
36#define SCSI_LUN_SN_LEN 32
37struct scsi_lun_sn_s{
38 u8 lun_sn[SCSI_LUN_SN_LEN];
39};
40
41/*
42 * SCSI Direct Access Commands
43 */
44enum {
45 SCSI_OP_TEST_UNIT_READY = 0x00,
46 SCSI_OP_REQUEST_SENSE = 0x03,
47 SCSI_OP_FORMAT_UNIT = 0x04,
48 SCSI_OP_READ6 = 0x08,
49 SCSI_OP_WRITE6 = 0x0A,
50 SCSI_OP_WRITE_FILEMARKS = 0x10,
51 SCSI_OP_INQUIRY = 0x12,
52 SCSI_OP_MODE_SELECT6 = 0x15,
53 SCSI_OP_RESERVE6 = 0x16,
54 SCSI_OP_RELEASE6 = 0x17,
55 SCSI_OP_MODE_SENSE6 = 0x1A,
56 SCSI_OP_START_STOP_UNIT = 0x1B,
57 SCSI_OP_SEND_DIAGNOSTIC = 0x1D,
58 SCSI_OP_READ_CAPACITY = 0x25,
59 SCSI_OP_READ10 = 0x28,
60 SCSI_OP_WRITE10 = 0x2A,
61 SCSI_OP_VERIFY10 = 0x2F,
62 SCSI_OP_READ_DEFECT_DATA = 0x37,
63 SCSI_OP_LOG_SELECT = 0x4C,
64 SCSI_OP_LOG_SENSE = 0x4D,
65 SCSI_OP_MODE_SELECT10 = 0x55,
66 SCSI_OP_RESERVE10 = 0x56,
67 SCSI_OP_RELEASE10 = 0x57,
68 SCSI_OP_MODE_SENSE10 = 0x5A,
69 SCSI_OP_PER_RESERVE_IN = 0x5E,
70 SCSI_OP_PER_RESERVE_OUR = 0x5E,
71 SCSI_OP_READ16 = 0x88,
72 SCSI_OP_WRITE16 = 0x8A,
73 SCSI_OP_VERIFY16 = 0x8F,
74 SCSI_OP_READ_CAPACITY16 = 0x9E,
75 SCSI_OP_REPORT_LUNS = 0xA0,
76 SCSI_OP_READ12 = 0xA8,
77 SCSI_OP_WRITE12 = 0xAA,
78 SCSI_OP_UNDEF = 0xFF,
79};
80
81/*
82 * SCSI START_STOP_UNIT command
83 */
84struct scsi_start_stop_unit_s{
85 u8 opcode;
86#ifdef __BIGENDIAN
87 u8 lun:3;
88 u8 reserved1:4;
89 u8 immed:1;
90#else
91 u8 immed:1;
92 u8 reserved1:4;
93 u8 lun:3;
94#endif
95 u8 reserved2;
96 u8 reserved3;
97#ifdef __BIGENDIAN
98 u8 power_conditions:4;
99 u8 reserved4:2;
100 u8 loEj:1;
101 u8 start:1;
102#else
103 u8 start:1;
104 u8 loEj:1;
105 u8 reserved4:2;
106 u8 power_conditions:4;
107#endif
108 u8 control;
109};
110
111/*
112 * SCSI SEND_DIAGNOSTIC command
113 */
114struct scsi_send_diagnostic_s{
115 u8 opcode;
116#ifdef __BIGENDIAN
117 u8 self_test_code:3;
118 u8 pf:1;
119 u8 reserved1:1;
120 u8 self_test:1;
121 u8 dev_offl:1;
122 u8 unit_offl:1;
123#else
124 u8 unit_offl:1;
125 u8 dev_offl:1;
126 u8 self_test:1;
127 u8 reserved1:1;
128 u8 pf:1;
129 u8 self_test_code:3;
130#endif
131 u8 reserved2;
132
133 u8 param_list_length[2]; /* MSB first */
134 u8 control;
135
136};
137
138/*
139 * SCSI READ10/WRITE10 commands
140 */
141struct scsi_rw10_s{
142 u8 opcode;
143#ifdef __BIGENDIAN
144 u8 lun:3;
145 u8 dpo:1; /* Disable Page Out */
146 u8 fua:1; /* Force Unit Access */
147 u8 reserved1:2;
148 u8 rel_adr:1; /* relative address */
149#else
150 u8 rel_adr:1;
151 u8 reserved1:2;
152 u8 fua:1;
153 u8 dpo:1;
154 u8 lun:3;
155#endif
156 u8 lba0; /* logical block address - MSB */
157 u8 lba1;
158 u8 lba2;
159 u8 lba3; /* LSB */
160 u8 reserved3;
161 u8 xfer_length0; /* transfer length in blocks - MSB */
162 u8 xfer_length1; /* LSB */
163 u8 control;
164};
165
166#define SCSI_CDB10_GET_LBA(cdb) \
167 (((cdb)->lba0 << 24) | ((cdb)->lba1 << 16) | \
168 ((cdb)->lba2 << 8) | (cdb)->lba3)
169
170#define SCSI_CDB10_SET_LBA(cdb, lba) { \
171 (cdb)->lba0 = lba >> 24; \
172 (cdb)->lba1 = (lba >> 16) & 0xFF; \
173 (cdb)->lba2 = (lba >> 8) & 0xFF; \
174 (cdb)->lba3 = lba & 0xFF; \
175}
176
177#define SCSI_CDB10_GET_TL(cdb) \
178 ((cdb)->xfer_length0 << 8 | (cdb)->xfer_length1)
179#define SCSI_CDB10_SET_TL(cdb, tl) { \
180 (cdb)->xfer_length0 = tl >> 8; \
181 (cdb)->xfer_length1 = tl & 0xFF; \
182}
183
184/*
185 * SCSI READ6/WRITE6 commands
186 */
187struct scsi_rw6_s{
188 u8 opcode;
189#ifdef __BIGENDIAN
190 u8 lun:3;
191 u8 lba0:5; /* MSb */
192#else
193 u8 lba0:5; /* MSb */
194 u8 lun:3;
195#endif
196 u8 lba1;
197 u8 lba2; /* LSB */
198 u8 xfer_length;
199 u8 control;
200};
201
202#define SCSI_TAPE_CDB6_GET_TL(cdb) \
203 (((cdb)->tl0 << 16) | ((cdb)->tl1 << 8) | (cdb)->tl2)
204
205#define SCSI_TAPE_CDB6_SET_TL(cdb, tl) { \
206 (cdb)->tl0 = tl >> 16; \
207 (cdb)->tl1 = (tl >> 8) & 0xFF; \
208 (cdb)->tl2 = tl & 0xFF; \
209}
210
211/*
 212 * SCSI sequential (TAPE) write command
213 */
214struct scsi_tape_wr_s{
215 u8 opcode;
216#ifdef __BIGENDIAN
217 u8 rsvd:7;
218 u8 fixed:1; /* MSb */
219#else
220 u8 fixed:1; /* MSb */
221 u8 rsvd:7;
222#endif
223 u8 tl0; /* Msb */
224 u8 tl1;
225 u8 tl2; /* Lsb */
226
227 u8 control;
228};
229
230#define SCSI_CDB6_GET_LBA(cdb) \
231 (((cdb)->lba0 << 16) | ((cdb)->lba1 << 8) | (cdb)->lba2)
232
233#define SCSI_CDB6_SET_LBA(cdb, lba) { \
234 (cdb)->lba0 = lba >> 16; \
235 (cdb)->lba1 = (lba >> 8) & 0xFF; \
236 (cdb)->lba2 = lba & 0xFF; \
237}
238
239#define SCSI_CDB6_GET_TL(cdb) ((cdb)->xfer_length)
240#define SCSI_CDB6_SET_TL(cdb, tl) { \
241 (cdb)->xfer_length = tl; \
242}
243
244/*
245 * SCSI sense data format
246 */
247struct scsi_sense_s{
248#ifdef __BIGENDIAN
249 u8 valid:1;
250 u8 rsp_code:7;
251#else
252 u8 rsp_code:7;
253 u8 valid:1;
254#endif
255 u8 seg_num;
256#ifdef __BIGENDIAN
257 u8 file_mark:1;
258 u8 eom:1; /* end of media */
259 u8 ili:1; /* incorrect length indicator */
260 u8 reserved:1;
261 u8 sense_key:4;
262#else
263 u8 sense_key:4;
264 u8 reserved:1;
265 u8 ili:1; /* incorrect length indicator */
266 u8 eom:1; /* end of media */
267 u8 file_mark:1;
268#endif
269 u8 information[4]; /* device-type or command specific info
270 */
271 u8 add_sense_length;
272 /* additional sense length */
273 u8 command_info[4];/* command specific information
274 */
275 u8 asc; /* additional sense code */
276 u8 ascq; /* additional sense code qualifier */
277 u8 fru_code; /* field replaceable unit code */
278#ifdef __BIGENDIAN
279 u8 sksv:1; /* sense key specific valid */
280 u8 c_d:1; /* command/data bit */
281 u8 res1:2;
282 u8 bpv:1; /* bit pointer valid */
283 u8 bpointer:3; /* bit pointer */
284#else
285 u8 bpointer:3; /* bit pointer */
286 u8 bpv:1; /* bit pointer valid */
287 u8 res1:2;
288 u8 c_d:1; /* command/data bit */
289 u8 sksv:1; /* sense key specific valid */
290#endif
291 u8 fpointer[2]; /* field pointer */
292};
293
294#define SCSI_SENSE_CUR_ERR 0x70
295#define SCSI_SENSE_DEF_ERR 0x71
296
297/*
298 * SCSI sense key values
299 */
300#define SCSI_SK_NO_SENSE 0x0
301#define SCSI_SK_REC_ERR 0x1 /* recovered error */
302#define SCSI_SK_NOT_READY 0x2
303#define SCSI_SK_MED_ERR 0x3 /* medium error */
304#define SCSI_SK_HW_ERR 0x4 /* hardware error */
305#define SCSI_SK_ILLEGAL_REQ 0x5
306#define SCSI_SK_UNIT_ATT 0x6 /* unit attention */
307#define SCSI_SK_DATA_PROTECT 0x7
308#define SCSI_SK_BLANK_CHECK 0x8
309#define SCSI_SK_VENDOR_SPEC 0x9
310#define SCSI_SK_COPY_ABORTED 0xA
311#define SCSI_SK_ABORTED_CMND 0xB
312#define SCSI_SK_VOL_OVERFLOW 0xD
313#define SCSI_SK_MISCOMPARE 0xE
314
315/*
316 * SCSI additional sense codes
317 */
318#define SCSI_ASC_NO_ADD_SENSE 0x00
319#define SCSI_ASC_LUN_NOT_READY 0x04
320#define SCSI_ASC_LUN_COMMUNICATION 0x08
321#define SCSI_ASC_WRITE_ERROR 0x0C
322#define SCSI_ASC_INVALID_CMND_CODE 0x20
323#define SCSI_ASC_BAD_LBA 0x21
324#define SCSI_ASC_INVALID_FIELD_IN_CDB 0x24
325#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
326#define SCSI_ASC_LUN_WRITE_PROTECT 0x27
327#define SCSI_ASC_POWERON_BDR 0x29 /* power on reset, bus reset,
328 * bus device reset
329 */
330#define SCSI_ASC_PARAMS_CHANGED 0x2A
331#define SCSI_ASC_CMND_CLEARED_BY_A_I 0x2F
332#define SCSI_ASC_SAVING_PARAM_NOTSUPP 0x39
 333#define SCSI_ASC_TOCC			0x3F	/* target operating conditions
334 * changed
335 */
336#define SCSI_ASC_PARITY_ERROR 0x47
337#define SCSI_ASC_CMND_PHASE_ERROR 0x4A
338#define SCSI_ASC_DATA_PHASE_ERROR 0x4B
339#define SCSI_ASC_VENDOR_SPEC 0x7F
340
341/*
342 * SCSI additional sense code qualifiers
343 */
344#define SCSI_ASCQ_CAUSE_NOT_REPORT 0x00
345#define SCSI_ASCQ_BECOMING_READY 0x01
346#define SCSI_ASCQ_INIT_CMD_REQ 0x02
347#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
348#define SCSI_ASCQ_OPERATION_IN_PROGRESS 0x07
349#define SCSI_ASCQ_SELF_TEST_IN_PROGRESS 0x09
350#define SCSI_ASCQ_WR_UNEXP_UNSOL_DATA 0x0C
351#define SCSI_ASCQ_WR_NOTENG_UNSOL_DATA 0x0D
352
353#define SCSI_ASCQ_LBA_OUT_OF_RANGE 0x00
354#define SCSI_ASCQ_INVALID_ELEMENT_ADDR 0x01
355
356#define SCSI_ASCQ_LUN_WRITE_PROTECTED 0x00
357#define SCSI_ASCQ_LUN_HW_WRITE_PROTECTED 0x01
358#define SCSI_ASCQ_LUN_SW_WRITE_PROTECTED 0x02
359
360#define SCSI_ASCQ_POR 0x01 /* power on reset */
361#define SCSI_ASCQ_SBR 0x02 /* scsi bus reset */
362#define SCSI_ASCQ_BDR 0x03 /* bus device reset */
363#define SCSI_ASCQ_DIR 0x04 /* device internal reset */
364
365#define SCSI_ASCQ_MODE_PARAMS_CHANGED 0x01
366#define SCSI_ASCQ_LOG_PARAMS_CHANGED 0x02
367#define SCSI_ASCQ_RESERVATIONS_PREEMPTED 0x03
368#define SCSI_ASCQ_RESERVATIONS_RELEASED 0x04
369#define SCSI_ASCQ_REGISTRATIONS_PREEMPTED 0x05
370
371#define SCSI_ASCQ_MICROCODE_CHANGED 0x01
372#define SCSI_ASCQ_CHANGED_OPER_COND 0x02
373#define SCSI_ASCQ_INQ_CHANGED 0x03 /* inquiry data changed */
374#define SCSI_ASCQ_DI_CHANGED 0x05 /* device id changed */
375#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
376
377#define SCSI_ASCQ_DP_CRC_ERR 0x01 /* data phase crc error */
378#define SCSI_ASCQ_DP_SCSI_PARITY_ERR 0x02 /* data phase scsi parity error
379 */
380#define SCSI_ASCQ_IU_CRC_ERR 0x03 /* information unit crc error */
381#define SCSI_ASCQ_PROTO_SERV_CRC_ERR 0x05
382
383#define SCSI_ASCQ_LUN_TIME_OUT 0x01
384
385/* ------------------------------------------------------------
386 * SCSI INQUIRY
387 * ------------------------------------------------------------*/
388
389struct scsi_inquiry_s{
390 u8 opcode;
391#ifdef __BIGENDIAN
392 u8 lun:3;
393 u8 reserved1:3;
394 u8 cmd_dt:1;
395 u8 evpd:1;
396#else
397 u8 evpd:1;
398 u8 cmd_dt:1;
399 u8 reserved1:3;
400 u8 lun:3;
401#endif
402 u8 page_code;
403 u8 reserved2;
404 u8 alloc_length;
405 u8 control;
406};
407
408struct scsi_inquiry_vendor_s{
409 u8 vendor_id[8];
410};
411
412struct scsi_inquiry_prodid_s{
413 u8 product_id[16];
414};
415
416struct scsi_inquiry_prodrev_s{
417 u8 product_rev[4];
418};
419
420struct scsi_inquiry_data_s{
421#ifdef __BIGENDIAN
422 u8 peripheral_qual:3; /* peripheral qualifier */
423 u8 device_type:5; /* peripheral device type */
424
425 u8 rmb:1; /* removable medium bit */
426 u8 device_type_mod:7; /* device type modifier */
427
428 u8 version;
429
430 u8 aenc:1; /* async event notification capability
431 */
432 u8 trm_iop:1; /* terminate I/O process */
433 u8 norm_aca:1; /* normal ACA supported */
434 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
435 u8 rsp_data_format:4;
436
437 u8 additional_len;
438 u8 sccs:1;
439 u8 reserved1:7;
440
441 u8 reserved2:1;
442 u8 enc_serv:1; /* enclosure service component */
443 u8 reserved3:1;
444 u8 multi_port:1; /* multi-port device */
445 u8 m_chngr:1; /* device in medium transport element */
446 u8 ack_req_q:1; /* SIP specific bit */
447 u8 addr32:1; /* SIP specific bit */
448 u8 addr16:1; /* SIP specific bit */
449
450 u8 rel_adr:1; /* relative address */
451 u8 w_bus32:1;
452 u8 w_bus16:1;
453 u8 synchronous:1;
454 u8 linked_commands:1;
455 u8 trans_dis:1;
456 u8 cmd_queue:1; /* command queueing supported */
457 u8 soft_reset:1; /* soft reset alternative (VS) */
458#else
459 u8 device_type:5; /* peripheral device type */
460 u8 peripheral_qual:3;
461 /* peripheral qualifier */
462
463 u8 device_type_mod:7;
464 /* device type modifier */
465 u8 rmb:1; /* removable medium bit */
466
467 u8 version;
468
469 u8 rsp_data_format:4;
470 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
471 u8 norm_aca:1; /* normal ACA supported */
472 u8 terminate_iop:1;/* terminate I/O process */
473 u8 aenc:1; /* async event notification capability
474 */
475
476 u8 additional_len;
477 u8 reserved1:7;
478 u8 sccs:1;
479
480 u8 addr16:1; /* SIP specific bit */
481 u8 addr32:1; /* SIP specific bit */
482 u8 ack_req_q:1; /* SIP specific bit */
483 u8 m_chngr:1; /* device in medium transport element */
484 u8 multi_port:1; /* multi-port device */
485 u8 reserved3:1; /* TBD - Vendor Specific */
486 u8 enc_serv:1; /* enclosure service component */
487 u8 reserved2:1;
488
489 u8 soft_seset:1; /* soft reset alternative (VS) */
490 u8 cmd_queue:1; /* command queueing supported */
491 u8 trans_dis:1;
492 u8 linked_commands:1;
493 u8 synchronous:1;
494 u8 w_bus16:1;
495 u8 w_bus32:1;
496 u8 rel_adr:1; /* relative address */
497#endif
498 struct scsi_inquiry_vendor_s vendor_id;
499 struct scsi_inquiry_prodid_s product_id;
500 struct scsi_inquiry_prodrev_s product_rev;
501 u8 vendor_specific[20];
502 u8 reserved4[40];
503};
504
505/*
506 * inquiry.peripheral_qual field values
507 */
508#define SCSI_DEVQUAL_DEFAULT 0
509#define SCSI_DEVQUAL_NOT_CONNECTED 1
510#define SCSI_DEVQUAL_NOT_SUPPORTED 3
511
512/*
513 * inquiry.device_type field values
514 */
515#define SCSI_DEVICE_DIRECT_ACCESS 0x00
516#define SCSI_DEVICE_SEQ_ACCESS 0x01
517#define SCSI_DEVICE_ARRAY_CONTROLLER 0x0C
518#define SCSI_DEVICE_UNKNOWN 0x1F
519
520/*
521 * inquiry.version
522 */
523#define SCSI_VERSION_ANSI_X3131 2 /* ANSI X3.131 SCSI-2 */
524#define SCSI_VERSION_SPC 3 /* SPC (SCSI-3), ANSI X3.301:1997 */
525#define SCSI_VERSION_SPC_2 4 /* SPC-2 */
526
527/*
528 * response data format
529 */
530#define SCSI_RSP_DATA_FORMAT 2 /* SCSI-2 & SPC */
531
532/*
533 * SCSI inquiry page codes
534 */
535#define SCSI_INQ_PAGE_VPD_PAGES 0x00 /* supported vpd pages */
536#define SCSI_INQ_PAGE_USN_PAGE 0x80 /* unit serial number page */
 537#define SCSI_INQ_PAGE_DEV_IDENT	0x83	/* device identification page
538 */
539#define SCSI_INQ_PAGES_MAX 3
540
541/*
542 * supported vital product data pages
543 */
544struct scsi_inq_page_vpd_pages_s{
545#ifdef __BIGENDIAN
546 u8 peripheral_qual:3;
547 u8 device_type:5;
548#else
549 u8 device_type:5;
550 u8 peripheral_qual:3;
551#endif
552 u8 page_code;
553 u8 reserved;
554 u8 page_length;
555 u8 pages[SCSI_INQ_PAGES_MAX];
556};
557
558/*
559 * Unit serial number page
560 */
561#define SCSI_INQ_USN_LEN 32
562
563struct scsi_inq_usn_s{
564 char usn[SCSI_INQ_USN_LEN];
565};
566
567struct scsi_inq_page_usn_s{
568#ifdef __BIGENDIAN
569 u8 peripheral_qual:3;
570 u8 device_type:5;
571#else
572 u8 device_type:5;
573 u8 peripheral_qual:3;
574#endif
575 u8 page_code;
576 u8 reserved1;
577 u8 page_length;
578 struct scsi_inq_usn_s usn;
579};
580
581enum {
582 SCSI_INQ_DIP_CODE_BINARY = 1, /* identifier has binary value */
583 SCSI_INQ_DIP_CODE_ASCII = 2, /* identifier has ascii value */
584};
585
586enum {
587 SCSI_INQ_DIP_ASSOC_LUN = 0, /* id is associated with device */
588 SCSI_INQ_DIP_ASSOC_PORT = 1, /* id is associated with port that
589 * received the request
590 */
591};
592
593enum {
594 SCSI_INQ_ID_TYPE_VENDOR = 1,
595 SCSI_INQ_ID_TYPE_IEEE = 2,
596 SCSI_INQ_ID_TYPE_FC_FS = 3,
597 SCSI_INQ_ID_TYPE_OTHER = 4,
598};
599
600struct scsi_inq_dip_desc_s{
601#ifdef __BIGENDIAN
602 u8 res0:4;
603 u8 code_set:4;
604 u8 res1:2;
605 u8 association:2;
606 u8 id_type:4;
607#else
608 u8 code_set:4;
609 u8 res0:4;
610 u8 id_type:4;
611 u8 association:2;
612 u8 res1:2;
613#endif
614 u8 res2;
615 u8 id_len;
616 struct scsi_lun_sn_s id;
617};
618
619/*
 620 * Device identification page
621 */
622struct scsi_inq_page_dev_ident_s{
623#ifdef __BIGENDIAN
624 u8 peripheral_qual:3;
625 u8 device_type:5;
626#else
627 u8 device_type:5;
628 u8 peripheral_qual:3;
629#endif
630 u8 page_code;
631 u8 reserved1;
632 u8 page_length;
633 struct scsi_inq_dip_desc_s desc;
634};
635
636/* ------------------------------------------------------------
637 * READ CAPACITY
638 * ------------------------------------------------------------
639 */
640
641struct scsi_read_capacity_s{
642 u8 opcode;
643#ifdef __BIGENDIAN
644 u8 lun:3;
645 u8 reserved1:4;
646 u8 rel_adr:1;
647#else
648 u8 rel_adr:1;
649 u8 reserved1:4;
650 u8 lun:3;
651#endif
652 u8 lba0; /* MSB */
653 u8 lba1;
654 u8 lba2;
655 u8 lba3; /* LSB */
656 u8 reserved2;
657 u8 reserved3;
658#ifdef __BIGENDIAN
659 u8 reserved4:7;
660 u8 pmi:1; /* partial medium indicator */
661#else
662 u8 pmi:1; /* partial medium indicator */
663 u8 reserved4:7;
664#endif
665 u8 control;
666};
667
668struct scsi_read_capacity_data_s{
669 u32 max_lba; /* maximum LBA available */
670 u32 block_length; /* in bytes */
671};
672
673struct scsi_read_capacity16_data_s{
674 u64 lba; /* maximum LBA available */
675 u32 block_length; /* in bytes */
676#ifdef __BIGENDIAN
677 u8 reserved1:4,
678 p_type:3,
679 prot_en:1;
680 u8 reserved2:4,
681 lb_pbe:4; /* logical blocks per physical block
682 * exponent */
683 u16 reserved3:2,
684 lba_align:14; /* lowest aligned logical block
685 * address */
686#else
687 u16 lba_align:14, /* lowest aligned logical block
688 * address */
689 reserved3:2;
690 u8 lb_pbe:4, /* logical blocks per physical block
691 * exponent */
692 reserved2:4;
693 u8 prot_en:1,
694 p_type:3,
695 reserved1:4;
696#endif
697 u64 reserved4;
698 u64 reserved5;
699};
700
701/* ------------------------------------------------------------
702 * REPORT LUNS command
703 * ------------------------------------------------------------
704 */
705
706struct scsi_report_luns_s{
707 u8 opcode; /* A0h - REPORT LUNS opCode */
708 u8 reserved1[5];
709 u8 alloc_length[4];/* allocation length MSB first */
710 u8 reserved2;
711 u8 control;
712};
713
714#define SCSI_REPORT_LUN_ALLOC_LENGTH(rl) \
715 ((rl->alloc_length[0] << 24) | (rl->alloc_length[1] << 16) | \
716 (rl->alloc_length[2] << 8) | (rl->alloc_length[3]))
717
718#define SCSI_REPORT_LUNS_SET_ALLOCLEN(rl, alloc_len) { \
719 (rl)->alloc_length[0] = (alloc_len) >> 24; \
720 (rl)->alloc_length[1] = ((alloc_len) >> 16) & 0xFF; \
721 (rl)->alloc_length[2] = ((alloc_len) >> 8) & 0xFF; \
722 (rl)->alloc_length[3] = (alloc_len) & 0xFF; \
723}
724
725struct scsi_report_luns_data_s{
726 u32 lun_list_length; /* length of LUN list length */
727 u32 reserved;
728 lun_t lun[1]; /* first LUN in lun list */
729};
730
731/* -------------------------------------------------------------
732 * SCSI mode parameters
733 * -----------------------------------------------------------
734 */
735enum {
736 SCSI_DA_MEDIUM_DEF = 0, /* direct access default medium type */
737 SCSI_DA_MEDIUM_SS = 1, /* direct access single sided */
738 SCSI_DA_MEDIUM_DS = 2, /* direct access double sided */
739};
740
741/*
742 * SCSI Mode Select(6) cdb
743 */
744struct scsi_mode_select6_s{
745 u8 opcode;
746#ifdef __BIGENDIAN
747 u8 reserved1:3;
748 u8 pf:1; /* page format */
749 u8 reserved2:3;
750 u8 sp:1; /* save pages if set to 1 */
751#else
752 u8 sp:1; /* save pages if set to 1 */
753 u8 reserved2:3;
754 u8 pf:1; /* page format */
755 u8 reserved1:3;
756#endif
757 u8 reserved3[2];
758 u8 alloc_len;
759 u8 control;
760};
761
762/*
763 * SCSI Mode Select(10) cdb
764 */
765struct scsi_mode_select10_s{
766 u8 opcode;
767#ifdef __BIGENDIAN
768 u8 reserved1:3;
769 u8 pf:1; /* page format */
770 u8 reserved2:3;
771 u8 sp:1; /* save pages if set to 1 */
772#else
773 u8 sp:1; /* save pages if set to 1 */
774 u8 reserved2:3;
775 u8 pf:1; /* page format */
776 u8 reserved1:3;
777#endif
778 u8 reserved3[5];
779 u8 alloc_len_msb;
780 u8 alloc_len_lsb;
781 u8 control;
782};
783
784/*
785 * SCSI Mode Sense(6) cdb
786 */
787struct scsi_mode_sense6_s{
788 u8 opcode;
789#ifdef __BIGENDIAN
790 u8 reserved1:4;
 791	u8		dbd:1;	/* disable block descriptors if set to 1 */
792 u8 reserved2:3;
793
794 u8 pc:2; /* page control */
795 u8 page_code:6;
796#else
797 u8 reserved2:3;
798 u8 dbd:1; /* disable block descriptors if set to 1 */
799 u8 reserved1:4;
800
801 u8 page_code:6;
802 u8 pc:2; /* page control */
803#endif
804 u8 reserved3;
805 u8 alloc_len;
806 u8 control;
807};
808
809/*
810 * SCSI Mode Sense(10) cdb
811 */
812struct scsi_mode_sense10_s{
813 u8 opcode;
814#ifdef __BIGENDIAN
815 u8 reserved1:3;
816 u8 LLBAA:1; /* long LBA accepted if set to 1 */
817 u8 dbd:1; /* disable block descriptors if set
818 * to 1
819 */
820 u8 reserved2:3;
821
822 u8 pc:2; /* page control */
823 u8 page_code:6;
824#else
825 u8 reserved2:3;
826 u8 dbd:1; /* disable block descriptors if set to
827 * 1
828 */
829 u8 LLBAA:1; /* long LBA accepted if set to 1 */
830 u8 reserved1:3;
831
832 u8 page_code:6;
833 u8 pc:2; /* page control */
834#endif
835 u8 reserved3[4];
836 u8 alloc_len_msb;
837 u8 alloc_len_lsb;
838 u8 control;
839};
840
841#define SCSI_CDB10_GET_AL(cdb) \
842 ((cdb)->alloc_len_msb << 8 | (cdb)->alloc_len_lsb)
843
844#define SCSI_CDB10_SET_AL(cdb, al) { \
845 (cdb)->alloc_len_msb = al >> 8; \
846 (cdb)->alloc_len_lsb = al & 0xFF; \
847}
848
849#define SCSI_CDB6_GET_AL(cdb) ((cdb)->alloc_len)
850
851#define SCSI_CDB6_SET_AL(cdb, al) { \
852 (cdb)->alloc_len = al; \
853}
854
855/*
856 * page control field values
857 */
858#define SCSI_PC_CURRENT_VALUES 0x0
859#define SCSI_PC_CHANGEABLE_VALUES 0x1
860#define SCSI_PC_DEFAULT_VALUES 0x2
861#define SCSI_PC_SAVED_VALUES 0x3
862
863/*
864 * SCSI mode page codes
865 */
866#define SCSI_MP_VENDOR_SPEC 0x00
867#define SCSI_MP_DISC_RECN 0x02 /* disconnect-reconnect page */
868#define SCSI_MP_FORMAT_DEVICE 0x03
869#define SCSI_MP_RDG 0x04 /* rigid disk geometry page */
870#define SCSI_MP_FDP 0x05 /* flexible disk page */
871#define SCSI_MP_CACHING 0x08 /* caching page */
872#define SCSI_MP_CONTROL 0x0A /* control mode page */
873#define SCSI_MP_MED_TYPES_SUP 0x0B /* medium types supported page */
874#define SCSI_MP_INFO_EXCP_CNTL 0x1C /* informational exception control */
875#define SCSI_MP_ALL 0x3F /* return all pages - mode sense only */
876
877/*
878 * mode parameter header
879 */
880struct scsi_mode_param_header6_s{
881 u8 mode_datalen;
882 u8 medium_type;
883
884 /*
885 * device specific parameters expanded for direct access devices
886 */
887#ifdef __BIGENDIAN
888 u32 wp:1; /* write protected */
889 u32 reserved1:2;
890 u32 dpofua:1; /* disable page out + force unit access
891 */
892 u32 reserved2:4;
893#else
894 u32 reserved2:4;
895 u32 dpofua:1; /* disable page out + force unit access
896 */
897 u32 reserved1:2;
898 u32 wp:1; /* write protected */
899#endif
900
901 u8 block_desclen;
902};
903
904struct scsi_mode_param_header10_s{
905 u32 mode_datalen:16;
906 u32 medium_type:8;
907
908 /*
909 * device specific parameters expanded for direct access devices
910 */
911#ifdef __BIGENDIAN
912 u32 wp:1; /* write protected */
913 u32 reserved1:2;
914 u32 dpofua:1; /* disable page out + force unit access
915 */
916 u32 reserved2:4;
917#else
918 u32 reserved2:4;
919 u32 dpofua:1; /* disable page out + force unit access
920 */
921 u32 reserved1:2;
922 u32 wp:1; /* write protected */
923#endif
924
925#ifdef __BIGENDIAN
926 u32 reserved3:7;
927 u32 longlba:1;
928#else
929 u32 longlba:1;
930 u32 reserved3:7;
931#endif
932 u32 reserved4:8;
933 u32 block_desclen:16;
934};
935
936/*
937 * mode parameter block descriptor
938 */
939struct scsi_mode_param_desc_s{
940 u32 nblks;
941 u32 density_code:8;
942 u32 block_length:24;
943};
944
945/*
946 * Disconnect-reconnect mode page format
947 */
948struct scsi_mp_disc_recn_s{
949#ifdef __BIGENDIAN
950 u8 ps:1;
951 u8 reserved1:1;
952 u8 page_code:6;
953#else
954 u8 page_code:6;
955 u8 reserved1:1;
956 u8 ps:1;
957#endif
958 u8 page_len;
959 u8 buf_full_ratio;
960 u8 buf_empty_ratio;
961
962 u8 bil_msb; /* bus inactivity limit -MSB */
963 u8 bil_lsb; /* bus inactivity limit -LSB */
964
965 u8 dtl_msb; /* disconnect time limit - MSB */
966 u8 dtl_lsb; /* disconnect time limit - LSB */
967
968 u8 ctl_msb; /* connect time limit - MSB */
969 u8 ctl_lsb; /* connect time limit - LSB */
970
971 u8 max_burst_len_msb;
972 u8 max_burst_len_lsb;
973#ifdef __BIGENDIAN
974 u8 emdp:1; /* enable modify data pointers */
975 u8 fa:3; /* fair arbitration */
976 u8 dimm:1; /* disconnect immediate */
977 u8 dtdc:3; /* data transfer disconnect control */
978#else
979 u8 dtdc:3; /* data transfer disconnect control */
980 u8 dimm:1; /* disconnect immediate */
981 u8 fa:3; /* fair arbitration */
982 u8 emdp:1; /* enable modify data pointers */
983#endif
984
985 u8 reserved3;
986
987 u8 first_burst_len_msb;
988 u8 first_burst_len_lsb;
989};
990
991/*
992 * SCSI format device mode page
993 */
994struct scsi_mp_format_device_s{
995#ifdef __BIGENDIAN
996 u32 ps:1;
997 u32 reserved1:1;
998 u32 page_code:6;
999#else
1000 u32 page_code:6;
1001 u32 reserved1:1;
1002 u32 ps:1;
1003#endif
1004 u32 page_len:8;
1005 u32 tracks_per_zone:16;
1006
1007 u32 a_sec_per_zone:16;
1008 u32 a_tracks_per_zone:16;
1009
1010 u32 a_tracks_per_lun:16; /* alternate tracks/lun-MSB */
1011 u32 sec_per_track:16; /* sectors/track-MSB */
1012
1013 u32 bytes_per_sector:16;
1014 u32 interleave:16;
1015
1016 u32 tsf:16; /* track skew factor-MSB */
1017 u32 csf:16; /* cylinder skew factor-MSB */
1018
1019#ifdef __BIGENDIAN
1020 u32 ssec:1; /* soft sector formatting */
1021 u32 hsec:1; /* hard sector formatting */
1022 u32 rmb:1; /* removable media */
1023 u32 surf:1; /* surface */
1024 u32 reserved2:4;
1025#else
1026 u32 reserved2:4;
1027 u32 surf:1; /* surface */
1028 u32 rmb:1; /* removable media */
1029 u32 hsec:1; /* hard sector formatting */
1030 u32 ssec:1; /* soft sector formatting */
1031#endif
1032 u32 reserved3:24;
1033};
1034
1035/*
1036 * SCSI rigid disk device geometry page
1037 */
1038struct scsi_mp_rigid_device_geometry_s{
1039#ifdef __BIGENDIAN
1040 u32 ps:1;
1041 u32 reserved1:1;
1042 u32 page_code:6;
1043#else
1044 u32 page_code:6;
1045 u32 reserved1:1;
1046 u32 ps:1;
1047#endif
1048 u32 page_len:8;
1049 u32 num_cylinders0:8;
1050 u32 num_cylinders1:8;
1051
1052 u32 num_cylinders2:8;
1053 u32 num_heads:8;
1054 u32 scwp0:8;
1055 u32 scwp1:8;
1056
1057 u32 scwp2:8;
1058 u32 scrwc0:8;
1059 u32 scrwc1:8;
1060 u32 scrwc2:8;
1061
1062 u32 dsr:16;
1063 u32 lscyl0:8;
1064 u32 lscyl1:8;
1065
1066 u32 lscyl2:8;
1067#ifdef __BIGENDIAN
1068 u32 reserved2:6;
1069 u32 rpl:2; /* rotational position locking */
1070#else
1071 u32 rpl:2; /* rotational position locking */
1072 u32 reserved2:6;
1073#endif
1074 u32 rot_off:8;
1075 u32 reserved3:8;
1076
1077 u32 med_rot_rate:16;
1078 u32 reserved4:16;
1079};
1080
1081/*
1082 * SCSI caching mode page
1083 */
1084struct scsi_mp_caching_s{
1085#ifdef __BIGENDIAN
1086 u8 ps:1;
1087 u8 res1:1;
1088 u8 page_code:6;
1089#else
1090 u8 page_code:6;
1091 u8 res1:1;
1092 u8 ps:1;
1093#endif
1094 u8 page_len;
1095#ifdef __BIGENDIAN
1096 u8 ic:1; /* initiator control */
1097 u8 abpf:1; /* abort pre-fetch */
1098 u8 cap:1; /* caching analysis permitted */
1099 u8 disc:1; /* discontinuity */
1100 u8 size:1; /* size enable */
1101 u8 wce:1; /* write cache enable */
1102 u8 mf:1; /* multiplication factor */
1103 u8 rcd:1; /* read cache disable */
1104
1105 u8 drrp:4; /* demand read retention priority */
1106 u8 wrp:4; /* write retention priority */
1107#else
1108 u8 rcd:1; /* read cache disable */
1109 u8 mf:1; /* multiplication factor */
1110 u8 wce:1; /* write cache enable */
1111 u8 size:1; /* size enable */
1112 u8 disc:1; /* discontinuity */
1113 u8 cap:1; /* caching analysis permitted */
1114 u8 abpf:1; /* abort pre-fetch */
1115 u8 ic:1; /* initiator control */
1116
1117 u8 wrp:4; /* write retention priority */
1118 u8 drrp:4; /* demand read retention priority */
1119#endif
1120 u8 dptl[2];/* disable pre-fetch transfer length */
1121 u8 min_prefetch[2];
1122 u8 max_prefetch[2];
1123 u8 max_prefetch_limit[2];
1124#ifdef __BIGENDIAN
1125 u8 fsw:1; /* force sequential write */
1126 u8 lbcss:1;/* logical block cache segment size */
1127 u8 dra:1; /* disable read ahead */
1128 u8 vs:2; /* vendor specific */
1129 u8 res2:3;
1130#else
1131 u8 res2:3;
1132 u8 vs:2; /* vendor specific */
1133 u8 dra:1; /* disable read ahead */
1134 u8 lbcss:1;/* logical block cache segment size */
1135 u8 fsw:1; /* force sequential write */
1136#endif
1137 u8 num_cache_segs;
1138
1139 u8 cache_seg_size[2];
1140 u8 res3;
1141 u8 non_cache_seg_size[3];
1142};
1143
1144/*
1145 * SCSI control mode page
1146 */
1147struct scsi_mp_control_page_s{
1148#ifdef __BIGENDIAN
1149u8 ps:1;
1150u8 reserved1:1;
1151u8 page_code:6;
1152#else
1153u8 page_code:6;
1154u8 reserved1:1;
1155u8 ps:1;
1156#endif
1157 u8 page_len;
1158#ifdef __BIGENDIAN
1159 u8 tst:3; /* task set type */
1160 u8 reserved3:3;
1161 u8 gltsd:1; /* global logging target save disable */
1162 u8 rlec:1; /* report log exception condition */
1163
1164 u8 qalgo_mod:4; /* queue alogorithm modifier */
1165 u8 reserved4:1;
1166 u8 qerr:2; /* queue error management */
1167 u8 dque:1; /* disable queuing */
1168
1169 u8 reserved5:1;
1170 u8 rac:1; /* report a check */
1171 u8 reserved6:2;
1172 u8 swp:1; /* software write protect */
1173 u8 raerp:1; /* ready AER permission */
1174 u8 uaaerp:1; /* unit attenstion AER permission */
1175 u8 eaerp:1; /* error AER permission */
1176
1177 u8 reserved7:5;
1178 u8 autoload_mod:3;
1179#else
1180 u8 rlec:1; /* report log exception condition */
1181 u8 gltsd:1; /* global logging target save disable */
1182 u8 reserved3:3;
1183 u8 tst:3; /* task set type */
1184
1185 u8 dque:1; /* disable queuing */
1186 u8 qerr:2; /* queue error management */
1187 u8 reserved4:1;
1188 u8 qalgo_mod:4; /* queue alogorithm modifier */
1189
1190 u8 eaerp:1; /* error AER permission */
1191 u8 uaaerp:1; /* unit attenstion AER permission */
1192 u8 raerp:1; /* ready AER permission */
1193 u8 swp:1; /* software write protect */
1194 u8 reserved6:2;
1195 u8 rac:1; /* report a check */
1196 u8 reserved5:1;
1197
1198 u8 autoload_mod:3;
1199 u8 reserved7:5;
1200#endif
1201 u8 rahp_msb; /* ready AER holdoff period - MSB */
1202 u8 rahp_lsb; /* ready AER holdoff period - LSB */
1203
1204 u8 busy_timeout_period_msb;
1205 u8 busy_timeout_period_lsb;
1206
1207 u8 ext_selftest_compl_time_msb;
1208 u8 ext_selftest_compl_time_lsb;
1209};
1210
1211/*
1212 * SCSI medium types supported mode page
1213 */
1214struct scsi_mp_medium_types_sup_s{
1215#ifdef __BIGENDIAN
1216 u8 ps:1;
1217 u8 reserved1:1;
1218 u8 page_code:6;
1219#else
1220 u8 page_code:6;
1221 u8 reserved1:1;
1222 u8 ps:1;
1223#endif
1224 u8 page_len;
1225
1226 u8 reserved3[2];
1227 u8 med_type1_sup; /* medium type one supported */
1228 u8 med_type2_sup; /* medium type two supported */
1229 u8 med_type3_sup; /* medium type three supported */
1230 u8 med_type4_sup; /* medium type four supported */
1231};
1232
1233/*
1234 * SCSI informational exception control mode page
1235 */
1236struct scsi_mp_info_excpt_cntl_s{
1237#ifdef __BIGENDIAN
1238 u8 ps:1;
1239 u8 reserved1:1;
1240 u8 page_code:6;
1241#else
1242 u8 page_code:6;
1243 u8 reserved1:1;
1244 u8 ps:1;
1245#endif
1246 u8 page_len;
1247#ifdef __BIGENDIAN
1248 u8 perf:1; /* performance */
1249 u8 reserved3:1;
1250 u8 ebf:1; /* enable background fucntion */
1251 u8 ewasc:1; /* enable warning */
1252 u8 dexcpt:1; /* disable exception control */
1253 u8 test:1; /* enable test device failure
1254 * notification
1255 */
1256 u8 reserved4:1;
1257 u8 log_error:1;
1258
1259 u8 reserved5:4;
1260 u8 mrie:4; /* method of reporting info
1261 * exceptions
1262 */
1263#else
1264 u8 log_error:1;
1265 u8 reserved4:1;
1266 u8 test:1; /* enable test device failure
1267 * notification
1268 */
1269 u8 dexcpt:1; /* disable exception control */
1270 u8 ewasc:1; /* enable warning */
1271 u8 ebf:1; /* enable background fucntion */
1272 u8 reserved3:1;
1273 u8 perf:1; /* performance */
1274
1275 u8 mrie:4; /* method of reporting info
1276 * exceptions
1277 */
1278 u8 reserved5:4;
1279#endif
1280 u8 interval_timer_msb;
1281 u8 interval_timer_lsb;
1282
1283 u8 report_count_msb;
1284 u8 report_count_lsb;
1285};
1286
1287/*
1288 * Methods of reporting informational exceptions
1289 */
1290#define SCSI_MP_IEC_NO_REPORT 0x0 /* no reporting of exceptions */
1291#define SCSI_MP_IEC_AER 0x1 /* async event reporting */
1292#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attenstion */
1293#define SCSI_MO_IEC_COND_REC_ERR 0x3 /* conditionally generate recovered
1294 * error
1295 */
1296#define SCSI_MP_IEC_UNCOND_REC_ERR 0x4 /* unconditionally generate recovered
1297 * error
1298 */
1299#define SCSI_MP_IEC_NO_SENSE 0x5 /* generate no sense */
1300#define SCSI_MP_IEC_ON_REQUEST 0x6 /* only report exceptions on request */
1301
1302/*
1303 * SCSI flexible disk page
1304 */
1305struct scsi_mp_flexible_disk_s{
1306#ifdef __BIGENDIAN
1307 u8 ps:1;
1308 u8 reserved1:1;
1309 u8 page_code:6;
1310#else
1311 u8 page_code:6;
1312 u8 reserved1:1;
1313 u8 ps:1;
1314#endif
1315 u8 page_len;
1316
1317 u8 transfer_rate_msb;
1318 u8 transfer_rate_lsb;
1319
1320 u8 num_heads;
1321 u8 num_sectors;
1322
1323 u8 bytes_per_sector_msb;
1324 u8 bytes_per_sector_lsb;
1325
1326 u8 num_cylinders_msb;
1327 u8 num_cylinders_lsb;
1328
1329 u8 sc_wpc_msb; /* starting cylinder-write
1330 * precompensation msb
1331 */
1332 u8 sc_wpc_lsb; /* starting cylinder-write
1333 * precompensation lsb
1334 */
1335 u8 sc_rwc_msb; /* starting cylinder-reduced write
1336 * current msb
1337 */
1338 u8 sc_rwc_lsb; /* starting cylinder-reduced write
1339 * current lsb
1340 */
1341
1342 u8 dev_step_rate_msb;
1343 u8 dev_step_rate_lsb;
1344
1345 u8 dev_step_pulse_width;
1346
1347 u8 head_sd_msb; /* head settle delay msb */
1348 u8 head_sd_lsb; /* head settle delay lsb */
1349
1350 u8 motor_on_delay;
1351 u8 motor_off_delay;
1352#ifdef __BIGENDIAN
1353 u8 trdy:1; /* true ready bit */
1354 u8 ssn:1; /* start sector number bit */
1355 u8 mo:1; /* motor on bit */
1356 u8 reserved3:5;
1357
1358 u8 reserved4:4;
1359 u8 spc:4; /* step pulse per cylinder */
1360#else
1361 u8 reserved3:5;
1362 u8 mo:1; /* motor on bit */
1363 u8 ssn:1; /* start sector number bit */
1364 u8 trdy:1; /* true ready bit */
1365
1366 u8 spc:4; /* step pulse per cylinder */
1367 u8 reserved4:4;
1368#endif
1369 u8 write_comp;
1370 u8 head_load_delay;
1371 u8 head_unload_delay;
1372#ifdef __BIGENDIAN
1373 u8 pin34:4; /* pin34 usage */
1374 u8 pin2:4; /* pin2 usage */
1375
1376 u8 pin4:4; /* pin4 usage */
1377 u8 pin1:4; /* pin1 usage */
1378#else
1379 u8 pin2:4; /* pin2 usage */
1380 u8 pin34:4; /* pin34 usage */
1381
1382 u8 pin1:4; /* pin1 usage */
1383 u8 pin4:4; /* pin4 usage */
1384#endif
1385 u8 med_rot_rate_msb;
1386 u8 med_rot_rate_lsb;
1387
1388 u8 reserved5[2];
1389};
1390
1391struct scsi_mode_page_format_data6_s{
1392 struct scsi_mode_param_header6_s mph; /* mode page header */
1393 struct scsi_mode_param_desc_s desc; /* block descriptor */
1394 struct scsi_mp_format_device_s format; /* format device data */
1395};
1396
1397struct scsi_mode_page_format_data10_s{
1398 struct scsi_mode_param_header10_s mph; /* mode page header */
1399 struct scsi_mode_param_desc_s desc; /* block descriptor */
1400 struct scsi_mp_format_device_s format; /* format device data */
1401};
1402
1403struct scsi_mode_page_rdg_data6_s{
1404 struct scsi_mode_param_header6_s mph; /* mode page header */
1405 struct scsi_mode_param_desc_s desc; /* block descriptor */
1406 struct scsi_mp_rigid_device_geometry_s rdg;
1407 /* rigid geometry data */
1408};
1409
1410struct scsi_mode_page_rdg_data10_s{
1411 struct scsi_mode_param_header10_s mph; /* mode page header */
1412 struct scsi_mode_param_desc_s desc; /* block descriptor */
1413 struct scsi_mp_rigid_device_geometry_s rdg;
1414 /* rigid geometry data */
1415};
1416
1417struct scsi_mode_page_cache6_s{
1418 struct scsi_mode_param_header6_s mph; /* mode page header */
1419 struct scsi_mode_param_desc_s desc; /* block descriptor */
1420 struct scsi_mp_caching_s cache; /* cache page data */
1421};
1422
1423struct scsi_mode_page_cache10_s{
1424 struct scsi_mode_param_header10_s mph; /* mode page header */
1425 struct scsi_mode_param_desc_s desc; /* block descriptor */
1426 struct scsi_mp_caching_s cache; /* cache page data */
1427};
1428
1429/* --------------------------------------------------------------
1430 * Format Unit command
1431 * ------------------------------------------------------------
1432 */
1433
1434/*
1435 * Format Unit CDB
1436 */
1437struct scsi_format_unit_s{
1438 u8 opcode;
1439#ifdef __BIGENDIAN
1440 u8 res1:3;
1441 u8 fmtdata:1; /* if set, data out phase has format
1442 * data
1443 */
1444 u8 cmplst:1; /* if set, defect list is complete */
1445 u8 def_list:3; /* format of defect descriptor is
1446 * fmtdata =1
1447 */
1448#else
1449 u8 def_list:3; /* format of defect descriptor is
1450 * fmtdata = 1
1451 */
1452 u8 cmplst:1; /* if set, defect list is complete */
1453 u8 fmtdata:1; /* if set, data out phase has format
1454 * data
1455 */
1456 u8 res1:3;
1457#endif
1458 u8 interleave_msb;
1459 u8 interleave_lsb;
1460 u8 vendor_spec;
1461 u8 control;
1462};
1463
1464/*
1465 * h
1466 */
1467struct scsi_reserve6_s{
1468 u8 opcode;
1469#ifdef __BIGENDIAN
1470 u8 reserved:3;
1471 u8 obsolete:4;
1472 u8 extent:1;
1473#else
1474 u8 extent:1;
1475 u8 obsolete:4;
1476 u8 reserved:3;
1477#endif
1478 u8 reservation_id;
1479 u16 param_list_len;
1480 u8 control;
1481};
1482
1483/*
1484 * h
1485 */
1486struct scsi_release6_s{
1487 u8 opcode;
1488#ifdef __BIGENDIAN
1489 u8 reserved1:3;
1490 u8 obsolete:4;
1491 u8 extent:1;
1492#else
1493 u8 extent:1;
1494 u8 obsolete:4;
1495 u8 reserved1:3;
1496#endif
1497 u8 reservation_id;
1498 u16 reserved2;
1499 u8 control;
1500};
1501
1502/*
1503 * h
1504 */
1505struct scsi_reserve10_s{
1506 u8 opcode;
1507#ifdef __BIGENDIAN
1508 u8 reserved1:3;
1509 u8 third_party:1;
1510 u8 reserved2:2;
1511 u8 long_id:1;
1512 u8 extent:1;
1513#else
1514 u8 extent:1;
1515 u8 long_id:1;
1516 u8 reserved2:2;
1517 u8 third_party:1;
1518 u8 reserved1:3;
1519#endif
1520 u8 reservation_id;
1521 u8 third_pty_dev_id;
1522 u8 reserved3;
1523 u8 reserved4;
1524 u8 reserved5;
1525 u16 param_list_len;
1526 u8 control;
1527};
1528
1529struct scsi_release10_s{
1530 u8 opcode;
1531#ifdef __BIGENDIAN
1532 u8 reserved1:3;
1533 u8 third_party:1;
1534 u8 reserved2:2;
1535 u8 long_id:1;
1536 u8 extent:1;
1537#else
1538 u8 extent:1;
1539 u8 long_id:1;
1540 u8 reserved2:2;
1541 u8 third_party:1;
1542 u8 reserved1:3;
1543#endif
1544 u8 reservation_id;
1545 u8 third_pty_dev_id;
1546 u8 reserved3;
1547 u8 reserved4;
1548 u8 reserved5;
1549 u16 param_list_len;
1550 u8 control;
1551};
1552
1553struct scsi_verify10_s{
1554 u8 opcode;
1555#ifdef __BIGENDIAN
1556 u8 lun:3;
1557 u8 dpo:1;
1558 u8 reserved:2;
1559 u8 bytchk:1;
1560 u8 reladdr:1;
1561#else
1562 u8 reladdr:1;
1563 u8 bytchk:1;
1564 u8 reserved:2;
1565 u8 dpo:1;
1566 u8 lun:3;
1567#endif
1568 u8 lba0;
1569 u8 lba1;
1570 u8 lba2;
1571 u8 lba3;
1572 u8 reserved1;
1573 u8 verification_len0;
1574 u8 verification_len1;
1575 u8 control_byte;
1576};
1577
1578struct scsi_request_sense_s{
1579 u8 opcode;
1580#ifdef __BIGENDIAN
1581 u8 lun:3;
1582 u8 reserved:5;
1583#else
1584 u8 reserved:5;
1585 u8 lun:3;
1586#endif
1587 u8 reserved0;
1588 u8 reserved1;
1589 u8 alloc_len;
1590 u8 control_byte;
1591};
1592
1593/* ------------------------------------------------------------
1594 * SCSI status byte values
1595 * ------------------------------------------------------------
1596 */
1597#define SCSI_STATUS_GOOD 0x00
1598#define SCSI_STATUS_CHECK_CONDITION 0x02
1599#define SCSI_STATUS_CONDITION_MET 0x04
1600#define SCSI_STATUS_BUSY 0x08
1601#define SCSI_STATUS_INTERMEDIATE 0x10
1602#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
1603#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
1604#define SCSI_STATUS_COMMAND_TERMINATED 0x22
1605#define SCSI_STATUS_QUEUE_FULL 0x28
1606#define SCSI_STATUS_ACA_ACTIVE 0x30
1607
1608#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length
1609 * in CDBs
1610 */
1611
1612#define SCSI_OP_WRITE_VERIFY10 0x2E
1613#define SCSI_OP_WRITE_VERIFY12 0xAE
1614#define SCSI_OP_UNDEF 0xFF
1615
1616/*
1617 * SCSI WRITE-VERIFY(10) command
1618 */
1619struct scsi_write_verify10_s{
1620 u8 opcode;
1621#ifdef __BIGENDIAN
1622 u8 reserved1:3;
1623 u8 dpo:1; /* Disable Page Out */
1624 u8 reserved2:1;
1625 u8 ebp:1; /* erse by-pass */
1626 u8 bytchk:1; /* byte check */
1627 u8 rel_adr:1; /* relative address */
1628#else
1629 u8 rel_adr:1; /* relative address */
1630 u8 bytchk:1; /* byte check */
1631 u8 ebp:1; /* erse by-pass */
1632 u8 reserved2:1;
1633 u8 dpo:1; /* Disable Page Out */
1634 u8 reserved1:3;
1635#endif
1636 u8 lba0; /* logical block address - MSB */
1637 u8 lba1;
1638 u8 lba2;
1639 u8 lba3; /* LSB */
1640 u8 reserved3;
1641 u8 xfer_length0; /* transfer length in blocks - MSB */
1642 u8 xfer_length1; /* LSB */
1643 u8 control;
1644};
1645
1646#pragma pack()
1647
1648#endif /* __SCSI_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/types.h b/drivers/scsi/bfa/include/protocol/types.h
deleted file mode 100644
index 2875a6cced3b..000000000000
--- a/drivers/scsi/bfa/include/protocol/types.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * types.h Protocol defined base types
20 */
21
22#ifndef __TYPES_H__
23#define __TYPES_H__
24
25#include <bfa_os_inc.h>
26
27#define wwn_t u64
28#define lun_t u64
29
30#define WWN_NULL (0)
31#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
32#define FC_ALPA_MAX 128
33
34#pragma pack(1)
35
36#define MAC_ADDRLEN (6)
37struct mac_s { u8 mac[MAC_ADDRLEN]; };
38#define mac_t struct mac_s
39
40#pragma pack()
41
42#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
deleted file mode 100644
index f6342efb6a90..000000000000
--- a/drivers/scsi/bfa/loop.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_loop.c vport private loop implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, LOOP);
29
30/**
31 * ALPA to LIXA bitmap mapping
32 *
33 * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
34 * is for L_bit (login required) and is filled as ALPA 0x00 here.
35 */
36static const u8 port_loop_alpa_map[] = {
37 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, /* Word 3 Bits 0..7 */
38 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, /* Word 3 Bits 8..15 */
39 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, /* Word 3 Bits 16..23 */
40 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, /* Word 3 Bits 24..31 */
41
42 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, /* Word 2 Bits 0..7 */
43 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, /* Word 2 Bits 8..15 */
44 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, /* Word 2 Bits 16..23 */
45 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, /* Word 2 Bits 24..31 */
46
47 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, /* Word 1 Bits 0..7 */
48 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, /* Word 1 Bits 8..15 */
49 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, /* Word 1 Bits 16..23 */
50 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, /* Word 1 Bits 24..31 */
51
52 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, /* Word 0 Bits 0..7 */
53 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, /* Word 0 Bits 8..15 */
54 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, /* Word 0 Bits 16..23 */
55 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01, 0x00, 0x00, /* Word 0 Bits 24..31 */
56};
57
58/*
59 * Local Functions
60 */
61static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
62 u8 alpa);
63
64static void bfa_fcs_port_loop_plogi_response(void *fcsarg,
65 struct bfa_fcxp_s *fcxp,
66 void *cbarg,
67 bfa_status_t req_status,
68 u32 rsp_len,
69 u32 resid_len,
70 struct fchs_s *rsp_fchs);
71/**
72 * Called by port to initializar in provate LOOP topology.
73 */
74void
75bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port)
76{
77}
78
79/**
80 * Called by port to notify transition to online state.
81 */
82void
83bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port)
84{
85
86 u8 num_alpa = port->port_topo.ploop.num_alpa;
87 u8 *alpa_pos_map = port->port_topo.ploop.alpa_pos_map;
88 struct bfa_fcs_rport_s *r_port;
89 int ii = 0;
90
91 /*
92 * If the port role is Initiator Mode, create Rports.
93 */
94 if (port->port_cfg.roles == BFA_PORT_ROLE_FCP_IM) {
95 /*
96 * Check if the ALPA positional bitmap is available.
97 * if not, we send PLOGI to all possible ALPAs.
98 */
99 if (num_alpa > 0) {
100 for (ii = 0; ii < num_alpa; ii++) {
101 /*
102 * ignore ALPA of bfa port
103 */
104 if (alpa_pos_map[ii] != port->pid) {
105 r_port = bfa_fcs_rport_create(port,
106 alpa_pos_map[ii]);
107 }
108 }
109 } else {
110 for (ii = 0; ii < MAX_ALPA_COUNT; ii++) {
111 /*
112 * ignore ALPA of bfa port
113 */
114 if ((port_loop_alpa_map[ii] > 0)
115 && (port_loop_alpa_map[ii] != port->pid))
116 bfa_fcs_port_loop_send_plogi(port,
117 port_loop_alpa_map[ii]);
118 /**TBD */
119 }
120 }
121 } else {
122 /*
123 * TBD Target Mode ??
124 */
125 }
126
127}
128
129/**
130 * Called by port to notify transition to offline state.
131 */
132void
133bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port)
134{
135
136}
137
138/**
139 * Called by port to notify a LIP on the loop.
140 */
141void
142bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port)
143{
144}
145
146/**
147 * Local Functions.
148 */
149static bfa_status_t
150bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
151{
152 struct fchs_s fchs;
153 struct bfa_fcxp_s *fcxp = NULL;
154 int len;
155
156 bfa_trc(port->fcs, alpa);
157
158 fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
159 NULL);
160 bfa_assert(fcxp);
161
162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
163 bfa_fcs_port_get_fcid(port), 0,
164 port->port_cfg.pwwn, port->port_cfg.nwwn,
165 bfa_fcport_get_maxfrsize(port->fcs->bfa));
166
167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
168 FC_CLASS_3, len, &fchs,
169 bfa_fcs_port_loop_plogi_response, (void *)port,
170 FC_MAX_PDUSZ, FC_RA_TOV);
171
172 return BFA_STATUS_OK;
173}
174
175/**
176 * Called by fcxp to notify the Plogi response
177 */
178static void
179bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
180 void *cbarg, bfa_status_t req_status,
181 u32 rsp_len, u32 resid_len,
182 struct fchs_s *rsp_fchs)
183{
184 struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
185 struct fc_logi_s *plogi_resp;
186 struct fc_els_cmd_s *els_cmd;
187
188 bfa_trc(port->fcs, req_status);
189
190 /*
191 * Sanity Checks
192 */
193 if (req_status != BFA_STATUS_OK) {
194 bfa_trc(port->fcs, req_status);
195 /*
196 * @todo
197 * This could mean that the device with this APLA does not
198 * exist on the loop.
199 */
200
201 return;
202 }
203
204 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
205 plogi_resp = (struct fc_logi_s *) els_cmd;
206
207 if (els_cmd->els_code == FC_ELS_ACC) {
208 bfa_fcs_rport_start(port, rsp_fchs, plogi_resp);
209 } else {
210 bfa_trc(port->fcs, plogi_resp->els_cmd.els_code);
211 bfa_assert(0);
212 }
213}
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
deleted file mode 100644
index 72b3f508d0e9..000000000000
--- a/drivers/scsi/bfa/lport_api.c
+++ /dev/null
@@ -1,303 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * port_api.c BFA FCS port
20 */
21
22#include <fcs/bfa_fcs.h>
23#include <fcs/bfa_fcs_lport.h>
24#include <fcs/bfa_fcs_rport.h>
25#include "fcs_rport.h"
26#include "fcs_fabric.h"
27#include "fcs_trcmod.h"
28#include "fcs_vport.h"
29
30BFA_TRC_FILE(FCS, PORT_API);
31
32
33
34/**
35 * fcs_port_api BFA FCS port API
36 */
37
38void
39bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg)
40{
41}
42
43struct bfa_fcs_port_s *
44bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
45{
46 return &fcs->fabric.bport;
47}
48
49wwn_t
50bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
51 int nrports, bfa_boolean_t bwwn)
52{
53 struct list_head *qh, *qe;
54 struct bfa_fcs_rport_s *rport = NULL;
55 int i;
56 struct bfa_fcs_s *fcs;
57
58 if (port == NULL || nrports == 0)
59 return (wwn_t) 0;
60
61 fcs = port->fcs;
62 bfa_trc(fcs, (u32) nrports);
63
64 i = 0;
65 qh = &port->rport_q;
66 qe = bfa_q_first(qh);
67
68 while ((qe != qh) && (i < nrports)) {
69 rport = (struct bfa_fcs_rport_s *)qe;
70 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
71 qe = bfa_q_next(qe);
72 bfa_trc(fcs, (u32) rport->pwwn);
73 bfa_trc(fcs, rport->pid);
74 bfa_trc(fcs, i);
75 continue;
76 }
77
78 if (bwwn) {
79 if (!memcmp(&wwn, &rport->pwwn, 8))
80 break;
81 } else {
82 if (i == index)
83 break;
84 }
85
86 i++;
87 qe = bfa_q_next(qe);
88 }
89
90 bfa_trc(fcs, i);
91 if (rport)
92 return rport->pwwn;
93 else
94 return (wwn_t) 0;
95}
96
97void
98bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[],
99 int *nrports)
100{
101 struct list_head *qh, *qe;
102 struct bfa_fcs_rport_s *rport = NULL;
103 int i;
104 struct bfa_fcs_s *fcs;
105
106 if (port == NULL || rport_wwns == NULL || *nrports == 0)
107 return;
108
109 fcs = port->fcs;
110 bfa_trc(fcs, (u32) *nrports);
111
112 i = 0;
113 qh = &port->rport_q;
114 qe = bfa_q_first(qh);
115
116 while ((qe != qh) && (i < *nrports)) {
117 rport = (struct bfa_fcs_rport_s *)qe;
118 if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
119 qe = bfa_q_next(qe);
120 bfa_trc(fcs, (u32) rport->pwwn);
121 bfa_trc(fcs, rport->pid);
122 bfa_trc(fcs, i);
123 continue;
124 }
125
126 rport_wwns[i] = rport->pwwn;
127
128 i++;
129 qe = bfa_q_next(qe);
130 }
131
132 bfa_trc(fcs, i);
133 *nrports = i;
134 return;
135}
136
137/*
138 * Iterate's through all the rport's in the given port to
139 * determine the maximum operating speed.
140 *
141 * To be used in TRL Functionality only
142 */
143enum bfa_pport_speed
144bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
145{
146 struct list_head *qh, *qe;
147 struct bfa_fcs_rport_s *rport = NULL;
148 struct bfa_fcs_s *fcs;
149 enum bfa_pport_speed max_speed = 0;
150 struct bfa_pport_attr_s pport_attr;
151 enum bfa_pport_speed pport_speed, rport_speed;
152 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
153
154 if (port == NULL)
155 return 0;
156
157 fcs = port->fcs;
158
159 /*
160 * Get Physical port's current speed
161 */
162 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
163 pport_speed = pport_attr.speed;
164 bfa_trc(fcs, pport_speed);
165
166 qh = &port->rport_q;
167 qe = bfa_q_first(qh);
168
169 while (qe != qh) {
170 rport = (struct bfa_fcs_rport_s *) qe;
171 if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
172 (bfa_fcs_rport_get_state(rport) ==
173 BFA_RPORT_OFFLINE)) {
174 qe = bfa_q_next(qe);
175 continue;
176 }
177
178 rport_speed = rport->rpf.rpsc_speed;
179 if ((trl_enabled) && (rport_speed ==
180 BFA_PPORT_SPEED_UNKNOWN)) {
181 /* Use default ratelim speed setting */
182 rport_speed =
183 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
184 }
185
186 if ((rport_speed == BFA_PPORT_SPEED_8GBPS) ||
187 (rport_speed > pport_speed)) {
188 max_speed = rport_speed;
189 break;
190 } else if (rport_speed > max_speed) {
191 max_speed = rport_speed;
192 }
193
194 qe = bfa_q_next(qe);
195 }
196
197 bfa_trc(fcs, max_speed);
198 return max_speed;
199}
200
201struct bfa_fcs_port_s *
202bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
203{
204 struct bfa_fcs_vport_s *vport;
205 bfa_fcs_vf_t *vf;
206
207 bfa_assert(fcs != NULL);
208
209 vf = bfa_fcs_vf_lookup(fcs, vf_id);
210 if (vf == NULL) {
211 bfa_trc(fcs, vf_id);
212 return NULL;
213 }
214
215 if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
216 return &vf->bport;
217
218 vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
219 if (vport)
220 return &vport->lport;
221
222 return NULL;
223}
224
225/*
226 * API corresponding to VmWare's NPIV_VPORT_GETINFO.
227 */
228void
229bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
230 struct bfa_port_info_s *port_info)
231{
232
233 bfa_trc(port->fcs, port->fabric->fabric_name);
234
235 if (port->vport == NULL) {
236 /*
237 * This is a Physical port
238 */
239 port_info->port_type = BFA_PORT_TYPE_PHYSICAL;
240
241 /*
242 * @todo : need to fix the state & reason
243 */
244 port_info->port_state = 0;
245 port_info->offline_reason = 0;
246
247 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
248 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
249
250 port_info->max_vports_supp =
251 bfa_lps_get_max_vport(port->fcs->bfa);
252 port_info->num_vports_inuse =
253 bfa_fcs_fabric_vport_count(port->fabric);
254 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
255 port_info->num_rports_inuse = port->num_rports;
256 } else {
257 /*
258 * This is a virtual port
259 */
260 port_info->port_type = BFA_PORT_TYPE_VIRTUAL;
261
262 /*
263 * @todo : need to fix the state & reason
264 */
265 port_info->port_state = 0;
266 port_info->offline_reason = 0;
267
268 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
269 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
270 }
271}
272
273void
274bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port,
275 struct bfa_port_stats_s *port_stats)
276{
277 bfa_os_memcpy(port_stats, &fcs_port->stats,
278 sizeof(struct bfa_port_stats_s));
279 return;
280}
281
282void
283bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port)
284{
285 bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_port_stats_s));
286 return;
287}
288
289void
290bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
291{
292 fcs_port->port_cfg.roles |= BFA_PORT_ROLE_FCP_IPFC;
293 return;
294}
295
296void
297bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port)
298{
299 fcs_port->port_cfg.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
300 return;
301}
302
303
diff --git a/drivers/scsi/bfa/lport_priv.h b/drivers/scsi/bfa/lport_priv.h
deleted file mode 100644
index dbae370a599a..000000000000
--- a/drivers/scsi/bfa/lport_priv.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __VP_PRIV_H__
19#define __VP_PRIV_H__
20
21#include <fcs/bfa_fcs_lport.h>
22#include <fcs/bfa_fcs_vport.h>
23
24/*
25 * Functions exported by vps
26 */
27void bfa_fcs_vport_init(struct bfa_fcs_vport_s *vport);
28
29/*
30 * Functions exported by vps
31 */
32void bfa_fcs_vps_online(struct bfa_fcs_port_s *port);
33void bfa_fcs_vps_offline(struct bfa_fcs_port_s *port);
34void bfa_fcs_vps_lip(struct bfa_fcs_port_s *port);
35
36/*
37 * Functions exported by port_fab
38 */
39void bfa_fcs_port_fab_init(struct bfa_fcs_port_s *vport);
40void bfa_fcs_port_fab_online(struct bfa_fcs_port_s *vport);
41void bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *vport);
42void bfa_fcs_port_fab_rx_frame(struct bfa_fcs_port_s *port,
43 u8 *rx_frame, u32 len);
44
45/*
46 * Functions exported by VP-NS.
47 */
48void bfa_fcs_port_ns_init(struct bfa_fcs_port_s *vport);
49void bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *vport);
50void bfa_fcs_port_ns_online(struct bfa_fcs_port_s *vport);
51void bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port);
52
53/*
54 * Functions exported by VP-SCN
55 */
56void bfa_fcs_port_scn_init(struct bfa_fcs_port_s *vport);
57void bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *vport);
58void bfa_fcs_port_scn_online(struct bfa_fcs_port_s *vport);
59void bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port,
60 struct fchs_s *rx_frame, u32 len);
61
62/*
63 * Functions exported by VP-N2N
64 */
65
66void bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port);
67void bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port);
68void bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port);
69void bfa_fcs_port_n2n_rx_frame(struct bfa_fcs_port_s *port,
70 u8 *rx_frame, u32 len);
71
72/*
73 * Functions exported by VP-LOOP
74 */
75void bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port);
76void bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port);
77void bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port);
78void bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port);
79void bfa_fcs_port_loop_rx_frame(struct bfa_fcs_port_s *port,
80 u8 *rx_frame, u32 len);
81
82#endif /* __VP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
deleted file mode 100644
index 1d579ef26122..000000000000
--- a/drivers/scsi/bfa/ms.c
+++ /dev/null
@@ -1,759 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18
19#include <bfa.h>
20#include <bfa_svc.h>
21#include "fcs_lport.h"
22#include "fcs_rport.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "lport_priv.h"
26
27BFA_TRC_FILE(FCS, MS);
28
29#define BFA_FCS_MS_CMD_MAX_RETRIES 2
30/*
31 * forward declarations
32 */
33static void bfa_fcs_port_ms_send_plogi(void *ms_cbarg,
34 struct bfa_fcxp_s *fcxp_alloced);
35static void bfa_fcs_port_ms_timeout(void *arg);
36static void bfa_fcs_port_ms_plogi_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp,
38 void *cbarg,
39 bfa_status_t req_status,
40 u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43
44static void bfa_fcs_port_ms_send_gmal(void *ms_cbarg,
45 struct bfa_fcxp_s *fcxp_alloced);
46static void bfa_fcs_port_ms_gmal_response(void *fcsarg,
47 struct bfa_fcxp_s *fcxp,
48 void *cbarg,
49 bfa_status_t req_status,
50 u32 rsp_len,
51 u32 resid_len,
52 struct fchs_s *rsp_fchs);
53static void bfa_fcs_port_ms_send_gfn(void *ms_cbarg,
54 struct bfa_fcxp_s *fcxp_alloced);
55static void bfa_fcs_port_ms_gfn_response(void *fcsarg,
56 struct bfa_fcxp_s *fcxp,
57 void *cbarg,
58 bfa_status_t req_status,
59 u32 rsp_len,
60 u32 resid_len,
61 struct fchs_s *rsp_fchs);
62/**
63 * fcs_ms_sm FCS MS state machine
64 */
65
66/**
67 * MS State Machine events
68 */
69enum port_ms_event {
70 MSSM_EVENT_PORT_ONLINE = 1,
71 MSSM_EVENT_PORT_OFFLINE = 2,
72 MSSM_EVENT_RSP_OK = 3,
73 MSSM_EVENT_RSP_ERROR = 4,
74 MSSM_EVENT_TIMEOUT = 5,
75 MSSM_EVENT_FCXP_SENT = 6,
76 MSSM_EVENT_PORT_FABRIC_RSCN = 7
77};
78
79static void bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
80 enum port_ms_event event);
81static void bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
82 enum port_ms_event event);
83static void bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms,
84 enum port_ms_event event);
85static void bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
86 enum port_ms_event event);
87static void bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
88 enum port_ms_event event);
89static void bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms,
90 enum port_ms_event event);
91static void bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
92 enum port_ms_event event);
93static void bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
94 enum port_ms_event event);
95static void bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms,
96 enum port_ms_event event);
97static void bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
98 enum port_ms_event event);
99static void bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
100 enum port_ms_event event);
101/**
102 * Start in offline state - awaiting NS to send start.
103 */
104static void
105bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
106 enum port_ms_event event)
107{
108 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
109 bfa_trc(ms->port->fcs, event);
110
111 switch (event) {
112 case MSSM_EVENT_PORT_ONLINE:
113 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
114 bfa_fcs_port_ms_send_plogi(ms, NULL);
115 break;
116
117 case MSSM_EVENT_PORT_OFFLINE:
118 break;
119
120 default:
121 bfa_sm_fault(ms->port->fcs, event);
122 }
123}
124
125static void
126bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
127 enum port_ms_event event)
128{
129 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
130 bfa_trc(ms->port->fcs, event);
131
132 switch (event) {
133 case MSSM_EVENT_FCXP_SENT:
134 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi);
135 break;
136
137 case MSSM_EVENT_PORT_OFFLINE:
138 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
139 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
140 &ms->fcxp_wqe);
141 break;
142
143 default:
144 bfa_sm_fault(ms->port->fcs, event);
145 }
146}
147
148static void
149bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
150{
151 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
152 bfa_trc(ms->port->fcs, event);
153
154 switch (event) {
155 case MSSM_EVENT_RSP_ERROR:
156 /*
157 * Start timer for a delayed retry
158 */
159 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry);
160 ms->port->stats.ms_retries++;
161 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer,
162 bfa_fcs_port_ms_timeout, ms,
163 BFA_FCS_RETRY_TIMEOUT);
164 break;
165
166 case MSSM_EVENT_RSP_OK:
167 /*
168 * since plogi is done, now invoke MS related sub-modules
169 */
170 bfa_fcs_port_fdmi_online(ms);
171
172 /**
173 * if this is a Vport, go to online state.
174 */
175 if (ms->port->vport) {
176 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
177 break;
178 }
179
180 /*
181 * For a base port we need to get the
182 * switch's IP address.
183 */
184 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
185 bfa_fcs_port_ms_send_gmal(ms, NULL);
186 break;
187
188 case MSSM_EVENT_PORT_OFFLINE:
189 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
190 bfa_fcxp_discard(ms->fcxp);
191 break;
192
193 default:
194 bfa_sm_fault(ms->port->fcs, event);
195 }
196}
197
198static void
199bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
200 enum port_ms_event event)
201{
202 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
203 bfa_trc(ms->port->fcs, event);
204
205 switch (event) {
206 case MSSM_EVENT_TIMEOUT:
207 /*
208 * Retry Timer Expired. Re-send
209 */
210 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
211 bfa_fcs_port_ms_send_plogi(ms, NULL);
212 break;
213
214 case MSSM_EVENT_PORT_OFFLINE:
215 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
216 bfa_timer_stop(&ms->timer);
217 break;
218
219 default:
220 bfa_sm_fault(ms->port->fcs, event);
221 }
222}
223
224static void
225bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
226 enum port_ms_event event)
227{
228 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
229 bfa_trc(ms->port->fcs, event);
230
231 switch (event) {
232 case MSSM_EVENT_PORT_OFFLINE:
233 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
234 break;
235
236 case MSSM_EVENT_PORT_FABRIC_RSCN:
237 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
238 ms->retry_cnt = 0;
239 bfa_fcs_port_ms_send_gfn(ms, NULL);
240 break;
241
242 default:
243 bfa_sm_fault(ms->port->fcs, event);
244 }
245}
246
247static void
248bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
249 enum port_ms_event event)
250{
251 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
252 bfa_trc(ms->port->fcs, event);
253
254 switch (event) {
255 case MSSM_EVENT_FCXP_SENT:
256 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal);
257 break;
258
259 case MSSM_EVENT_PORT_OFFLINE:
260 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
261 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
262 &ms->fcxp_wqe);
263 break;
264
265 default:
266 bfa_sm_fault(ms->port->fcs, event);
267 }
268}
269
270static void
271bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
272{
273 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
274 bfa_trc(ms->port->fcs, event);
275
276 switch (event) {
277 case MSSM_EVENT_RSP_ERROR:
278 /*
279 * Start timer for a delayed retry
280 */
281 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
282 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry);
283 ms->port->stats.ms_retries++;
284 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
285 &ms->timer, bfa_fcs_port_ms_timeout, ms,
286 BFA_FCS_RETRY_TIMEOUT);
287 } else {
288 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
289 bfa_fcs_port_ms_send_gfn(ms, NULL);
290 ms->retry_cnt = 0;
291 }
292 break;
293
294 case MSSM_EVENT_RSP_OK:
295 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
296 bfa_fcs_port_ms_send_gfn(ms, NULL);
297 break;
298
299 case MSSM_EVENT_PORT_OFFLINE:
300 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
301 bfa_fcxp_discard(ms->fcxp);
302 break;
303
304 default:
305 bfa_sm_fault(ms->port->fcs, event);
306 }
307}
308
309static void
310bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
311 enum port_ms_event event)
312{
313 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
314 bfa_trc(ms->port->fcs, event);
315
316 switch (event) {
317 case MSSM_EVENT_TIMEOUT:
318 /*
319 * Retry Timer Expired. Re-send
320 */
321 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending);
322 bfa_fcs_port_ms_send_gmal(ms, NULL);
323 break;
324
325 case MSSM_EVENT_PORT_OFFLINE:
326 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
327 bfa_timer_stop(&ms->timer);
328 break;
329
330 default:
331 bfa_sm_fault(ms->port->fcs, event);
332 }
333}
334
335/**
336 * ms_pvt MS local functions
337 */
338
339static void
340bfa_fcs_port_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
341{
342 struct bfa_fcs_port_ms_s *ms = ms_cbarg;
343 struct bfa_fcs_port_s *port = ms->port;
344 struct fchs_s fchs;
345 int len;
346 struct bfa_fcxp_s *fcxp;
347
348 bfa_trc(port->fcs, port->pid);
349
350 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
351 if (!fcxp) {
352 bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
353 bfa_fcs_port_ms_send_gmal, ms);
354 return;
355 }
356 ms->fcxp = fcxp;
357
358 len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
359 bfa_fcs_port_get_fcid(port),
360 bfa_lps_get_peer_nwwn(port->fabric->lps));
361
362 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
363 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response,
364 (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);
365
366 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
367}
368
369static void
370bfa_fcs_port_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
371 void *cbarg, bfa_status_t req_status,
372 u32 rsp_len, u32 resid_len,
373 struct fchs_s *rsp_fchs)
374{
375 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
376 struct bfa_fcs_port_s *port = ms->port;
377 struct ct_hdr_s *cthdr = NULL;
378 struct fcgs_gmal_resp_s *gmal_resp;
379 struct fc_gmal_entry_s *gmal_entry;
380 u32 num_entries;
381 u8 *rsp_str;
382
383 bfa_trc(port->fcs, req_status);
384 bfa_trc(port->fcs, port->port_cfg.pwwn);
385
386 /*
387 * Sanity Checks
388 */
389 if (req_status != BFA_STATUS_OK) {
390 bfa_trc(port->fcs, req_status);
391 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
392 return;
393 }
394
395 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
396 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
397
398 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
399 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
400 num_entries = bfa_os_ntohl(gmal_resp->ms_len);
401 if (num_entries == 0) {
402 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
403 return;
404 }
405 /*
406 * The response could contain multiple Entries.
407 * Entries for SNMP interface, etc.
408 * We look for the entry with a telnet prefix.
409 * First "http://" entry refers to IP addr
410 */
411
412 gmal_entry = (struct fc_gmal_entry_s *)gmal_resp->ms_ma;
413 while (num_entries > 0) {
414 if (strncmp
415 (gmal_entry->prefix, CT_GMAL_RESP_PREFIX_HTTP,
416 sizeof(gmal_entry->prefix)) == 0) {
417
418 /*
419 * if the IP address is terminating with a '/',
420 * remove it. *Byte 0 consists of the length
421 * of the string.
422 */
423 rsp_str = &(gmal_entry->prefix[0]);
424 if (rsp_str[gmal_entry->len - 1] == '/')
425 rsp_str[gmal_entry->len - 1] = 0;
426 /*
427 * copy IP Address to fabric
428 */
429 strncpy(bfa_fcs_port_get_fabric_ipaddr(port),
430 gmal_entry->ip_addr,
431 BFA_FCS_FABRIC_IPADDR_SZ);
432 break;
433 } else {
434 --num_entries;
435 ++gmal_entry;
436 }
437 }
438
439 bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
440 return;
441 }
442
443 bfa_trc(port->fcs, cthdr->reason_code);
444 bfa_trc(port->fcs, cthdr->exp_code);
445 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
446}
447
448static void
449bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
450 enum port_ms_event event)
451{
452 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
453 bfa_trc(ms->port->fcs, event);
454
455 switch (event) {
456 case MSSM_EVENT_FCXP_SENT:
457 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn);
458 break;
459
460 case MSSM_EVENT_PORT_OFFLINE:
461 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
462 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
463 &ms->fcxp_wqe);
464 break;
465
466 default:
467 bfa_sm_fault(ms->port->fcs, event);
468 }
469}
470
471static void
472bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
473{
474 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
475 bfa_trc(ms->port->fcs, event);
476
477 switch (event) {
478 case MSSM_EVENT_RSP_ERROR:
479 /*
480 * Start timer for a delayed retry
481 */
482 if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
483 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
484 ms->port->stats.ms_retries++;
485 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
486 &ms->timer, bfa_fcs_port_ms_timeout, ms,
487 BFA_FCS_RETRY_TIMEOUT);
488 } else {
489 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
490 ms->retry_cnt = 0;
491 }
492 break;
493
494 case MSSM_EVENT_RSP_OK:
495 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
496 break;
497
498 case MSSM_EVENT_PORT_OFFLINE:
499 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
500 bfa_fcxp_discard(ms->fcxp);
501 break;
502
503 default:
504 bfa_sm_fault(ms->port->fcs, event);
505 }
506}
507
508static void
509bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
510 enum port_ms_event event)
511{
512 bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
513 bfa_trc(ms->port->fcs, event);
514
515 switch (event) {
516 case MSSM_EVENT_TIMEOUT:
517 /*
518 * Retry Timer Expired. Re-send
519 */
520 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
521 bfa_fcs_port_ms_send_gfn(ms, NULL);
522 break;
523
524 case MSSM_EVENT_PORT_OFFLINE:
525 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
526 bfa_timer_stop(&ms->timer);
527 break;
528
529 default:
530 bfa_sm_fault(ms->port->fcs, event);
531 }
532}
533
534/**
535 * ms_pvt MS local functions
536 */
537
538static void
539bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
540{
541 struct bfa_fcs_port_ms_s *ms = ms_cbarg;
542 struct bfa_fcs_port_s *port = ms->port;
543 struct fchs_s fchs;
544 int len;
545 struct bfa_fcxp_s *fcxp;
546
547 bfa_trc(port->fcs, port->pid);
548
549 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
550 if (!fcxp) {
551 bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
552 bfa_fcs_port_ms_send_gfn, ms);
553 return;
554 }
555 ms->fcxp = fcxp;
556
557 len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
558 bfa_fcs_port_get_fcid(port),
559 bfa_lps_get_peer_nwwn(port->fabric->lps));
560
561 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
562 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response,
563 (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV);
564
565 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
566}
567
568static void
569bfa_fcs_port_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
570 bfa_status_t req_status, u32 rsp_len,
571 u32 resid_len, struct fchs_s *rsp_fchs)
572{
573 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
574 struct bfa_fcs_port_s *port = ms->port;
575 struct ct_hdr_s *cthdr = NULL;
576 wwn_t *gfn_resp;
577
578 bfa_trc(port->fcs, req_status);
579 bfa_trc(port->fcs, port->port_cfg.pwwn);
580
581 /*
582 * Sanity Checks
583 */
584 if (req_status != BFA_STATUS_OK) {
585 bfa_trc(port->fcs, req_status);
586 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
587 return;
588 }
589
590 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
591 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
592
593 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
594 gfn_resp = (wwn_t *) (cthdr + 1);
595 /*
596 * check if it has actually changed
597 */
598 if ((memcmp
599 ((void *)&bfa_fcs_port_get_fabric_name(port), gfn_resp,
600 sizeof(wwn_t)) != 0))
601 bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
602 bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
603 return;
604 }
605
606 bfa_trc(port->fcs, cthdr->reason_code);
607 bfa_trc(port->fcs, cthdr->exp_code);
608 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
609}
610
611/**
612 * ms_pvt MS local functions
613 */
614
615static void
616bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
617{
618 struct bfa_fcs_port_ms_s *ms = ms_cbarg;
619 struct bfa_fcs_port_s *port = ms->port;
620 struct fchs_s fchs;
621 int len;
622 struct bfa_fcxp_s *fcxp;
623
624 bfa_trc(port->fcs, port->pid);
625
626 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
627 if (!fcxp) {
628 port->stats.ms_plogi_alloc_wait++;
629 bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
630 bfa_fcs_port_ms_send_plogi, ms);
631 return;
632 }
633 ms->fcxp = fcxp;
634
635 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
636 bfa_os_hton3b(FC_MGMT_SERVER),
637 bfa_fcs_port_get_fcid(port), 0,
638 port->port_cfg.pwwn, port->port_cfg.nwwn,
639 bfa_fcport_get_maxfrsize(port->fcs->bfa));
640
641 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
642 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
643 (void *)ms, FC_MAX_PDUSZ, FC_ELS_TOV);
644
645 port->stats.ms_plogi_sent++;
646 bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
647}
648
649static void
650bfa_fcs_port_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
651 void *cbarg, bfa_status_t req_status,
652 u32 rsp_len, u32 resid_len,
653 struct fchs_s *rsp_fchs)
654{
655 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
656
657 struct bfa_fcs_port_s *port = ms->port;
658 struct fc_els_cmd_s *els_cmd;
659 struct fc_ls_rjt_s *ls_rjt;
660
661 bfa_trc(port->fcs, req_status);
662 bfa_trc(port->fcs, port->port_cfg.pwwn);
663
664 /*
665 * Sanity Checks
666 */
667 if (req_status != BFA_STATUS_OK) {
668 port->stats.ms_plogi_rsp_err++;
669 bfa_trc(port->fcs, req_status);
670 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
671 return;
672 }
673
674 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
675
676 switch (els_cmd->els_code) {
677
678 case FC_ELS_ACC:
679 if (rsp_len < sizeof(struct fc_logi_s)) {
680 bfa_trc(port->fcs, rsp_len);
681 port->stats.ms_plogi_acc_err++;
682 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
683 break;
684 }
685 port->stats.ms_plogi_accepts++;
686 bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
687 break;
688
689 case FC_ELS_LS_RJT:
690 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
691
692 bfa_trc(port->fcs, ls_rjt->reason_code);
693 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
694
695 port->stats.ms_rejects++;
696 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
697 break;
698
699 default:
700 port->stats.ms_plogi_unknown_rsp++;
701 bfa_trc(port->fcs, els_cmd->els_code);
702 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
703 }
704}
705
706static void
707bfa_fcs_port_ms_timeout(void *arg)
708{
709 struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)arg;
710
711 ms->port->stats.ms_timeouts++;
712 bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
713}
714
715
716void
717bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port)
718{
719 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
720
721 ms->port = port;
722 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
723
724 /*
725 * Invoke init routines of sub modules.
726 */
727 bfa_fcs_port_fdmi_init(ms);
728}
729
730void
731bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
732{
733 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
734
735 ms->port = port;
736 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
737 bfa_fcs_port_fdmi_offline(ms);
738}
739
740void
741bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port)
742{
743 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
744
745 ms->port = port;
746 bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
747}
748
749void
750bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port)
751{
752 struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
753
754 /*
755 * @todo. Handle this only when in Online state
756 */
757 if (bfa_sm_cmp_state(ms, bfa_fcs_port_ms_sm_online))
758 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
759}
diff --git a/drivers/scsi/bfa/n2n.c b/drivers/scsi/bfa/n2n.c
deleted file mode 100644
index 735456824346..000000000000
--- a/drivers/scsi/bfa/n2n.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * n2n.c n2n implementation.
20 */
21#include <bfa.h>
22#include <bfa_svc.h>
23#include "fcs_lport.h"
24#include "fcs_rport.h"
25#include "fcs_trcmod.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, N2N);
29
/**
 * Called by fcs/port to initialize N2N topology.  Currently a no-op;
 * kept for symmetry with the other topology init hooks.
 */
void
bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port)
{
}
37
38/**
39 * Called by fcs/port to notify transition to online state.
40 */
41void
42bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port)
43{
44 struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
45 struct bfa_port_cfg_s *pcfg = &port->port_cfg;
46 struct bfa_fcs_rport_s *rport;
47
48 bfa_trc(port->fcs, pcfg->pwwn);
49
50 /*
51 * If our PWWN is > than that of the r-port, we have to initiate PLOGI
52 * and assign an Address. if not, we need to wait for its PLOGI.
53 *
54 * If our PWWN is < than that of the remote port, it will send a PLOGI
55 * with the PIDs assigned. The rport state machine take care of this
56 * incoming PLOGI.
57 */
58 if (memcmp
59 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
60 sizeof(wwn_t)) > 0) {
61 port->pid = N2N_LOCAL_PID;
62 /**
63 * First, check if we know the device by pwwn.
64 */
65 rport = bfa_fcs_port_get_rport_by_pwwn(port,
66 n2n_port->rem_port_wwn);
67 if (rport) {
68 bfa_trc(port->fcs, rport->pid);
69 bfa_trc(port->fcs, rport->pwwn);
70 rport->pid = N2N_REMOTE_PID;
71 bfa_fcs_rport_online(rport);
72 return;
73 }
74
75 /*
76 * In n2n there can be only one rport. Delete the old one whose
77 * pid should be zero, because it is offline.
78 */
79 if (port->num_rports > 0) {
80 rport = bfa_fcs_port_get_rport_by_pid(port, 0);
81 bfa_assert(rport != NULL);
82 if (rport) {
83 bfa_trc(port->fcs, rport->pwwn);
84 bfa_fcs_rport_delete(rport);
85 }
86 }
87 bfa_fcs_rport_create(port, N2N_REMOTE_PID);
88 }
89}
90
91/**
92 * Called by fcs/port to notify transition to offline state.
93 */
94void
95bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port)
96{
97 struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;
98
99 bfa_trc(port->fcs, port->pid);
100 port->pid = 0;
101 n2n_port->rem_port_wwn = 0;
102 n2n_port->reply_oxid = 0;
103}
104
105
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
deleted file mode 100644
index ae0edcc86ed5..000000000000
--- a/drivers/scsi/bfa/ns.c
+++ /dev/null
@@ -1,1242 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * @page ns_sm_info VPORT NS State Machine
20 *
21 * @section ns_sm_interactions VPORT NS State Machine Interactions
22 *
23 * @section ns_sm VPORT NS State Machine
24 * img ns_sm.jpg
25 */
26#include <bfa.h>
27#include <bfa_svc.h>
28#include <bfa_iocfc.h>
29#include "fcs_lport.h"
30#include "fcs_rport.h"
31#include "fcs_trcmod.h"
32#include "fcs_fcxp.h"
33#include "fcs.h"
34#include "lport_priv.h"
35
36BFA_TRC_FILE(FCS, NS);
37
38/*
39 * forward declarations
40 */
41static void bfa_fcs_port_ns_send_plogi(void *ns_cbarg,
42 struct bfa_fcxp_s *fcxp_alloced);
43static void bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg,
44 struct bfa_fcxp_s *fcxp_alloced);
45static void bfa_fcs_port_ns_send_rft_id(void *ns_cbarg,
46 struct bfa_fcxp_s *fcxp_alloced);
47static void bfa_fcs_port_ns_send_rff_id(void *ns_cbarg,
48 struct bfa_fcxp_s *fcxp_alloced);
49static void bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg,
50 struct bfa_fcxp_s *fcxp_alloced);
51static void bfa_fcs_port_ns_timeout(void *arg);
52static void bfa_fcs_port_ns_plogi_response(void *fcsarg,
53 struct bfa_fcxp_s *fcxp,
54 void *cbarg,
55 bfa_status_t req_status,
56 u32 rsp_len,
57 u32 resid_len,
58 struct fchs_s *rsp_fchs);
59static void bfa_fcs_port_ns_rspn_id_response(void *fcsarg,
60 struct bfa_fcxp_s *fcxp,
61 void *cbarg,
62 bfa_status_t req_status,
63 u32 rsp_len,
64 u32 resid_len,
65 struct fchs_s *rsp_fchs);
66static void bfa_fcs_port_ns_rft_id_response(void *fcsarg,
67 struct bfa_fcxp_s *fcxp,
68 void *cbarg,
69 bfa_status_t req_status,
70 u32 rsp_len,
71 u32 resid_len,
72 struct fchs_s *rsp_fchs);
73static void bfa_fcs_port_ns_rff_id_response(void *fcsarg,
74 struct bfa_fcxp_s *fcxp,
75 void *cbarg,
76 bfa_status_t req_status,
77 u32 rsp_len,
78 u32 resid_len,
79 struct fchs_s *rsp_fchs);
80static void bfa_fcs_port_ns_gid_ft_response(void *fcsarg,
81 struct bfa_fcxp_s *fcxp,
82 void *cbarg,
83 bfa_status_t req_status,
84 u32 rsp_len,
85 u32 resid_len,
86 struct fchs_s *rsp_fchs);
87static void bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
88 u32 *pid_buf,
89 u32 n_pids);
90
91static void bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port);
92/**
93 * fcs_ns_sm FCS nameserver interface state machine
94 */
95
96/**
97 * VPort NS State Machine events
98 */
99enum vport_ns_event {
100 NSSM_EVENT_PORT_ONLINE = 1,
101 NSSM_EVENT_PORT_OFFLINE = 2,
102 NSSM_EVENT_PLOGI_SENT = 3,
103 NSSM_EVENT_RSP_OK = 4,
104 NSSM_EVENT_RSP_ERROR = 5,
105 NSSM_EVENT_TIMEOUT = 6,
106 NSSM_EVENT_NS_QUERY = 7,
107 NSSM_EVENT_RSPNID_SENT = 8,
108 NSSM_EVENT_RFTID_SENT = 9,
109 NSSM_EVENT_RFFID_SENT = 10,
110 NSSM_EVENT_GIDFT_SENT = 11,
111};
112
113static void bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
114 enum vport_ns_event event);
115static void bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
116 enum vport_ns_event event);
117static void bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
118 enum vport_ns_event event);
119static void bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
120 enum vport_ns_event event);
121static void bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
122 enum vport_ns_event event);
123static void bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
124 enum vport_ns_event event);
125static void bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
126 enum vport_ns_event event);
127static void bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
128 enum vport_ns_event event);
129static void bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
130 enum vport_ns_event event);
131static void bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
132 enum vport_ns_event event);
133static void bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
134 enum vport_ns_event event);
135static void bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
136 enum vport_ns_event event);
137static void bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
138 enum vport_ns_event event);
139static void bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
140 enum vport_ns_event event);
141static void bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
142 enum vport_ns_event event);
143static void bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
144 enum vport_ns_event event);
145static void bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
146 enum vport_ns_event event);
147/**
148 * Start in offline state - awaiting linkup
149 */
150static void
151bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
152 enum vport_ns_event event)
153{
154 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
155 bfa_trc(ns->port->fcs, event);
156
157 switch (event) {
158 case NSSM_EVENT_PORT_ONLINE:
159 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
160 bfa_fcs_port_ns_send_plogi(ns, NULL);
161 break;
162
163 case NSSM_EVENT_PORT_OFFLINE:
164 break;
165
166 default:
167 bfa_sm_fault(ns->port->fcs, event);
168 }
169}
170
171static void
172bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
173 enum vport_ns_event event)
174{
175 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
176 bfa_trc(ns->port->fcs, event);
177
178 switch (event) {
179 case NSSM_EVENT_PLOGI_SENT:
180 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi);
181 break;
182
183 case NSSM_EVENT_PORT_OFFLINE:
184 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
185 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
186 &ns->fcxp_wqe);
187 break;
188
189 default:
190 bfa_sm_fault(ns->port->fcs, event);
191 }
192}
193
194static void
195bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
196 enum vport_ns_event event)
197{
198 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
199 bfa_trc(ns->port->fcs, event);
200
201 switch (event) {
202 case NSSM_EVENT_RSP_ERROR:
203 /*
204 * Start timer for a delayed retry
205 */
206 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_retry);
207 ns->port->stats.ns_retries++;
208 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
209 bfa_fcs_port_ns_timeout, ns,
210 BFA_FCS_RETRY_TIMEOUT);
211 break;
212
213 case NSSM_EVENT_RSP_OK:
214 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
215 bfa_fcs_port_ns_send_rspn_id(ns, NULL);
216 break;
217
218 case NSSM_EVENT_PORT_OFFLINE:
219 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
220 bfa_fcxp_discard(ns->fcxp);
221 break;
222
223 default:
224 bfa_sm_fault(ns->port->fcs, event);
225 }
226}
227
228static void
229bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
230 enum vport_ns_event event)
231{
232 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
233 bfa_trc(ns->port->fcs, event);
234
235 switch (event) {
236 case NSSM_EVENT_TIMEOUT:
237 /*
238 * Retry Timer Expired. Re-send
239 */
240 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
241 bfa_fcs_port_ns_send_plogi(ns, NULL);
242 break;
243
244 case NSSM_EVENT_PORT_OFFLINE:
245 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
246 bfa_timer_stop(&ns->timer);
247 break;
248
249 default:
250 bfa_sm_fault(ns->port->fcs, event);
251 }
252}
253
/*
 * RSPN_ID send has been requested; either the frame goes out (RSPNID_SENT)
 * or the port goes offline, in which case the queued fcxp wait is cancelled.
 */
static void
bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
				   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSPNID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
276
/*
 * RSPN_ID (register symbolic port name) is outstanding. Success advances
 * to RFT_ID registration; errors arm a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
		bfa_fcs_port_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_fcxp_discard(ns->fcxp);
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
310
/*
 * RSPN_ID failed; retry timer running. Expiry re-sends RSPN_ID,
 * offline stops the timer.
 */
static void
bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
				 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
336
/*
 * RFT_ID (register FC4 types) send requested; wait for the frame to go out
 * or cancel the queued fcxp wait if the port goes offline.
 */
static void
bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
				  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RFTID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
359
/*
 * RFT_ID is outstanding. Success advances to RFF_ID (FC4 features);
 * errors arm a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		/*
		 * Now move to register FC4 Features
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
		bfa_fcs_port_ns_send_rff_id(ns, NULL);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
396
/*
 * RFT_ID failed; retry timer running. Expiry re-sends RFT_ID,
 * offline stops the timer.
 */
static void
bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id);
		bfa_fcs_port_ns_send_rft_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
419
/*
 * RFF_ID (register FC4 features) send requested; wait for the frame to go
 * out or cancel the queued fcxp wait if the port goes offline.
 */
static void
bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
				  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RFFID_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
442
/*
 * RFF_ID is outstanding. On success, NS registration is done: either fetch
 * boot targets (min-cfg mode), kick off GID_FT discovery (initiator mode),
 * or go straight online (target mode). Errors arm a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we donot initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_port_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
			return;
		}

		/*
		 * If the port role is Initiator Mode issue NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		} else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_port_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
500
/*
 * RFF_ID failed; retry timer running. Expiry re-sends RFF_ID,
 * offline stops the timer.
 */
static void
bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id);
		bfa_fcs_port_ns_send_rff_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
/*
 * GID_FT (query all FCP devices) send requested; wait for the frame to go
 * out or cancel the queued fcxp wait if the port goes offline.
 */
static void
bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
				  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_GIDFT_SENT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
				       &ns->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
545
/*
 * GID_FT query is outstanding. Success completes NS bring-up (online);
 * errors arm a delayed retry.
 */
static void
bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * TBD: for certain reject codes, we don't need to retry
		 */
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
581
/*
 * GID_FT failed; retry timer running. Expiry re-sends GID_FT,
 * offline stops the timer.
 */
static void
bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
				enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
		bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_sm_fault(ns->port->fcs, event);
	}
}
604
605static void
606bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
607 enum vport_ns_event event)
608{
609 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
610 bfa_trc(ns->port->fcs, event);
611
612 switch (event) {
613 case NSSM_EVENT_PORT_OFFLINE:
614 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
615 break;
616
617 case NSSM_EVENT_NS_QUERY:
618 /*
619 * If the port role is Initiator Mode issue NS query.
620 * If it is Target Mode, skip this and go to online.
621 */
622 if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
623 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
624 bfa_fcs_port_ns_send_gid_ft(ns, NULL);
625 };
626 break;
627
628 default:
629 bfa_sm_fault(ns->port->fcs, event);
630 }
631}
632
633
634
635/**
636 * ns_pvt Nameserver local functions
637 */
638
/*
 * Build and send a PLOGI to the well-known name server address.
 * If no fcxp is available, queue on the fcxp wait queue and retry from the
 * allocation callback (fcxp_alloced non-NULL on that path).
 */
static void
bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		port->stats.ns_plogi_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_plogi, ns);
		return;
	}
	ns->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_os_hton3b(FC_NAME_SERVER),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.pwwn, port->port_cfg.nwwn,
			     bfa_fcport_get_maxfrsize(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_ELS_TOV);
	port->stats.ns_plogi_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
}
672
/*
 * PLOGI response handler: classify the ELS reply (ACC / LS_RJT / other)
 * and drive the state machine with RSP_OK or RSP_ERROR accordingly.
 */
static void
bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		/* a valid ACC must carry at least a full PLOGI payload */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
730
731/**
732 * Register the symbolic port name.
733 */
734static void
735bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
736{
737 struct bfa_fcs_port_ns_s *ns = ns_cbarg;
738 struct bfa_fcs_port_s *port = ns->port;
739 struct fchs_s fchs;
740 int len;
741 struct bfa_fcxp_s *fcxp;
742 u8 symbl[256];
743 u8 *psymbl = &symbl[0];
744
745 bfa_os_memset(symbl, 0, sizeof(symbl));
746
747 bfa_trc(port->fcs, port->port_cfg.pwwn);
748
749 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
750 if (!fcxp) {
751 port->stats.ns_rspnid_alloc_wait++;
752 bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
753 bfa_fcs_port_ns_send_rspn_id, ns);
754 return;
755 }
756 ns->fcxp = fcxp;
757
758 /*
759 * for V-Port, form a Port Symbolic Name
760 */
761 if (port->vport) {
762 /**For Vports,
763 * we append the vport's port symbolic name to that of the base port.
764 */
765
766 strncpy((char *)psymbl,
767 (char *)
768 &(bfa_fcs_port_get_psym_name
769 (bfa_fcs_get_base_port(port->fcs))),
770 strlen((char *)
771 &bfa_fcs_port_get_psym_name(bfa_fcs_get_base_port
772 (port->fcs))));
773
774 /*
775 * Ensure we have a null terminating string.
776 */
777 ((char *)
778 psymbl)[strlen((char *)
779 &bfa_fcs_port_get_psym_name
780 (bfa_fcs_get_base_port(port->fcs)))] = 0;
781
782 strncat((char *)psymbl,
783 (char *)&(bfa_fcs_port_get_psym_name(port)),
784 strlen((char *)&bfa_fcs_port_get_psym_name(port)));
785 } else {
786 psymbl = (u8 *) &(bfa_fcs_port_get_psym_name(port));
787 }
788
789 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
790 bfa_fcs_port_get_fcid(port), 0, psymbl);
791
792 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
793 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response,
794 (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);
795
796 port->stats.ns_rspnid_sent++;
797
798 bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
799}
800
/*
 * RSPN_ID response handler: accept -> RSP_OK, anything else -> RSP_ERROR.
 */
static void
bfa_fcs_port_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				 void *cbarg, bfa_status_t req_status,
				 u32 rsp_len, u32 resid_len,
				 struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rspnid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rspnid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rspnid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
837
838/**
839 * Register FC4-Types
840 * TBD, Need to retrieve this from the OS driver, in case IPFC is enabled ?
841 */
/*
 * Build and send an RFT_ID (register FC4 types) request using the port's
 * configured roles; queue on the fcxp wait queue if allocation fails.
 */
static void
bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		port->stats.ns_rftid_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_rft_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_port_get_fcid(port), 0,
			     port->port_cfg.roles);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rftid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
}
873
/*
 * RFT_ID response handler: accept -> RSP_OK, anything else -> RSP_ERROR.
 */
static void
bfa_fcs_port_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rftid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rftid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rftid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
910
911/**
912* Register FC4-Features : Should be done after RFT_ID
913 */
/*
 * Build and send an RFF_ID (register FC4 features) request. The FCP
 * feature bits are derived from the port mode (initiator or target).
 * Should be done after RFT_ID.
 */
static void
bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;
	u8 fc4_ftrs = 0;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		port->stats.ns_rffid_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_rff_id, ns);
		return;
	}
	ns->fcxp = fcxp;

	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
	else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port))
		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET;

	len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			     bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP,
			     fc4_ftrs);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response,
		      (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV);

	port->stats.ns_rffid_sent++;
	bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
}
951
/*
 * RFF_ID response handler. A "command not supported" reject is treated
 * as success (older switches do not implement RFF_ID); all other rejects
 * trigger a retry via RSP_ERROR.
 */
static void
bfa_fcs_port_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rffid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rffid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rffid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);

	if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
		/*
		 * if this command is not supported, we don't retry
		 */
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
	} else {
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
996
997/**
998 * Query Fabric for FC4-Types Devices.
999 *
1000* TBD : Need to use a local (FCS private) response buffer, since the response
1001 * can be larger than 2K.
1002 */
/*
 * Query Fabric for FC4-Type (FCP) devices via GID_FT.
 *
 * TBD : Need to use a local (FCS private) response buffer, since the
 * response can be larger than 2K.
 */
static void
bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_ns_s *ns = ns_cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		port->stats.ns_gidft_alloc_wait++;
		bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
				    bfa_fcs_port_ns_send_gid_ft, ns);
		return;
	}
	ns->fcxp = fcxp;

	/*
	 * This query is only initiated for FCP initiator mode.
	 */
	len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid,
			      FC_TYPE_FCP);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response,
		      (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa),
		      FC_FCCT_TOV);

	port->stats.ns_gidft_sent++;

	bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
}
1038
/*
 * GID_FT response handler. On accept, walk the returned PID list and
 * create/update rports. A reject with "FC4 type not registered" means no
 * FCP devices exist in the fabric and is treated as success.
 */
static void
bfa_fcs_port_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s *cthdr = NULL;
	u32 n_pids;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_gidft_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	if (resid_len != 0) {
		/*
		 * TBD : we will need to allocate a larger buffer & retry the
		 * command
		 */
		/*
		 * NOTE(review): returning without posting any event leaves
		 * the state machine parked in sm_gid_ft until the port goes
		 * offline — confirm this truncated-response path is intended
		 * to stall rather than retry.
		 */
		bfa_trc(port->fcs, rsp_len);
		bfa_trc(port->fcs, resid_len);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	switch (cthdr->cmd_rsp_code) {

	case CT_RSP_ACCEPT:

		port->stats.ns_gidft_accepts++;
		/* PID entries follow immediately after the CT header */
		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
		bfa_trc(port->fcs, n_pids);
		bfa_fcs_port_ns_process_gidft_pids(port,
						   (u32 *) (cthdr + 1),
						   n_pids);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case CT_RSP_REJECT:

		/*
		 * Check the reason code & explanation.
		 * There may not have been any FC4 devices in the fabric
		 */
		port->stats.ns_gidft_rejects++;
		bfa_trc(port->fcs, cthdr->reason_code);
		bfa_trc(port->fcs, cthdr->exp_code);

		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {

			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		} else {
			/*
			 * for all other errors, retry
			 */
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		}
		break;

	default:
		port->stats.ns_gidft_unknown_rsp++;
		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
1116
1117/**
1118 * This routine will be called by bfa_timer on timer timeouts.
1119 *
1120 * param[in] port - pointer to bfa_fcs_port_t.
1121 *
1122 * return
1123 * void
1124 *
1125* Special Considerations:
1126 *
1127 * note
1128 */
1129static void
1130bfa_fcs_port_ns_timeout(void *arg)
1131{
1132 struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)arg;
1133
1134 ns->port->stats.ns_timeouts++;
1135 bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
1136}
1137
1138/*
1139 * Process the PID list in GID_FT response
1140 */
1141static void
1142bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port,
1143 u32 *pid_buf, u32 n_pids)
1144{
1145 struct fcgs_gidft_resp_s *gidft_entry;
1146 struct bfa_fcs_rport_s *rport;
1147 u32 ii;
1148
1149 for (ii = 0; ii < n_pids; ii++) {
1150 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
1151
1152 if (gidft_entry->pid == port->pid)
1153 continue;
1154
1155 /*
1156 * Check if this rport already exists
1157 */
1158 rport = bfa_fcs_port_get_rport_by_pid(port, gidft_entry->pid);
1159 if (rport == NULL) {
1160 /*
1161 * this is a new device. create rport
1162 */
1163 rport = bfa_fcs_rport_create(port, gidft_entry->pid);
1164 } else {
1165 /*
1166 * this rport already exists
1167 */
1168 bfa_fcs_rport_scn(rport);
1169 }
1170
1171 bfa_trc(port->fcs, gidft_entry->pid);
1172
1173 /*
1174 * if the last entry bit is set, bail out.
1175 */
1176 if (gidft_entry->last)
1177 return;
1178 }
1179}
1180
1181/**
1182 * fcs_ns_public FCS nameserver public interfaces
1183 */
1184
1185/*
1186 * Functions called by port/fab.
1187 * These will send relevant Events to the ns state machine.
1188 */
1189void
1190bfa_fcs_port_ns_init(struct bfa_fcs_port_s *port)
1191{
1192 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1193
1194 ns->port = port;
1195 bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
1196}
1197
1198void
1199bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *port)
1200{
1201 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1202
1203 ns->port = port;
1204 bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
1205}
1206
1207void
1208bfa_fcs_port_ns_online(struct bfa_fcs_port_s *port)
1209{
1210 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1211
1212 ns->port = port;
1213 bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
1214}
1215
1216void
1217bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port)
1218{
1219 struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
1220
1221 bfa_trc(port->fcs, port->pid);
1222 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
1223}
1224
1225static void
1226bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port)
1227{
1228
1229 struct bfa_fcs_rport_s *rport;
1230 u8 nwwns;
1231 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1232 int ii;
1233
1234 bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
1235
1236 for (ii = 0; ii < nwwns; ++ii) {
1237 rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
1238 bfa_assert(rport);
1239 }
1240}
1241
1242
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c
deleted file mode 100644
index fcb8864d3276..000000000000
--- a/drivers/scsi/bfa/plog.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa_os_inc.h>
19#include <cs/bfa_plog.h>
20#include <cs/bfa_debug.h>
21
22static int
23plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
24{
25 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
26 && (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
27 return 1;
28
29 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT)
30 && (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
31 return 1;
32
33 return 0;
34}
35
/*
 * Append a record to the port-log ring buffer. Invalid records assert;
 * when the tail catches up with the head, the oldest record is dropped
 * by advancing the head.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* timestamp the stored copy, not the caller's record */
	pl_recp->tv = BFA_TRC_TS(plog);
	BFA_PL_LOG_REC_INCR(plog->tail);

	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
62
/*
 * Initialize a port log: zero the structure, stamp the signature,
 * reset the ring indices, and enable logging.
 */
void
bfa_plog_init(struct bfa_plog_s *plog)
{
	bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}
72
/*
 * Log a string record. The string is truncated to the record's string
 * field and explicitly NUL-terminated before being added.
 */
void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
	     enum bfa_plog_eid event,
	     u16 misc, char *log_str)
{
	struct bfa_plog_rec_s lp;

	if (plog->plog_enabled) {
		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strncpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ - 1);
		/* strncpy may not terminate; force it */
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}
92
/*
 * Log an integer-array record; the count is clamped to the record's
 * integer-log capacity.
 */
void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			bfa_os_assign(lp.log_entry.int_log[i],
				      intarr[i]);

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}
120
121void
122bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
123 enum bfa_plog_eid event,
124 u16 misc, struct fchs_s *fchdr)
125{
126 struct bfa_plog_rec_s lp;
127 u32 *tmp_int = (u32 *) fchdr;
128 u32 ints[BFA_PL_INT_LOG_SZ];
129
130 if (plog->plog_enabled) {
131 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
132
133 ints[0] = tmp_int[0];
134 ints[1] = tmp_int[1];
135 ints[2] = tmp_int[4];
136
137 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
138 }
139}
140
141void
142bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
143 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
144 u32 pld_w0)
145{
146 struct bfa_plog_rec_s lp;
147 u32 *tmp_int = (u32 *) fchdr;
148 u32 ints[BFA_PL_INT_LOG_SZ];
149
150 if (plog->plog_enabled) {
151 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
152
153 ints[0] = tmp_int[0];
154 ints[1] = tmp_int[1];
155 ints[2] = tmp_int[4];
156 ints[3] = pld_w0;
157
158 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
159 }
160}
161
162void
163bfa_plog_clear(struct bfa_plog_s *plog)
164{
165 plog->head = plog->tail = 0;
166}
167
/*
 * Enable port logging; subsequent bfa_plog_* calls will record entries.
 */
void
bfa_plog_enable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 1;
}
173
/*
 * Disable port logging; subsequent bfa_plog_* calls become no-ops.
 */
void
bfa_plog_disable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 0;
}
179
/*
 * Return whether port logging is currently enabled.
 */
bfa_boolean_t
bfa_plog_get_setting(struct bfa_plog_s *plog)
{
	return (bfa_boolean_t)plog->plog_enabled;
}
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
deleted file mode 100644
index 15e0c470afd9..000000000000
--- a/drivers/scsi/bfa/rport_api.c
+++ /dev/null
@@ -1,185 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <bfa.h>
18#include <bfa_svc.h>
19#include "fcs_vport.h"
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_trcmod.h"
23
24BFA_TRC_FILE(FCS, RPORT_API);
25
26/**
27 * rport_api.c Remote port implementation.
28 */
29
30/**
31 * fcs_rport_api FCS rport API.
32 */
33
34/**
35 * Direct API to add a target by port wwn. This interface is used, for
36 * example, by bios when target pwwn is known from boot lun configuration.
37 */
38bfa_status_t
39bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn,
40 struct bfa_fcs_rport_s *rport,
41 struct bfad_rport_s *rport_drv)
42{
43 bfa_trc(port->fcs, *pwwn);
44
45 return BFA_STATUS_OK;
46}
47
48/**
49 * Direct API to remove a target and its associated resources. This
50 * interface is used, for example, by vmware driver to remove target
51 * ports from the target list for a VM.
52 */
53bfa_status_t
54bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
55{
56
57 struct bfa_fcs_rport_s *rport;
58
59 bfa_trc(rport_in->fcs, rport_in->pwwn);
60
61 rport = bfa_fcs_port_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
62 if (rport == NULL) {
63 /*
64 * TBD Error handling
65 */
66 bfa_trc(rport_in->fcs, rport_in->pid);
67 return BFA_STATUS_UNKNOWN_RWWN;
68 }
69
70 /*
71 * TBD if this remote port is online, send a logo
72 */
73 return BFA_STATUS_OK;
74
75}
76
77/**
78 * Remote device status for display/debug.
79 */
80void
81bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
82 struct bfa_rport_attr_s *rport_attr)
83{
84 struct bfa_rport_qos_attr_s qos_attr;
85 struct bfa_fcs_port_s *port = rport->port;
86 enum bfa_pport_speed rport_speed = rport->rpf.rpsc_speed;
87
88 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
89
90 rport_attr->pid = rport->pid;
91 rport_attr->pwwn = rport->pwwn;
92 rport_attr->nwwn = rport->nwwn;
93 rport_attr->cos_supported = rport->fc_cos;
94 rport_attr->df_sz = rport->maxfrsize;
95 rport_attr->state = bfa_fcs_rport_get_state(rport);
96 rport_attr->fc_cos = rport->fc_cos;
97 rport_attr->cisc = rport->cisc;
98 rport_attr->scsi_function = rport->scsi_function;
99 rport_attr->curr_speed = rport->rpf.rpsc_speed;
100 rport_attr->assigned_speed = rport->rpf.assigned_speed;
101
102 bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
103 rport_attr->qos_attr = qos_attr;
104
105 rport_attr->trl_enforced = BFA_FALSE;
106
107 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
108 if (rport_speed == BFA_PPORT_SPEED_UNKNOWN) {
109 /* Use default ratelim speed setting */
110 rport_speed =
111 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
112 }
113 if (rport_speed < bfa_fcs_port_get_rport_max_speed(port))
114 rport_attr->trl_enforced = BFA_TRUE;
115 }
116
117 /*
118 * TODO
119 * rport->symname
120 */
121}
122
123/**
124 * Per remote device statistics.
125 */
126void
127bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
128 struct bfa_rport_stats_s *stats)
129{
130 *stats = rport->stats;
131}
132
133void
134bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
135{
136 bfa_os_memset((char *)&rport->stats, 0,
137 sizeof(struct bfa_rport_stats_s));
138}
139
140struct bfa_fcs_rport_s *
141bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
142{
143 struct bfa_fcs_rport_s *rport;
144
145 rport = bfa_fcs_port_get_rport_by_pwwn(port, rpwwn);
146 if (rport == NULL) {
147 /*
148 * TBD Error handling
149 */
150 }
151
152 return rport;
153}
154
155struct bfa_fcs_rport_s *
156bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_port_s *port, wwn_t rnwwn)
157{
158 struct bfa_fcs_rport_s *rport;
159
160 rport = bfa_fcs_port_get_rport_by_nwwn(port, rnwwn);
161 if (rport == NULL) {
162 /*
163 * TBD Error handling
164 */
165 }
166
167 return rport;
168}
169
170/*
171 * This API is to set the Rport's speed. Should be used when RPSC is not
172 * supported by the rport.
173 */
174void
175bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
176 enum bfa_pport_speed speed)
177{
178 rport->rpf.assigned_speed = speed;
179
180 /* Set this speed in f/w only if the RPSC speed is not available */
181 if (rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN)
182 bfa_rport_speed(rport->bfa_rport, speed);
183}
184
185
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
deleted file mode 100644
index f2a9361ce9a4..000000000000
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ /dev/null
@@ -1,379 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * rport_ftrs.c Remote port features (RPF) implementation.
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include "fcbuild.h"
25#include "fcs_rport.h"
26#include "fcs_lport.h"
27#include "fcs_trcmod.h"
28#include "fcs_fcxp.h"
29#include "fcs.h"
30
31BFA_TRC_FILE(FCS, RPORT_FTRS);
32
33#define BFA_FCS_RPF_RETRIES (3)
34#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
35
36static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp, void *cbarg,
40 bfa_status_t req_status, u32 rsp_len,
41 u32 resid_len,
42 struct fchs_s *rsp_fchs);
43static void bfa_fcs_rpf_timeout(void *arg);
44
45/**
46 * fcs_rport_ftrs_sm FCS rport state machine events
47 */
48
49enum rpf_event {
50 RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
51 RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
52 RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
53 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
54 RPFSM_EVENT_RPSC_COMP = 5,
55 RPFSM_EVENT_RPSC_FAIL = 6,
56 RPFSM_EVENT_RPSC_ERROR = 7,
57};
58
59static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
60 enum rpf_event event);
61static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
62 enum rpf_event event);
63static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
64 enum rpf_event event);
65static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
66 enum rpf_event event);
67static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
68 enum rpf_event event);
69static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
70 enum rpf_event event);
71
72static void
73bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
74{
75 struct bfa_fcs_rport_s *rport = rpf->rport;
76 struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
77
78 bfa_trc(rport->fcs, rport->pwwn);
79 bfa_trc(rport->fcs, rport->pid);
80 bfa_trc(rport->fcs, event);
81
82 switch (event) {
83 case RPFSM_EVENT_RPORT_ONLINE:
84 /* Send RPSC2 to a Brocade fabric only. */
85 if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
86 ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
87 (bfa_fcs_fabric_get_switch_oui(fabric) ==
88 BFA_FCS_BRCD_SWITCH_OUI))) {
89 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
90 rpf->rpsc_retries = 0;
91 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
92 }
93 break;
94
95 case RPFSM_EVENT_RPORT_OFFLINE:
96 break;
97
98 default:
99 bfa_sm_fault(rport->fcs, event);
100 }
101}
102
103static void
104bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
105{
106 struct bfa_fcs_rport_s *rport = rpf->rport;
107
108 bfa_trc(rport->fcs, event);
109
110 switch (event) {
111 case RPFSM_EVENT_FCXP_SENT:
112 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
113 break;
114
115 case RPFSM_EVENT_RPORT_OFFLINE:
116 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
117 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
118 rpf->rpsc_retries = 0;
119 break;
120
121 default:
122 bfa_sm_fault(rport->fcs, event);
123 }
124}
125
126static void
127bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
128{
129 struct bfa_fcs_rport_s *rport = rpf->rport;
130
131 bfa_trc(rport->fcs, rport->pid);
132 bfa_trc(rport->fcs, event);
133
134 switch (event) {
135 case RPFSM_EVENT_RPSC_COMP:
136 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
137 /* Update speed info in f/w via BFA */
138 if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN)
139 bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
140 else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN)
141 bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
142 break;
143
144 case RPFSM_EVENT_RPSC_FAIL:
145 /* RPSC not supported by rport */
146 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
147 break;
148
149 case RPFSM_EVENT_RPSC_ERROR:
150 /* need to retry...delayed a bit. */
151 if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
152 bfa_timer_start(rport->fcs->bfa, &rpf->timer,
153 bfa_fcs_rpf_timeout, rpf,
154 BFA_FCS_RPF_RETRY_TIMEOUT);
155 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
156 } else {
157 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
158 }
159 break;
160
161 case RPFSM_EVENT_RPORT_OFFLINE:
162 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
163 bfa_fcxp_discard(rpf->fcxp);
164 rpf->rpsc_retries = 0;
165 break;
166
167 default:
168 bfa_sm_fault(rport->fcs, event);
169 }
170}
171
172static void
173bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
174{
175 struct bfa_fcs_rport_s *rport = rpf->rport;
176
177 bfa_trc(rport->fcs, rport->pid);
178 bfa_trc(rport->fcs, event);
179
180 switch (event) {
181 case RPFSM_EVENT_TIMEOUT:
182 /* re-send the RPSC */
183 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
184 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
185 break;
186
187 case RPFSM_EVENT_RPORT_OFFLINE:
188 bfa_timer_stop(&rpf->timer);
189 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
190 rpf->rpsc_retries = 0;
191 break;
192
193 default:
194 bfa_sm_fault(rport->fcs, event);
195 }
196}
197
198static void
199bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
200{
201 struct bfa_fcs_rport_s *rport = rpf->rport;
202
203 bfa_trc(rport->fcs, rport->pwwn);
204 bfa_trc(rport->fcs, rport->pid);
205 bfa_trc(rport->fcs, event);
206
207 switch (event) {
208 case RPFSM_EVENT_RPORT_OFFLINE:
209 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
210 rpf->rpsc_retries = 0;
211 break;
212
213 default:
214 bfa_sm_fault(rport->fcs, event);
215 }
216}
217
218static void
219bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
220{
221 struct bfa_fcs_rport_s *rport = rpf->rport;
222
223 bfa_trc(rport->fcs, rport->pwwn);
224 bfa_trc(rport->fcs, rport->pid);
225 bfa_trc(rport->fcs, event);
226
227 switch (event) {
228 case RPFSM_EVENT_RPORT_ONLINE:
229 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
230 bfa_fcs_rpf_send_rpsc2(rpf, NULL);
231 break;
232
233 case RPFSM_EVENT_RPORT_OFFLINE:
234 break;
235
236 default:
237 bfa_sm_fault(rport->fcs, event);
238 }
239}
240/**
241 * Called when Rport is created.
242 */
243void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
244{
245 struct bfa_fcs_rpf_s *rpf = &rport->rpf;
246
247 bfa_trc(rport->fcs, rport->pid);
248 rpf->rport = rport;
249
250 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
251}
252
253/**
254 * Called when Rport becomes online
255 */
256void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
257{
258 bfa_trc(rport->fcs, rport->pid);
259
260 if (__fcs_min_cfg(rport->port->fcs))
261 return;
262
263 if (bfa_fcs_fabric_is_switched(rport->port->fabric))
264 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
265}
266
267/**
268 * Called when Rport becomes offline
269 */
270void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
271{
272 bfa_trc(rport->fcs, rport->pid);
273
274 if (__fcs_min_cfg(rport->port->fcs))
275 return;
276
277 rport->rpf.rpsc_speed = 0;
278 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
279}
280
281static void
282bfa_fcs_rpf_timeout(void *arg)
283{
284 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
285 struct bfa_fcs_rport_s *rport = rpf->rport;
286
287 bfa_trc(rport->fcs, rport->pid);
288 bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
289}
290
291static void
292bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
293{
294 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
295 struct bfa_fcs_rport_s *rport = rpf->rport;
296 struct bfa_fcs_port_s *port = rport->port;
297 struct fchs_s fchs;
298 int len;
299 struct bfa_fcxp_s *fcxp;
300
301 bfa_trc(rport->fcs, rport->pwwn);
302
303 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
304 if (!fcxp) {
305 bfa_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
306 bfa_fcs_rpf_send_rpsc2, rpf);
307 return;
308 }
309 rpf->fcxp = fcxp;
310
311 len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
312 bfa_fcs_port_get_fcid(port), &rport->pid, 1);
313
314 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
315 FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
316 rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
317 rport->stats.rpsc_sent++;
318 bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
319
320}
321
322static void
323bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
324 bfa_status_t req_status, u32 rsp_len,
325 u32 resid_len, struct fchs_s *rsp_fchs)
326{
327 struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
328 struct bfa_fcs_rport_s *rport = rpf->rport;
329 struct fc_ls_rjt_s *ls_rjt;
330 struct fc_rpsc2_acc_s *rpsc2_acc;
331 u16 num_ents;
332
333 bfa_trc(rport->fcs, req_status);
334
335 if (req_status != BFA_STATUS_OK) {
336 bfa_trc(rport->fcs, req_status);
337 if (req_status == BFA_STATUS_ETIMER)
338 rport->stats.rpsc_failed++;
339 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
340 return;
341 }
342
343 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
344 if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
345 rport->stats.rpsc_accs++;
346 num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
347 bfa_trc(rport->fcs, num_ents);
348 if (num_ents > 0) {
349 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
350 bfa_trc(rport->fcs,
351 bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
352 bfa_trc(rport->fcs,
353 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
354 bfa_trc(rport->fcs,
355 bfa_os_ntohs(rpsc2_acc->port_info[0].index));
356 bfa_trc(rport->fcs,
357 rpsc2_acc->port_info[0].type);
358
359 if (rpsc2_acc->port_info[0].speed == 0) {
360 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
361 return;
362 }
363
364 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
365 bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
366
367 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
368 }
369 } else {
370 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
371 bfa_trc(rport->fcs, ls_rjt->reason_code);
372 bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
373 rport->stats.rpsc_rejects++;
374 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
375 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
376 else
377 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
378 }
379}
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
deleted file mode 100644
index 8a60129e6307..000000000000
--- a/drivers/scsi/bfa/scn.c
+++ /dev/null
@@ -1,482 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include "fcs_lport.h"
21#include "fcs_rport.h"
22#include "fcs_ms.h"
23#include "fcs_trcmod.h"
24#include "fcs_fcxp.h"
25#include "fcs.h"
26#include "lport_priv.h"
27
28BFA_TRC_FILE(FCS, SCN);
29
30#define FC_QOS_RSCN_EVENT 0x0c
31#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
32
33/*
34 * forward declarations
35 */
36static void bfa_fcs_port_scn_send_scr(void *scn_cbarg,
37 struct bfa_fcxp_s *fcxp_alloced);
38static void bfa_fcs_port_scn_scr_response(void *fcsarg,
39 struct bfa_fcxp_s *fcxp,
40 void *cbarg,
41 bfa_status_t req_status,
42 u32 rsp_len,
43 u32 resid_len,
44 struct fchs_s *rsp_fchs);
45static void bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
46 struct fchs_s *rx_fchs);
47static void bfa_fcs_port_scn_timeout(void *arg);
48
49/**
50 * fcs_scm_sm FCS SCN state machine
51 */
52
53/**
54 * VPort SCN State Machine events
55 */
56enum port_scn_event {
57 SCNSM_EVENT_PORT_ONLINE = 1,
58 SCNSM_EVENT_PORT_OFFLINE = 2,
59 SCNSM_EVENT_RSP_OK = 3,
60 SCNSM_EVENT_RSP_ERROR = 4,
61 SCNSM_EVENT_TIMEOUT = 5,
62 SCNSM_EVENT_SCR_SENT = 6,
63};
64
65static void bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
66 enum port_scn_event event);
67static void bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
68 enum port_scn_event event);
69static void bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
70 enum port_scn_event event);
71static void bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
72 enum port_scn_event event);
73static void bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
74 enum port_scn_event event);
75
76/**
77 * Starting state - awaiting link up.
78 */
79static void
80bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
81 enum port_scn_event event)
82{
83 switch (event) {
84 case SCNSM_EVENT_PORT_ONLINE:
85 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
86 bfa_fcs_port_scn_send_scr(scn, NULL);
87 break;
88
89 case SCNSM_EVENT_PORT_OFFLINE:
90 break;
91
92 default:
93 bfa_sm_fault(scn->port->fcs, event);
94 }
95}
96
97static void
98bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
99 enum port_scn_event event)
100{
101 switch (event) {
102 case SCNSM_EVENT_SCR_SENT:
103 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr);
104 break;
105
106 case SCNSM_EVENT_PORT_OFFLINE:
107 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
108 bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
109 break;
110
111 default:
112 bfa_sm_fault(scn->port->fcs, event);
113 }
114}
115
116static void
117bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
118 enum port_scn_event event)
119{
120 struct bfa_fcs_port_s *port = scn->port;
121
122 switch (event) {
123 case SCNSM_EVENT_RSP_OK:
124 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_online);
125 break;
126
127 case SCNSM_EVENT_RSP_ERROR:
128 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr_retry);
129 bfa_timer_start(port->fcs->bfa, &scn->timer,
130 bfa_fcs_port_scn_timeout, scn,
131 BFA_FCS_RETRY_TIMEOUT);
132 break;
133
134 case SCNSM_EVENT_PORT_OFFLINE:
135 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
136 bfa_fcxp_discard(scn->fcxp);
137 break;
138
139 default:
140 bfa_sm_fault(scn->port->fcs, event);
141 }
142}
143
144static void
145bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
146 enum port_scn_event event)
147{
148 switch (event) {
149 case SCNSM_EVENT_TIMEOUT:
150 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
151 bfa_fcs_port_scn_send_scr(scn, NULL);
152 break;
153
154 case SCNSM_EVENT_PORT_OFFLINE:
155 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
156 bfa_timer_stop(&scn->timer);
157 break;
158
159 default:
160 bfa_sm_fault(scn->port->fcs, event);
161 }
162}
163
164static void
165bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
166 enum port_scn_event event)
167{
168 switch (event) {
169 case SCNSM_EVENT_PORT_OFFLINE:
170 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
171 break;
172
173 default:
174 bfa_sm_fault(scn->port->fcs, event);
175 }
176}
177
178
179
180/**
181 * fcs_scn_private FCS SCN private functions
182 */
183
184/**
185 * This routine will be called to send a SCR command.
186 */
187static void
188bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
189{
190 struct bfa_fcs_port_scn_s *scn = scn_cbarg;
191 struct bfa_fcs_port_s *port = scn->port;
192 struct fchs_s fchs;
193 int len;
194 struct bfa_fcxp_s *fcxp;
195
196 bfa_trc(port->fcs, port->pid);
197 bfa_trc(port->fcs, port->port_cfg.pwwn);
198
199 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
200 if (!fcxp) {
201 bfa_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
202 bfa_fcs_port_scn_send_scr, scn);
203 return;
204 }
205 scn->fcxp = fcxp;
206
207 /*
208 * Handle VU registrations for Base port only
209 */
210 if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
211 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
212 bfa_lps_is_brcd_fabric(port->fabric->lps),
213 port->pid, 0);
214 } else {
215 len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE,
216 port->pid, 0);
217 }
218
219 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
220 FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response,
221 (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
222
223 bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
224}
225
226static void
227bfa_fcs_port_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
228 void *cbarg, bfa_status_t req_status,
229 u32 rsp_len, u32 resid_len,
230 struct fchs_s *rsp_fchs)
231{
232 struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)cbarg;
233 struct bfa_fcs_port_s *port = scn->port;
234 struct fc_els_cmd_s *els_cmd;
235 struct fc_ls_rjt_s *ls_rjt;
236
237 bfa_trc(port->fcs, port->port_cfg.pwwn);
238
239 /*
240 * Sanity Checks
241 */
242 if (req_status != BFA_STATUS_OK) {
243 bfa_trc(port->fcs, req_status);
244 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
245 return;
246 }
247
248 els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
249
250 switch (els_cmd->els_code) {
251
252 case FC_ELS_ACC:
253 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
254 break;
255
256 case FC_ELS_LS_RJT:
257
258 ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
259
260 bfa_trc(port->fcs, ls_rjt->reason_code);
261 bfa_trc(port->fcs, ls_rjt->reason_code_expl);
262
263 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
264 break;
265
266 default:
267 bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
268 }
269}
270
271/*
272 * Send a LS Accept
273 */
274static void
275bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
276 struct fchs_s *rx_fchs)
277{
278 struct fchs_s fchs;
279 struct bfa_fcxp_s *fcxp;
280 struct bfa_rport_s *bfa_rport = NULL;
281 int len;
282
283 bfa_trc(port->fcs, rx_fchs->s_id);
284
285 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
286 if (!fcxp)
287 return;
288
289 len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
290 bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);
291
292 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
293 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
294 FC_MAX_PDUSZ, 0);
295}
296
297/**
298 * This routine will be called by bfa_timer on timer timeouts.
299 *
300 * param[in] vport - pointer to bfa_fcs_port_t.
301 * param[out] vport_status - pointer to return vport status in
302 *
303 * return
304 * void
305 *
306* Special Considerations:
307 *
308 * note
309 */
310static void
311bfa_fcs_port_scn_timeout(void *arg)
312{
313 struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)arg;
314
315 bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
316}
317
318
319
320/**
321 * fcs_scn_public FCS state change notification public interfaces
322 */
323
324/*
325 * Functions called by port/fab
326 */
327void
328bfa_fcs_port_scn_init(struct bfa_fcs_port_s *port)
329{
330 struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
331
332 scn->port = port;
333 bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
334}
335
336void
337bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *port)
338{
339 struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
340
341 scn->port = port;
342 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
343}
344
345void
346bfa_fcs_port_scn_online(struct bfa_fcs_port_s *port)
347{
348 struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
349
350 scn->port = port;
351 bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
352}
353
354static void
355bfa_fcs_port_scn_portid_rscn(struct bfa_fcs_port_s *port, u32 rpid)
356{
357 struct bfa_fcs_rport_s *rport;
358
359 bfa_trc(port->fcs, rpid);
360
361 /**
362 * If this is an unknown device, then it just came online.
363 * Otherwise let rport handle the RSCN event.
364 */
365 rport = bfa_fcs_port_get_rport_by_pid(port, rpid);
366 if (rport == NULL) {
367 /*
368 * If min cfg mode is enabled, we donot need to
369 * discover any new rports.
370 */
371 if (!__fcs_min_cfg(port->fcs))
372 rport = bfa_fcs_rport_create(port, rpid);
373 } else {
374 bfa_fcs_rport_scn(rport);
375 }
376}
377
378/**
379 * rscn format based PID comparison
380 */
381#define __fc_pid_match(__c0, __c1, __fmt) \
382 (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \
383 (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \
384 ((__c0)[0] == (__c1)[0])) || \
385 (((__fmt) == FC_RSCN_FORMAT_AREA) && \
386 ((__c0)[0] == (__c1)[0]) && \
387 ((__c0)[1] == (__c1)[1])))
388
389static void
390bfa_fcs_port_scn_multiport_rscn(struct bfa_fcs_port_s *port,
391 enum fc_rscn_format format, u32 rscn_pid)
392{
393 struct bfa_fcs_rport_s *rport;
394 struct list_head *qe, *qe_next;
395 u8 *c0, *c1;
396
397 bfa_trc(port->fcs, format);
398 bfa_trc(port->fcs, rscn_pid);
399
400 c0 = (u8 *) &rscn_pid;
401
402 list_for_each_safe(qe, qe_next, &port->rport_q) {
403 rport = (struct bfa_fcs_rport_s *)qe;
404 c1 = (u8 *) &rport->pid;
405 if (__fc_pid_match(c0, c1, format))
406 bfa_fcs_rport_scn(rport);
407 }
408}
409
410void
411bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port, struct fchs_s *fchs,
412 u32 len)
413{
414 struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
415 int num_entries;
416 u32 rscn_pid;
417 bfa_boolean_t nsquery = BFA_FALSE;
418 int i = 0;
419
420 num_entries =
421 (bfa_os_ntohs(rscn->payldlen) -
422 sizeof(u32)) / sizeof(rscn->event[0]);
423
424 bfa_trc(port->fcs, num_entries);
425
426 port->stats.num_rscn++;
427
428 bfa_fcs_port_scn_send_ls_acc(port, fchs);
429
430 for (i = 0; i < num_entries; i++) {
431 rscn_pid = rscn->event[i].portid;
432
433 bfa_trc(port->fcs, rscn->event[i].format);
434 bfa_trc(port->fcs, rscn_pid);
435
436 switch (rscn->event[i].format) {
437 case FC_RSCN_FORMAT_PORTID:
438 if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
439 /*
440 * Ignore this event. f/w would have processed
441 * it
442 */
443 bfa_trc(port->fcs, rscn_pid);
444 } else {
445 port->stats.num_portid_rscn++;
446 bfa_fcs_port_scn_portid_rscn(port, rscn_pid);
447 }
448 break;
449
450 case FC_RSCN_FORMAT_FABRIC:
451 if (rscn->event[i].qualifier ==
452 FC_FABRIC_NAME_RSCN_EVENT) {
453 bfa_fcs_port_ms_fabric_rscn(port);
454 break;
455 }
456 /*
457 * !!!!!!!!! Fall Through !!!!!!!!!!!!!
458 */
459
460 case FC_RSCN_FORMAT_AREA:
461 case FC_RSCN_FORMAT_DOMAIN:
462 nsquery = BFA_TRUE;
463 bfa_fcs_port_scn_multiport_rscn(port,
464 rscn->event[i].format,
465 rscn_pid);
466 break;
467
468 default:
469 bfa_assert(0);
470 nsquery = BFA_TRUE;
471 }
472 }
473
474 /**
475 * If any of area, domain or fabric RSCN is received, do a fresh discovery
476 * to find new devices.
477 */
478 if (nsquery)
479 bfa_fcs_port_ns_query(port);
480}
481
482
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c
deleted file mode 100644
index 391a4790bebd..000000000000
--- a/drivers/scsi/bfa/vfapi.c
+++ /dev/null
@@ -1,292 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * vfapi.c Fabric module implementation.
20 */
21
22#include "fcs_fabric.h"
23#include "fcs_trcmod.h"
24
25BFA_TRC_FILE(FCS, VFAPI);
26
27/**
28 * fcs_vf_api virtual fabrics API
29 */
30
31/**
32 * Enable VF mode.
33 *
34 * @param[in] fcs fcs module instance
35 * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
36 * to use standard default vf_id of 1.
37 *
38 * @retval BFA_STATUS_OK vf mode is enabled
39 * @retval BFA_STATUS_BUSY Port is active. Port must be disabled
40 * before VF mode can be enabled.
41 */
42bfa_status_t
43bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
44{
45 return BFA_STATUS_OK;
46}
47
48/**
49 * Disable VF mode.
50 *
51 * @param[in] fcs fcs module instance
52 *
53 * @retval BFA_STATUS_OK vf mode is disabled
54 * @retval BFA_STATUS_BUSY VFs are present and being used. All
55 * VFs must be deleted before disabling
56 * VF mode.
57 */
58bfa_status_t
59bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
60{
61 return BFA_STATUS_OK;
62}
63
64/**
65 * Create a new VF instance.
66 *
67 * A new VF is created using the given VF configuration. A VF is identified
68 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
69 * a VF is created, VF is automatically started after link initialization
70 * and EVFP exchange is completed.
71 *
72 * param[in] vf - FCS vf data structure. Memory is
73 * allocated by caller (driver)
74 * param[in] fcs - FCS module
75 * param[in] vf_cfg - VF configuration
76 * param[in] vf_drv - Opaque handle back to the driver's
77 * virtual vf structure
78 *
79 * retval BFA_STATUS_OK VF creation is successful
80 * retval BFA_STATUS_FAILED VF creation failed
81 * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
82 */
83bfa_status_t
84bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
85 struct bfa_port_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
86{
87 bfa_trc(fcs, vf_id);
88 return BFA_STATUS_OK;
89}
90
91/**
92 * Use this function to delete a BFA VF object. VF object should
93 * be stopped before this function call.
94 *
95 * param[in] vf - pointer to bfa_vf_t.
96 *
97 * retval BFA_STATUS_OK On vf deletion success
98 * retval BFA_STATUS_BUSY VF is not in a stopped state
99 * retval BFA_STATUS_INPROGRESS VF deletion in in progress
100 */
101bfa_status_t
102bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
103{
104 bfa_trc(vf->fcs, vf->vf_id);
105 return BFA_STATUS_OK;
106}
107
108/**
109 * Start participation in VF. This triggers login to the virtual fabric.
110 *
111 * param[in] vf - pointer to bfa_vf_t.
112 *
113 * return None
114 */
115void
116bfa_fcs_vf_start(bfa_fcs_vf_t *vf)
117{
118 bfa_trc(vf->fcs, vf->vf_id);
119}
120
121/**
122 * Logout with the virtual fabric.
123 *
124 * param[in] vf - pointer to bfa_vf_t.
125 *
126 * retval BFA_STATUS_OK On success.
127 * retval BFA_STATUS_INPROGRESS VF is being stopped.
128 */
129bfa_status_t
130bfa_fcs_vf_stop(bfa_fcs_vf_t *vf)
131{
132 bfa_trc(vf->fcs, vf->vf_id);
133 return BFA_STATUS_OK;
134}
135
136/**
137 * Returns attributes of the given VF.
138 *
139 * param[in] vf pointer to bfa_vf_t.
140 * param[out] vf_attr vf attributes returned
141 *
142 * return None
143 */
144void
145bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
146{
147 bfa_trc(vf->fcs, vf->vf_id);
148}
149
150/**
151 * Return statistics associated with the given vf.
152 *
153 * param[in] vf pointer to bfa_vf_t.
154 * param[out] vf_stats vf statistics returned
155 *
156 * @return None
157 */
158void
159bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
160{
161 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
162 return;
163}
164
165void
166/**
167 * clear statistics associated with the given vf.
168 *
169 * param[in] vf pointer to bfa_vf_t.
170 *
171 * @return None
172 */
173bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
174{
175 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
176 return;
177}
178
179/**
180 * Returns FCS vf structure for a given vf_id.
181 *
182 * param[in] vf_id - VF_ID
183 *
184 * return
185 * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
186 */
187bfa_fcs_vf_t *
188bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
189{
190 bfa_trc(fcs, vf_id);
191 if (vf_id == FC_VF_ID_NULL)
192 return &fcs->fabric;
193
194 /**
195 * @todo vf support
196 */
197
198 return NULL;
199}
200
201/**
202 * Returns driver VF structure for a given FCS vf.
203 *
204 * param[in] vf - pointer to bfa_vf_t
205 *
206 * return Driver VF structure
207 */
208struct bfad_vf_s *
209bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf)
210{
211 bfa_assert(vf);
212 bfa_trc(vf->fcs, vf->vf_id);
213 return vf->vf_drv;
214}
215
216/**
217 * Return the list of VFs configured.
218 *
219 * param[in] fcs fcs module instance
220 * param[out] vf_ids returned list of vf_ids
221 * param[in,out] nvfs in:size of vf_ids array,
222 * out:total elements present,
223 * actual elements returned is limited by the size
224 *
225 * return Driver VF structure
226 */
227void
228bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
229{
230 bfa_trc(fcs, *nvfs);
231}
232
233/**
234 * Return the list of all VFs visible from fabric.
235 *
236 * param[in] fcs fcs module instance
237 * param[out] vf_ids returned list of vf_ids
238 * param[in,out] nvfs in:size of vf_ids array,
239 * out:total elements present,
240 * actual elements returned is limited by the size
241 *
242 * return Driver VF structure
243 */
244void
245bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
246{
247 bfa_trc(fcs, *nvfs);
248}
249
250/**
251 * Return the list of local logical ports present in the given VF.
252 *
253 * param[in] vf vf for which logical ports are returned
254 * param[out] lpwwn returned logical port wwn list
255 * param[in,out] nlports in:size of lpwwn list;
256 * out:total elements present,
257 * actual elements returned is limited by the size
258 *
259 */
260void
261bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
262{
263 struct list_head *qe;
264 struct bfa_fcs_vport_s *vport;
265 int i;
266 struct bfa_fcs_s *fcs;
267
268 if (vf == NULL || lpwwn == NULL || *nlports == 0)
269 return;
270
271 fcs = vf->fcs;
272
273 bfa_trc(fcs, vf->vf_id);
274 bfa_trc(fcs, (u32) *nlports);
275
276 i = 0;
277 lpwwn[i++] = vf->bport.port_cfg.pwwn;
278
279 list_for_each(qe, &vf->vport_q) {
280 if (i >= *nlports)
281 break;
282
283 vport = (struct bfa_fcs_vport_s *) qe;
284 lpwwn[i++] = vport->lport.port_cfg.pwwn;
285 }
286
287 bfa_trc(fcs, i);
288 *nlports = i;
289 return;
290}
291
292
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
deleted file mode 100644
index b378ec79d386..000000000000
--- a/drivers/scsi/bfa/vport.c
+++ /dev/null
@@ -1,903 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * bfa_fcs_vport.c FCS virtual port state machine
20 */
21
22#include <bfa.h>
23#include <bfa_svc.h>
24#include <fcbuild.h>
25#include "fcs_fabric.h"
26#include "fcs_lport.h"
27#include "fcs_vport.h"
28#include "fcs_trcmod.h"
29#include "fcs.h"
30#include <aen/bfa_aen_lport.h>
31
32BFA_TRC_FILE(FCS, VPORT);
33
34#define __vport_fcs(__vp) ((__vp)->lport.fcs)
35#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn)
36#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn)
37#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa)
38#define __vport_fcid(__vp) ((__vp)->lport.pid)
39#define __vport_fabric(__vp) ((__vp)->lport.fabric)
40#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id)
41
42#define BFA_FCS_VPORT_MAX_RETRIES 5
43/*
44 * Forward declarations
45 */
46static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
47static void bfa_fcs_vport_timeout(void *vport_arg);
48static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
49static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
50
51/**
52 * fcs_vport_sm FCS virtual port state machine
53 */
54
/**
 * VPort state machine events.
 */
enum bfa_fcs_vport_event {
	BFA_FCS_VPORT_SM_CREATE = 1,	/* vport create event */
	BFA_FCS_VPORT_SM_DELETE = 2,	/* vport delete event */
	BFA_FCS_VPORT_SM_START = 3,	/* vport start request */
	BFA_FCS_VPORT_SM_STOP = 4,	/* stop: unsupported */
	BFA_FCS_VPORT_SM_ONLINE = 5,	/* fabric online */
	BFA_FCS_VPORT_SM_OFFLINE = 6,	/* fabric offline event */
	BFA_FCS_VPORT_SM_FRMSENT = 7,	/* fdisc/logo sent events */
	BFA_FCS_VPORT_SM_RSP_OK = 8,	/* good response */
	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/* error/bad response */
	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/* delay timer event */
	BFA_FCS_VPORT_SM_DELCOMP = 11,	/* lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/* duplicate wwn error */
	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/* non-retryable failure */
};
73
74static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
75 enum bfa_fcs_vport_event event);
76static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
77 enum bfa_fcs_vport_event event);
78static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
79 enum bfa_fcs_vport_event event);
80static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
81 enum bfa_fcs_vport_event event);
82static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
83 enum bfa_fcs_vport_event event);
84static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
85 enum bfa_fcs_vport_event event);
86static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
87 enum bfa_fcs_vport_event event);
88static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
89 enum bfa_fcs_vport_event event);
90static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
91 enum bfa_fcs_vport_event event);
92static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
93 enum bfa_fcs_vport_event event);
94
95static struct bfa_sm_table_s vport_sm_table[] = {
96 {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
97 {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
98 {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
99 {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
100 {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
101 {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
102 {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
103 {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
104 {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
105 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
106};
107
108/**
109 * Beginning state.
110 */
111static void
112bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
113 enum bfa_fcs_vport_event event)
114{
115 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
116 bfa_trc(__vport_fcs(vport), event);
117
118 switch (event) {
119 case BFA_FCS_VPORT_SM_CREATE:
120 bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
121 bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
122 break;
123
124 default:
125 bfa_sm_fault(__vport_fcs(vport), event);
126 }
127}
128
129/**
130 * Created state - a start event is required to start up the state machine.
131 */
132static void
133bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
134 enum bfa_fcs_vport_event event)
135{
136 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
137 bfa_trc(__vport_fcs(vport), event);
138
139 switch (event) {
140 case BFA_FCS_VPORT_SM_START:
141 if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
142 && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
143 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
144 bfa_fcs_vport_do_fdisc(vport);
145 } else {
146 /**
147 * Fabric is offline or not NPIV capable, stay in
148 * offline state.
149 */
150 vport->vport_stats.fab_no_npiv++;
151 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
152 }
153 break;
154
155 case BFA_FCS_VPORT_SM_DELETE:
156 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
157 bfa_fcs_port_delete(&vport->lport);
158 break;
159
160 case BFA_FCS_VPORT_SM_ONLINE:
161 case BFA_FCS_VPORT_SM_OFFLINE:
162 /**
163 * Ignore ONLINE/OFFLINE events from fabric till vport is started.
164 */
165 break;
166
167 default:
168 bfa_sm_fault(__vport_fcs(vport), event);
169 }
170}
171
172/**
173 * Offline state - awaiting ONLINE event from fabric SM.
174 */
175static void
176bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
177 enum bfa_fcs_vport_event event)
178{
179 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
180 bfa_trc(__vport_fcs(vport), event);
181
182 switch (event) {
183 case BFA_FCS_VPORT_SM_DELETE:
184 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
185 bfa_fcs_port_delete(&vport->lport);
186 break;
187
188 case BFA_FCS_VPORT_SM_ONLINE:
189 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
190 vport->fdisc_retries = 0;
191 bfa_fcs_vport_do_fdisc(vport);
192 break;
193
194 case BFA_FCS_VPORT_SM_OFFLINE:
195 /*
196 * This can happen if the vport couldn't be initialzied due
197 * the fact that the npiv was not enabled on the switch. In
198 * that case we will put the vport in offline state. However,
199 * the link can go down and cause the this event to be sent when
200 * we are already offline. Ignore it.
201 */
202 break;
203
204 default:
205 bfa_sm_fault(__vport_fcs(vport), event);
206 }
207}
208
209/**
210 * FDISC is sent and awaiting reply from fabric.
211 */
212static void
213bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
214 enum bfa_fcs_vport_event event)
215{
216 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
217 bfa_trc(__vport_fcs(vport), event);
218
219 switch (event) {
220 case BFA_FCS_VPORT_SM_DELETE:
221 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
222 bfa_lps_discard(vport->lps);
223 bfa_fcs_port_delete(&vport->lport);
224 break;
225
226 case BFA_FCS_VPORT_SM_OFFLINE:
227 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
228 bfa_lps_discard(vport->lps);
229 break;
230
231 case BFA_FCS_VPORT_SM_RSP_OK:
232 bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
233 bfa_fcs_port_online(&vport->lport);
234 break;
235
236 case BFA_FCS_VPORT_SM_RSP_ERROR:
237 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
238 bfa_timer_start(__vport_bfa(vport), &vport->timer,
239 bfa_fcs_vport_timeout, vport,
240 BFA_FCS_RETRY_TIMEOUT);
241 break;
242
243 case BFA_FCS_VPORT_SM_RSP_FAILED:
244 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
245 break;
246
247 case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
248 bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
249 break;
250
251 default:
252 bfa_sm_fault(__vport_fcs(vport), event);
253 }
254}
255
256/**
257 * FDISC attempt failed - a timer is active to retry FDISC.
258 */
259static void
260bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
261 enum bfa_fcs_vport_event event)
262{
263 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
264 bfa_trc(__vport_fcs(vport), event);
265
266 switch (event) {
267 case BFA_FCS_VPORT_SM_DELETE:
268 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
269 bfa_timer_stop(&vport->timer);
270 bfa_fcs_port_delete(&vport->lport);
271 break;
272
273 case BFA_FCS_VPORT_SM_OFFLINE:
274 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
275 bfa_timer_stop(&vport->timer);
276 break;
277
278 case BFA_FCS_VPORT_SM_TIMEOUT:
279 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
280 vport->vport_stats.fdisc_retries++;
281 vport->fdisc_retries++;
282 bfa_fcs_vport_do_fdisc(vport);
283 break;
284
285 default:
286 bfa_sm_fault(__vport_fcs(vport), event);
287 }
288}
289
290/**
291 * Vport is online (FDISC is complete).
292 */
293static void
294bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
295 enum bfa_fcs_vport_event event)
296{
297 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
298 bfa_trc(__vport_fcs(vport), event);
299
300 switch (event) {
301 case BFA_FCS_VPORT_SM_DELETE:
302 bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
303 bfa_fcs_port_delete(&vport->lport);
304 break;
305
306 case BFA_FCS_VPORT_SM_OFFLINE:
307 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
308 bfa_lps_discard(vport->lps);
309 bfa_fcs_port_offline(&vport->lport);
310 break;
311
312 default:
313 bfa_sm_fault(__vport_fcs(vport), event);
314 }
315}
316
317/**
318 * Vport is being deleted - awaiting lport delete completion to send
319 * LOGO to fabric.
320 */
321static void
322bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
323 enum bfa_fcs_vport_event event)
324{
325 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
326 bfa_trc(__vport_fcs(vport), event);
327
328 switch (event) {
329 case BFA_FCS_VPORT_SM_DELETE:
330 break;
331
332 case BFA_FCS_VPORT_SM_DELCOMP:
333 bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
334 bfa_fcs_vport_do_logo(vport);
335 break;
336
337 case BFA_FCS_VPORT_SM_OFFLINE:
338 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
339 break;
340
341 default:
342 bfa_sm_fault(__vport_fcs(vport), event);
343 }
344}
345
346/**
347 * Error State.
348 * This state will be set when the Vport Creation fails due to errors like
349 * Dup WWN. In this state only operation allowed is a Vport Delete.
350 */
351static void
352bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
353 enum bfa_fcs_vport_event event)
354{
355 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
356 bfa_trc(__vport_fcs(vport), event);
357
358 switch (event) {
359 case BFA_FCS_VPORT_SM_DELETE:
360 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
361 bfa_fcs_port_delete(&vport->lport);
362
363 break;
364
365 default:
366 bfa_trc(__vport_fcs(vport), event);
367 }
368}
369
370/**
371 * Lport cleanup is in progress since vport is being deleted. Fabric is
372 * offline, so no LOGO is needed to complete vport deletion.
373 */
374static void
375bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
376 enum bfa_fcs_vport_event event)
377{
378 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
379 bfa_trc(__vport_fcs(vport), event);
380
381 switch (event) {
382 case BFA_FCS_VPORT_SM_DELCOMP:
383 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
384 bfa_fcs_vport_free(vport);
385 break;
386
387 case BFA_FCS_VPORT_SM_DELETE:
388 break;
389
390 default:
391 bfa_sm_fault(__vport_fcs(vport), event);
392 }
393}
394
395/**
396 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
397 * is done.
398 */
399static void
400bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
401 enum bfa_fcs_vport_event event)
402{
403 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
404 bfa_trc(__vport_fcs(vport), event);
405
406 switch (event) {
407 case BFA_FCS_VPORT_SM_OFFLINE:
408 bfa_lps_discard(vport->lps);
409 /*
410 * !!! fall through !!!
411 */
412
413 case BFA_FCS_VPORT_SM_RSP_OK:
414 case BFA_FCS_VPORT_SM_RSP_ERROR:
415 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
416 bfa_fcs_vport_free(vport);
417 break;
418
419 case BFA_FCS_VPORT_SM_DELETE:
420 break;
421
422 default:
423 bfa_sm_fault(__vport_fcs(vport), event);
424 }
425}
426
427
428
429/**
430 * fcs_vport_private FCS virtual port private functions
431 */
432
433/**
434 * Send AEN notification
435 */
436static void
437bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
438{
439 union bfa_aen_data_u aen_data;
440 struct bfa_log_mod_s *logmod = port->fcs->logm;
441 enum bfa_port_role role = port->port_cfg.roles;
442 wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
443 char lpwwn_ptr[BFA_STRING_32];
444 char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
445 { "Initiator", "Target", "IPFC" };
446
447 wwn2str(lpwwn_ptr, lpwwn);
448
449 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
450
451 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
452 role_str[role/2]);
453
454 aen_data.lport.vf_id = port->fabric->vf_id;
455 aen_data.lport.roles = role;
456 aen_data.lport.ppwwn =
457 bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
458 aen_data.lport.lpwwn = lpwwn;
459}
460
461/**
462 * This routine will be called to send a FDISC command.
463 */
464static void
465bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
466{
467 bfa_lps_fdisc(vport->lps, vport,
468 bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
469 __vport_pwwn(vport), __vport_nwwn(vport));
470 vport->vport_stats.fdisc_sent++;
471}
472
473static void
474bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
475{
476 u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
477 u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
478
479 bfa_trc(__vport_fcs(vport), lsrjt_rsn);
480 bfa_trc(__vport_fcs(vport), lsrjt_expl);
481
482 /*
483 * For certain reason codes, we don't want to retry.
484 */
485 switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
486 case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
487 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
488 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
489 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
490 else {
491 bfa_fcs_vport_aen_post(&vport->lport,
492 BFA_LPORT_AEN_NPIV_DUP_WWN);
493 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
494 }
495 break;
496
497 case FC_LS_RJT_EXP_INSUFF_RES:
498 /*
499 * This means max logins per port/switch setting on the
500 * switch was exceeded.
501 */
502 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
503 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
504 else {
505 bfa_fcs_vport_aen_post(&vport->lport,
506 BFA_LPORT_AEN_NPIV_FABRIC_MAX);
507 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
508 }
509 break;
510
511 default:
512 if (vport->fdisc_retries == 0) /* Print only once */
513 bfa_fcs_vport_aen_post(&vport->lport,
514 BFA_LPORT_AEN_NPIV_UNKNOWN);
515 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
516 }
517}
518
519/**
520 * Called to send a logout to the fabric. Used when a V-Port is
521 * deleted/stopped.
522 */
523static void
524bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
525{
526 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
527
528 vport->vport_stats.logo_sent++;
529 bfa_lps_fdisclogo(vport->lps);
530}
531
532/**
533 * This routine will be called by bfa_timer on timer timeouts.
534 *
535 * param[in] vport - pointer to bfa_fcs_vport_t.
536 * param[out] vport_status - pointer to return vport status in
537 *
538 * return
539 * void
540 *
541* Special Considerations:
542 *
543 * note
544 */
545static void
546bfa_fcs_vport_timeout(void *vport_arg)
547{
548 struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *)vport_arg;
549
550 vport->vport_stats.fdisc_timeouts++;
551 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
552}
553
554static void
555bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
556{
557 bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
558 bfa_fcb_vport_delete(vport->vport_drv);
559 bfa_lps_delete(vport->lps);
560}
561
562
563
564/**
565 * fcs_vport_public FCS virtual port public interfaces
566 */
567
568/**
569 * Online notification from fabric SM.
570 */
571void
572bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
573{
574 vport->vport_stats.fab_online++;
575 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
576}
577
578/**
579 * Offline notification from fabric SM.
580 */
581void
582bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
583{
584 vport->vport_stats.fab_offline++;
585 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
586}
587
588/**
589 * Cleanup notification from fabric SM on link timer expiry.
590 */
591void
592bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
593{
594 vport->vport_stats.fab_cleanup++;
595}
596
597/**
598 * delete notification from fabric SM. To be invoked from within FCS.
599 */
600void
601bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
602{
603 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
604}
605
606/**
607 * Delete completion callback from associated lport
608 */
609void
610bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
611{
612 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
613}
614
615/**
616 * fcs_vport_api Virtual port API
617 */
618
619/**
620 * Use this function to instantiate a new FCS vport object. This
621 * function will not trigger any HW initialization process (which will be
622 * done in vport_start() call)
623 *
624 * param[in] vport - pointer to bfa_fcs_vport_t. This space
625 * needs to be allocated by the driver.
626 * param[in] fcs - FCS instance
627 * param[in] vport_cfg - vport configuration
628 * param[in] vf_id - VF_ID if vport is created within a VF.
629 * FC_VF_ID_NULL to specify base fabric.
630 * param[in] vport_drv - Opaque handle back to the driver's vport
631 * structure
632 *
633 * retval BFA_STATUS_OK - on success.
634 * retval BFA_STATUS_FAILED - on failure.
635 */
636bfa_status_t
637bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
638 u16 vf_id, struct bfa_port_cfg_s *vport_cfg,
639 struct bfad_vport_s *vport_drv)
640{
641 if (vport_cfg->pwwn == 0)
642 return BFA_STATUS_INVALID_WWN;
643
644 if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
645 return BFA_STATUS_VPORT_WWN_BP;
646
647 if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
648 return BFA_STATUS_VPORT_EXISTS;
649
650 if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
651 bfa_lps_get_max_vport(fcs->bfa))
652 return BFA_STATUS_VPORT_MAX;
653
654 vport->lps = bfa_lps_alloc(fcs->bfa);
655 if (!vport->lps)
656 return BFA_STATUS_VPORT_MAX;
657
658 vport->vport_drv = vport_drv;
659 vport_cfg->preboot_vp = BFA_FALSE;
660 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
661
662 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
663 bfa_fcs_lport_init(&vport->lport, vport_cfg);
664
665 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
666
667 return BFA_STATUS_OK;
668}
669
670/**
671 * Use this function to instantiate a new FCS PBC vport object. This
672 * function will not trigger any HW initialization process (which will be
673 * done in vport_start() call)
674 *
675 * param[in] vport - pointer to bfa_fcs_vport_t. This space
676 * needs to be allocated by the driver.
677 * param[in] fcs - FCS instance
678 * param[in] vport_cfg - vport configuration
679 * param[in] vf_id - VF_ID if vport is created within a VF.
680 * FC_VF_ID_NULL to specify base fabric.
681 * param[in] vport_drv - Opaque handle back to the driver's vport
682 * structure
683 *
684 * retval BFA_STATUS_OK - on success.
685 * retval BFA_STATUS_FAILED - on failure.
686 */
687bfa_status_t
688bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
689 uint16_t vf_id, struct bfa_port_cfg_s *vport_cfg,
690 struct bfad_vport_s *vport_drv)
691{
692 bfa_status_t rc;
693
694 rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
695 vport->lport.port_cfg.preboot_vp = BFA_TRUE;
696
697 return rc;
698}
699
700/**
701 * Use this function initialize the vport.
702 *
703 * @param[in] vport - pointer to bfa_fcs_vport_t.
704 *
705 * @returns None
706 */
707bfa_status_t
708bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
709{
710 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);
711
712 return BFA_STATUS_OK;
713}
714
715/**
716 * Use this function quiese the vport object. This function will return
717 * immediately, when the vport is actually stopped, the
718 * bfa_drv_vport_stop_cb() will be called.
719 *
720 * param[in] vport - pointer to bfa_fcs_vport_t.
721 *
722 * return None
723 */
724bfa_status_t
725bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
726{
727 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
728
729 return BFA_STATUS_OK;
730}
731
732/**
733 * Use this function to delete a vport object. Fabric object should
734 * be stopped before this function call.
735 *
736 * Donot invoke this from within FCS
737 *
738 * param[in] vport - pointer to bfa_fcs_vport_t.
739 *
740 * return None
741 */
742bfa_status_t
743bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
744{
745 if (vport->lport.port_cfg.preboot_vp)
746 return BFA_STATUS_PBC;
747
748 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
749
750 return BFA_STATUS_OK;
751}
752
753/**
754 * Use this function to get vport's current status info.
755 *
756 * param[in] vport pointer to bfa_fcs_vport_t.
757 * param[out] attr pointer to return vport attributes
758 *
759 * return None
760 */
761void
762bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
763 struct bfa_vport_attr_s *attr)
764{
765 if (vport == NULL || attr == NULL)
766 return;
767
768 bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
769
770 bfa_fcs_port_get_attr(&vport->lport, &attr->port_attr);
771 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
772}
773
774/**
775 * Use this function to get vport's statistics.
776 *
777 * param[in] vport pointer to bfa_fcs_vport_t.
778 * param[out] stats pointer to return vport statistics in
779 *
780 * return None
781 */
782void
783bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
784 struct bfa_vport_stats_s *stats)
785{
786 *stats = vport->vport_stats;
787}
788
789/**
790 * Use this function to clear vport's statistics.
791 *
792 * param[in] vport pointer to bfa_fcs_vport_t.
793 *
794 * return None
795 */
796void
797bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
798{
799 bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
800}
801
802/**
803 * Lookup a virtual port. Excludes base port from lookup.
804 */
805struct bfa_fcs_vport_s *
806bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
807{
808 struct bfa_fcs_vport_s *vport;
809 struct bfa_fcs_fabric_s *fabric;
810
811 bfa_trc(fcs, vf_id);
812 bfa_trc(fcs, vpwwn);
813
814 fabric = bfa_fcs_vf_lookup(fcs, vf_id);
815 if (!fabric) {
816 bfa_trc(fcs, vf_id);
817 return NULL;
818 }
819
820 vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
821 return vport;
822}
823
824/**
825 * FDISC Response
826 */
827void
828bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
829{
830 struct bfa_fcs_vport_s *vport = uarg;
831
832 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
833 bfa_trc(__vport_fcs(vport), status);
834
835 switch (status) {
836 case BFA_STATUS_OK:
837 /*
838 * Initialize the V-Port fields
839 */
840 __vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
841 vport->vport_stats.fdisc_accepts++;
842 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
843 break;
844
845 case BFA_STATUS_INVALID_MAC:
846 /*
847 * Only for CNA
848 */
849 vport->vport_stats.fdisc_acc_bad++;
850 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
851
852 break;
853
854 case BFA_STATUS_EPROTOCOL:
855 switch (bfa_lps_get_extstatus(vport->lps)) {
856 case BFA_EPROTO_BAD_ACCEPT:
857 vport->vport_stats.fdisc_acc_bad++;
858 break;
859
860 case BFA_EPROTO_UNKNOWN_RSP:
861 vport->vport_stats.fdisc_unknown_rsp++;
862 break;
863
864 default:
865 break;
866 }
867
868 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
869 break;
870
871 case BFA_STATUS_FABRIC_RJT:
872 vport->vport_stats.fdisc_rejects++;
873 bfa_fcs_vport_fdisc_rejected(vport);
874 break;
875
876 default:
877 vport->vport_stats.fdisc_rsp_err++;
878 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
879 }
880}
881
882/**
883 * LOGO response
884 */
885void
886bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
887{
888 struct bfa_fcs_vport_s *vport = uarg;
889 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
890}
891
892/**
893 * Received clear virtual link
894 */
895void
896bfa_cb_lps_cvl_event(void *bfad, void *uarg)
897{
898 struct bfa_fcs_vport_s *vport = uarg;
899
900 /* Send an Offline followed by an ONLINE */
901 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
902 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
903}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 00c033511cbf..b6345d91bb66 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -753,7 +753,7 @@ extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
753extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, 753extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
754 struct bnx2i_cmd *cmnd); 754 struct bnx2i_cmd *cmnd);
755extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, 755extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
756 struct iscsi_task *mtask, u32 ttt, 756 struct iscsi_task *mtask,
757 char *datap, int data_len, int unsol); 757 char *datap, int data_len, int unsol);
758extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, 758extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
759 struct iscsi_task *mtask); 759 struct iscsi_task *mtask);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index d23fc256d585..90cef716b796 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -385,7 +385,6 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
385 struct bnx2i_cmd *bnx2i_cmd; 385 struct bnx2i_cmd *bnx2i_cmd;
386 struct bnx2i_tmf_request *tmfabort_wqe; 386 struct bnx2i_tmf_request *tmfabort_wqe;
387 u32 dword; 387 u32 dword;
388 u32 scsi_lun[2];
389 388
390 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
391 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -393,38 +392,41 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
393 bnx2i_conn->ep->qp.sq_prod_qe; 392 bnx2i_conn->ep->qp.sq_prod_qe;
394 393
395 tmfabort_wqe->op_code = tmfabort_hdr->opcode; 394 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
396 tmfabort_wqe->op_attr = 0; 395 tmfabort_wqe->op_attr = tmfabort_hdr->flags;
397 tmfabort_wqe->op_attr =
398 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
399 396
400 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 397 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
401 tmfabort_wqe->reserved2 = 0; 398 tmfabort_wqe->reserved2 = 0;
402 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 399 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
403 400
404 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 401 switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
405 if (!ctask || !ctask->sc) 402 case ISCSI_TM_FUNC_ABORT_TASK:
406 /* 403 case ISCSI_TM_FUNC_TASK_REASSIGN:
407 * the iscsi layer must have completed the cmd while this 404 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
408 * was starting up. 405 if (!ctask || !ctask->sc)
409 * 406 /*
410 * Note: In the case of a SCSI cmd timeout, the task's sc 407 * the iscsi layer must have completed the cmd while
411 * is still active; hence ctask->sc != 0 408 * was starting up.
412 * In this case, the task must be aborted 409 *
413 */ 410 * Note: In the case of a SCSI cmd timeout, the task's
414 return 0; 411 * sc is still active; hence ctask->sc != 0
415 412 * In this case, the task must be aborted
416 ref_sc = ctask->sc; 413 */
417 414 return 0;
418 /* Retrieve LUN directly from the ref_sc */ 415
419 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun); 416 ref_sc = ctask->sc;
420 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); 417 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
421 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); 418 dword = (ISCSI_TASK_TYPE_WRITE <<
422 419 ISCSI_CMD_REQUEST_TYPE_SHIFT);
423 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 420 else
424 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 421 dword = (ISCSI_TASK_TYPE_READ <<
425 else 422 ISCSI_CMD_REQUEST_TYPE_SHIFT);
426 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 423 tmfabort_wqe->ref_itt = (dword |
427 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); 424 (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
425 break;
426 default:
427 tmfabort_wqe->ref_itt = RESERVED_ITT;
428 }
429 memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
428 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 430 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
429 431
430 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 432 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -464,7 +466,6 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
464 * @conn: iscsi connection 466 * @conn: iscsi connection
465 * @cmd: driver command structure which is requesting 467 * @cmd: driver command structure which is requesting
466 * a WQE to sent to chip for further processing 468 * a WQE to sent to chip for further processing
467 * @ttt: TTT to be used when building pdu header
468 * @datap: payload buffer pointer 469 * @datap: payload buffer pointer
469 * @data_len: payload data length 470 * @data_len: payload data length
470 * @unsol: indicated whether nopout pdu is unsolicited pdu or 471 * @unsol: indicated whether nopout pdu is unsolicited pdu or
@@ -473,7 +474,7 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
473 * prepare and post a nopout request WQE to CNIC firmware 474 * prepare and post a nopout request WQE to CNIC firmware
474 */ 475 */
475int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, 476int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
476 struct iscsi_task *task, u32 ttt, 477 struct iscsi_task *task,
477 char *datap, int data_len, int unsol) 478 char *datap, int data_len, int unsol)
478{ 479{
479 struct bnx2i_endpoint *ep = bnx2i_conn->ep; 480 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
@@ -498,7 +499,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
498 nopout_wqe->itt = ((u16)task->itt | 499 nopout_wqe->itt = ((u16)task->itt |
499 (ISCSI_TASK_TYPE_MPATH << 500 (ISCSI_TASK_TYPE_MPATH <<
500 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 501 ISCSI_TMF_REQUEST_TYPE_SHIFT));
501 nopout_wqe->ttt = ttt; 502 nopout_wqe->ttt = nopout_hdr->ttt;
502 nopout_wqe->flags = 0; 503 nopout_wqe->flags = 0;
503 if (!unsol) 504 if (!unsol)
504 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 505 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index a796f565f383..50c2aa3b8eb1 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,15 +17,17 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.1.2" 20#define DRV_MODULE_VERSION "2.1.3"
21#define DRV_MODULE_RELDATE "Jun 28, 2010" 21#define DRV_MODULE_RELDATE "Aug 10, 2010"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
25 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 25 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
26 26
27 27
28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>"); 28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
29 "Eddie Wai <eddie.wai@broadcom.com>");
30
29MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711" 31MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
30 " iSCSI Driver"); 32 " iSCSI Driver");
31MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
@@ -167,6 +169,38 @@ void bnx2i_start(void *handle)
167 169
168 170
169/** 171/**
172 * bnx2i_chip_cleanup - local routine to handle chip cleanup
173 * @hba: Adapter instance to register
174 *
175 * Driver checks if adapter still has any active connections before
176 * executing the cleanup process
177 */
178static void bnx2i_chip_cleanup(struct bnx2i_hba *hba)
179{
180 struct bnx2i_endpoint *bnx2i_ep;
181 struct list_head *pos, *tmp;
182
183 if (hba->ofld_conns_active) {
184 /* Stage to force the disconnection
185 * This is the case where the daemon is either slow or
186 * not present
187 */
188 printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active "
189 "connections\n", hba->netdev->name,
190 hba->ofld_conns_active);
191 mutex_lock(&hba->net_dev_lock);
192 list_for_each_safe(pos, tmp, &hba->ep_active_list) {
193 bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
194 /* Clean up the chip only */
195 bnx2i_hw_ep_disconnect(bnx2i_ep);
196 bnx2i_ep->cm_sk = NULL;
197 }
198 mutex_unlock(&hba->net_dev_lock);
199 }
200}
201
202
203/**
170 * bnx2i_stop - cnic callback to shutdown adapter instance 204 * bnx2i_stop - cnic callback to shutdown adapter instance
171 * @handle: transparent handle pointing to adapter structure 205 * @handle: transparent handle pointing to adapter structure
172 * 206 *
@@ -176,8 +210,6 @@ void bnx2i_start(void *handle)
176void bnx2i_stop(void *handle) 210void bnx2i_stop(void *handle)
177{ 211{
178 struct bnx2i_hba *hba = handle; 212 struct bnx2i_hba *hba = handle;
179 struct list_head *pos, *tmp;
180 struct bnx2i_endpoint *bnx2i_ep;
181 int conns_active; 213 int conns_active;
182 214
183 /* check if cleanup happened in GOING_DOWN context */ 215 /* check if cleanup happened in GOING_DOWN context */
@@ -198,24 +230,7 @@ void bnx2i_stop(void *handle)
198 if (hba->ofld_conns_active == conns_active) 230 if (hba->ofld_conns_active == conns_active)
199 break; 231 break;
200 } 232 }
201 if (hba->ofld_conns_active) { 233 bnx2i_chip_cleanup(hba);
202 /* Stage to force the disconnection
203 * This is the case where the daemon is either slow or
204 * not present
205 */
206 printk(KERN_ALERT "bnx2i: Wait timeout, force all eps "
207 "to disconnect (%d)\n", hba->ofld_conns_active);
208 mutex_lock(&hba->net_dev_lock);
209 list_for_each_safe(pos, tmp, &hba->ep_active_list) {
210 bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
211 /* Clean up the chip only */
212 bnx2i_hw_ep_disconnect(bnx2i_ep);
213 }
214 mutex_unlock(&hba->net_dev_lock);
215 if (hba->ofld_conns_active)
216 printk(KERN_ERR "bnx2i: EP disconnect timeout (%d)!\n",
217 hba->ofld_conns_active);
218 }
219 234
220 /* This flag should be cleared last so that ep_disconnect() gracefully 235 /* This flag should be cleared last so that ep_disconnect() gracefully
221 * cleans up connection context 236 * cleans up connection context
@@ -457,6 +472,7 @@ static void __exit bnx2i_mod_exit(void)
457 adapter_count--; 472 adapter_count--;
458 473
459 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 474 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
475 bnx2i_chip_cleanup(hba);
460 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 476 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
461 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 477 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
462 } 478 }
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index a46ccc380ab1..fb50efbce087 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1078,11 +1078,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1078 buf = bnx2i_conn->gen_pdu.req_buf; 1078 buf = bnx2i_conn->gen_pdu.req_buf;
1079 if (data_len) 1079 if (data_len)
1080 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, 1080 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1081 RESERVED_ITT,
1082 buf, data_len, 1); 1081 buf, data_len, 1);
1083 else 1082 else
1084 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, 1083 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1085 RESERVED_ITT,
1086 NULL, 0, 1); 1084 NULL, 0, 1);
1087 break; 1085 break;
1088 case ISCSI_OP_LOGOUT: 1086 case ISCSI_OP_LOGOUT:
@@ -1955,6 +1953,9 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1955 if (!cnic) 1953 if (!cnic)
1956 return 0; 1954 return 0;
1957 1955
1956 if (bnx2i_ep->state == EP_STATE_IDLE)
1957 return 0;
1958
1958 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) 1959 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1959 goto destroy_conn; 1960 goto destroy_conn;
1960 1961
@@ -1998,11 +1999,13 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
1998 else 1999 else
1999 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); 2000 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2000 2001
2002 /* No longer allow CFC delete if cm_close/abort fails the request */
2001 if (close_ret) 2003 if (close_ret)
2002 bnx2i_ep->state = EP_STATE_DISCONN_COMPL; 2004 printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n",
2003 2005 bnx2i_ep->hba->netdev->name, close, close_ret);
2004 /* wait for option-2 conn teardown */ 2006 else
2005 wait_event_interruptible(bnx2i_ep->ofld_wait, 2007 /* wait for option-2 conn teardown */
2008 wait_event_interruptible(bnx2i_ep->ofld_wait,
2006 bnx2i_ep->state != EP_STATE_DISCONN_START); 2009 bnx2i_ep->state != EP_STATE_DISCONN_START);
2007 2010
2008 if (signal_pending(current)) 2011 if (signal_pending(current))
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
deleted file mode 100644
index e3133b58e594..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * cxgb3i.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_H__
14#define __CXGB3I_H__
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/list.h>
21#include <linux/netdevice.h>
22#include <linux/scatterlist.h>
23#include <linux/skbuff.h>
24#include <scsi/libiscsi_tcp.h>
25
26/* from cxgb3 LLD */
27#include "common.h"
28#include "t3_cpl.h"
29#include "t3cdev.h"
30#include "cxgb3_ctl_defs.h"
31#include "cxgb3_offload.h"
32#include "firmware_exports.h"
33
34#include "cxgb3i_offload.h"
35#include "cxgb3i_ddp.h"
36
37#define CXGB3I_SCSI_HOST_QDEPTH 1024
38#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
39#define CXGB3I_MAX_LUN 512
40#define ISCSI_PDU_NONPAYLOAD_MAX \
41 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
42
43struct cxgb3i_adapter;
44struct cxgb3i_hba;
45struct cxgb3i_endpoint;
46
47/**
48 * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
49 *
50 * @snic: cxgb3i adapter containing this port
51 * @ndev: pointer to netdev structure
52 * @shost: pointer to scsi host structure
53 */
54struct cxgb3i_hba {
55 struct cxgb3i_adapter *snic;
56 struct net_device *ndev;
57 struct Scsi_Host *shost;
58};
59
60/**
61 * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
62 *
63 * @listhead: list head to link elements
64 * @lock: lock for this structure
65 * @tdev: pointer to t3cdev used by cxgb3 driver
66 * @pdev: pointer to pci dev
67 * @hba_cnt: # of hbas (the same as # of ports)
68 * @hba: all the hbas on this adapter
69 * @flags: bit flag for adapter event/status
70 * @tx_max_size: max. tx packet size supported
71 * @rx_max_size: max. rx packet size supported
72 * @tag_format: ddp tag format settings
73 */
74#define CXGB3I_ADAPTER_FLAG_RESET 0x1
75struct cxgb3i_adapter {
76 struct list_head list_head;
77 spinlock_t lock;
78 struct t3cdev *tdev;
79 struct pci_dev *pdev;
80 unsigned char hba_cnt;
81 struct cxgb3i_hba *hba[MAX_NPORTS];
82
83 unsigned int flags;
84 unsigned int tx_max_size;
85 unsigned int rx_max_size;
86
87 struct cxgb3i_tag_format tag_format;
88};
89
90/**
91 * struct cxgb3i_conn - cxgb3i iscsi connection
92 *
93 * @listhead: list head to link elements
94 * @cep: pointer to iscsi_endpoint structure
95 * @conn: pointer to iscsi_conn structure
96 * @hba: pointer to the hba this conn. is going through
97 * @task_idx_bits: # of bits needed for session->cmds_max
98 */
99struct cxgb3i_conn {
100 struct list_head list_head;
101 struct cxgb3i_endpoint *cep;
102 struct iscsi_conn *conn;
103 struct cxgb3i_hba *hba;
104 unsigned int task_idx_bits;
105};
106
107/**
108 * struct cxgb3i_endpoint - iscsi tcp endpoint
109 *
110 * @c3cn: the h/w tcp connection representation
111 * @hba: pointer to the hba this conn. is going through
112 * @cconn: pointer to the associated cxgb3i iscsi connection
113 */
114struct cxgb3i_endpoint {
115 struct s3_conn *c3cn;
116 struct cxgb3i_hba *hba;
117 struct cxgb3i_conn *cconn;
118};
119
120/**
121 * struct cxgb3i_task_data - private iscsi task data
122 *
123 * @nr_frags: # of coalesced page frags (from scsi sgl)
124 * @frags: coalesced page frags (from scsi sgl)
125 * @skb: tx pdu skb
126 * @offset: data offset for the next pdu
127 * @count: max. possible pdu payload
128 * @sgoffset: offset to the first sg entry for a given offset
129 */
130#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
131struct cxgb3i_task_data {
132 unsigned short nr_frags;
133 skb_frag_t frags[MAX_PDU_FRAGS];
134 struct sk_buff *skb;
135 unsigned int offset;
136 unsigned int count;
137 unsigned int sgoffset;
138};
139
140int cxgb3i_iscsi_init(void);
141void cxgb3i_iscsi_cleanup(void);
142
143struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *);
146
147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
148 struct net_device *);
149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
150
151int cxgb3i_pdu_init(void);
152void cxgb3i_pdu_cleanup(void);
153void cxgb3i_conn_cleanup_task(struct iscsi_task *);
154int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
155int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
156int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
157
158void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
159int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
160
161#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
deleted file mode 100644
index be0e23042c76..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ /dev/null
@@ -1,773 +0,0 @@
1/*
2 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#include <linux/slab.h>
14#include <linux/skbuff.h>
15#include <linux/scatterlist.h>
16
17/* from cxgb3 LLD */
18#include "common.h"
19#include "t3_cpl.h"
20#include "t3cdev.h"
21#include "cxgb3_ctl_defs.h"
22#include "cxgb3_offload.h"
23#include "firmware_exports.h"
24
25#include "cxgb3i_ddp.h"
26
27#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
28#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
29#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
30
31#ifdef __DEBUG_CXGB3I_DDP__
32#define ddp_log_debug(fmt, args...) \
33 printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
34#else
35#define ddp_log_debug(fmt...)
36#endif
37
38/*
39 * iSCSI Direct Data Placement
40 *
41 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
42 * pre-posted final destination host-memory buffers based on the Initiator
43 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
44 *
45 * The host memory address is programmed into h/w in the format of pagepod
46 * entries.
47 * The location of the pagepod entry is encoded into ddp tag which is used or
48 * is the base for ITT/TTT.
49 */
50
51#define DDP_PGIDX_MAX 4
52#define DDP_THRESHOLD 2048
53static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
54static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
55static unsigned char page_idx = DDP_PGIDX_MAX;
56
57/*
58 * functions to program the pagepod in h/w
59 */
60static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
61{
62 struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
63
64 req->wr.wr_lo = 0;
65 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
66 req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
67 V_ULPTX_CMD(ULP_MEM_WRITE));
68 req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
69 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
70}
71
72static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
73 unsigned int idx, unsigned int npods,
74 struct cxgb3i_gather_list *gl)
75{
76 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
77 int i;
78
79 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
80 struct sk_buff *skb = ddp->gl_skb[idx];
81 struct pagepod *ppod;
82 int j, pidx;
83
84 /* hold on to the skb until we clear the ddp mapping */
85 skb_get(skb);
86
87 ulp_mem_io_set_hdr(skb, pm_addr);
88 ppod = (struct pagepod *)
89 (skb->head + sizeof(struct ulp_mem_io));
90 memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod));
91 for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
92 ppod->addr[j] = pidx < gl->nelem ?
93 cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
94
95 skb->priority = CPL_PRIORITY_CONTROL;
96 cxgb3_ofld_send(ddp->tdev, skb);
97 }
98 return 0;
99}
100
101static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
102 unsigned int idx, unsigned int npods)
103{
104 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
105 int i;
106
107 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
108 struct sk_buff *skb = ddp->gl_skb[idx];
109
110 if (!skb) {
111 ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
112 tag, idx, i, npods);
113 continue;
114 }
115 ddp->gl_skb[idx] = NULL;
116 memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
117 ulp_mem_io_set_hdr(skb, pm_addr);
118 skb->priority = CPL_PRIORITY_CONTROL;
119 cxgb3_ofld_send(ddp->tdev, skb);
120 }
121}
122
123static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
124 unsigned int start, unsigned int max,
125 unsigned int count,
126 struct cxgb3i_gather_list *gl)
127{
128 unsigned int i, j, k;
129
130 /* not enough entries */
131 if ((max - start) < count)
132 return -EBUSY;
133
134 max -= count;
135 spin_lock(&ddp->map_lock);
136 for (i = start; i < max;) {
137 for (j = 0, k = i; j < count; j++, k++) {
138 if (ddp->gl_map[k])
139 break;
140 }
141 if (j == count) {
142 for (j = 0, k = i; j < count; j++, k++)
143 ddp->gl_map[k] = gl;
144 spin_unlock(&ddp->map_lock);
145 return i;
146 }
147 i += j + 1;
148 }
149 spin_unlock(&ddp->map_lock);
150 return -EBUSY;
151}
152
153static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
154 int start, int count)
155{
156 spin_lock(&ddp->map_lock);
157 memset(&ddp->gl_map[start], 0,
158 count * sizeof(struct cxgb3i_gather_list *));
159 spin_unlock(&ddp->map_lock);
160}
161
162static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
163 int idx, int count)
164{
165 int i;
166
167 for (i = 0; i < count; i++, idx++)
168 if (ddp->gl_skb[idx]) {
169 kfree_skb(ddp->gl_skb[idx]);
170 ddp->gl_skb[idx] = NULL;
171 }
172}
173
174static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
175 int count, gfp_t gfp)
176{
177 int i;
178
179 for (i = 0; i < count; i++) {
180 struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
181 PPOD_SIZE, gfp);
182 if (skb) {
183 ddp->gl_skb[idx + i] = skb;
184 skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
185 } else {
186 ddp_free_gl_skb(ddp, idx, i);
187 return -ENOMEM;
188 }
189 }
190 return 0;
191}
192
193/**
194 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
195 * @pgsz: page size
196 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
197 */
198int cxgb3i_ddp_find_page_index(unsigned long pgsz)
199{
200 int i;
201
202 for (i = 0; i < DDP_PGIDX_MAX; i++) {
203 if (pgsz == (1UL << ddp_page_shift[i]))
204 return i;
205 }
206 ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
207 return DDP_PGIDX_MAX;
208}
209
210/**
211 * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE
212 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
213 */
214int cxgb3i_ddp_adjust_page_table(void)
215{
216 int i;
217 unsigned int base_order, order;
218
219 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
220 ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
221 PAGE_SIZE, 1UL << ddp_page_shift[0]);
222 return -EINVAL;
223 }
224
225 base_order = get_order(1UL << ddp_page_shift[0]);
226 order = get_order(1 << PAGE_SHIFT);
227 for (i = 0; i < DDP_PGIDX_MAX; i++) {
228 /* first is the kernel page size, then just doubling the size */
229 ddp_page_order[i] = order - base_order + i;
230 ddp_page_shift[i] = PAGE_SHIFT + i;
231 }
232 return 0;
233}
234
235static inline void ddp_gl_unmap(struct pci_dev *pdev,
236 struct cxgb3i_gather_list *gl)
237{
238 int i;
239
240 for (i = 0; i < gl->nelem; i++)
241 pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
242 PCI_DMA_FROMDEVICE);
243}
244
245static inline int ddp_gl_map(struct pci_dev *pdev,
246 struct cxgb3i_gather_list *gl)
247{
248 int i;
249
250 for (i = 0; i < gl->nelem; i++) {
251 gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
252 PAGE_SIZE,
253 PCI_DMA_FROMDEVICE);
254 if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
255 goto unmap;
256 }
257
258 return i;
259
260unmap:
261 if (i) {
262 unsigned int nelem = gl->nelem;
263
264 gl->nelem = i;
265 ddp_gl_unmap(pdev, gl);
266 gl->nelem = nelem;
267 }
268 return -ENOMEM;
269}
270
271/**
272 * cxgb3i_ddp_make_gl - build ddp page buffer list
273 * @xferlen: total buffer length
274 * @sgl: page buffer scatter-gather list
275 * @sgcnt: # of page buffers
276 * @pdev: pci_dev, used for pci map
277 * @gfp: allocation mode
278 *
279 * construct a ddp page buffer list from the scsi scattergather list.
280 * coalesce buffers as much as possible, and obtain dma addresses for
281 * each page.
282 *
283 * Return the cxgb3i_gather_list constructed from the page buffers if the
284 * memory can be used for ddp. Return NULL otherwise.
285 */
286struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
287 struct scatterlist *sgl,
288 unsigned int sgcnt,
289 struct pci_dev *pdev,
290 gfp_t gfp)
291{
292 struct cxgb3i_gather_list *gl;
293 struct scatterlist *sg = sgl;
294 struct page *sgpage = sg_page(sg);
295 unsigned int sglen = sg->length;
296 unsigned int sgoffset = sg->offset;
297 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
298 PAGE_SHIFT;
299 int i = 1, j = 0;
300
301 if (xferlen < DDP_THRESHOLD) {
302 ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
303 xferlen, DDP_THRESHOLD);
304 return NULL;
305 }
306
307 gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
308 npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
309 gfp);
310 if (!gl)
311 return NULL;
312
313 gl->pages = (struct page **)&gl->phys_addr[npages];
314 gl->length = xferlen;
315 gl->offset = sgoffset;
316 gl->pages[0] = sgpage;
317
318 sg = sg_next(sg);
319 while (sg) {
320 struct page *page = sg_page(sg);
321
322 if (sgpage == page && sg->offset == sgoffset + sglen)
323 sglen += sg->length;
324 else {
325 /* make sure the sgl is fit for ddp:
326 * each has the same page size, and
327 * all of the middle pages are used completely
328 */
329 if ((j && sgoffset) ||
330 ((i != sgcnt - 1) &&
331 ((sglen + sgoffset) & ~PAGE_MASK)))
332 goto error_out;
333
334 j++;
335 if (j == gl->nelem || sg->offset)
336 goto error_out;
337 gl->pages[j] = page;
338 sglen = sg->length;
339 sgoffset = sg->offset;
340 sgpage = page;
341 }
342 i++;
343 sg = sg_next(sg);
344 }
345 gl->nelem = ++j;
346
347 if (ddp_gl_map(pdev, gl) < 0)
348 goto error_out;
349
350 return gl;
351
352error_out:
353 kfree(gl);
354 return NULL;
355}
356
357/**
358 * cxgb3i_ddp_release_gl - release a page buffer list
359 * @gl: a ddp page buffer list
360 * @pdev: pci_dev used for pci_unmap
361 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
362 */
363void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
364 struct pci_dev *pdev)
365{
366 ddp_gl_unmap(pdev, gl);
367 kfree(gl);
368}
369
370/**
371 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
372 * @tdev: t3cdev adapter
373 * @tid: connection id
374 * @tformat: tag format
375 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
376 * @gl: the page momory list
377 * @gfp: allocation mode
378 *
379 * ddp setup for a given page buffer list and construct the ddp tag.
380 * return 0 if success, < 0 otherwise.
381 */
382int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
383 struct cxgb3i_tag_format *tformat, u32 *tagp,
384 struct cxgb3i_gather_list *gl, gfp_t gfp)
385{
386 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
387 struct pagepod_hdr hdr;
388 unsigned int npods;
389 int idx = -1;
390 int err = -ENOMEM;
391 u32 sw_tag = *tagp;
392 u32 tag;
393
394 if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
395 gl->length < DDP_THRESHOLD) {
396 ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
397 page_idx, gl->length, DDP_THRESHOLD);
398 return -EINVAL;
399 }
400
401 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
402
403 if (ddp->idx_last == ddp->nppods)
404 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
405 else {
406 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
407 ddp->nppods, npods, gl);
408 if (idx < 0 && ddp->idx_last >= npods) {
409 idx = ddp_find_unused_entries(ddp, 0,
410 min(ddp->idx_last + npods, ddp->nppods),
411 npods, gl);
412 }
413 }
414 if (idx < 0) {
415 ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
416 gl->length, gl->nelem, npods);
417 return idx;
418 }
419
420 err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
421 if (err < 0)
422 goto unmark_entries;
423
424 tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
425 tag |= idx << PPOD_IDX_SHIFT;
426
427 hdr.rsvd = 0;
428 hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
429 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
430 hdr.maxoffset = htonl(gl->length);
431 hdr.pgoffset = htonl(gl->offset);
432
433 err = set_ddp_map(ddp, &hdr, idx, npods, gl);
434 if (err < 0)
435 goto free_gl_skb;
436
437 ddp->idx_last = idx;
438 ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
439 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
440 idx, npods);
441 *tagp = tag;
442 return 0;
443
444free_gl_skb:
445 ddp_free_gl_skb(ddp, idx, npods);
446unmark_entries:
447 ddp_unmark_entries(ddp, idx, npods);
448 return err;
449}
450
451/**
452 * cxgb3i_ddp_tag_release - release a ddp tag
453 * @tdev: t3cdev adapter
454 * @tag: ddp tag
455 * ddp cleanup for a given ddp tag and release all the resources held
456 */
457void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
458{
459 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
460 u32 idx;
461
462 if (!ddp) {
463 ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
464 return;
465 }
466
467 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
468 if (idx < ddp->nppods) {
469 struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
470 unsigned int npods;
471
472 if (!gl || !gl->nelem) {
473 ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
474 tag, idx, gl, gl ? gl->nelem : 0);
475 return;
476 }
477 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
478 ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
479 tag, idx, npods);
480 clear_ddp_map(ddp, tag, idx, npods);
481 ddp_unmark_entries(ddp, idx, npods);
482 cxgb3i_ddp_release_gl(gl, ddp->pdev);
483 } else
484 ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
485 tag, idx, ddp->nppods);
486}
487
488static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
489 int reply)
490{
491 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
492 GFP_KERNEL);
493 struct cpl_set_tcb_field *req;
494 u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
495
496 if (!skb)
497 return -ENOMEM;
498
499 /* set up ulp submode and page size */
500 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
501 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
502 req->wr.wr_lo = 0;
503 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
504 req->reply = V_NO_REPLY(reply ? 0 : 1);
505 req->cpu_idx = 0;
506 req->word = htons(31);
507 req->mask = cpu_to_be64(0xF0000000);
508 req->val = cpu_to_be64(val << 28);
509 skb->priority = CPL_PRIORITY_CONTROL;
510
511 cxgb3_ofld_send(tdev, skb);
512 return 0;
513}
514
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	/* page_idx is the module-wide ddp index matching the host
	 * PAGE_SIZE, resolved once in cxgb3i_ddp_init() */
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
528
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	/* NOTE(review): if pgsz is not in the ddp page table,
	 * cxgb3i_ddp_find_page_index() presumably returns DDP_PGIDX_MAX
	 * and setup_conn_pgidx() silently programs index 0 — verify that
	 * this fallback is intended rather than returning an error. */
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
544
545/**
546 * cxgb3i_setup_conn_digest - setup conn. digest setting
547 * @tdev: t3cdev adapter
548 * @tid: connection id
549 * @hcrc: header digest enabled
550 * @dcrc: data digest enabled
551 * @reply: request reply from h/w
552 * set up the iscsi digest settings for a connection identified by tid
553 */
554int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
555 int hcrc, int dcrc, int reply)
556{
557 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
558 GFP_KERNEL);
559 struct cpl_set_tcb_field *req;
560 u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
561
562 if (!skb)
563 return -ENOMEM;
564
565 /* set up ulp submode and page size */
566 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
567 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
568 req->wr.wr_lo = 0;
569 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
570 req->reply = V_NO_REPLY(reply ? 0 : 1);
571 req->cpu_idx = 0;
572 req->word = htons(31);
573 req->mask = cpu_to_be64(0x0F000000);
574 req->val = cpu_to_be64(val << 24);
575 skb->priority = CPL_PRIORITY_CONTROL;
576
577 cxgb3_ofld_send(tdev, skb);
578 return 0;
579}
580
581
582/**
583 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
584 * @tdev: t3cdev adapter
585 * @tformat: tag format
586 * @txsz: max tx pdu payload size, filled in by this func.
587 * @rxsz: max rx pdu payload size, filled in by this func.
588 * setup the tag format for a given iscsi entity
589 */
590int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
591 struct cxgb3i_tag_format *tformat,
592 unsigned int *txsz, unsigned int *rxsz)
593{
594 struct cxgb3i_ddp_info *ddp;
595 unsigned char idx_bits;
596
597 if (!tformat)
598 return -EINVAL;
599
600 if (!tdev->ulp_iscsi)
601 return -EINVAL;
602
603 ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
604
605 idx_bits = 32 - tformat->sw_bits;
606 tformat->rsvd_bits = ddp->idx_bits;
607 tformat->rsvd_shift = PPOD_IDX_SHIFT;
608 tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
609
610 ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
611 tformat->sw_bits, tformat->rsvd_bits,
612 tformat->rsvd_shift, tformat->rsvd_mask);
613
614 *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
615 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
616 *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
617 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
618 ddp_log_info("max payload size: %u/%u, %u/%u.\n",
619 *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
620 return 0;
621}
622
/**
 * ddp_cleanup - kref release callback for the adapter's ddp resource
 * @kref: refcount embedded in struct cxgb3i_ddp_info
 * release all the resources held by the ddp pagepod manager; invoked
 * via kref_put() from cxgb3i_ddp_cleanup() when the last user is gone
 */

static void ddp_cleanup(struct kref *kref)
{
	/* kref release callback: tears down the adapter's ddp state once
	 * the last reference is dropped (see cxgb3i_ddp_cleanup()) */
	struct cxgb3i_ddp_info *ddp = container_of(kref,
						struct cxgb3i_ddp_info,
						refcnt);
	int i = 0;

	ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);

	/* detach from the adapter first so no new users can find us */
	ddp->tdev->ulp_iscsi = NULL;
	/* walk the pagepod map; a gather list occupies npods consecutive
	 * entries, so step by npods when one is found, else by 1 */
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];
		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
					ddp->tdev, i, npods);
			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	/* ddp itself was allocated with cxgb3i_alloc_big_mem() */
	cxgb3i_free_big_mem(ddp);
}
655
656void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
657{
658 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
659
660 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
661 if (ddp)
662 kref_put(&ddp->refcnt, ddp_cleanup);
663}
664
/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	/* already initialized by an earlier caller: just take a ref */
	if (ddp) {
		kref_get(&ddp->refcnt);
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			     tdev, tdev->ulp_iscsi);
		return;
	}

	/* query the pagepod memory window from the LLD */
	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return;
	}

	/* how many pagepods fit in [llimit, ulimit] */
	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	/* NOTE(review): this clamps ppmax to (2^(bits-1) - 1), i.e. one
	 * bit less than "bits" can index — confirm that is intentional */
	ppmax = (1 << (bits - 1)) - 1;

	/* single allocation: the ddp struct followed by the gl_map and
	 * gl_skb arrays, ppmax entries each */
	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
				   (sizeof(struct cxgb3i_gather_list *) +
				    sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	/* program tag mask, page-size factors and the trimmed window
	 * back into the hardware */
	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	/* publish only after the hardware accepted the parameters */
	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
		     " %u/%u.\n",
		     tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
		     ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		     ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}
750
751/**
752 * cxgb3i_ddp_init - initialize ddp functions
753 */
754void cxgb3i_ddp_init(struct t3cdev *tdev)
755{
756 if (page_idx == DDP_PGIDX_MAX) {
757 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
758
759 if (page_idx == DDP_PGIDX_MAX) {
760 ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
761 PAGE_SIZE);
762 if (cxgb3i_ddp_adjust_page_table() < 0) {
763 ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
764 PAGE_SIZE);
765 return;
766 }
767 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
768 }
769 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
770 PAGE_SIZE, page_idx);
771 }
772 ddp_init(tdev);
773}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
deleted file mode 100644
index 6761b329124d..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ /dev/null
@@ -1,312 +0,0 @@
1/*
2 * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__
15
16#include <linux/slab.h>
17#include <linux/vmalloc.h>
18
/**
 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
 *
 * @sw_bits: # of bits used by iscsi software layer
 * @rsvd_bits: # of bits used by h/w
 * @rsvd_shift: h/w bits shift left
 * @filler: padding to align rsvd_mask
 * @rsvd_mask: reserved bit mask
 */
struct cxgb3i_tag_format {
	unsigned char sw_bits;
	unsigned char rsvd_bits;
	unsigned char rsvd_shift;
	unsigned char filler[1];
	u32 rsvd_mask;
};
34
/**
 * struct cxgb3i_gather_list - cxgb3i direct data placement memory
 *
 * @tag: ddp tag
 * @length: total data buffer length
 * @offset: initial offset to the 1st page
 * @nelem: # of pages
 * @pages: page pointers
 * @phys_addr: physical address (flexible array, one entry per page;
 *             the structure is allocated with nelem trailing entries)
 */
struct cxgb3i_gather_list {
	u32 tag;
	unsigned int length;
	unsigned int offset;
	unsigned int nelem;
	struct page **pages;
	dma_addr_t phys_addr[0];
};
53
/**
 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
 *
 * @list: list head to link elements
 * @refcnt: ref. count
 * @tdev: pointer to t3cdev used by cxgb3 driver
 * @pdev: pci device for DMA mapping of gather lists
 * @max_txsz: max tx packet size for ddp
 * @max_rxsz: max rx packet size for ddp
 * @llimit: lower bound of the page pod memory
 * @ulimit: upper bound of the page pod memory
 * @nppods: # of page pod entries
 * @idx_last: page pod entry last used
 * @idx_bits: # of bits the pagepod index would take
 * @filler: padding to align idx_mask
 * @idx_mask: pagepod index mask
 * @rsvd_tag_mask: tag mask
 * @map_lock: lock to synchonize access to the page pod map
 * @gl_map: ddp memory gather list
 * @gl_skb: skb used to program the pagepod
 */
struct cxgb3i_ddp_info {
	struct list_head list;
	struct kref refcnt;
	struct t3cdev *tdev;
	struct pci_dev *pdev;
	unsigned int max_txsz;
	unsigned int max_rxsz;
	unsigned int llimit;
	unsigned int ulimit;
	unsigned int nppods;
	unsigned int idx_last;
	unsigned char idx_bits;
	unsigned char filler[3];
	u32 idx_mask;
	u32 rsvd_tag_mask;
	spinlock_t map_lock;
	struct cxgb3i_gather_list **gl_map;
	struct sk_buff **gl_skb;
};
92
#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */
#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
#define PPOD_PAGES_MAX		4
#define PPOD_PAGES_SHIFT	2	/* 4 pages per pod */

/*
 * struct pagepod_hdr, pagepod - pagepod format
 */
struct pagepod_hdr {
	u32 vld_tid;		/* valid bit + tid, see F_PPOD_VALID/V_PPOD_TID */
	u32 pgsz_tag_clr;	/* page size index + tag + color bits */
	u32 maxoffset;		/* total buffer length */
	u32 pgoffset;		/* offset into the first page */
	u64 rsvd;
};

struct pagepod {
	struct pagepod_hdr hdr;
	/* PPOD_PAGES_MAX page addresses + 1 overlap entry */
	u64 addr[PPOD_PAGES_MAX + 1];
};

#define PPOD_SIZE		sizeof(struct pagepod)	/* 64 */
#define PPOD_SIZE_SHIFT		6

/* a ddp tag = [sw bits][pagepod index][color]; the color occupies the
 * low PPOD_COLOR_SIZE bits, the index sits directly above it */
#define PPOD_COLOR_SHIFT	0
#define PPOD_COLOR_SIZE		6
#define PPOD_COLOR_MASK		((1 << PPOD_COLOR_SIZE) - 1)

#define PPOD_IDX_SHIFT		PPOD_COLOR_SIZE
#define PPOD_IDX_MAX_SIZE	24

/* S_/M_/V_ triplets: field shift, field mask, and value-insert helper
 * for the pagepod header words */
#define S_PPOD_TID	0
#define M_PPOD_TID	0xFFFFFF
#define V_PPOD_TID(x)	((x) << S_PPOD_TID)

#define S_PPOD_VALID	24
#define V_PPOD_VALID(x)	((x) << S_PPOD_VALID)
#define F_PPOD_VALID	V_PPOD_VALID(1U)

#define S_PPOD_COLOR	0
#define M_PPOD_COLOR	0x3F
#define V_PPOD_COLOR(x)	((x) << S_PPOD_COLOR)

#define S_PPOD_TAG	6
#define M_PPOD_TAG	0xFFFFFF
#define V_PPOD_TAG(x)	((x) << S_PPOD_TAG)

#define S_PPOD_PGSZ	30
#define M_PPOD_PGSZ	0x3
#define V_PPOD_PGSZ(x)	((x) << S_PPOD_PGSZ)
144
145/*
146 * large memory chunk allocation/release
147 * use vmalloc() if kmalloc() fails
148 */
149static inline void *cxgb3i_alloc_big_mem(unsigned int size,
150 gfp_t gfp)
151{
152 void *p = kmalloc(size, gfp);
153 if (!p)
154 p = vmalloc(size);
155 if (p)
156 memset(p, 0, size);
157 return p;
158}
159
/* release memory obtained from cxgb3i_alloc_big_mem(), dispatching on
 * whether it came from the vmalloc or the kmalloc pool */
static inline void cxgb3i_free_big_mem(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}
167
168/*
169 * cxgb3i ddp tag are 32 bits, it consists of reserved bits used by h/w and
170 * non-reserved bits that can be used by the iscsi s/w.
171 * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
172 * in struct cxgb3i_tag_format.
173 *
174 * The upper most reserved bit can be used to check if a tag is ddp tag or not:
175 * if the bit is 0, the tag is a valid ddp tag
176 */
177
178/**
179 * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
180 * @tformat: tag format information
181 * @tag: tag to be checked
182 *
183 * return true if the tag is a ddp tag, false otherwise.
184 */
185static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
186{
187 return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
188}
189
190/**
191 * cxgb3i_sw_tag_usable - check if s/w tag has enough bits left for hw bits
192 * @tformat: tag format information
193 * @sw_tag: s/w tag to be checked
194 *
195 * return true if the tag can be used for hw ddp tag, false otherwise.
196 */
197static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
198 u32 sw_tag)
199{
200 sw_tag >>= (32 - tformat->rsvd_bits);
201 return !sw_tag;
202}
203
/**
 * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag.
 */
static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
					 u32 sw_tag)
{
	/* position of the upper-most reserved bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		/* sw_tag has bits at/above the marker position: keep the
		 * low bits and move the high bits up by one so the marker
		 * bit does not clobber them.
		 * NOTE(review): v2 shifts by (shift - 1) then << shift,
		 * which is asymmetric with cxgb3i_tag_nonrsvd_bits()'s
		 * reverse transform — verify against the tag round-trip. */
		u32 v1 = sw_tag & ((1 << shift) - 1);
		u32 v2 = (sw_tag >> (shift - 1)) << shift;

		return v2 | v1 | 1 << shift;
	}
	return sw_tag | 1 << shift;
}
225
226/**
227 * cxgb3i_ddp_tag_base - shift s/w tag bits so that reserved bits are not used
228 * @tformat: tag format information
229 * @sw_tag: s/w tag to be checked
230 */
231static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
232 u32 sw_tag)
233{
234 u32 mask = (1 << tformat->rsvd_shift) - 1;
235
236 if (sw_tag && (sw_tag & ~mask)) {
237 u32 v1 = sw_tag & mask;
238 u32 v2 = sw_tag >> tformat->rsvd_shift;
239
240 v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
241 return v2 | v1;
242 }
243 return sw_tag;
244}
245
246/**
247 * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
248 * @tformat: tag format information
249 * @tag: tag to be checked
250 *
251 * return the reserved bits in the tag
252 */
253static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
254 u32 tag)
255{
256 if (cxgb3i_is_ddp_tag(tformat, tag))
257 return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
258 return 0;
259}
260
/**
 * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return the non-reserved bits in the tag; inverse of the packing done
 * by cxgb3i_ddp_tag_base() / cxgb3i_set_non_ddp_tag().
 */
static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
					  u32 tag)
{
	/* position of the upper-most reserved bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgb3i_is_ddp_tag(tformat, tag)) {
		/* ddp tag: s/w bits are below rsvd_shift and above the
		 * whole reserved field; stitch them back together */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		/* non-ddp tag: drop the marker bit and close the gap.
		 * NOTE(review): v2 = (tag >> 1) & ~mask shifts the high
		 * bits down by one — the reverse of the << 1 performed in
		 * cxgb3i_set_non_ddp_tag(); verify the round-trip. */
		u32 mask = (1 << shift) - 1;

		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
286
287int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
288 struct cxgb3i_tag_format *, u32 *tag,
289 struct cxgb3i_gather_list *, gfp_t gfp);
290void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
291
292struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
293 struct scatterlist *sgl,
294 unsigned int sgcnt,
295 struct pci_dev *pdev,
296 gfp_t gfp);
297void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
298 struct pci_dev *pdev);
299
300int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
301 int reply);
302int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
303 unsigned long pgsz);
304int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
305 int hcrc, int dcrc, int reply);
306int cxgb3i_ddp_find_page_index(unsigned long pgsz);
307int cxgb3i_adapter_ddp_info(struct t3cdev *, struct cxgb3i_tag_format *,
308 unsigned int *txsz, unsigned int *rxsz);
309
310void cxgb3i_ddp_init(struct t3cdev *);
311void cxgb3i_ddp_cleanup(struct t3cdev *);
312#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
deleted file mode 100644
index 685af3698518..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Karen Xie (kxie@chelsio.com)
10 */
11
12#include "cxgb3i.h"
13
14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.2"
16#define DRV_MODULE_RELDATE "Mar. 2009"
17
18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
20 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
21
22MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
23MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(DRV_MODULE_VERSION);
26
27static void open_s3_dev(struct t3cdev *);
28static void close_s3_dev(struct t3cdev *);
29static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
30
/* per-CPL-opcode receive handlers, populated by cxgb3i_sdev_init() */
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
/* registration record handed to the cxgb3 LLD: callbacks for adapter
 * add/remove and offload status events */
static struct cxgb3_client t3c_client = {
	.name = "iscsi_cxgb3",
	.handlers = cxgb3i_cpl_handlers,
	.add = open_s3_dev,
	.remove = close_s3_dev,
	.event_handler = s3_event_handler,
};
39
40/**
41 * open_s3_dev - register with cxgb3 LLD
42 * @t3dev: cxgb3 adapter instance
43 */
44static void open_s3_dev(struct t3cdev *t3dev)
45{
46 static int vers_printed;
47
48 if (!vers_printed) {
49 printk(KERN_INFO "%s", version);
50 vers_printed = 1;
51 }
52
53 cxgb3i_ddp_init(t3dev);
54 cxgb3i_sdev_add(t3dev, &t3c_client);
55 cxgb3i_adapter_open(t3dev);
56}
57
/**
 * close_s3_dev - de-register with cxgb3 LLD
 * @t3dev: cxgb3 adapter instance
 * teardown in reverse order of open_s3_dev(): adapter state first,
 * then the offload device, then the ddp resources
 */
static void close_s3_dev(struct t3cdev *t3dev)
{
	cxgb3i_adapter_close(t3dev);
	cxgb3i_sdev_remove(t3dev);
	cxgb3i_ddp_cleanup(t3dev);
}
68
69static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
70{
71 struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
72
73 cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
74 snic, tdev, event, port);
75 if (!snic)
76 return;
77
78 switch (event) {
79 case OFFLOAD_STATUS_DOWN:
80 snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
81 break;
82 case OFFLOAD_STATUS_UP:
83 snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
84 break;
85 }
86}
87
88/**
89 * cxgb3i_init_module - module init entry point
90 *
91 * initialize any driver wide global data structures and register itself
92 * with the cxgb3 module
93 */
94static int __init cxgb3i_init_module(void)
95{
96 int err;
97
98 err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
99 if (err < 0)
100 return err;
101
102 err = cxgb3i_iscsi_init();
103 if (err < 0)
104 return err;
105
106 err = cxgb3i_pdu_init();
107 if (err < 0) {
108 cxgb3i_iscsi_cleanup();
109 return err;
110 }
111
112 cxgb3_register_client(&t3c_client);
113
114 return 0;
115}
116
/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver hba list and for each hba, release any resource held.
 * and unregisters iscsi transport and the cxgb3 module
 */
static void __exit cxgb3i_exit_module(void)
{
	/* unregister from the LLD first so no new adapter callbacks
	 * arrive, then tear down in reverse order of init */
	cxgb3_unregister_client(&t3c_client);
	cxgb3i_pdu_cleanup();
	cxgb3i_iscsi_cleanup();
	cxgb3i_sdev_cleanup();
}
130
131module_init(cxgb3i_init_module);
132module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
deleted file mode 100644
index 7b686abaae64..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ /dev/null
@@ -1,1018 +0,0 @@
1/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 * Copyright (c) 2008 Mike Christie
5 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Karen Xie (kxie@chelsio.com)
12 */
13
14#include <linux/inet.h>
15#include <linux/slab.h>
16#include <linux/crypto.h>
17#include <linux/if_vlan.h>
18#include <net/dst.h>
19#include <net/tcp.h>
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_device.h>
22#include <scsi/scsi_eh.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi.h>
25#include <scsi/iscsi_proto.h>
26#include <scsi/libiscsi.h>
27#include <scsi/scsi_transport_iscsi.h>
28
29#include "cxgb3i.h"
30#include "cxgb3i_pdu.h"
31
32#ifdef __DEBUG_CXGB3I_TAG__
33#define cxgb3i_tag_debug cxgb3i_log_debug
34#else
35#define cxgb3i_tag_debug(fmt...)
36#endif
37
38#ifdef __DEBUG_CXGB3I_API__
39#define cxgb3i_api_debug cxgb3i_log_debug
40#else
41#define cxgb3i_api_debug(fmt...)
42#endif
43
44/*
45 * align pdu size to multiple of 512 for better performance
46 */
47#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
48
49static struct scsi_transport_template *cxgb3i_scsi_transport;
50static struct scsi_host_template cxgb3i_host_template;
51static struct iscsi_transport cxgb3i_iscsi_transport;
52static unsigned char sw_tag_idx_bits;
53static unsigned char sw_tag_age_bits;
54
55static LIST_HEAD(cxgb3i_snic_list);
56static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
57
58/**
59 * cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev
60 * @tdev: t3cdev pointer
61 */
62struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev)
63{
64 struct cxgb3i_adapter *snic;
65
66 read_lock(&cxgb3i_snic_rwlock);
67 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
68 if (snic->tdev == tdev) {
69 read_unlock(&cxgb3i_snic_rwlock);
70 return snic;
71 }
72 }
73 read_unlock(&cxgb3i_snic_rwlock);
74 return NULL;
75}
76
/* refresh the tag format and max tx/rx pdu sizes of an already-known
 * adapter from the hardware; returns 0 or a negative errno */
static inline int adapter_update(struct cxgb3i_adapter *snic)
{
	cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n",
			snic, snic->tdev);
	return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format,
				       &snic->tx_max_size,
				       &snic->rx_max_size);
}
85
86static int adapter_add(struct cxgb3i_adapter *snic)
87{
88 struct t3cdev *t3dev = snic->tdev;
89 struct adapter *adapter = tdev2adap(t3dev);
90 int i, err;
91
92 snic->pdev = adapter->pdev;
93 snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
94
95 err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format,
96 &snic->tx_max_size,
97 &snic->rx_max_size);
98 if (err < 0)
99 return err;
100
101 for_each_port(adapter, i) {
102 snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
103 if (!snic->hba[i])
104 return -EINVAL;
105 }
106 snic->hba_cnt = adapter->params.nports;
107
108 /* add to the list */
109 write_lock(&cxgb3i_snic_rwlock);
110 list_add_tail(&snic->list_head, &cxgb3i_snic_list);
111 write_unlock(&cxgb3i_snic_rwlock);
112
113 cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n",
114 t3dev, snic, snic->hba_cnt);
115 return 0;
116}
117
/**
 * cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
void cxgb3i_adapter_open(struct t3cdev *t3dev)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
	int err;

	/* known adapter (e.g. re-open after reset): refresh its state;
	 * otherwise allocate and register a new snic */
	if (snic)
		err = adapter_update(snic);
	else {
		snic = kzalloc(sizeof(*snic), GFP_KERNEL);
		if (snic) {
			spin_lock_init(&snic->lock);
			snic->tdev = t3dev;
			err = adapter_add(snic);
		} else
			err = -ENOMEM;
	}

	if (err < 0) {
		cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
				snic, snic ? snic->flags : 0, t3dev, err);
		if (snic) {
			/* clear RESET so cxgb3i_adapter_close() does a
			 * full teardown rather than skipping it.
			 * NOTE(review): if adapter_add() failed before the
			 * snic was put on the list, cxgb3i_adapter_close()
			 * will not find it and the kzalloc appears to
			 * leak — verify. */
			snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
			cxgb3i_adapter_close(t3dev);
		}
	}
}
148
/**
 * cxgb3i_adapter_close - release the resources held and cleanup h/w settings
 * @t3dev: t3cdev adapter
 */
void cxgb3i_adapter_close(struct t3cdev *t3dev)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
	int i;

	/* skip teardown when the snic is unknown, or when the adapter is
	 * only resetting (RESET flag set by s3_event_handler) and will
	 * come back */
	if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) {
		cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n",
				t3dev, snic, snic ? snic->flags : 0);
		return;
	}

	/* remove from the list */
	write_lock(&cxgb3i_snic_rwlock);
	list_del(&snic->list_head);
	write_unlock(&cxgb3i_snic_rwlock);

	/* unregister every scsi host added by adapter_add() */
	for (i = 0; i < snic->hba_cnt; i++) {
		if (snic->hba[i]) {
			cxgb3i_hba_host_remove(snic->hba[i]);
			snic->hba[i] = NULL;
		}
	}
	cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n",
			t3dev, snic, snic->hba_cnt);
	kfree(snic);
}
179
180/**
181 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
182 * @t3dev: t3cdev adapter
183 */
184static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
185{
186 struct cxgb3i_adapter *snic;
187 int i;
188
189 if (ndev->priv_flags & IFF_802_1Q_VLAN)
190 ndev = vlan_dev_real_dev(ndev);
191
192 read_lock(&cxgb3i_snic_rwlock);
193 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
194 for (i = 0; i < snic->hba_cnt; i++) {
195 if (snic->hba[i]->ndev == ndev) {
196 read_unlock(&cxgb3i_snic_rwlock);
197 return snic->hba[i];
198 }
199 }
200 }
201 read_unlock(&cxgb3i_snic_rwlock);
202 return NULL;
203}
204
/**
 * cxgb3i_hba_host_add - register a new host with scsi/iscsi
 * @snic: the cxgb3i adapter
 * @ndev: associated net_device
 *
 * allocates a Scsi_Host with a cxgb3i_hba as its private data and
 * registers it with the iscsi transport; returns the hba or NULL.
 * Holds a pci_dev reference for the lifetime of the host.
 */
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
				       struct net_device *ndev)
{
	struct cxgb3i_hba *hba;
	struct Scsi_Host *shost;
	int err;

	shost = iscsi_host_alloc(&cxgb3i_host_template,
				 sizeof(struct cxgb3i_hba), 1);
	if (!shost) {
		cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n",
				snic, ndev);
		return NULL;
	}

	shost->transportt = cxgb3i_scsi_transport;
	shost->max_lun = CXGB3I_MAX_LUN;
	shost->max_id = CXGB3I_MAX_TARGET;
	shost->max_channel = 0;
	shost->max_cmd_len = 16;

	/* the hba lives in the host's private area */
	hba = iscsi_host_priv(shost);
	hba->snic = snic;
	hba->ndev = ndev;
	hba->shost = shost;

	/* pin the pci device while the host is registered; dropped in
	 * cxgb3i_hba_host_remove() or on the error path below */
	pci_dev_get(snic->pdev);
	err = iscsi_host_add(shost, &snic->pdev->dev);
	if (err) {
		cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n",
				snic, ndev);
		goto pci_dev_put;
	}

	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
			 shost, hba, shost->host_no);

	return hba;

pci_dev_put:
	pci_dev_put(snic->pdev);
	scsi_host_put(shost);
	return NULL;
}
254
255/**
256 * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
257 * @hba: the cxgb3i hba
258 */
259void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
260{
261 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
262 hba->shost, hba, hba->shost->host_no);
263 iscsi_host_remove(hba->shost);
264 pci_dev_put(hba->snic->pdev);
265 iscsi_host_free(hba->shost);
266}
267
/**
 * cxgb3i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host to use
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * Initiates a TCP/IP connection to the dst_addr
 */
static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
						struct sockaddr *dst_addr,
						int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba = NULL;
	struct s3_conn *c3cn = NULL;
	int err = 0;

	if (shost)
		hba = iscsi_host_priv(shost);

	cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);

	c3cn = cxgb3i_c3cn_create();
	if (!c3cn) {
		cxgb3i_log_info("ep connect OOM.\n");
		err = -ENOMEM;
		goto release_conn;
	}

	/* start the offloaded TCP connect (IPv4 only) */
	err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
				 (struct sockaddr_in *)dst_addr);
	if (err < 0) {
		cxgb3i_log_info("ep connect failed.\n");
		goto release_conn;
	}

	/* the route's egress device tells us which hba actually owns
	 * this connection */
	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
	if (!hba) {
		err = -ENOSPC;
		cxgb3i_log_info("NOT going through cxgbi device.\n");
		goto release_conn;
	}

	/* a caller-specified host must match the routed hba */
	if (shost && hba != iscsi_host_priv(shost)) {
		err = -ENOSPC;
		cxgb3i_log_info("Could not connect through request host%u\n",
				shost->host_no);
		goto release_conn;
	}

	/* connection may already be tearing down (e.g. RST received) */
	if (c3cn_is_closing(c3cn)) {
		err = -ENOSPC;
		cxgb3i_log_info("ep connect unable to connect.\n");
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		cxgb3i_log_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}
	cep = ep->dd_data;
	cep->c3cn = c3cn;
	cep->hba = hba;

	cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
			 ep, cep, c3cn, hba);
	return ep;

release_conn:
	cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
	if (c3cn)
		cxgb3i_c3cn_release(c3cn);
	return ERR_PTR(err);
}
345
346/**
347 * cxgb3i_ep_poll - polls for TCP connection establishement
348 * @ep: TCP connection (endpoint) handle
349 * @timeout_ms: timeout value in milli secs
350 *
351 * polls for TCP connect request to complete
352 */
353static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
354{
355 struct cxgb3i_endpoint *cep = ep->dd_data;
356 struct s3_conn *c3cn = cep->c3cn;
357
358 if (!c3cn_is_established(c3cn))
359 return 0;
360 cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
361 return 1;
362}
363
/**
 * cxgb3i_ep_disconnect - teardown TCP connection
 * @ep: TCP connection (endpoint) handle
 *
 * teardown TCP connection
 */
static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgb3i_endpoint *cep = ep->dd_data;
	struct cxgb3i_conn *cconn = cep->cconn;

	cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);

	if (cconn && cconn->conn) {
		/*
		 * stop the xmit path so the xmit_pdu function is
		 * not being called
		 */
		iscsi_suspend_tx(cconn->conn);

		/* break the c3cn <-> conn back-pointers under the
		 * callback lock so in-flight rx callbacks cannot race
		 * with the release below */
		write_lock_bh(&cep->c3cn->callback_lock);
		cep->c3cn->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&cep->c3cn->callback_lock);
	}

	cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
			 ep, cep, cep->c3cn);
	cxgb3i_c3cn_release(cep->c3cn);
	iscsi_destroy_endpoint(ep);
}
395
/**
 * cxgb3i_session_create - create a new iscsi session
 * @ep: endpoint the session rides on (required)
 * @cmds_max: max # of commands
 * @qdepth: scsi queue depth
 * @initial_cmdsn: initial iscsi CMDSN for this session
 *
 * Creates a new iSCSI session; returns the cls_session or NULL.
 */
static struct iscsi_cls_session *
cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
		      u32 initial_cmdsn)
{
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	/* unlike software iscsi, a cxgb3i session cannot exist without
	 * an already-connected endpoint */
	if (!ep) {
		cxgb3i_log_error("%s, missing endpoint.\n", __func__);
		return NULL;
	}

	cep = ep->dd_data;
	hba = cep->hba;
	shost = hba->shost;
	cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
	BUG_ON(hba != iscsi_host_priv(shost));

	/* per-task data = libiscsi tcp task + cxgb3i task data */
	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct cxgb3i_task_data),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;
	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
442
443/**
444 * cxgb3i_session_destroy - destroys iscsi session
445 * @cls_session: pointer to iscsi cls session
446 *
447 * Destroys an iSCSI session instance and releases its all resources held
448 */
449static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
450{
451 cxgb3i_api_debug("sess 0x%p.\n", cls_session);
452 iscsi_tcp_r2tpool_free(cls_session->dd_data);
453 iscsi_session_teardown(cls_session);
454}
455
456/**
457 * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size
458 * @conn: iscsi connection
459 * check the max. xmit pdu payload, reduce it if needed
460 */
461static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
462
463{
464 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
465 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
466 unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
467
468 max = min(cconn->hba->snic->tx_max_size, max);
469 if (conn->max_xmit_dlength)
470 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
471 else
472 conn->max_xmit_dlength = max;
473 align_pdu_size(conn->max_xmit_dlength);
474 cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
475 conn, conn->max_xmit_dlength);
476 return 0;
477}
478
479/**
480 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size
481 * @conn: iscsi connection
482 * return 0 if the value is valid, < 0 otherwise.
483 */
484static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
485{
486 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
487 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
488 unsigned int max = cconn->hba->snic->rx_max_size;
489
490 align_pdu_size(max);
491 if (conn->max_recv_dlength) {
492 if (conn->max_recv_dlength > max) {
493 cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
494 " Need to be <= %u.\n",
495 conn->max_recv_dlength, max);
496 return -EINVAL;
497 }
498 conn->max_recv_dlength = min(conn->max_recv_dlength, max);
499 align_pdu_size(conn->max_recv_dlength);
500 } else
501 conn->max_recv_dlength = max;
502 cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
503 conn, conn->max_recv_dlength);
504 return 0;
505}
506
507/**
508 * cxgb3i_conn_create - create iscsi connection instance
509 * @cls_session: pointer to iscsi cls session
510 * @cid: iscsi cid
511 *
512 * Creates a new iSCSI connection instance for a given session
513 */
514static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
515 *cls_session, u32 cid)
516{
517 struct iscsi_cls_conn *cls_conn;
518 struct iscsi_conn *conn;
519 struct iscsi_tcp_conn *tcp_conn;
520 struct cxgb3i_conn *cconn;
521
522 cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
523
524 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
525 if (!cls_conn)
526 return NULL;
527 conn = cls_conn->dd_data;
528 tcp_conn = conn->dd_data;
529 cconn = tcp_conn->dd_data;
530
531 cconn->conn = conn;
532 return cls_conn;
533}
534
535/**
536 * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
537 * @cls_session: pointer to iscsi cls session
538 * @cls_conn: pointer to iscsi cls conn
539 * @transport_eph: 64-bit EP handle
540 * @is_leading: leading connection on this session?
541 *
542 * Binds together an iSCSI session, an iSCSI connection and a
543 * TCP connection. This routine returns error code if the TCP
544 * connection does not belong on the device iSCSI sess/conn is bound
545 */
546
547static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
548 struct iscsi_cls_conn *cls_conn,
549 u64 transport_eph, int is_leading)
550{
551 struct iscsi_conn *conn = cls_conn->dd_data;
552 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
553 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
554 struct cxgb3i_adapter *snic;
555 struct iscsi_endpoint *ep;
556 struct cxgb3i_endpoint *cep;
557 struct s3_conn *c3cn;
558 int err;
559
560 ep = iscsi_lookup_endpoint(transport_eph);
561 if (!ep)
562 return -EINVAL;
563
564 /* setup ddp pagesize */
565 cep = ep->dd_data;
566 c3cn = cep->c3cn;
567 snic = cep->hba->snic;
568 err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
569 if (err < 0)
570 return err;
571
572 cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
573 ep, cls_session, cls_conn);
574
575 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
576 if (err)
577 return -EINVAL;
578
579 /* calculate the tag idx bits needed for this conn based on cmds_max */
580 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
581 cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
582 conn->session->cmds_max, cconn->task_idx_bits);
583
584 read_lock(&c3cn->callback_lock);
585 c3cn->user_data = conn;
586 cconn->hba = cep->hba;
587 cconn->cep = cep;
588 cep->cconn = cconn;
589 read_unlock(&c3cn->callback_lock);
590
591 cxgb3i_conn_max_xmit_dlength(conn);
592 cxgb3i_conn_max_recv_dlength(conn);
593
594 spin_lock_bh(&conn->session->lock);
595 sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
596 conn->portal_port = ntohs(c3cn->daddr.sin_port);
597 spin_unlock_bh(&conn->session->lock);
598
599 /* init recv engine */
600 iscsi_tcp_hdr_recv_prep(tcp_conn);
601
602 return 0;
603}
604
605/**
606 * cxgb3i_conn_get_param - return iscsi connection parameter to caller
607 * @cls_conn: pointer to iscsi cls conn
608 * @param: parameter type identifier
609 * @buf: buffer pointer
610 *
611 * returns iSCSI connection parameters
612 */
613static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
614 enum iscsi_param param, char *buf)
615{
616 struct iscsi_conn *conn = cls_conn->dd_data;
617 int len;
618
619 cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
620
621 switch (param) {
622 case ISCSI_PARAM_CONN_PORT:
623 spin_lock_bh(&conn->session->lock);
624 len = sprintf(buf, "%hu\n", conn->portal_port);
625 spin_unlock_bh(&conn->session->lock);
626 break;
627 case ISCSI_PARAM_CONN_ADDRESS:
628 spin_lock_bh(&conn->session->lock);
629 len = sprintf(buf, "%s\n", conn->portal_address);
630 spin_unlock_bh(&conn->session->lock);
631 break;
632 default:
633 return iscsi_conn_get_param(cls_conn, param, buf);
634 }
635
636 return len;
637}
638
639/**
640 * cxgb3i_conn_set_param - set iscsi connection parameter
641 * @cls_conn: pointer to iscsi cls conn
642 * @param: parameter type identifier
643 * @buf: buffer pointer
644 * @buflen: buffer length
645 *
646 * set iSCSI connection parameters
647 */
648static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
649 enum iscsi_param param, char *buf, int buflen)
650{
651 struct iscsi_conn *conn = cls_conn->dd_data;
652 struct iscsi_session *session = conn->session;
653 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
654 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
655 struct cxgb3i_adapter *snic = cconn->hba->snic;
656 struct s3_conn *c3cn = cconn->cep->c3cn;
657 int value, err = 0;
658
659 switch (param) {
660 case ISCSI_PARAM_HDRDGST_EN:
661 err = iscsi_set_param(cls_conn, param, buf, buflen);
662 if (!err && conn->hdrdgst_en)
663 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
664 conn->hdrdgst_en,
665 conn->datadgst_en, 0);
666 break;
667 case ISCSI_PARAM_DATADGST_EN:
668 err = iscsi_set_param(cls_conn, param, buf, buflen);
669 if (!err && conn->datadgst_en)
670 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
671 conn->hdrdgst_en,
672 conn->datadgst_en, 0);
673 break;
674 case ISCSI_PARAM_MAX_R2T:
675 sscanf(buf, "%d", &value);
676 if (value <= 0 || !is_power_of_2(value))
677 return -EINVAL;
678 if (session->max_r2t == value)
679 break;
680 iscsi_tcp_r2tpool_free(session);
681 err = iscsi_set_param(cls_conn, param, buf, buflen);
682 if (!err && iscsi_tcp_r2tpool_alloc(session))
683 return -ENOMEM;
684 case ISCSI_PARAM_MAX_RECV_DLENGTH:
685 err = iscsi_set_param(cls_conn, param, buf, buflen);
686 if (!err)
687 err = cxgb3i_conn_max_recv_dlength(conn);
688 break;
689 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
690 err = iscsi_set_param(cls_conn, param, buf, buflen);
691 if (!err)
692 err = cxgb3i_conn_max_xmit_dlength(conn);
693 break;
694 default:
695 return iscsi_set_param(cls_conn, param, buf, buflen);
696 }
697 return err;
698}
699
700/**
701 * cxgb3i_host_set_param - configure host (adapter) related parameters
702 * @shost: scsi host pointer
703 * @param: parameter type identifier
704 * @buf: buffer pointer
705 */
706static int cxgb3i_host_set_param(struct Scsi_Host *shost,
707 enum iscsi_host_param param,
708 char *buf, int buflen)
709{
710 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
711
712 if (!hba->ndev) {
713 shost_printk(KERN_ERR, shost, "Could not set host param. "
714 "Netdev for host not set.\n");
715 return -ENODEV;
716 }
717
718 cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
719
720 switch (param) {
721 case ISCSI_HOST_PARAM_IPADDRESS:
722 {
723 __be32 addr = in_aton(buf);
724 cxgb3i_set_private_ipv4addr(hba->ndev, addr);
725 return 0;
726 }
727 case ISCSI_HOST_PARAM_HWADDRESS:
728 case ISCSI_HOST_PARAM_NETDEV_NAME:
729 /* ignore */
730 return 0;
731 default:
732 return iscsi_host_set_param(shost, param, buf, buflen);
733 }
734}
735
736/**
737 * cxgb3i_host_get_param - returns host (adapter) related parameters
738 * @shost: scsi host pointer
739 * @param: parameter type identifier
740 * @buf: buffer pointer
741 */
742static int cxgb3i_host_get_param(struct Scsi_Host *shost,
743 enum iscsi_host_param param, char *buf)
744{
745 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
746 int len = 0;
747
748 if (!hba->ndev) {
749 shost_printk(KERN_ERR, shost, "Could not set host param. "
750 "Netdev for host not set.\n");
751 return -ENODEV;
752 }
753
754 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
755
756 switch (param) {
757 case ISCSI_HOST_PARAM_HWADDRESS:
758 len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
759 break;
760 case ISCSI_HOST_PARAM_NETDEV_NAME:
761 len = sprintf(buf, "%s\n", hba->ndev->name);
762 break;
763 case ISCSI_HOST_PARAM_IPADDRESS:
764 {
765 __be32 addr;
766
767 addr = cxgb3i_get_private_ipv4addr(hba->ndev);
768 len = sprintf(buf, "%pI4", &addr);
769 break;
770 }
771 default:
772 return iscsi_host_get_param(shost, param, buf);
773 }
774 return len;
775}
776
777/**
778 * cxgb3i_conn_get_stats - returns iSCSI stats
779 * @cls_conn: pointer to iscsi cls conn
780 * @stats: pointer to iscsi statistic struct
781 */
782static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
783 struct iscsi_stats *stats)
784{
785 struct iscsi_conn *conn = cls_conn->dd_data;
786
787 stats->txdata_octets = conn->txdata_octets;
788 stats->rxdata_octets = conn->rxdata_octets;
789 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
790 stats->dataout_pdus = conn->dataout_pdus_cnt;
791 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
792 stats->datain_pdus = conn->datain_pdus_cnt;
793 stats->r2t_pdus = conn->r2t_pdus_cnt;
794 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
795 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
796 stats->digest_err = 0;
797 stats->timeout_err = 0;
798 stats->custom_length = 1;
799 strcpy(stats->custom[0].desc, "eh_abort_cnt");
800 stats->custom[0].value = conn->eh_abort_cnt;
801}
802
803/**
804 * cxgb3i_parse_itt - get the idx and age bits from a given tag
805 * @conn: iscsi connection
806 * @itt: itt tag
807 * @idx: task index, filled in by this function
808 * @age: session age, filled in by this function
809 */
810static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
811 int *idx, int *age)
812{
813 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
814 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
815 struct cxgb3i_adapter *snic = cconn->hba->snic;
816 u32 tag = ntohl((__force u32) itt);
817 u32 sw_bits;
818
819 sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
820 if (idx)
821 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
822 if (age)
823 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
824
825 cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
826 tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
827 age ? *age : 0xFF);
828}
829
830/**
831 * cxgb3i_reserve_itt - generate tag for a give task
832 * @task: iscsi task
833 * @hdr_itt: tag, filled in by this function
834 * Set up ddp for scsi read tasks if possible.
835 */
836int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
837{
838 struct scsi_cmnd *sc = task->sc;
839 struct iscsi_conn *conn = task->conn;
840 struct iscsi_session *sess = conn->session;
841 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
842 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
843 struct cxgb3i_adapter *snic = cconn->hba->snic;
844 struct cxgb3i_tag_format *tformat = &snic->tag_format;
845 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
846 u32 tag;
847 int err = -EINVAL;
848
849 if (sc &&
850 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
851 cxgb3i_sw_tag_usable(tformat, sw_tag)) {
852 struct s3_conn *c3cn = cconn->cep->c3cn;
853 struct cxgb3i_gather_list *gl;
854
855 gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
856 scsi_in(sc)->table.sgl,
857 scsi_in(sc)->table.nents,
858 snic->pdev,
859 GFP_ATOMIC);
860 if (gl) {
861 tag = sw_tag;
862 err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
863 tformat, &tag,
864 gl, GFP_ATOMIC);
865 if (err < 0)
866 cxgb3i_ddp_release_gl(gl, snic->pdev);
867 }
868 }
869
870 if (err < 0)
871 tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
872 /* the itt need to sent in big-endian order */
873 *hdr_itt = (__force itt_t)htonl(tag);
874
875 cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
876 tag, *hdr_itt, task->itt, sess->age);
877 return 0;
878}
879
880/**
881 * cxgb3i_release_itt - release the tag for a given task
882 * @task: iscsi task
883 * @hdr_itt: tag
884 * If the tag is a ddp tag, release the ddp setup
885 */
886void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
887{
888 struct scsi_cmnd *sc = task->sc;
889 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
890 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
891 struct cxgb3i_adapter *snic = cconn->hba->snic;
892 struct cxgb3i_tag_format *tformat = &snic->tag_format;
893 u32 tag = ntohl((__force u32)hdr_itt);
894
895 cxgb3i_tag_debug("release tag 0x%x.\n", tag);
896
897 if (sc &&
898 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
899 cxgb3i_is_ddp_tag(tformat, tag))
900 cxgb3i_ddp_tag_release(snic->tdev, tag);
901}
902
903/**
904 * cxgb3i_host_template -- Scsi_Host_Template structure
905 * used when registering with the scsi mid layer
906 */
907static struct scsi_host_template cxgb3i_host_template = {
908 .module = THIS_MODULE,
909 .name = "Chelsio S3xx iSCSI Initiator",
910 .proc_name = "cxgb3i",
911 .queuecommand = iscsi_queuecommand,
912 .change_queue_depth = iscsi_change_queue_depth,
913 .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
914 .sg_tablesize = SG_ALL,
915 .max_sectors = 0xFFFF,
916 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
917 .eh_abort_handler = iscsi_eh_abort,
918 .eh_device_reset_handler = iscsi_eh_device_reset,
919 .eh_target_reset_handler = iscsi_eh_recover_target,
920 .target_alloc = iscsi_target_alloc,
921 .use_clustering = DISABLE_CLUSTERING,
922 .this_id = -1,
923};
924
/*
 * iSCSI transport template: mostly generic libiscsi/libiscsi_tcp entry
 * points, with cxgb3i-specific hooks wherever the offloaded connection,
 * ddp tags or adapter limits are involved.
 */
static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "cxgb3i",
	/* digest and padding are computed/stripped by the adapter */
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				ISCSI_MAX_XMIT_DLENGTH |
				ISCSI_HDRDGST_EN |
				ISCSI_DATADGST_EN |
				ISCSI_INITIAL_R2T_EN |
				ISCSI_MAX_R2T |
				ISCSI_IMM_DATA_EN |
				ISCSI_FIRST_BURST |
				ISCSI_MAX_BURST |
				ISCSI_PDU_INORDER_EN |
				ISCSI_DATASEQ_INORDER_EN |
				ISCSI_ERL |
				ISCSI_CONN_PORT |
				ISCSI_CONN_ADDRESS |
				ISCSI_EXP_STATSN |
				ISCSI_PERSISTENT_PORT |
				ISCSI_PERSISTENT_ADDRESS |
				ISCSI_TARGET_NAME | ISCSI_TPGT |
				ISCSI_USERNAME | ISCSI_PASSWORD |
				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
				ISCSI_PING_TMO | ISCSI_RECV_TMO |
				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
	.get_host_param		= cxgb3i_host_get_param,
	.set_host_param		= cxgb3i_host_set_param,
	/* session management */
	.create_session		= cxgb3i_session_create,
	.destroy_session	= cxgb3i_session_destroy,
	.get_session_param	= iscsi_session_get_param,
	/* connection management */
	.create_conn		= cxgb3i_conn_create,
	.bind_conn		= cxgb3i_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_teardown,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.get_conn_param		= cxgb3i_conn_get_param,
	.set_param		= cxgb3i_conn_set_param,
	.get_stats		= cxgb3i_conn_get_stats,
	/* pdu xmit req. from user space */
	.send_pdu		= iscsi_conn_send_pdu,
	/* task */
	.init_task		= iscsi_tcp_task_init,
	.xmit_task		= iscsi_tcp_task_xmit,
	.cleanup_task		= cxgb3i_conn_cleanup_task,

	/* pdu */
	.alloc_pdu		= cxgb3i_conn_alloc_pdu,
	.init_pdu		= cxgb3i_conn_init_pdu,
	.xmit_pdu		= cxgb3i_conn_xmit_pdu,
	.parse_pdu_itt		= cxgb3i_parse_itt,

	/* TCP connect/disconnect */
	.ep_connect		= cxgb3i_ep_connect,
	.ep_poll		= cxgb3i_ep_poll,
	.ep_disconnect		= cxgb3i_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
992
993int cxgb3i_iscsi_init(void)
994{
995 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
996 sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
997 cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
998 ISCSI_ITT_MASK, sw_tag_idx_bits,
999 ISCSI_AGE_MASK, sw_tag_age_bits);
1000
1001 cxgb3i_scsi_transport =
1002 iscsi_register_transport(&cxgb3i_iscsi_transport);
1003 if (!cxgb3i_scsi_transport) {
1004 cxgb3i_log_error("Could not register cxgb3i transport.\n");
1005 return -ENODEV;
1006 }
1007 cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
1008 return 0;
1009}
1010
1011void cxgb3i_iscsi_cleanup(void)
1012{
1013 if (cxgb3i_scsi_transport) {
1014 cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
1015 cxgb3i_scsi_transport);
1016 iscsi_unregister_transport(&cxgb3i_iscsi_transport);
1017 }
1018}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
deleted file mode 100644
index 3ee13cf9556b..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ /dev/null
@@ -1,1944 +0,0 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/if_vlan.h>
16#include <linux/slab.h>
17#include <linux/version.h>
18
19#include "cxgb3_defs.h"
20#include "cxgb3_ctl_defs.h"
21#include "firmware_exports.h"
22#include "cxgb3i_offload.h"
23#include "cxgb3i_pdu.h"
24#include "cxgb3i_ddp.h"
25
/* per-area debug macros: each compiles down to nothing unless the
 * corresponding __DEBUG_C3CN_*__ symbol is defined at build time */
#ifdef __DEBUG_C3CN_CONN__
#define c3cn_conn_debug cxgb3i_log_debug
#else
#define c3cn_conn_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_TX__
#define c3cn_tx_debug cxgb3i_log_debug
#else
#define c3cn_tx_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_RX__
#define c3cn_rx_debug cxgb3i_log_debug
#else
#define c3cn_rx_debug(fmt...)
#endif
43
44/*
45 * module parameters releated to offloaded iscsi connection
46 */
47static int cxgb3_rcv_win = 256 * 1024;
48module_param(cxgb3_rcv_win, int, 0644);
49MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
50
51static int cxgb3_snd_win = 128 * 1024;
52module_param(cxgb3_snd_win, int, 0644);
53MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
54
55static int cxgb3_rx_credit_thres = 10 * 1024;
56module_param(cxgb3_rx_credit_thres, int, 0644);
57MODULE_PARM_DESC(rx_credit_thres,
58 "RX credits return threshold in bytes (default=10KB)");
59
60static unsigned int cxgb3_max_connect = 8 * 1024;
61module_param(cxgb3_max_connect, uint, 0644);
62MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8092)");
63
64static unsigned int cxgb3_sport_base = 20000;
65module_param(cxgb3_sport_base, uint, 0644);
66MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
67
68/*
69 * cxgb3i tcp connection data(per adapter) list
70 */
71static LIST_HEAD(cdata_list);
72static DEFINE_RWLOCK(cdata_rwlock);
73
74static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
75static void c3cn_release_offload_resources(struct s3_conn *c3cn);
76
77/*
78 * iscsi source port management
79 *
80 * Find a free source port in the port allocation map. We use a very simple
81 * rotor scheme to look for the next free port.
82 *
83 * If a source port has been specified make sure that it doesn't collide with
84 * our normal source port allocation map. If it's outside the range of our
85 * allocation/deallocation scheme just let them use it.
86 *
87 * If the source port is outside our allocation range, the caller is
88 * responsible for keeping track of their port usage.
89 */
90static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
91{
92 unsigned int start;
93 int idx;
94
95 if (!cdata)
96 goto error_out;
97
98 if (c3cn->saddr.sin_port) {
99 cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n",
100 c3cn->saddr.sin_port);
101 return -EADDRINUSE;
102 }
103
104 spin_lock_bh(&cdata->lock);
105 start = idx = cdata->sport_next;
106 do {
107 if (++idx >= cxgb3_max_connect)
108 idx = 0;
109 if (!cdata->sport_conn[idx]) {
110 c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
111 cdata->sport_next = idx;
112 cdata->sport_conn[idx] = c3cn;
113 spin_unlock_bh(&cdata->lock);
114
115 c3cn_conn_debug("%s reserve port %u.\n",
116 cdata->cdev->name,
117 cxgb3_sport_base + idx);
118 return 0;
119 }
120 } while (idx != start);
121 spin_unlock_bh(&cdata->lock);
122
123error_out:
124 return -EADDRNOTAVAIL;
125}
126
127static void c3cn_put_port(struct s3_conn *c3cn)
128{
129 if (!c3cn->cdev)
130 return;
131
132 if (c3cn->saddr.sin_port) {
133 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
134 int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
135
136 c3cn->saddr.sin_port = 0;
137 if (idx < 0 || idx >= cxgb3_max_connect)
138 return;
139 spin_lock_bh(&cdata->lock);
140 cdata->sport_conn[idx] = NULL;
141 spin_unlock_bh(&cdata->lock);
142 c3cn_conn_debug("%s, release port %u.\n",
143 cdata->cdev->name, cxgb3_sport_base + idx);
144 }
145}
146
/* set a connection flag bit (non-atomic; caller provides serialization) */
static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__set_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}
153
/* clear a connection flag bit (non-atomic; caller provides serialization) */
static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__clear_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}
160
161static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
162{
163 if (c3cn == NULL)
164 return 0;
165 return test_bit(flag, &c3cn->flags);
166}
167
/* move the connection to a new state (logging the transition) */
static void c3cn_set_state(struct s3_conn *c3cn, int state)
{
	c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
	c3cn->state = state;
}
173
/* take a reference on the connection */
static inline void c3cn_hold(struct s3_conn *c3cn)
{
	atomic_inc(&c3cn->refcnt);
}
178
/* drop a reference; frees the connection when the last one goes away */
static inline void c3cn_put(struct s3_conn *c3cn)
{
	if (atomic_dec_and_test(&c3cn->refcnt)) {
		c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
				c3cn, c3cn->state, c3cn->flags);
		kfree(c3cn);
	}
}
187
/*
 * Final teardown of an offloaded connection: give the source port back,
 * release h/w offload resources, mark the conn CLOSED and notify the
 * iscsi layer. Order matters: resources are released before the state
 * change and the upcall.
 */
static void c3cn_closed(struct s3_conn *c3cn)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			 c3cn, c3cn->state, c3cn->flags);

	c3cn_put_port(c3cn);
	c3cn_release_offload_resources(c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	cxgb3i_conn_closing(c3cn);
}
198
199/*
200 * CPL (Chelsio Protocol Language) defines a message passing interface between
201 * the host driver and T3 asic.
202 * The section below implments CPLs that related to iscsi tcp connection
203 * open/close/abort and data send/receive.
204 */
205
206/*
207 * CPL connection active open request: host ->
208 */
209static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
210{
211 int i = 0;
212
213 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
214 ++i;
215 return i;
216}
217
/*
 * Pick the adapter mtu-table index matching the connection's advertised
 * mss, clamped to the path mtu and the smallest table entry.
 * The constant 40 accounts for header overhead on top of the mss —
 * presumably 20 bytes IP + 20 bytes TCP; confirm against t3 usage.
 */
static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = c3cn->dst_cache;
	struct t3cdev *cdev = c3cn->cdev;
	const struct t3c_data *td = T3C_DATA(cdev);
	u16 advmss = dst_metric(dst, RTAX_ADVMSS);

	/* clamp: no larger than the path allows ... */
	if (advmss > pmtu - 40)
		advmss = pmtu - 40;
	/* ... and no smaller than the smallest table entry supports */
	if (advmss < td->mtus[0] - 40)
		advmss = td->mtus[0] - 40;
	idx = find_best_mtu(td, advmss + 40);
	return idx;
}
233
/*
 * Smallest TCP window-scale shift such that 65535 << wscale covers the
 * window, capped at the protocol maximum of 14.
 */
static inline int compute_wscale(int win)
{
	int wscale;

	for (wscale = 0; wscale < 14 && (65535 << wscale) < win; wscale++)
		;
	return wscale;
}
241
/* high word of CPL opt0: keepalive, tcam bypass, window scale, mss index */
static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
{
	int wscale = compute_wscale(cxgb3_rcv_win);
	return V_KEEP_ALIVE(1) |
		F_TCAM_BYPASS |
		V_WND_SCALE(wscale) |
		V_MSS_IDX(c3cn->mss_idx);
}
250
/* low word of CPL opt0: iscsi ulp mode and rcv buffer size in KB units */
static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
{
	return V_ULP_MODE(ULP_MODE_ISCSI) |
		V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
}
256
/*
 * Build a CPL_ACT_OPEN_REQ in @skb from the connection's 4-tuple, the
 * allocated active-open tid (@atid) and the resolved l2t entry (@e).
 */
static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
			      unsigned int atid, const struct l2t_entry *e)
{
	struct cpl_act_open_req *req;

	c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
	/* 4-tuple is kept in network byte order in the conn already */
	req->local_port = c3cn->saddr.sin_port;
	req->peer_port = c3cn->daddr.sin_port;
	req->local_ip = c3cn->saddr.sin_addr.s_addr;
	req->peer_ip = c3cn->daddr.sin_addr.s_addr;
	req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
			   V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(calc_opt0l(c3cn));
	req->params = 0;
	req->opt2 = 0;
}
279
/* record the failure reason and run the full close path */
static void fail_act_open(struct s3_conn *c3cn, int errno)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	c3cn->err = errno;
	c3cn_closed(c3cn);
}
287
/*
 * ARP resolution failed for the active-open request skb: fail the
 * connection if it is still connecting, and free the unsent skb.
 * A temporary reference is held so the conn cannot be freed under us
 * while we take its lock.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct s3_conn *c3cn = (struct s3_conn *)skb->sk;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);
	if (c3cn->state == C3CN_STATE_CONNECTING)
		fail_act_open(c3cn, -EHOSTUNREACH);
	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
	__kfree_skb(skb);
}
302
303/*
304 * CPL connection close request: host ->
305 *
306 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
307 * the write queue (i.e., after any unsent txt data).
308 */
/* stamp the skb with the conn's next tx seq # and the tx flags, then
 * append it to the write queue (sent later by the push path) */
static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
		       int flags)
{
	skb_tcp_seq(skb) = c3cn->write_seq;
	skb_flags(skb) = flags;
	__skb_queue_tail(&c3cn->write_queue, skb);
}
316
/*
 * Queue a CPL_CLOSE_CON_REQ after any pending tx data. The skb was
 * preallocated (c3cn->cpl_close) so this path cannot fail on OOM; the
 * pointer is cleared so the request is sent at most once.
 */
static void send_close_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = c3cn->tid;

	c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->cpl_close = NULL;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(c3cn->write_seq);

	/* behind all unsent data; nothing may be appended after it */
	skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
	if (c3cn->state != C3CN_STATE_CONNECTING)
		c3cn_push_tx_frames(c3cn, 1);
}
337
338/*
339 * CPL connection abort request: host ->
340 *
341 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
342 * for the same connection and also that we do not try to send a message
343 * after the connection has closed.
344 */
/* ARP failed for an abort request: downgrade it to "no RST" and send it
 * through the offload path anyway so the tid still gets cleaned up */
static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	c3cn_conn_debug("tdev 0x%p.\n", cdev);

	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(cdev, skb);
}
354
355static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
356{
357 struct sk_buff *skb;
358
359 while ((skb = __skb_dequeue(&c3cn->write_queue)))
360 __kfree_skb(skb);
361}
362
/*
 * Send a CPL_ABORT_REQ for the connection. Idempotent: the ABORTING
 * state check and the preallocated-skb pointer being cleared guarantee
 * at most one request per connection, and nothing further is sent
 * after the write queue has been purged.
 */
static void send_abort_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_abort_req;
	struct cpl_abort_req *req;
	unsigned int tid = c3cn->tid;

	if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
	    !c3cn->cdev)
		return;

	c3cn_set_state(c3cn, C3CN_STATE_ABORTING);

	c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);

	/* an ABORT_RPL from the card is now expected */
	c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);

	/* Purge the send queue so we don't send anything after an abort. */
	c3cn_purge_write_queue(c3cn);

	c3cn->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	memset(req, 0, sizeof(*req));

	skb->priority = CPL_PRIORITY_DATA;
	/* if ARP fails, still deliver the abort (without RST) */
	set_arp_failure_handler(skb, abort_arp_failure);

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(c3cn->snd_nxt);
	req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	l2t_send(c3cn->cdev, skb, c3cn->l2t);
}
398
399/*
400 * CPL connection abort reply: host ->
401 *
402 * Send an ABORT_RPL message in response of the ABORT_REQ received.
403 */
/*
 * Send a CPL_ABORT_RPL (using the preallocated skb, sent at most once)
 * acknowledging a peer-initiated abort with the given @rst_status.
 */
static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
{
	struct sk_buff *skb = c3cn->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	c3cn->cpl_abort_rpl = NULL;

	skb->priority = CPL_PRIORITY_DATA;
	memset(rpl, 0, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
	rpl->cmd = rst_status;

	cxgb3_ofld_send(c3cn->cdev, skb);
}
420
421/*
422 * CPL connection rx data ack: host ->
423 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
424 * credits sent.
425 */
426static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
427{
428 struct sk_buff *skb;
429 struct cpl_rx_data_ack *req;
430
431 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
432 if (!skb)
433 return 0;
434
435 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
436 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
437 req->wr.wr_lo = 0;
438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
439 req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
440 skb->priority = CPL_PRIORITY_ACK;
441 cxgb3_ofld_send(c3cn->cdev, skb);
442 return credits;
443}
444
445/*
446 * CPL connection tx data: host ->
447 *
448 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
449 * credits sent.
450 * Each TX_DATA consumes work request credit (wrs), so we need to keep track of
451 * how many we've used so far and how many are pending (i.e., yet ack'ed by T3).
452 */
453
454/*
455 * For ULP connections HW may inserts digest bytes into the pdu. Those digest
456 * bytes are not sent by the host but are part of the TCP payload and therefore
457 * consume TCP sequence space.
458 */
459static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
460static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
461{
462 return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
463}
464
/* full work-request size in bytes; set once by s3_init_wr_tab() */
static unsigned int wrlen __read_mostly;

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
475
476static void s3_init_wr_tab(unsigned int wr_len)
477{
478 int i;
479
480 if (skb_wrs[1]) /* already initialized */
481 return;
482
483 for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
484 int sgl_len = (3 * i) / 2 + (i & 1);
485
486 sgl_len += 3;
487 skb_wrs[i] = (sgl_len <= wr_len
488 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
489 }
490
491 wrlen = wr_len * 8;
492}
493
494static inline void reset_wr_list(struct s3_conn *c3cn)
495{
496 c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
497}
498
499/*
500 * Add a WR to a connections's list of pending WRs. This is a singly-linked
501 * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
502 * and the tail in wr_pending_tail.
503 */
static inline void enqueue_wr(struct s3_conn *c3cn,
			      struct sk_buff *skb)
{
	/* this skb becomes the new FIFO tail */
	skb_tx_wr_next(skb) = NULL;

	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!c3cn->wr_pending_head)
		c3cn->wr_pending_head = skb;
	else
		skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
	c3cn->wr_pending_tail = skb;
}
523
524static int count_pending_wrs(struct s3_conn *c3cn)
525{
526 int n = 0;
527 const struct sk_buff *skb = c3cn->wr_pending_head;
528
529 while (skb) {
530 n += skb->csum;
531 skb = skb_tx_wr_next(skb);
532 }
533 return n;
534}
535
/* Return the oldest pending WR without removing it from the FIFO. */
static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
{
	return c3cn->wr_pending_head;
}
540
/* Drop one reference on a WR skb (see the extra ref taken in enqueue_wr). */
static inline void free_wr_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}
545
546static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
547{
548 struct sk_buff *skb = c3cn->wr_pending_head;
549
550 if (likely(skb)) {
551 /* Don't bother clearing the tail */
552 c3cn->wr_pending_head = skb_tx_wr_next(skb);
553 skb_tx_wr_next(skb) = NULL;
554 }
555 return skb;
556}
557
558static void purge_wr_queue(struct s3_conn *c3cn)
559{
560 struct sk_buff *skb;
561 while ((skb = dequeue_wr(c3cn)) != NULL)
562 free_wr_skb(skb);
563}
564
/*
 * Prepend a TX_DATA work-request header to @skb.  @len is the payload length
 * including any HW ULP (digest) additions; @req_completion asks the HW to
 * post a WR_ACK when this WR completes.
 */
static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;

	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
	req->sndseq = htonl(c3cn->snd_nxt);
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));

	/* first TX_DATA on the tid carries extra initialization fields */
	if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(c3cn->qset));
		/* Sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
		c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
	}
}
591
592/**
593 * c3cn_push_tx_frames -- start transmit
594 * @c3cn: the offloaded connection
595 * @req_completion: request wr_ack or not
596 *
597 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
598 * connection's send queue and sends them on to T3. Must be called with the
599 * connection's lock held. Returns the amount of send buffer space that was
600 * freed as a result of sending queued data to T3.
601 */
/* ARP failure handler for data skbs: just drop the packet. */
static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
{
	kfree_skb(skb);
}
606
static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;
	struct t3cdev *cdev;
	struct cxgb3i_sdev_data *cdata;

	/* do not transmit while connecting, half-closed, or aborting/closed */
	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
		     c3cn->state >= C3CN_STATE_ABORTING)) {
		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
			      c3cn, c3cn->state);
		return 0;
	}

	cdev = c3cn->cdev;
	cdata = CXGB3_SDEV_DATA(cdev);

	/* drain the write queue while WR credits last */
	while (c3cn->wr_avail
	       && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		/* small payload fits in an immediate-data WR */
		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (c3cn->wr_avail < wrs_needed) {
			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
				      "wr %d < %u.\n",
				      c3cn, skb->len, skb->data_len, frags,
				      wrs_needed, c3cn->wr_avail);
			break;
		}

		__skb_unlink(skb, &c3cn->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		c3cn->wr_avail -= wrs_needed;
		c3cn->wr_unacked += wrs_needed;
		enqueue_wr(c3cn, skb);

		c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
				"wr %d, left %u, unack %u.\n",
				c3cn, skb->len, skb->data_len, frags,
				wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);


		if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
			/* force a completion when half the WR budget is unacked */
			if ((req_completion &&
			     c3cn->wr_unacked == wrs_needed) ||
			    (skb_flags(skb) & C3CB_FLAG_COMPL) ||
			    c3cn->wr_unacked >= c3cn->wr_max / 2) {
				req_completion = 1;
				c3cn->wr_unacked = 0;
			}
			/* account HW-inserted digest bytes in the seq space */
			len += ulp_extra_len(skb);
			make_tx_data_wr(c3cn, skb, len, req_completion);
			c3cn->snd_nxt += len;
			skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
		}

		total_size += skb->truesize;
		set_arp_failure_handler(skb, arp_failure_discard);
		l2t_send(cdev, skb, c3cn->l2t);
	}
	return total_size;
}
677
678/*
679 * process_cpl_msg: -> host
680 * Top-level CPL message processing used by most CPL messages that
681 * pertain to connections.
682 */
/* Run a CPL handler with the connection lock held. */
static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
					      struct sk_buff *),
				   struct s3_conn *c3cn,
				   struct sk_buff *skb)
{
	spin_lock_bh(&c3cn->lock);
	fn(c3cn, skb);
	spin_unlock_bh(&c3cn->lock);
}
692
693/*
694 * process_cpl_msg_ref: -> host
695 * Similar to process_cpl_msg() but takes an extra connection reference around
696 * the call to the handler. Should be used if the handler may drop a
697 * connection reference.
698 */
/*
 * Like process_cpl_msg() but pins the connection with an extra reference
 * across the handler call, in case the handler drops one.
 */
static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
						   struct sk_buff *),
				       struct s3_conn *c3cn,
				       struct sk_buff *skb)
{
	c3cn_hold(c3cn);
	process_cpl_msg(fn, c3cn, skb);
	c3cn_put(c3cn);
}
708
709/*
710 * Process a CPL_ACT_ESTABLISH message: -> host
711 * Updates connection state from an active establish CPL message. Runs with
712 * the connection lock held.
713 */
714
/* Release an active-open tid and the connection reference it pinned. */
static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
{
	struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);

	if (!c3cn)
		return;
	c3cn_put(c3cn);
}
721
/* Initialize send-side sequence state from the hardware's initial seq #. */
static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
			     unsigned int opt)
{
	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;

	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
		c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);

	dst_confirm(c3cn->dst_cache);

	/* ensure the seq-state writes above are visible before the new state */
	smp_mb();

	c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
}
742
/* Handle CPL_ACT_ESTABLISH under the connection lock: connection is up. */
static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
		cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
				 c3cn->tid, c3cn->state);

	c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
	c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	__kfree_skb(skb);

	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(c3cn);
	else {
		/* kick any data that queued up while we were connecting */
		if (skb_queue_len(&c3cn->write_queue))
			c3cn_push_tx_frames(c3cn, 1);
		cxgb3i_conn_tx_open(c3cn);
	}
}
769
/* CPL_ACT_ESTABLISH dispatch: swap the atid for the real tid, then process. */
static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct s3_conn *c3cn = ctx;
	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);

	c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
			tid, c3cn, c3cn->state, c3cn->flags);

	/* take a ref for the tid table, then drop the atid and its ref */
	c3cn->tid = tid;
	c3cn_hold(c3cn);
	cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
	s3_free_atid(cdev, atid);

	c3cn->qset = G_QNUM(ntohl(skb->csum));

	process_cpl_msg(process_act_establish, c3cn, skb);
	return 0;
}
792
793/*
794 * Process a CPL_ACT_OPEN_RPL message: -> host
795 * Handle active open failures.
796 */
/* Map a CPL_ACT_OPEN_RPL status code to a negative errno. */
static int act_open_rpl_status_to_errno(int status)
{
	int err;

	switch (status) {
	case CPL_ERR_CONN_RESET:
		err = -ECONNREFUSED;
		break;
	case CPL_ERR_ARP_MISS:
		err = -EHOSTUNREACH;
		break;
	case CPL_ERR_CONN_TIMEDOUT:
		err = -ETIMEDOUT;
		break;
	case CPL_ERR_TCAM_FULL:
		err = -ENOMEM;
		break;
	case CPL_ERR_CONN_EXIST:
		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
		err = -EADDRINUSE;
		break;
	default:
		err = -EIO;
		break;
	}
	return err;
}
815
/*
 * Timer callback: retry an active open that failed with CPL_ERR_CONN_EXIST.
 * The final c3cn_put() pairs with the c3cn_hold() taken when the timer was
 * armed in process_act_open_rpl().
 */
static void act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct s3_conn *c3cn = (struct s3_conn *)data;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	spin_lock_bh(&c3cn->lock);
	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
	if (!skb)
		fail_act_open(c3cn, -ENOMEM);
	else {
		skb->sk = (struct sock *)c3cn;
		set_arp_failure_handler(skb, act_open_req_arp_failure);
		make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
		l2t_send(c3cn->cdev, skb, c3cn->l2t);
	}
	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
836
/* Handle CPL_ACT_OPEN_RPL: retry on 4-tuple collision, else fail the open. */
static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    c3cn->retry_timer.function != act_open_retry_timer) {
		/* arm a half-second retry; the mod_timer==0 case means the
		 * timer was not pending, so take a ref for the callback */
		c3cn->retry_timer.function = act_open_retry_timer;
		if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
			c3cn_hold(c3cn);
	} else
		fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
	__kfree_skb(skb);
}
853
854static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
855{
856 struct s3_conn *c3cn = ctx;
857 struct cpl_act_open_rpl *rpl = cplhdr(skb);
858
859 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
860 rpl->status, c3cn, c3cn->state, c3cn->flags);
861
862 if (rpl->status != CPL_ERR_TCAM_FULL &&
863 rpl->status != CPL_ERR_CONN_EXIST &&
864 rpl->status != CPL_ERR_ARP_MISS)
865 cxgb3_queue_tid_release(cdev, GET_TID(rpl));
866
867 process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
868 return 0;
869}
870
871/*
872 * Process PEER_CLOSE CPL messages: -> host
873 * Handle peer FIN.
874 */
/* Handle a peer FIN (CPL_PEER_CLOSE) under the connection lock. */
static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	/* an abort is already in flight; its completion will clean up */
	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ESTABLISHED:
		c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
		break;
	case C3CN_STATE_ACTIVE_CLOSE:
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
		/* both sides have now closed */
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

	cxgb3i_conn_closing(c3cn);
out:
	__kfree_skb(skb);
}
904
/* CPL_PEER_CLOSE dispatch: run the handler with lock and an extra ref. */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	process_cpl_msg_ref(process_peer_close, c3cn, skb);
	return 0;
}
914
915/*
916 * Process CLOSE_CONN_RPL CPL message: -> host
917 * Process a peer ACK to our FIN.
918 */
/* Handle CPL_CLOSE_CON_RPL (peer's ACK of our FIN) under the lock. */
static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->snd_una = ntohl(rpl->snd_nxt) - 1;	/* exclude FIN */

	/* abort already in flight; let its completion clean up */
	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ACTIVE_CLOSE:
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

out:
	kfree_skb(skb);
}
949
/* CPL_CLOSE_CON_RPL dispatch: run the handler with lock and an extra ref. */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
	return 0;
}
961
962/*
963 * Process ABORT_REQ_RSS CPL message: -> host
964 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
965 * request except that we need to reply to it.
966 */
967
968static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
969 int *need_rst)
970{
971 switch (abort_reason) {
972 case CPL_ERR_BAD_SYN: /* fall through */
973 case CPL_ERR_CONN_RESET:
974 return c3cn->state > C3CN_STATE_ESTABLISHED ?
975 -EPIPE : -ECONNRESET;
976 case CPL_ERR_XMIT_TIMEDOUT:
977 case CPL_ERR_PERSIST_TIMEDOUT:
978 case CPL_ERR_FINWAIT2_TIMEDOUT:
979 case CPL_ERR_KEEPALIVE_TIMEDOUT:
980 return -ETIMEDOUT;
981 default:
982 return -EIO;
983 }
984}
985
/*
 * Handle ABORT_REQ_RSS under the lock.  The hardware can deliver the request
 * twice; the first copy only records the event, the second triggers the reply.
 */
static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
{
	int rst_status = CPL_ABORT_NO_RST;
	const struct cpl_abort_req_rss *req = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
		/* first delivery: mark it and wait for the duplicate */
		c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
		c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
		__kfree_skb(skb);
		return;
	}

	c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
	send_abort_rpl(c3cn, rst_status);

	/* if no abort of our own is outstanding, close out the connection */
	if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		c3cn->err =
		    abort_status_to_errno(c3cn, req->status, &rst_status);
		c3cn_closed(c3cn);
	}
}
1010
1011static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1012{
1013 const struct cpl_abort_req_rss *req = cplhdr(skb);
1014 struct s3_conn *c3cn = ctx;
1015
1016 c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
1017 c3cn, c3cn->state, c3cn->flags);
1018
1019 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
1020 req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
1021 __kfree_skb(skb);
1022 return 0;
1023 }
1024
1025 process_cpl_msg_ref(process_abort_req, c3cn, skb);
1026 return 0;
1027}
1028
1029/*
1030 * Process ABORT_RPL_RSS CPL message: -> host
1031 * Process abort replies. We only process these messages if we anticipate
1032 * them as the coordination between SW and HW in this area is somewhat lacking
1033 * and sometimes we get ABORT_RPLs after we are done with the connection that
1034 * originated the ABORT_REQ.
1035 */
/*
 * Handle ABORT_RPL_RSS under the lock.  The hardware sends the reply twice;
 * C3CN_ABORT_RPL_RCVD tracks the first copy, the second completes the abort.
 */
static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
			c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
		else {
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
			/* a simultaneous peer abort should not still be marked */
			if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
				cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
						 c3cn->cdev->name, c3cn->tid);
			c3cn_closed(c3cn);
		}
	}
	__kfree_skb(skb);
}
1055
/* ABORT_RPL_RSS dispatch: filter late/orphan replies, then process. */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
			rpl->status, c3cn, c3cn ? c3cn->state : 0,
			c3cn ? c3cn->flags : 0UL);

	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto discard;

	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (!c3cn)
		goto discard;

	process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
	return 0;

discard:
	__kfree_skb(skb);
	return 0;
}
1091
1092/*
1093 * Process RX_ISCSI_HDR CPL message: -> host
1094 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
1095 * follow after the bhs.
1096 */
/*
 * Handle RX_ISCSI_HDR under the lock: parse the trailing DDP status CPL,
 * record digest/DDP flags on the skb, and queue the PDU for the upper layer.
 * Any parse failure aborts the connection.
 */
static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	/* connection already closing: abort instead of delivering data */
	if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
		if (c3cn->state != C3CN_STATE_ABORTING)
			send_abort_req(c3cn);
		__kfree_skb(skb);
		return;
	}

	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	skb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
				 c3cn->cdev->name, c3cn->tid,
				 skb->len, hdr_len);
		goto abort_conn;
	}

	/* the DDP status CPL is appended at the very end of the skb */
	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0)
		goto abort_conn;

	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
		      skb, skb->len, skb_rx_pdulen(skb), status);

	/* propagate HW digest / padding error bits to the skb */
	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;

	/* payload in the skb (not DDP'ed): account its inner CPL + data */
	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0)
			goto abort_conn;
		data_len = ntohs(data_cpl.len);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;

	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&c3cn->receive_queue, skb);
	cxgb3i_conn_pdu_ready(c3cn);

	return;

abort_conn:
	send_abort_req(c3cn);
	__kfree_skb(skb);
}
1168
/* RX_ISCSI_HDR dispatch: run the PDU handler with the connection lock. */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
	return 0;
}
1176
1177/*
1178 * Process TX_DATA_ACK CPL messages: -> host
1179 * Process an acknowledgment of WR completion. Advance snd_una and send the
1180 * next batch of work requests from the write queue.
1181 */
1182static void check_wr_invariants(struct s3_conn *c3cn)
1183{
1184 int pending = count_pending_wrs(c3cn);
1185
1186 if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
1187 cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
1188 "pending %u, total should be %u\n",
1189 c3cn->tid, c3cn->wr_avail, pending,
1190 c3cn->wr_max);
1191}
1192
/*
 * Handle CPL_WR_ACK under the lock: return the acked credits, retire the
 * corresponding pending WRs, advance snd_una, and restart transmission.
 */
static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	u32 snd_una = ntohl(hdr->snd_una);

	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
			credits, c3cn->wr_avail, c3cn->wr_unacked,
			c3cn->tid, c3cn->state);

	c3cn->wr_avail += credits;
	/* unacked can never exceed the credits currently in flight */
	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;

	/* retire pending WRs covered by the acked credits (count in skb->csum) */
	while (credits) {
		struct sk_buff *p = peek_wr(c3cn);

		if (unlikely(!p)) {
			cxgb3i_log_error("%u WR_ACK credits for TID %u with "
					 "nothing pending, state %u\n",
					 credits, c3cn->tid, c3cn->state);
			break;
		}
		if (unlikely(credits < p->csum)) {
			/* partial ack of the oldest WR: remember the remainder */
			struct tx_data_wr *w = cplhdr(p);
			cxgb3i_log_error("TID %u got %u WR credits need %u, "
					 "len %u, main body %u, frags %u, "
					 "seq # %u, ACK una %u, ACK nxt %u, "
					 "WR_AVAIL %u, WRs pending %u\n",
					 c3cn->tid, credits, p->csum, p->len,
					 p->len - p->data_len,
					 skb_shinfo(p)->nr_frags,
					 ntohl(w->sndseq), snd_una,
					 ntohl(hdr->snd_nxt), c3cn->wr_avail,
					 count_pending_wrs(c3cn) - credits);
			p->csum -= credits;
			break;
		} else {
			dequeue_wr(c3cn);
			credits -= p->csum;
			free_wr_skb(p);
		}
	}

	check_wr_invariants(c3cn);

	/* snd_una must never move backwards */
	if (unlikely(before(snd_una, c3cn->snd_una))) {
		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
				 "snd_una %u\n",
				 c3cn->tid, snd_una, c3cn->snd_una);
		goto out_free;
	}

	if (c3cn->snd_una != snd_una) {
		c3cn->snd_una = snd_una;
		dst_confirm(c3cn->dst_cache);
	}

	/* push more queued data now that credits are back */
	if (skb_queue_len(&c3cn->write_queue)) {
		if (c3cn_push_tx_frames(c3cn, 0))
			cxgb3i_conn_tx_open(c3cn);
	} else
		cxgb3i_conn_tx_open(c3cn);
out_free:
	__kfree_skb(skb);
}
1259
/* CPL_WR_ACK dispatch: run the credit handler with the connection lock. */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	process_cpl_msg(process_wr_ack, c3cn, skb);
	return 0;
}
1267
1268/*
1269 * for each connection, pre-allocate skbs needed for close/abort requests. So
1270 * that we can service the request right away.
1271 */
1272static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
1273{
1274 if (c3cn->cpl_close)
1275 kfree_skb(c3cn->cpl_close);
1276 if (c3cn->cpl_abort_req)
1277 kfree_skb(c3cn->cpl_abort_req);
1278 if (c3cn->cpl_abort_rpl)
1279 kfree_skb(c3cn->cpl_abort_rpl);
1280}
1281
/*
 * Pre-allocate the close/abort CPL skbs for a connection so those requests
 * can be serviced later without allocating memory.  Returns 0 or -ENOMEM;
 * on failure any skbs already allocated are freed.
 */
static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
{
	c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
				    GFP_KERNEL);
	if (!c3cn->cpl_close)
		return -ENOMEM;
	skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));

	c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
					GFP_KERNEL);
	if (!c3cn->cpl_abort_req)
		goto free_cpl_skbs;
	skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));

	c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
					GFP_KERNEL);
	if (!c3cn->cpl_abort_rpl)
		goto free_cpl_skbs;
	skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));

	return 0;

free_cpl_skbs:
	c3cn_free_cpl_skbs(c3cn);
	return -ENOMEM;
}
1308
1309/**
1310 * c3cn_release_offload_resources - release offload resource
1311 * @c3cn: the offloaded iscsi tcp connection.
1312 * Release resources held by an offload connection (TID, L2T entry, etc.)
1313 */
/**
 * c3cn_release_offload_resources - release offload resource
 * @c3cn: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void c3cn_release_offload_resources(struct s3_conn *c3cn)
{
	struct t3cdev *cdev = c3cn->cdev;
	unsigned int tid = c3cn->tid;

	c3cn->qset = 0;
	c3cn_free_cpl_skbs(c3cn);

	/* credits outstanding means WRs are still pending: drop them */
	if (c3cn->wr_avail != c3cn->wr_max) {
		purge_wr_queue(c3cn);
		reset_wr_list(c3cn);
	}

	if (cdev) {
		if (c3cn->l2t) {
			l2t_release(L2DATA(cdev), c3cn->l2t);
			c3cn->l2t = NULL;
		}
		if (c3cn->state == C3CN_STATE_CONNECTING)
			/* we have ATID */
			s3_free_atid(cdev, tid);
		else {
			/* we have TID */
			cxgb3_remove_tid(cdev, (void *)c3cn, tid);
			/* drop the ref taken when the tid was installed */
			c3cn_put(c3cn);
		}
	}

	c3cn->dst_cache = NULL;
	c3cn->cdev = NULL;
}
1345
1346/**
1347 * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
1348 * returns the s3_conn structure allocated.
1349 */
1350struct s3_conn *cxgb3i_c3cn_create(void)
1351{
1352 struct s3_conn *c3cn;
1353
1354 c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
1355 if (!c3cn)
1356 return NULL;
1357
1358 /* pre-allocate close/abort cpl, so we don't need to wait for memory
1359 when close/abort is requested. */
1360 if (c3cn_alloc_cpl_skbs(c3cn) < 0)
1361 goto free_c3cn;
1362
1363 c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
1364
1365 c3cn->flags = 0;
1366 spin_lock_init(&c3cn->lock);
1367 atomic_set(&c3cn->refcnt, 1);
1368 skb_queue_head_init(&c3cn->receive_queue);
1369 skb_queue_head_init(&c3cn->write_queue);
1370 setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
1371 rwlock_init(&c3cn->callback_lock);
1372
1373 return c3cn;
1374
1375free_c3cn:
1376 kfree(c3cn);
1377 return NULL;
1378}
1379
/*
 * Start a host-initiated close.  Unread receive data is discarded; if any
 * was lost the connection is aborted (RST) instead of closed gracefully.
 */
static void c3cn_active_close(struct s3_conn *c3cn)
{
	int data_lost;
	int close_req = 0;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	dst_confirm(c3cn->dst_cache);

	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);

	data_lost = skb_queue_len(&c3cn->receive_queue);
	__skb_queue_purge(&c3cn->receive_queue);

	switch (c3cn->state) {
	case C3CN_STATE_CLOSED:
	case C3CN_STATE_ACTIVE_CLOSE:
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
	case C3CN_STATE_ABORTING:
		/* nothing need to be done */
		break;
	case C3CN_STATE_CONNECTING:
		/* defer until cpl_act_open_rpl or cpl_act_establish */
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
		break;
	case C3CN_STATE_ESTABLISHED:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
		break;
	case C3CN_STATE_PASSIVE_CLOSE:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	}

	if (close_req) {
		if (data_lost)
			/* Unread data was tossed, zap the connection. */
			send_abort_req(c3cn);
		else
			send_close_req(c3cn);
	}

	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
1429
1430/**
1431 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
1432 * resource held
1433 * @c3cn: the iscsi tcp connection
1434 */
void cxgb3i_c3cn_release(struct s3_conn *c3cn)
{
	c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	/* mid-connect: defer the close until the open completes or fails */
	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING))
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
	else if (likely(c3cn->state != C3CN_STATE_CLOSED))
		c3cn_active_close(c3cn);
	/* drop the caller's reference */
	c3cn_put(c3cn);
}
1445
1446static int is_cxgb3_dev(struct net_device *dev)
1447{
1448 struct cxgb3i_sdev_data *cdata;
1449 struct net_device *ndev = dev;
1450
1451 if (dev->priv_flags & IFF_802_1Q_VLAN)
1452 ndev = vlan_dev_real_dev(dev);
1453
1454 write_lock(&cdata_rwlock);
1455 list_for_each_entry(cdata, &cdata_list, list) {
1456 struct adap_ports *ports = &cdata->ports;
1457 int i;
1458
1459 for (i = 0; i < ports->nports; i++)
1460 if (ndev == ports->lldevs[i]) {
1461 write_unlock(&cdata_rwlock);
1462 return 1;
1463 }
1464 }
1465 write_unlock(&cdata_rwlock);
1466 return 0;
1467}
1468
1469/**
1470 * cxgb3_egress_dev - return the cxgb3 egress device
1471 * @root_dev: the root device anchoring the search
1472 * @c3cn: the connection used to determine egress port in bonding mode
1473 * @context: in bonding mode, indicates a connection set up or failover
1474 *
1475 * Return egress device or NULL if the egress device isn't one of our ports.
1476 */
1477static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1478 struct s3_conn *c3cn,
1479 int context)
1480{
1481 while (root_dev) {
1482 if (root_dev->priv_flags & IFF_802_1Q_VLAN)
1483 root_dev = vlan_dev_real_dev(root_dev);
1484 else if (is_cxgb3_dev(root_dev))
1485 return root_dev;
1486 else
1487 return NULL;
1488 }
1489 return NULL;
1490}
1491
/*
 * Look up the IPv4 route for the given 4-tuple (and optional egress device).
 * Returns the routing entry or NULL if no route exists.
 */
static struct rtable *find_route(struct net_device *dev,
				 __be32 saddr, __be32 daddr,
				 __be16 sport, __be16 dport)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = dev ? dev->ifindex : 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = daddr,
				   .saddr = saddr,
				   .tos = 0 } },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = sport,
				    .dport = dport } } };

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}
1514
1515/*
1516 * Assign offload parameters to some connection fields.
1517 */
1518static void init_offload_conn(struct s3_conn *c3cn,
1519 struct t3cdev *cdev,
1520 struct dst_entry *dst)
1521{
1522 BUG_ON(c3cn->cdev != cdev);
1523 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
1524 c3cn->wr_unacked = 0;
1525 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
1526
1527 reset_wr_list(c3cn);
1528}
1529
/*
 * initiate_act_open - allocate the h/w resources (atid, L2T entry, CPL
 * skb) for a connection and send the active-open request.
 * Returns 0 when the request was handed to the adapter, -EINVAL after
 * unwinding any partially-acquired resources.
 */
static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
{
	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
	struct t3cdev *cdev = cdata->cdev;
	struct dst_entry *dst = c3cn->dst_cache;
	struct sk_buff *skb;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	/*
	 * Initialize connection data. Note that the flags and ULP mode are
	 * initialized higher up ...
	 */
	c3cn->dev = dev;
	c3cn->cdev = cdev;
	/* h/w connection id; negative means the adapter is out of atids */
	c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
	if (c3cn->tid < 0)
		goto out_err;

	c3cn->qset = 0;
	/* L2T entry resolves the next hop for offloaded packets */
	c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
	if (!c3cn->l2t)
		goto free_tid;

	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
	if (!skb)
		goto free_l2t;

	/* stash the connection in skb->sk so the ARP failure handler
	 * can recover it */
	skb->sk = (struct sock *)c3cn;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	/* hold a reference for the in-flight active-open request */
	c3cn_hold(c3cn);

	init_offload_conn(c3cn, cdev, dst);
	c3cn->err = 0;

	make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
	l2t_send(cdev, skb, c3cn->l2t);
	return 0;

	/* unwind in reverse order of acquisition */
free_l2t:
	l2t_release(L2DATA(cdev), c3cn->l2t);
free_tid:
	s3_free_atid(cdev, c3cn->tid);
	c3cn->tid = 0;
out_err:
	return -EINVAL;
}
1578
1579/**
1580 * cxgb3i_find_dev - find the interface associated with the given address
1581 * @ipaddr: ip address
1582 */
1583static struct net_device *
1584cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
1585{
1586 struct flowi fl;
1587 int err;
1588 struct rtable *rt;
1589
1590 memset(&fl, 0, sizeof(fl));
1591 fl.nl_u.ip4_u.daddr = ipaddr;
1592
1593 err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
1594 if (!err)
1595 return (&rt->dst)->dev;
1596
1597 return NULL;
1598}
1599
1600/**
1601 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
1602 * @c3cn: the iscsi tcp connection
1603 * @usin: destination address
1604 *
1605 * return 0 if active open request is sent, < 0 otherwise.
1606 */
1607int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1608 struct sockaddr_in *usin)
1609{
1610 struct rtable *rt;
1611 struct cxgb3i_sdev_data *cdata;
1612 struct t3cdev *cdev;
1613 __be32 sipv4;
1614 struct net_device *dstdev;
1615 int err;
1616
1617 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1618
1619 if (usin->sin_family != AF_INET)
1620 return -EAFNOSUPPORT;
1621
1622 c3cn->daddr.sin_port = usin->sin_port;
1623 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1624
1625 dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
1626 if (!dstdev || !is_cxgb3_dev(dstdev))
1627 return -ENETUNREACH;
1628
1629 if (dstdev->priv_flags & IFF_802_1Q_VLAN)
1630 dev = dstdev;
1631
1632 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1633 c3cn->daddr.sin_addr.s_addr,
1634 c3cn->saddr.sin_port,
1635 c3cn->daddr.sin_port);
1636 if (rt == NULL) {
1637 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1638 c3cn->daddr.sin_addr.s_addr,
1639 ntohs(c3cn->daddr.sin_port),
1640 dev ? dev->name : "any");
1641 return -ENETUNREACH;
1642 }
1643
1644 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1645 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1646 c3cn->daddr.sin_addr.s_addr,
1647 ntohs(c3cn->daddr.sin_port),
1648 dev ? dev->name : "any");
1649 ip_rt_put(rt);
1650 return -ENETUNREACH;
1651 }
1652
1653 if (!c3cn->saddr.sin_addr.s_addr)
1654 c3cn->saddr.sin_addr.s_addr = rt->rt_src;
1655
1656 /* now commit destination to connection */
1657 c3cn->dst_cache = &rt->dst;
1658
1659 /* try to establish an offloaded connection */
1660 dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
1661 if (dev == NULL) {
1662 c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
1663 return -ENETUNREACH;
1664 }
1665 cdata = NDEV2CDATA(dev);
1666 cdev = cdata->cdev;
1667
1668 /* get a source port if one hasn't been provided */
1669 err = c3cn_get_port(c3cn, cdata);
1670 if (err)
1671 return err;
1672
1673 c3cn_conn_debug("c3cn 0x%p get port %u.\n",
1674 c3cn, ntohs(c3cn->saddr.sin_port));
1675
1676 sipv4 = cxgb3i_get_private_ipv4addr(dev);
1677 if (!sipv4) {
1678 c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
1679 sipv4 = c3cn->saddr.sin_addr.s_addr;
1680 cxgb3i_set_private_ipv4addr(dev, sipv4);
1681 } else
1682 c3cn->saddr.sin_addr.s_addr = sipv4;
1683
1684 c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
1685 c3cn,
1686 &c3cn->saddr.sin_addr.s_addr,
1687 ntohs(c3cn->saddr.sin_port),
1688 &c3cn->daddr.sin_addr.s_addr,
1689 ntohs(c3cn->daddr.sin_port));
1690
1691 c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
1692 if (!initiate_act_open(c3cn, dev))
1693 return 0;
1694
1695 /*
1696 * If we get here, we don't have an offload connection so simply
1697 * return a failure.
1698 */
1699 err = -ENOTSUPP;
1700
1701 /*
1702 * This trashes the connection and releases the local port,
1703 * if necessary.
1704 */
1705 c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
1706 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
1707 ip_rt_put(rt);
1708 c3cn_put_port(c3cn);
1709 return err;
1710}
1711
1712/**
1713 * cxgb3i_c3cn_rx_credits - ack received tcp data.
1714 * @c3cn: iscsi tcp connection
1715 * @copied: # of bytes processed
1716 *
1717 * Called after some received data has been read. It returns RX credits
1718 * to the HW for the amount of data processed.
1719 */
1720void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
1721{
1722 struct t3cdev *cdev;
1723 int must_send;
1724 u32 credits, dack = 0;
1725
1726 if (c3cn->state != C3CN_STATE_ESTABLISHED)
1727 return;
1728
1729 credits = c3cn->copied_seq - c3cn->rcv_wup;
1730 if (unlikely(!credits))
1731 return;
1732
1733 cdev = c3cn->cdev;
1734
1735 if (unlikely(cxgb3_rx_credit_thres == 0))
1736 return;
1737
1738 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
1739
1740 /*
1741 * For coalescing to work effectively ensure the receive window has
1742 * at least 16KB left.
1743 */
1744 must_send = credits + 16384 >= cxgb3_rcv_win;
1745
1746 if (must_send || credits >= cxgb3_rx_credit_thres)
1747 c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
1748}
1749
/**
 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
 * @c3cn: iscsi tcp connection
 * @skb: skb contains the iscsi pdu
 *
 * Add a list of skbs to a connection send queue. The skbs must comply with
 * the max size limit of the device and have a headroom of at least
 * TX_HEADER_LEN bytes.
 * Return # of bytes queued.
 */
int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&c3cn->lock);

	/* only an established connection may queue tx data */
	if (c3cn->state != C3CN_STATE_ESTABLISHED) {
		c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
			      c3cn, c3cn->state);
		err = -EAGAIN;
		goto out_err;
	}

	if (c3cn->err) {
		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
		err = -EPIPE;
		goto out_err;
	}

	/* back off once unacked tx data reaches the send window */
	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
			      c3cn, c3cn->write_seq, c3cn->snd_una,
			      cxgb3_snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	/* validate and queue each skb of the caller's chain */
	while (skb) {
		/* fragment count, including the linear part if non-empty */
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
			c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
					 c3cn, skb_shinfo(skb)->nr_frags,
					 skb->len, skb->data_len);
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
		copied += skb->len;
		c3cn->write_seq += skb->len + ulp_extra_len(skb);
		skb = next;
	}
done:
	/* push whatever is queued — also reached from out_err, so skbs
	 * entailed before a mid-chain failure still go out
	 */
	if (likely(skb_queue_len(&c3cn->write_queue)))
		c3cn_push_tx_frames(c3cn, 1);
	spin_unlock_bh(&c3cn->lock);
	return copied;

out_err:
	/* a broken pipe surfaces the connection's own error code when one
	 * is set; otherwise any partial byte count is discarded in favor
	 * of the error
	 */
	if (copied == 0 && err == -EPIPE)
		copied = c3cn->err ? c3cn->err : -EPIPE;
	else
		copied = err;
	goto done;
}
1826
1827static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
1828{
1829 struct adap_ports *ports = &cdata->ports;
1830 struct s3_conn *c3cn;
1831 int i;
1832
1833 for (i = 0; i < cxgb3_max_connect; i++) {
1834 if (cdata->sport_conn[i]) {
1835 c3cn = cdata->sport_conn[i];
1836 cdata->sport_conn[i] = NULL;
1837
1838 spin_lock_bh(&c3cn->lock);
1839 c3cn->cdev = NULL;
1840 c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN);
1841 c3cn_closed(c3cn);
1842 spin_unlock_bh(&c3cn->lock);
1843 }
1844 }
1845
1846 for (i = 0; i < ports->nports; i++)
1847 NDEV2CDATA(ports->lldevs[i]) = NULL;
1848
1849 cxgb3i_free_big_mem(cdata);
1850}
1851
1852void cxgb3i_sdev_cleanup(void)
1853{
1854 struct cxgb3i_sdev_data *cdata;
1855
1856 write_lock(&cdata_rwlock);
1857 list_for_each_entry(cdata, &cdata_list, list) {
1858 list_del(&cdata->list);
1859 sdev_data_cleanup(cdata);
1860 }
1861 write_unlock(&cdata_rwlock);
1862}
1863
1864int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
1865{
1866 cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
1867 cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
1868 cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
1869 cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
1870 cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
1871 cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
1872 cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
1873 cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
1874
1875 if (cxgb3_max_connect > CXGB3I_MAX_CONN)
1876 cxgb3_max_connect = CXGB3I_MAX_CONN;
1877 return 0;
1878}
1879
1880/**
1881 * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
1882 * @cdev: t3cdev adapter
1883 * @client: cxgb3 driver client
1884 */
1885void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
1886{
1887 struct cxgb3i_sdev_data *cdata;
1888 struct ofld_page_info rx_page_info;
1889 unsigned int wr_len;
1890 int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *);
1891 int i;
1892
1893 cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
1894 if (!cdata) {
1895 cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n",
1896 cdev, mapsize);
1897 return;
1898 }
1899
1900 if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
1901 cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
1902 cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
1903 cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n",
1904 cdev);
1905 goto free_cdata;
1906 }
1907
1908 s3_init_wr_tab(wr_len);
1909
1910 spin_lock_init(&cdata->lock);
1911 INIT_LIST_HEAD(&cdata->list);
1912 cdata->cdev = cdev;
1913 cdata->client = client;
1914
1915 for (i = 0; i < cdata->ports.nports; i++)
1916 NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
1917
1918 write_lock(&cdata_rwlock);
1919 list_add_tail(&cdata->list, &cdata_list);
1920 write_unlock(&cdata_rwlock);
1921
1922 cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev);
1923 return;
1924
1925free_cdata:
1926 cxgb3i_free_big_mem(cdata);
1927}
1928
1929/**
1930 * cxgb3i_sdev_remove - free the allocated resources for the adapter
1931 * @cdev: t3cdev adapter
1932 */
1933void cxgb3i_sdev_remove(struct t3cdev *cdev)
1934{
1935 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
1936
1937 cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev);
1938
1939 write_lock(&cdata_rwlock);
1940 list_del(&cdata->list);
1941 write_unlock(&cdata_rwlock);
1942
1943 sdev_data_cleanup(cdata);
1944}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
deleted file mode 100644
index 6a1d86b1fafe..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ /dev/null
@@ -1,243 +0,0 @@
1/*
2 * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#ifndef _CXGB3I_OFFLOAD_H
16#define _CXGB3I_OFFLOAD_H
17
18#include <linux/skbuff.h>
19#include <linux/in.h>
20
21#include "common.h"
22#include "adapter.h"
23#include "t3cdev.h"
24#include "cxgb3_offload.h"
25
26#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
27#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
28#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
29#define cxgb3i_log_debug(fmt, args...) \
30 printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
31
32/**
33 * struct s3_conn - an iscsi tcp connection structure
34 *
 * @dev: net device associated with the connection
36 * @cdev: adapter t3cdev for net device
37 * @flags: see c3cn_flags below
38 * @tid: connection id assigned by the h/w
39 * @qset: queue set used by connection
40 * @mss_idx: Maximum Segment Size table index
41 * @l2t: ARP resolution entry for offload packets
42 * @wr_max: maximum in-flight writes
43 * @wr_avail: number of writes available
44 * @wr_unacked: writes since last request for completion notification
45 * @wr_pending_head: head of pending write queue
46 * @wr_pending_tail: tail of pending write queue
47 * @cpl_close: skb for cpl_close_req
48 * @cpl_abort_req: skb for cpl_abort_req
49 * @cpl_abort_rpl: skb for cpl_abort_rpl
50 * @lock: connection status lock
51 * @refcnt: reference count on connection
52 * @state: connection state
53 * @saddr: source ip/port address
54 * @daddr: destination ip/port address
55 * @dst_cache: reference to destination route
56 * @receive_queue: received PDUs
57 * @write_queue: un-pushed pending writes
58 * @retry_timer: retry timer for various operations
59 * @err: connection error status
60 * @callback_lock: lock for opaque user context
61 * @user_data: opaque user context
62 * @rcv_nxt: next receive seq. #
63 * @copied_seq: head of yet unread data
64 * @rcv_wup: rcv_nxt on last window update sent
65 * @snd_nxt: next sequence we send
66 * @snd_una: first byte we want an ack for
67 * @write_seq: tail+1 of data held in send buffer
68 */
struct s3_conn {
	struct net_device *dev;
	struct t3cdev *cdev;
	unsigned long flags;
	int tid;
	int qset;
	int mss_idx;
	struct l2t_entry *l2t;
	int wr_max;
	int wr_avail;
	int wr_unacked;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *cpl_close;
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	spinlock_t lock;
	atomic_t refcnt;
	/* NOTE(review): volatile does not provide synchronization; some
	 * paths read state without holding @lock — verify ordering */
	volatile unsigned int state;
	struct sockaddr_in saddr;
	struct sockaddr_in daddr;
	struct dst_entry *dst_cache;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	int err;
	rwlock_t callback_lock;
	void *user_data;

	/* tcp sequence-space bookkeeping (see kerneldoc above) */
	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
};
105
/*
 * connection state
 * The ordering is significant: every state >= C3CN_STATE_ACTIVE_CLOSE
 * counts as "closing" (see c3cn_is_closing()).
 */
enum conn_states {
	C3CN_STATE_CONNECTING = 1,	/* active open sent, awaiting reply */
	C3CN_STATE_ESTABLISHED,		/* connection up, data may flow */
	C3CN_STATE_ACTIVE_CLOSE,	/* local side initiated the close */
	C3CN_STATE_PASSIVE_CLOSE,	/* peer initiated the close */
	C3CN_STATE_CLOSE_WAIT_1,	/* close handshake in progress */
	C3CN_STATE_CLOSE_WAIT_2,	/* close handshake in progress */
	C3CN_STATE_ABORTING,		/* abort in progress */
	C3CN_STATE_CLOSED,		/* fully torn down */
};
119
/* true once the connection has entered any close/abort state
 * (relies on the ordering of enum conn_states) */
static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
{
	return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
}
/* true while the connection is established and may carry data */
static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
{
	return c3cn->state == C3CN_STATE_ESTABLISHED;
}
128
129/*
130 * Connection flags -- many to track some close related events.
131 */
132enum c3cn_flags {
133 C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
134 C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
135 C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
136 C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
137 C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
138 C3CN_OFFLOAD_DOWN /* offload function off */
139};
140
141/**
142 * cxgb3i_sdev_data - Per adapter data.
143 * Linked off of each Ethernet device port on the adapter.
144 * Also available via the t3cdev structure since we have pointers to our port
145 * net_device's there ...
146 *
147 * @list: list head to link elements
148 * @cdev: t3cdev adapter
149 * @client: CPL client pointer
150 * @ports: array of adapter ports
151 * @sport_next: next port
152 * @sport_conn: source port connection
153 */
struct cxgb3i_sdev_data {
	struct list_head list;
	struct t3cdev *cdev;
	struct cxgb3_client *client;
	struct adap_ports ports;
	/* presumably protects sport_next/sport_conn — verify against
	 * c3cn_get_port()/c3cn_put_port() */
	spinlock_t lock;
	unsigned int sport_next;
	/* flexible array sized cxgb3_max_connect at allocation time;
	 * [0] is the pre-C99 flexible-array idiom */
	struct s3_conn *sport_conn[0];
};
163#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
164#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
165
166void cxgb3i_sdev_cleanup(void);
167int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
168void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *);
170
171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
176void cxgb3i_c3cn_release(struct s3_conn *);
177
/**
 * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
 *
 * @flags: see C3CB_FLAG_* below
 * @ulp_mode: ULP mode/submode of sk_buff
 * @seq: tcp sequence number
 */
struct cxgb3_skb_rx_cb {
	__u32 ddigest;			/* data digest */
	__u32 pdulen;			/* recovered pdu length */
};

struct cxgb3_skb_tx_cb {
	struct sk_buff *wr_next;	/* next wr */
};

struct cxgb3_skb_cb {
	__u8 flags;
	__u8 ulp_mode;
	__u32 seq;
	/* rx and tx state share storage: an skb travels either the rx
	 * or the tx path, never both */
	union {
		struct cxgb3_skb_rx_cb rx;
		struct cxgb3_skb_tx_cb tx;
	};
};
203
204#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
205#define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags)
206#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
207#define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq)
208#define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest)
209#define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen)
210#define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next)
211
/* per-skb tx control flags stored in cxgb3_skb_cb.flags */
enum c3cb_flags {
	C3CB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a TX_DATA_WR header */
	C3CB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
	C3CB_FLAG_COMPL = 1 << 2,	/* request WR completion */
};
217
/**
 * sge_opaque_hdr -
 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
 * and for which we must reserve space.
 */
struct sge_opaque_hdr {
	void *dev;
	/* one DMA address per possible fragment plus the linear part */
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
227
228/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
229#define TX_HEADER_LEN \
230 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
231#define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN)
232
233/*
234 * get and set private ip for iscsi traffic
235 */
236#define cxgb3i_get_private_ipv4addr(ndev) \
237 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
238#define cxgb3i_set_private_ipv4addr(ndev, addr) \
239 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
240
241/* max. connections per adapter */
242#define CXGB3I_MAX_CONN 16384
243#endif /* _CXGB3_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
deleted file mode 100644
index dc5e3e77a351..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ /dev/null
@@ -1,495 +0,0 @@
1/*
2 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 * Copyright (c) 2008 Mike Christie
6 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <linux/crypto.h>
18#include <scsi/scsi_cmnd.h>
19#include <scsi/scsi_host.h>
20
21#include "cxgb3i.h"
22#include "cxgb3i_pdu.h"
23
24#ifdef __DEBUG_CXGB3I_RX__
25#define cxgb3i_rx_debug cxgb3i_log_debug
26#else
27#define cxgb3i_rx_debug(fmt...)
28#endif
29
30#ifdef __DEBUG_CXGB3I_TX__
31#define cxgb3i_tx_debug cxgb3i_log_debug
32#else
33#define cxgb3i_tx_debug(fmt...)
34#endif
35
36/* always allocate rooms for AHS */
37#define SKB_TX_PDU_HEADER_LEN \
38 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
39static unsigned int skb_extra_headroom;
40static struct page *pad_page;
41
42/*
43 * pdu receive, interact with libiscsi_tcp
44 */
45static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
46 unsigned int offset, int offloaded)
47{
48 int status = 0;
49 int bytes_read;
50
51 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
52 switch (status) {
53 case ISCSI_TCP_CONN_ERR:
54 return -EIO;
55 case ISCSI_TCP_SUSPENDED:
56 /* no transfer - just have caller flush queue */
57 return bytes_read;
58 case ISCSI_TCP_SKB_DONE:
59 /*
60 * pdus should always fit in the skb and we should get
61 * segment done notifcation.
62 */
63 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
64 return -EFAULT;
65 case ISCSI_TCP_SEGMENT_DONE:
66 return bytes_read;
67 default:
68 iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
69 "status %d\n", status);
70 return -EINVAL;
71 }
72}
73
74static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
75 struct sk_buff *skb)
76{
77 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
78 bool offloaded = 0;
79 unsigned int offset;
80 int rc;
81
82 cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
83 conn, skb, skb->len, skb_ulp_mode(skb));
84
85 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
86 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
87 return -EIO;
88 }
89
90 if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
91 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
92 return -EIO;
93 }
94
95 if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
96 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
97 return -EIO;
98 }
99
100 /* iscsi hdr */
101 rc = read_pdu_skb(conn, skb, 0, 0);
102 if (rc <= 0)
103 return rc;
104
105 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
106 return 0;
107
108 offset = rc;
109 if (conn->hdrdgst_en)
110 offset += ISCSI_DIGEST_SIZE;
111
112 /* iscsi data */
113 if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
114 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
115 "itt 0x%x.\n",
116 skb,
117 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
118 tcp_conn->in.datalen,
119 ntohl(tcp_conn->in.hdr->itt));
120 offloaded = 1;
121 } else {
122 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
123 "itt 0x%x.\n",
124 skb,
125 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
126 tcp_conn->in.datalen,
127 ntohl(tcp_conn->in.hdr->itt));
128 offset += sizeof(struct cpl_iscsi_hdr_norss);
129 }
130
131 rc = read_pdu_skb(conn, skb, offset, offloaded);
132 if (rc < 0)
133 return rc;
134 else
135 return 0;
136}
137
138/*
139 * pdu transmit, interact with libiscsi_tcp
140 */
141static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
142{
143 u8 submode = 0;
144
145 if (hcrc)
146 submode |= 1;
147 if (dcrc)
148 submode |= 2;
149 skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
150}
151
152void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
153{
154 struct cxgb3i_task_data *tdata = task->dd_data +
155 sizeof(struct iscsi_tcp_task);
156
157 /* never reached the xmit task callout */
158 if (tdata->skb)
159 __kfree_skb(tdata->skb);
160 memset(tdata, 0, sizeof(struct cxgb3i_task_data));
161
162 /* MNC - Do we need a check in case this is called but
163 * cxgb3i_conn_alloc_pdu has never been called on the task */
164 cxgb3i_release_itt(task, task->hdr_itt);
165 iscsi_tcp_cleanup_task(task);
166}
167
168static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
169 unsigned int offset, unsigned int *off,
170 struct scatterlist **sgp)
171{
172 int i;
173 struct scatterlist *sg;
174
175 for_each_sg(sgl, sg, sgcnt, i) {
176 if (offset < sg->length) {
177 *off = offset;
178 *sgp = sg;
179 return 0;
180 }
181 offset -= sg->length;
182 }
183 return -EFAULT;
184}
185
/*
 * sgl_read_to_frags - map @dlen bytes of a scatterlist (starting at
 * @sgoffset into @sg) onto skb fragments, coalescing ranges that are
 * contiguous within the same page.
 * Returns the number of frags used, or -EINVAL when the scatterlist is
 * too short or more than @frag_max frags would be needed.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			/* current entry consumed: advance to the next */
			sg = sg_next(sg);
			if (!sg) {
				/* list exhausted before dlen bytes mapped */
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* merge into the previous frag when this chunk continues
		 * it within the same page */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	/* number of frags actually filled in */
	return i;
}
236
/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb for a pdu and point
 * task->hdr at its linear area.  Returns 0 or -ENOMEM.
 */
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	/* the cxgb3i per-task data lives right after libiscsi_tcp's */
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	/* TX_HEADER_LEN is reserved up front for the h/w tx header */
	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	/* the iscsi header is built directly in the skb's linear area */
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
271
/*
 * cxgb3i_conn_init_pdu - attach @count bytes of payload (plus 4-byte
 * padding) to the skb allocated by cxgb3i_conn_alloc_pdu.
 * Returns 0 on success or a negative errno from the sgl mapping.
 */
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			 unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	/* commit the header that alloc_pdu placed in the linear area */
	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		/* find the sg entry holding this pdu's first byte */
		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sdb->table.nents, tdata->offset,
					sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sdb->table.nents, tdata->offset,
					tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		/* too many frags for the skb (or no frag slot left for
		 * the pad): fall back to copying into the headroom */
		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			/* padding was copied inline; zero it out here so
			 * the shared pad page is not appended below */
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		/* immediate data without a scsi command: reference the
		 * page that holds task->data */
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	/* pad the pdu to a 4-byte boundary with the shared zero page */
	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				   padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
372
373int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
374{
375 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
376 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
377 struct iscsi_tcp_task *tcp_task = task->dd_data;
378 struct cxgb3i_task_data *tdata = tcp_task->dd_data;
379 struct sk_buff *skb = tdata->skb;
380 unsigned int datalen;
381 int err;
382
383 if (!skb)
384 return 0;
385
386 datalen = skb->data_len;
387 tdata->skb = NULL;
388 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
389 if (err > 0) {
390 int pdulen = err;
391
392 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
393 task, skb, skb->len, skb->data_len, err);
394
395 if (task->conn->hdrdgst_en)
396 pdulen += ISCSI_DIGEST_SIZE;
397 if (datalen && task->conn->datadgst_en)
398 pdulen += ISCSI_DIGEST_SIZE;
399
400 task->conn->txdata_octets += pdulen;
401 return 0;
402 }
403
404 if (err == -EAGAIN || err == -ENOBUFS) {
405 /* reset skb to send when we are called again */
406 tdata->skb = skb;
407 return err;
408 }
409
410 kfree_skb(skb);
411 cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
412 task->itt, skb, skb->len, skb->data_len, err);
413 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
414 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
415 return err;
416}
417
418int cxgb3i_pdu_init(void)
419{
420 if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
421 skb_extra_headroom = SKB_TX_HEADROOM;
422 pad_page = alloc_page(GFP_KERNEL);
423 if (!pad_page)
424 return -ENOMEM;
425 memset(page_address(pad_page), 0, PAGE_SIZE);
426 return 0;
427}
428
429void cxgb3i_pdu_cleanup(void)
430{
431 if (pad_page) {
432 __free_page(pad_page);
433 pad_page = NULL;
434 }
435}
436
/*
 * cxgb3i_conn_pdu_ready - drain the connection's receive queue, handing
 * each pdu skb to libiscsi_tcp, then return rx credits to the h/w.
 */
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	/* callback_lock guards user_data against concurrent teardown */
	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	/* consume queued pdus, stopping at the first delivery error */
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	/* credit the h/w for everything consumed above */
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	/* NOTE(review): conn is dereferenced after callback_lock is
	 * dropped — verify user_data cannot be cleared concurrently */
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
474
475void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
476{
477 struct iscsi_conn *conn = c3cn->user_data;
478
479 cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
480 if (conn) {
481 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
482 iscsi_conn_queue_work(conn);
483 }
484}
485
/*
 * cxgb3i_conn_closing - offload connection is going down; report a
 * connection failure to libiscsi when the close was not a normal,
 * established-state teardown.
 */
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn;

	/* hold callback_lock so user_data is stable while reporting */
	read_lock(&c3cn->callback_lock);
	conn = c3cn->user_data;
	if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	read_unlock(&c3cn->callback_lock);
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
deleted file mode 100644
index 0770b23d90da..000000000000
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * cxgb3i_ulp2.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_PDU_H__
14#define __CXGB3I_ULP2_PDU_H__
15
16struct cpl_iscsi_hdr_norss {
17 union opcode_tid ot;
18 u16 pdu_len_ddp;
19 u16 len;
20 u32 seq;
21 u16 urg;
22 u8 rsvd;
23 u8 status;
24};
25
26struct cpl_rx_data_ddp_norss {
27 union opcode_tid ot;
28 u16 urg;
29 u16 len;
30 u32 seq;
31 u32 nxt_seq;
32 u32 ulp_crc;
33 u32 ddp_status;
34};
35
36#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
37#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
38#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
39#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
40#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
41#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
42#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
43#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
44#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
45#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
46#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
47#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
48#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
49
50#define ULP2_FLAG_DATA_READY 0x1
51#define ULP2_FLAG_DATA_DDPED 0x2
52#define ULP2_FLAG_HCRC_ERROR 0x10
53#define ULP2_FLAG_DCRC_ERROR 0x20
54#define ULP2_FLAG_PAD_ERROR 0x40
55
56void cxgb3i_conn_closing(struct s3_conn *c3cn);
57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
58void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
59#endif
diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig
new file mode 100644
index 000000000000..17eb5d522f42
--- /dev/null
+++ b/drivers/scsi/cxgbi/Kconfig
@@ -0,0 +1,2 @@
1source "drivers/scsi/cxgbi/cxgb3i/Kconfig"
2source "drivers/scsi/cxgbi/cxgb4i/Kconfig"
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
new file mode 100644
index 000000000000..86007e344955
--- /dev/null
+++ b/drivers/scsi/cxgbi/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/
2obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
index 70d060b7ff4f..09dbf9efc8ea 100644
--- a/drivers/scsi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -1,4 +1,3 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
2 2
3cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
4obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o 3obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index bfdcaf5c9c57..5cf4e9831f1b 100644
--- a/drivers/scsi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,7 +1,7 @@
1config SCSI_CXGB3_ISCSI 1config SCSI_CXGB3_ISCSI
2 tristate "Chelsio S3xx iSCSI support" 2 tristate "Chelsio T3 iSCSI support"
3 depends on CHELSIO_T3_DEPENDS 3 depends on CHELSIO_T3_DEPENDS
4 select CHELSIO_T3 4 select CHELSIO_T3
5 select SCSI_ISCSI_ATTRS 5 select SCSI_ISCSI_ATTRS
6 ---help--- 6 ---help---
7 This driver supports iSCSI offload for the Chelsio S3 series devices. 7 This driver supports iSCSI offload for the Chelsio T3 devices.
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
new file mode 100644
index 000000000000..a129a170b47b
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -0,0 +1,1465 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
16
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <scsi/scsi_host.h>
21
22#include "common.h"
23#include "t3_cpl.h"
24#include "t3cdev.h"
25#include "cxgb3_defs.h"
26#include "cxgb3_ctl_defs.h"
27#include "cxgb3_offload.h"
28#include "firmware_exports.h"
29#include "cxgb3i.h"
30
31static unsigned int dbg_level;
32#include "../libcxgbi.h"
33
34#define DRV_MODULE_NAME "cxgb3i"
35#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver"
36#define DRV_MODULE_VERSION "2.0.0"
37#define DRV_MODULE_RELDATE "Jun. 2010"
38
39static char version[] =
40 DRV_MODULE_DESC " " DRV_MODULE_NAME
41 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42
43MODULE_AUTHOR("Chelsio Communications, Inc.");
44MODULE_DESCRIPTION(DRV_MODULE_DESC);
45MODULE_VERSION(DRV_MODULE_VERSION);
46MODULE_LICENSE("GPL");
47
48module_param(dbg_level, uint, 0644);
49MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");
50
51static int cxgb3i_rcv_win = 256 * 1024;
52module_param(cxgb3i_rcv_win, int, 0644);
53MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");
54
55static int cxgb3i_snd_win = 128 * 1024;
56module_param(cxgb3i_snd_win, int, 0644);
57MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");
58
59static int cxgb3i_rx_credit_thres = 10 * 1024;
60module_param(cxgb3i_rx_credit_thres, int, 0644);
61MODULE_PARM_DESC(rx_credit_thres,
62 "RX credits return threshold in bytes (default=10KB)");
63
64static unsigned int cxgb3i_max_connect = 8 * 1024;
65module_param(cxgb3i_max_connect, uint, 0644);
66MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8092)");
67
68static unsigned int cxgb3i_sport_base = 20000;
69module_param(cxgb3i_sport_base, uint, 0644);
70MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
71
72static void cxgb3i_dev_open(struct t3cdev *);
73static void cxgb3i_dev_close(struct t3cdev *);
74static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);
75
76static struct cxgb3_client t3_client = {
77 .name = DRV_MODULE_NAME,
78 .handlers = cxgb3i_cpl_handlers,
79 .add = cxgb3i_dev_open,
80 .remove = cxgb3i_dev_close,
81 .event_handler = cxgb3i_dev_event_handler,
82};
83
84static struct scsi_host_template cxgb3i_host_template = {
85 .module = THIS_MODULE,
86 .name = DRV_MODULE_NAME,
87 .proc_name = DRV_MODULE_NAME,
88 .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
89 .queuecommand = iscsi_queuecommand,
90 .change_queue_depth = iscsi_change_queue_depth,
91 .sg_tablesize = SG_ALL,
92 .max_sectors = 0xFFFF,
93 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
94 .eh_abort_handler = iscsi_eh_abort,
95 .eh_device_reset_handler = iscsi_eh_device_reset,
96 .eh_target_reset_handler = iscsi_eh_recover_target,
97 .target_alloc = iscsi_target_alloc,
98 .use_clustering = DISABLE_CLUSTERING,
99 .this_id = -1,
100};
101
102static struct iscsi_transport cxgb3i_iscsi_transport = {
103 .owner = THIS_MODULE,
104 .name = DRV_MODULE_NAME,
105 /* owner and name should be set already */
106 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
107 | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
108 CAP_PADDING_OFFLOAD,
109 .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
110 ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
111 ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
112 ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
113 ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
114 ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
115 ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
116 ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
117 ISCSI_PERSISTENT_ADDRESS |
118 ISCSI_TARGET_NAME | ISCSI_TPGT |
119 ISCSI_USERNAME | ISCSI_PASSWORD |
120 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
121 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
122 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
123 ISCSI_PING_TMO | ISCSI_RECV_TMO |
124 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
125 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
126 ISCSI_HOST_INITIATOR_NAME |
127 ISCSI_HOST_NETDEV_NAME,
128 .get_host_param = cxgbi_get_host_param,
129 .set_host_param = cxgbi_set_host_param,
130 /* session management */
131 .create_session = cxgbi_create_session,
132 .destroy_session = cxgbi_destroy_session,
133 .get_session_param = iscsi_session_get_param,
134 /* connection management */
135 .create_conn = cxgbi_create_conn,
136 .bind_conn = cxgbi_bind_conn,
137 .destroy_conn = iscsi_tcp_conn_teardown,
138 .start_conn = iscsi_conn_start,
139 .stop_conn = iscsi_conn_stop,
140 .get_conn_param = cxgbi_get_conn_param,
141 .set_param = cxgbi_set_conn_param,
142 .get_stats = cxgbi_get_conn_stats,
143 /* pdu xmit req from user space */
144 .send_pdu = iscsi_conn_send_pdu,
145 /* task */
146 .init_task = iscsi_tcp_task_init,
147 .xmit_task = iscsi_tcp_task_xmit,
148 .cleanup_task = cxgbi_cleanup_task,
149 /* pdu */
150 .alloc_pdu = cxgbi_conn_alloc_pdu,
151 .init_pdu = cxgbi_conn_init_pdu,
152 .xmit_pdu = cxgbi_conn_xmit_pdu,
153 .parse_pdu_itt = cxgbi_parse_pdu_itt,
154 /* TCP connect/disconnect */
155 .ep_connect = cxgbi_ep_connect,
156 .ep_poll = cxgbi_ep_poll,
157 .ep_disconnect = cxgbi_ep_disconnect,
158 /* Error recovery timeout call */
159 .session_recovery_timedout = iscsi_session_recovery_timedout,
160};
161
162static struct scsi_transport_template *cxgb3i_stt;
163
164/*
165 * CPL (Chelsio Protocol Language) defines a message passing interface between
166 * the host driver and Chelsio asic.
 167 * The section below implements CPLs that relate to iscsi tcp connection
168 * open/close/abort and data send/receive.
169 */
170
171static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
172
173static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
174 const struct l2t_entry *e)
175{
176 unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
177 struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
178
179 skb->priority = CPL_PRIORITY_SETUP;
180
181 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
182 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
183 req->local_port = csk->saddr.sin_port;
184 req->peer_port = csk->daddr.sin_port;
185 req->local_ip = csk->saddr.sin_addr.s_addr;
186 req->peer_ip = csk->daddr.sin_addr.s_addr;
187
188 req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
189 V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
190 V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
191 req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
192 V_RCV_BUFSIZ(cxgb3i_rcv_win>>10));
193
194 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
195 "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
196 csk, csk->state, csk->flags, csk->atid,
197 &req->local_ip, ntohs(req->local_port),
198 &req->peer_ip, ntohs(req->peer_port),
199 csk->mss_idx, e->idx, e->smt_idx);
200
201 l2t_send(csk->cdev->lldev, skb, csk->l2t);
202}
203
204static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
205{
206 cxgbi_sock_act_open_req_arp_failure(NULL, skb);
207}
208
209/*
210 * CPL connection close request: host ->
211 *
212 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 213 * the write queue (i.e., after any unsent tx data).
214 */
215static void send_close_req(struct cxgbi_sock *csk)
216{
217 struct sk_buff *skb = csk->cpl_close;
218 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
219 unsigned int tid = csk->tid;
220
221 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
222 "csk 0x%p,%u,0x%lx,%u.\n",
223 csk, csk->state, csk->flags, csk->tid);
224
225 csk->cpl_close = NULL;
226 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
227 req->wr.wr_lo = htonl(V_WR_TID(tid));
228 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
229 req->rsvd = htonl(csk->write_seq);
230
231 cxgbi_sock_skb_entail(csk, skb);
232 if (csk->state >= CTP_ESTABLISHED)
233 push_tx_frames(csk, 1);
234}
235
236/*
237 * CPL connection abort request: host ->
238 *
239 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
240 * for the same connection and also that we do not try to send a message
241 * after the connection has closed.
242 */
243static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
244{
245 struct cpl_abort_req *req = cplhdr(skb);
246
247 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
248 "t3dev 0x%p, tid %u, skb 0x%p.\n",
249 tdev, GET_TID(req), skb);
250 req->cmd = CPL_ABORT_NO_RST;
251 cxgb3_ofld_send(tdev, skb);
252}
253
254static void send_abort_req(struct cxgbi_sock *csk)
255{
256 struct sk_buff *skb = csk->cpl_abort_req;
257 struct cpl_abort_req *req;
258
259 if (unlikely(csk->state == CTP_ABORTING || !skb))
260 return;
261 cxgbi_sock_set_state(csk, CTP_ABORTING);
262 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
263 /* Purge the send queue so we don't send anything after an abort. */
264 cxgbi_sock_purge_write_queue(csk);
265
266 csk->cpl_abort_req = NULL;
267 req = (struct cpl_abort_req *)skb->head;
268 skb->priority = CPL_PRIORITY_DATA;
269 set_arp_failure_handler(skb, abort_arp_failure);
270 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
271 req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
272 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
273 req->rsvd0 = htonl(csk->snd_nxt);
274 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
275 req->cmd = CPL_ABORT_SEND_RST;
276
277 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
278 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
279 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
280 req->rsvd1);
281
282 l2t_send(csk->cdev->lldev, skb, csk->l2t);
283}
284
285/*
286 * CPL connection abort reply: host ->
287 *
288 * Send an ABORT_RPL message in response of the ABORT_REQ received.
289 */
290static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
291{
292 struct sk_buff *skb = csk->cpl_abort_rpl;
293 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
294
295 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
296 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
297 csk, csk->state, csk->flags, csk->tid, rst_status);
298
299 csk->cpl_abort_rpl = NULL;
300 skb->priority = CPL_PRIORITY_DATA;
301 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
302 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
303 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
304 rpl->cmd = rst_status;
305 cxgb3_ofld_send(csk->cdev->lldev, skb);
306}
307
308/*
309 * CPL connection rx data ack: host ->
310 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
311 * credits sent.
312 */
313static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
314{
315 struct sk_buff *skb;
316 struct cpl_rx_data_ack *req;
317 u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
318
319 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
320 "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
321 csk, csk->state, csk->flags, csk->tid, credits, dack);
322
323 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
324 if (!skb) {
325 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
326 return 0;
327 }
328 req = (struct cpl_rx_data_ack *)skb->head;
329 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
330 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
331 req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
332 V_RX_CREDITS(credits));
333 skb->priority = CPL_PRIORITY_ACK;
334 cxgb3_ofld_send(csk->cdev->lldev, skb);
335 return credits;
336}
337
338/*
339 * CPL connection tx data: host ->
340 *
341 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
342 * credits sent.
343 * Each TX_DATA consumes work request credit (wrs), so we need to keep track of
 344 * how many we've used so far and how many are pending (i.e., not yet ack'ed by T3).
345 */
346
347static unsigned int wrlen __read_mostly;
348static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
349
350static void init_wr_tab(unsigned int wr_len)
351{
352 int i;
353
354 if (skb_wrs[1]) /* already initialized */
355 return;
356 for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
357 int sgl_len = (3 * i) / 2 + (i & 1);
358
359 sgl_len += 3;
360 skb_wrs[i] = (sgl_len <= wr_len
361 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
362 }
363 wrlen = wr_len * 8;
364}
365
366static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
367 int len, int req_completion)
368{
369 struct tx_data_wr *req;
370 struct l2t_entry *l2t = csk->l2t;
371
372 skb_reset_transport_header(skb);
373 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
374 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
375 (req_completion ? F_WR_COMPL : 0));
376 req->wr_lo = htonl(V_WR_TID(csk->tid));
377 /* len includes the length of any HW ULP additions */
378 req->len = htonl(len);
379 /* V_TX_ULP_SUBMODE sets both the mode and submode */
380 req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
381 V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
382 req->sndseq = htonl(csk->snd_nxt);
383 req->param = htonl(V_TX_PORT(l2t->smt_idx));
384
385 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
386 req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
387 V_TX_CPU_IDX(csk->rss_qid));
388 /* sendbuffer is in units of 32KB. */
389 req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
390 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
391 }
392}
393
394/**
395 * push_tx_frames -- start transmit
 396 * @csk: the offloaded connection
397 * @req_completion: request wr_ack or not
398 *
399 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
400 * connection's send queue and sends them on to T3. Must be called with the
401 * connection's lock held. Returns the amount of send buffer space that was
402 * freed as a result of sending queued data to T3.
403 */
404
405static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
406{
407 kfree_skb(skb);
408}
409
410static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
411{
412 int total_size = 0;
413 struct sk_buff *skb;
414
415 if (unlikely(csk->state < CTP_ESTABLISHED ||
416 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
417 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
418 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
419 csk, csk->state, csk->flags, csk->tid);
420 return 0;
421 }
422
423 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
424 int len = skb->len; /* length before skb_push */
425 int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
426 int wrs_needed = skb_wrs[frags];
427
428 if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
429 wrs_needed = 1;
430
431 WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
432
433 if (csk->wr_cred < wrs_needed) {
434 log_debug(1 << CXGBI_DBG_PDU_TX,
435 "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
436 csk, skb->len, skb->data_len, frags,
437 wrs_needed, csk->wr_cred);
438 break;
439 }
440
441 __skb_unlink(skb, &csk->write_queue);
442 skb->priority = CPL_PRIORITY_DATA;
443 skb->csum = wrs_needed; /* remember this until the WR_ACK */
444 csk->wr_cred -= wrs_needed;
445 csk->wr_una_cred += wrs_needed;
446 cxgbi_sock_enqueue_wr(csk, skb);
447
448 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
449 "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
450 "left %u, unack %u.\n",
451 csk, skb->len, skb->data_len, frags, skb->csum,
452 csk->wr_cred, csk->wr_una_cred);
453
454 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
455 if ((req_completion &&
456 csk->wr_una_cred == wrs_needed) ||
457 csk->wr_una_cred >= csk->wr_max_cred / 2) {
458 req_completion = 1;
459 csk->wr_una_cred = 0;
460 }
461 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
462 make_tx_data_wr(csk, skb, len, req_completion);
463 csk->snd_nxt += len;
464 cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
465 }
466 total_size += skb->truesize;
467 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
468 "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
469 csk, csk->tid, skb);
470 set_arp_failure_handler(skb, arp_failure_skb_discard);
471 l2t_send(csk->cdev->lldev, skb, csk->l2t);
472 }
473 return total_size;
474}
475
476/*
477 * Process a CPL_ACT_ESTABLISH message: -> host
478 * Updates connection state from an active establish CPL message. Runs with
479 * the connection lock held.
480 */
481
482static inline void free_atid(struct cxgbi_sock *csk)
483{
484 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
485 cxgb3_free_atid(csk->cdev->lldev, csk->atid);
486 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
487 cxgbi_sock_put(csk);
488 }
489}
490
491static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
492{
493 struct cxgbi_sock *csk = ctx;
494 struct cpl_act_establish *req = cplhdr(skb);
495 unsigned int tid = GET_TID(req);
496 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
497 u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
498
499 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
500 "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
501 atid, atid, csk, csk->state, csk->flags, rcv_isn);
502
503 cxgbi_sock_get(csk);
504 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
505 csk->tid = tid;
506 cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);
507
508 free_atid(csk);
509
510 csk->rss_qid = G_QNUM(ntohs(skb->csum));
511
512 spin_lock_bh(&csk->lock);
513 if (csk->retry_timer.function) {
514 del_timer(&csk->retry_timer);
515 csk->retry_timer.function = NULL;
516 }
517
518 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
519 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
520 csk, csk->state, csk->flags, csk->tid);
521
522 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
523 if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
524 csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);
525
526 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
527
528 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
529 /* upper layer has requested closing */
530 send_abort_req(csk);
531 else {
532 if (skb_queue_len(&csk->write_queue))
533 push_tx_frames(csk, 1);
534 cxgbi_conn_tx_open(csk);
535 }
536
537 spin_unlock_bh(&csk->lock);
538 __kfree_skb(skb);
539 return 0;
540}
541
542/*
543 * Process a CPL_ACT_OPEN_RPL message: -> host
544 * Handle active open failures.
545 */
546static int act_open_rpl_status_to_errno(int status)
547{
548 switch (status) {
549 case CPL_ERR_CONN_RESET:
550 return -ECONNREFUSED;
551 case CPL_ERR_ARP_MISS:
552 return -EHOSTUNREACH;
553 case CPL_ERR_CONN_TIMEDOUT:
554 return -ETIMEDOUT;
555 case CPL_ERR_TCAM_FULL:
556 return -ENOMEM;
557 case CPL_ERR_CONN_EXIST:
558 return -EADDRINUSE;
559 default:
560 return -EIO;
561 }
562}
563
564static void act_open_retry_timer(unsigned long data)
565{
566 struct sk_buff *skb;
567 struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
568
569 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
570 "csk 0x%p,%u,0x%lx,%u.\n",
571 csk, csk->state, csk->flags, csk->tid);
572
573 cxgbi_sock_get(csk);
574 spin_lock_bh(&csk->lock);
575 skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
576 if (!skb)
577 cxgbi_sock_fail_act_open(csk, -ENOMEM);
578 else {
579 skb->sk = (struct sock *)csk;
580 set_arp_failure_handler(skb, act_open_arp_failure);
581 send_act_open_req(csk, skb, csk->l2t);
582 }
583 spin_unlock_bh(&csk->lock);
584 cxgbi_sock_put(csk);
585}
586
587static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
588{
589 struct cxgbi_sock *csk = ctx;
590 struct cpl_act_open_rpl *rpl = cplhdr(skb);
591
592 pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
593 csk, csk->state, csk->flags, csk->atid, rpl->status,
594 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
595 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
596
597 if (rpl->status != CPL_ERR_TCAM_FULL &&
598 rpl->status != CPL_ERR_CONN_EXIST &&
599 rpl->status != CPL_ERR_ARP_MISS)
600 cxgb3_queue_tid_release(tdev, GET_TID(rpl));
601
602 cxgbi_sock_get(csk);
603 spin_lock_bh(&csk->lock);
604 if (rpl->status == CPL_ERR_CONN_EXIST &&
605 csk->retry_timer.function != act_open_retry_timer) {
606 csk->retry_timer.function = act_open_retry_timer;
607 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
608 } else
609 cxgbi_sock_fail_act_open(csk,
610 act_open_rpl_status_to_errno(rpl->status));
611
612 spin_unlock_bh(&csk->lock);
613 cxgbi_sock_put(csk);
614 __kfree_skb(skb);
615 return 0;
616}
617
618/*
619 * Process PEER_CLOSE CPL messages: -> host
620 * Handle peer FIN.
621 */
622static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
623{
624 struct cxgbi_sock *csk = ctx;
625
626 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
627 "csk 0x%p,%u,0x%lx,%u.\n",
628 csk, csk->state, csk->flags, csk->tid);
629
630 cxgbi_sock_rcv_peer_close(csk);
631 __kfree_skb(skb);
632 return 0;
633}
634
635/*
636 * Process CLOSE_CONN_RPL CPL message: -> host
637 * Process a peer ACK to our FIN.
638 */
639static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
640 void *ctx)
641{
642 struct cxgbi_sock *csk = ctx;
643 struct cpl_close_con_rpl *rpl = cplhdr(skb);
644
645 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
646 "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
647 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));
648
649 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
650 __kfree_skb(skb);
651 return 0;
652}
653
654/*
655 * Process ABORT_REQ_RSS CPL message: -> host
656 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
657 * request except that we need to reply to it.
658 */
659
660static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
661 int *need_rst)
662{
663 switch (abort_reason) {
664 case CPL_ERR_BAD_SYN: /* fall through */
665 case CPL_ERR_CONN_RESET:
666 return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
667 case CPL_ERR_XMIT_TIMEDOUT:
668 case CPL_ERR_PERSIST_TIMEDOUT:
669 case CPL_ERR_FINWAIT2_TIMEDOUT:
670 case CPL_ERR_KEEPALIVE_TIMEDOUT:
671 return -ETIMEDOUT;
672 default:
673 return -EIO;
674 }
675}
676
677static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
678{
679 const struct cpl_abort_req_rss *req = cplhdr(skb);
680 struct cxgbi_sock *csk = ctx;
681 int rst_status = CPL_ABORT_NO_RST;
682
683 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
684 "csk 0x%p,%u,0x%lx,%u.\n",
685 csk, csk->state, csk->flags, csk->tid);
686
687 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
688 req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
689 goto done;
690 }
691
692 cxgbi_sock_get(csk);
693 spin_lock_bh(&csk->lock);
694
695 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
696 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
697 cxgbi_sock_set_state(csk, CTP_ABORTING);
698 goto out;
699 }
700
701 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
702 send_abort_rpl(csk, rst_status);
703
704 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
705 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
706 cxgbi_sock_closed(csk);
707 }
708
709out:
710 spin_unlock_bh(&csk->lock);
711 cxgbi_sock_put(csk);
712done:
713 __kfree_skb(skb);
714 return 0;
715}
716
717/*
718 * Process ABORT_RPL_RSS CPL message: -> host
719 * Process abort replies. We only process these messages if we anticipate
720 * them as the coordination between SW and HW in this area is somewhat lacking
721 * and sometimes we get ABORT_RPLs after we are done with the connection that
722 * originated the ABORT_REQ.
723 */
724static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
725{
726 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
727 struct cxgbi_sock *csk = ctx;
728
729 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
730 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
731 rpl->status, csk, csk ? csk->state : 0,
732 csk ? csk->flags : 0UL);
733 /*
734 * Ignore replies to post-close aborts indicating that the abort was
735 * requested too late. These connections are terminated when we get
736 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
737 * arrives the TID is either no longer used or it has been recycled.
738 */
739 if (rpl->status == CPL_ERR_ABORT_FAILED)
740 goto rel_skb;
741 /*
742 * Sometimes we've already closed the connection, e.g., a post-close
743 * abort races with ABORT_REQ_RSS, the latter frees the connection
744 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
745 * but FW turns the ABORT_REQ into a regular one and so we get
746 * ABORT_RPL_RSS with status 0 and no connection.
747 */
748 if (csk)
749 cxgbi_sock_rcv_abort_rpl(csk);
750rel_skb:
751 __kfree_skb(skb);
752 return 0;
753}
754
755/*
756 * Process RX_ISCSI_HDR CPL message: -> host
757 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
 758 * follows after the bhs.
759 */
760static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
761{
762 struct cxgbi_sock *csk = ctx;
763 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
764 struct cpl_iscsi_hdr_norss data_cpl;
765 struct cpl_rx_data_ddp_norss ddp_cpl;
766 unsigned int hdr_len, data_len, status;
767 unsigned int len;
768 int err;
769
770 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
771 "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
772 csk, csk->state, csk->flags, csk->tid, skb, skb->len);
773
774 spin_lock_bh(&csk->lock);
775
776 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
777 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
778 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
779 csk, csk->state, csk->flags, csk->tid);
780 if (csk->state != CTP_ABORTING)
781 goto abort_conn;
782 else
783 goto discard;
784 }
785
786 cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
787 cxgbi_skcb_flags(skb) = 0;
788
789 skb_reset_transport_header(skb);
790 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
791
792 len = hdr_len = ntohs(hdr_cpl->len);
793 /* msg coalesce is off or not enough data received */
794 if (skb->len <= hdr_len) {
795 pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
796 csk->cdev->ports[csk->port_id]->name, csk->tid,
797 skb->len, hdr_len);
798 goto abort_conn;
799 }
800 cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
801
802 err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
803 sizeof(ddp_cpl));
804 if (err < 0) {
805 pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
806 csk->cdev->ports[csk->port_id]->name, csk->tid,
807 skb->len, sizeof(ddp_cpl), err);
808 goto abort_conn;
809 }
810
811 cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
812 cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
813 cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
814 status = ntohl(ddp_cpl.ddp_status);
815
816 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
817 "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
818 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
819
820 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
821 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
822 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
823 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
824 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
825 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
826
827 if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
828 err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
829 if (err < 0) {
830 pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
831 csk->cdev->ports[csk->port_id]->name,
832 csk->tid, sizeof(data_cpl), skb->len, err);
833 goto abort_conn;
834 }
835 data_len = ntohs(data_cpl.len);
836 log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
837 "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
838 skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
839 len += sizeof(data_cpl) + data_len;
840 } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
841 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
842
843 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
844 __pskb_trim(skb, len);
845 __skb_queue_tail(&csk->receive_queue, skb);
846 cxgbi_conn_pdu_ready(csk);
847
848 spin_unlock_bh(&csk->lock);
849 return 0;
850
851abort_conn:
852 send_abort_req(csk);
853discard:
854 spin_unlock_bh(&csk->lock);
855 __kfree_skb(skb);
856 return 0;
857}
858
/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 * NOTE: registered for the CPL_TX_DMA_ACK opcode in cxgb3i_cpl_handlers[].
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	/* CPL fields are big-endian; convert before handing to libcxgbi */
	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}
877
878/*
879 * for each connection, pre-allocate skbs needed for close/abort requests. So
880 * that we can service the request right away.
881 */
882static int alloc_cpls(struct cxgbi_sock *csk)
883{
884 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
885 GFP_KERNEL);
886 if (!csk->cpl_close)
887 return -ENOMEM;
888 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
889 GFP_KERNEL);
890 if (!csk->cpl_abort_req)
891 goto free_cpl_skbs;
892
893 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
894 GFP_KERNEL);
895 if (!csk->cpl_abort_rpl)
896 goto free_cpl_skbs;
897
898 return 0;
899
900free_cpl_skbs:
901 cxgbi_sock_free_cpl_skbs(csk);
902 return -ENOMEM;
903}
904
/**
 * l2t_put - release the L2T entry held by an offloaded connection
 * @csk: the offloaded iscsi tcp connection.
 * Releases the connection's L2T entry and drops the socket reference that
 * was taken alongside it in init_act_open().
 * (The previous kernel-doc here described release_offload_resources with a
 * @c3cn parameter; corrected to match this function.)
 */
static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(L2DATA(t3dev), csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);	/* ref taken with the l2t entry */
	}
}
920
/*
 * release_offload_resources - free everything an offloaded connection holds:
 * preallocated cpl skbs, pending write-queue WRs, the L2T entry and the
 * atid or tid. Installed as cdev->csk_release_offload_resources.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	/* wr_cred below wr_max_cred means WRs are still outstanding */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	/* a connection holds either an atid or a tid, never both */
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);	/* ref held for the tid table entry */
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
947
/*
 * update_address - propagate the hba's private iscsi ipv4 address into the
 * underlying net device(s). If chba->vdev is set (presumably a vlan/virtual
 * device on top of the port -- TODO confirm), the address is stored on the
 * vdev and cleared on the real device; a zero hba address clears both.
 */
static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
				cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		/* address cleared on the hba: clear the device copies too */
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}
969
/*
 * init_act_open - start an active open (connect) on an offloaded connection:
 * sync the source address, grab an L2T entry and an atid, initialize WR
 * credits and mss, then send CPL_ACT_OPEN_REQ to the hardware.
 * Returns 0 on success, -EINVAL on failure.
 * Installed as cdev->csk_init_act_open.
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);	/* ref for the l2t entry (see l2t_put()) */

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);	/* ref for the atid table entry */

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);

	/* one WR is reserved; the rest are available as send credits */
	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	/* NOTE(review): only the skb is freed here; the l2t entry, atid and
	 * the socket refs taken above are presumably released later via
	 * release_offload_resources() on the caller's error path -- confirm.
	 */
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
1029
/* dispatch table, CPL opcode -> handler, handed to the cxgb3 core */
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};
1040
/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 * Queries the lld for WR length, ports and rx page info, creates the
 * port map, and installs the cxgb3-specific connection callbacks.
 * Returns 0 on success or a negative errno.
 */
int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	/* clamp the module parameter to the library-wide maximum */
	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
1080
/*
 * functions to program the pagepod in h/w
 */
/*
 * ulp_mem_io_set_hdr - build a ULP_MEM_WRITE work request header in place
 * at the start of @skb, targeting adapter memory address @addr.
 * The address and length fields are in 32-byte units (hence the >> 5);
 * NFLITS counts 8-byte flits -- presumably per the T3 ULP_TX spec, confirm.
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	memset(req, 0, sizeof(*req));

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}
1096
/*
 * ddp_set_map - program @npods pagepods, starting at pagepod index @idx,
 * into adapter memory for gather list @gl, one ULP_MEM_WRITE skb per pod.
 * The skbs come from the pre-allocated ddp->gl_skb[] pool; an extra
 * reference is taken on each so they survive until ddp_clear_map().
 * Always returns 0. Installed as cdev->csk_ddp_set.
 */
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
		csk, idx, npods, gl);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		/* the pagepod body immediately follows the WR header */
		cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
					sizeof(struct ulp_mem_io)),
				   hdr, gl, i * PPOD_PAGES_MAX);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
	return 0;
}
1125
/*
 * ddp_clear_map - zero out @npods pagepods starting at index @idx in
 * adapter memory, releasing the pool skbs held since ddp_set_map().
 * Installed as cdev->csk_ddp_clear.
 */
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
		cdev, idx, npods, tag);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		if (!skb) {
			pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				tag, idx, i, npods);
			continue;
		}
		/* hand the pool skb back to the hardware with a zeroed pod */
		ddp->gl_skb[idx] = NULL;
		memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
}
1153
1154static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
1155{
1156 int i;
1157
1158 log_debug(1 << CXGBI_DBG_DDP,
1159 "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
1160
1161 for (i = 0; i < cnt; i++, idx++)
1162 if (ddp->gl_skb[idx]) {
1163 kfree_skb(ddp->gl_skb[idx]);
1164 ddp->gl_skb[idx] = NULL;
1165 }
1166}
1167
1168static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
1169 int cnt, gfp_t gfp)
1170{
1171 int i;
1172
1173 log_debug(1 << CXGBI_DBG_DDP,
1174 "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
1175
1176 for (i = 0; i < cnt; i++) {
1177 struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
1178 PPOD_SIZE, 0, gfp);
1179 if (skb)
1180 ddp->gl_skb[idx + i] = skb;
1181 else {
1182 ddp_free_gl_skb(ddp, idx, i);
1183 return -ENOMEM;
1184 }
1185 }
1186 return 0;
1187}
1188
/*
 * ddp_setup_conn_pgidx - program the connection's DDP page-size index via
 * CPL_SET_TCB_FIELD (TCB word 31, bits 28-31 -- presumably per the T3 TCB
 * layout, confirm). An out-of-range @pg_idx falls back to 0.
 * Returns 0 on success, -ENOMEM if the WR skb cannot be allocated.
 * Installed as cdev->csk_ddp_setup_pgidx.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx, bool reply)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1216
/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * Set up the iscsi digest settings for a connection identified by @tid via
 * CPL_SET_TCB_FIELD (TCB word 31, bits 24-27: bit 0 = header crc, bit 1 =
 * data crc). Returns 0 on success, -ENOMEM if the WR skb allocation fails.
 * (kernel-doc name corrected from cxgb3i_setup_conn_digest.)
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1253
/**
 * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 * Release all the resources held by the ddp pagepod manager for a given
 * adapter if needed; clears tdev->ulp_iscsi when the last user is gone
 * (cxgbi_ddp_cleanup() returning nonzero).
 */
static void t3_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;

	if (cxgbi_ddp_cleanup(cdev)) {
		pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
		tdev->ulp_iscsi = NULL;
	}
}
1270
1271/**
1272 * ddp_init - initialize the cxgb3 adapter's ddp resource
1273 * @cdev: cxgb3i adapter
1274 * initialize the ddp pagepod manager for a given adapter
1275 */
1276static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
1277{
1278 struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
1279 struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
1280 struct ulp_iscsi_info uinfo;
1281 unsigned int pgsz_factor[4];
1282 int err;
1283
1284 if (ddp) {
1285 kref_get(&ddp->refcnt);
1286 pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
1287 tdev, tdev->ulp_iscsi);
1288 cdev->ddp = ddp;
1289 return -EALREADY;
1290 }
1291
1292 err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
1293 if (err < 0) {
1294 pr_err("%s, failed to get iscsi param err=%d.\n",
1295 tdev->name, err);
1296 return err;
1297 }
1298
1299 err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
1300 uinfo.max_txsz, uinfo.max_rxsz);
1301 if (err < 0)
1302 return err;
1303
1304 ddp = cdev->ddp;
1305
1306 uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1307 cxgbi_ddp_page_size_factor(pgsz_factor);
1308 uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
1309
1310 err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
1311 if (err < 0) {
1312 pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
1313 tdev->name, err);
1314 cxgbi_ddp_cleanup(cdev);
1315 return err;
1316 }
1317 tdev->ulp_iscsi = ddp;
1318
1319 cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
1320 cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
1321 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1322 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1323 cdev->csk_ddp_set = ddp_set_map;
1324 cdev->csk_ddp_clear = ddp_clear_map;
1325
1326 pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
1327 "%u/%u.\n",
1328 tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
1329 ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
1330 ddp->max_rxsz, uinfo.max_rxsz);
1331 return 0;
1332}
1333
/*
 * cxgb3i_dev_close - lld notification that a t3 adapter is going away;
 * unregister the matching cxgbi device unless it is mid-reset (the
 * ADAPTER_RESET flag set by the event handler) or was never registered.
 */
static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}
1345
/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 * Registers a cxgbi device for the adapter, fills in its parameters and
 * callbacks, initializes ddp and offload support, and adds one hba per
 * port. On any failure the device is unregistered again.
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		/* already registered; nothing further to do here */
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb3i_snd_win;
	cdev->rcv_win = cxgb3i_rcv_win;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->dev_ddp_cleanup = t3_ddp_cleanup;
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed\n", cdev);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	/* seed each hba with the port's current private iscsi address */
	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	/* note: cdev is non-NULL and err is 0 here; the conditional and the
	 * err value in this print are redundant but harmless */
	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev ? cdev->flags : 0, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}
1410
1411static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
1412{
1413 struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
1414
1415 log_debug(1 << CXGBI_DBG_TOE,
1416 "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
1417 t3dev, cdev, event, port);
1418 if (!cdev)
1419 return;
1420
1421 switch (event) {
1422 case OFFLOAD_STATUS_DOWN:
1423 cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
1424 break;
1425 case OFFLOAD_STATUS_UP:
1426 cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
1427 break;
1428 }
1429}
1430
/**
 * cxgb3i_init_module - module init entry point
 *
 * initialize any driver wide global data structures and register itself
 * with the cxgb3 module
 * Returns 0 on success or the negative errno from cxgbi_iscsi_init().
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	/* register last: the client callbacks assume the transport is up */
	cxgb3_register_client(&t3_client);
	return 0;
}
1450
/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * Unregisters from the cxgb3 core first (no new events), then releases
 * every T3 cxgbi device and finally tears down the iscsi transport --
 * the reverse of cxgb3i_init_module().
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..5f5e3394b594
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -0,0 +1,51 @@
1/*
2 * cxgb3i.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_H__
14#define __CXGB3I_H__
15
#define CXGB3I_SCSI_HOST_QDEPTH 1024	/* scsi host can_queue */
#define CXGB3I_MAX_LUN 512
/* largest non-payload portion of a pdu: bhs + AHS + header & data digests */
#define ISCSI_PDU_NONPAYLOAD_MAX \
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)

/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define CXGB3I_TX_HEADER_LEN \
	(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))

extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];

/* accessors for the per-port private iscsi ipv4 address */
#define cxgb3i_get_private_ipv4addr(ndev) \
	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
#define cxgb3i_set_private_ipv4addr(ndev, addr) \
	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
31
/* CPL_ISCSI_HDR message layout without the leading RSS header -- presumably
 * matches cpl_iscsi_hdr in t3_cpl.h minus the rss_header; confirm. Its size
 * is used as cdev->skb_rx_extra. */
struct cpl_iscsi_hdr_norss {
	union opcode_tid ot;
	u16 pdu_len_ddp;
	u16 len;
	u32 seq;
	u16 urg;
	u8 rsvd;
	u8 status;
};
41
/* CPL_RX_DATA_DDP message layout without the leading RSS header; carries the
 * ddp status and ulp crc consumed by do_iscsi_hdr(). */
struct cpl_rx_data_ddp_norss {
	union opcode_tid ot;
	u16 urg;
	u16 len;
	u32 seq;
	u32 nxt_seq;
	u32 ulp_crc;
	u32 ddp_status;
};
51#endif
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
new file mode 100644
index 000000000000..b9f4af7454b7
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -0,0 +1,3 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4
2
3obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
new file mode 100644
index 000000000000..bb94b39b17b3
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_CXGB4_ISCSI
2 tristate "Chelsio T4 iSCSI support"
3 depends on CHELSIO_T4_DEPENDS
4 select CHELSIO_T4
5 select SCSI_ISCSI_ATTRS
6 ---help---
7 This driver supports iSCSI offload for the Chelsio T4 devices.
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
new file mode 100644
index 000000000000..99f2b8c5dd63
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -0,0 +1,1604 @@
1/*
2 * cxgb4i.c: Chelsio T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <scsi/scsi_host.h>
20#include <net/tcp.h>
21#include <net/dst.h>
22#include <linux/netdevice.h>
23
24#include "t4_msg.h"
25#include "cxgb4.h"
26#include "cxgb4_uld.h"
27#include "t4fw_api.h"
28#include "l2t.h"
29#include "cxgb4i.h"
30
31static unsigned int dbg_level;
32
33#include "../libcxgbi.h"
34
35#define DRV_MODULE_NAME "cxgb4i"
36#define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver"
37#define DRV_MODULE_VERSION "0.9.1"
38#define DRV_MODULE_RELDATE "Aug. 2010"
39
40static char version[] =
41 DRV_MODULE_DESC " " DRV_MODULE_NAME
42 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43
44MODULE_AUTHOR("Chelsio Communications, Inc.");
45MODULE_DESCRIPTION(DRV_MODULE_DESC);
46MODULE_VERSION(DRV_MODULE_VERSION);
47MODULE_LICENSE("GPL");
48
49module_param(dbg_level, uint, 0644);
50MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
51
52static int cxgb4i_rcv_win = 256 * 1024;
53module_param(cxgb4i_rcv_win, int, 0644);
54MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
55
56static int cxgb4i_snd_win = 128 * 1024;
57module_param(cxgb4i_snd_win, int, 0644);
58MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
59
60static int cxgb4i_rx_credit_thres = 10 * 1024;
61module_param(cxgb4i_rx_credit_thres, int, 0644);
62MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
63 "RX credits return threshold in bytes (default=10KB)");
64
65static unsigned int cxgb4i_max_connect = (8 * 1024);
66module_param(cxgb4i_max_connect, uint, 0644);
67MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
68
69static unsigned short cxgb4i_sport_base = 20000;
70module_param(cxgb4i_sport_base, ushort, 0644);
71MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
72
/* per-opcode CPL handler signature for the cxgb4i dispatch table */
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

/* forward declarations for the ULD callbacks registered below */
static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

/* upper-layer-driver hooks handed to the cxgb4 core */
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
85
/* scsi midlayer host template; queueing and error handling are delegated
 * to libiscsi */
static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
};
103
/* iscsi transport ops: session/connection management and pdu paths are
 * split between libcxgbi (cxgbi_*) and libiscsi (iscsi_*) helpers */
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD,
	.param_mask	= ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
				ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
				ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
				ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
				ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
				ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
				ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
				ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
				ISCSI_PERSISTENT_ADDRESS |
				ISCSI_TARGET_NAME | ISCSI_TPGT |
				ISCSI_USERNAME | ISCSI_PASSWORD |
				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
				ISCSI_PING_TMO | ISCSI_RECV_TMO |
				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME |
				ISCSI_HOST_NETDEV_NAME,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn		= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.get_conn_param	= cxgbi_get_conn_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

/* scsi transport template returned by cxgbi_iscsi_init() at module load */
static struct scsi_transport_template *cxgb4i_stt;
164
/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and Chelsio asic.
 * The section below implements CPLs that related to iscsi tcp connection
 * open/close/abort and data send/receive.
 */
/* NOTE(review): DIV_ROUND_UP duplicates the definition in linux/kernel.h;
 * the expansion is identical so the redefinition is benign, but the local
 * copy is redundant. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128
174
/* set_queue - record the tx queue mapping on @skb; @csk is unused here,
 * kept for interface symmetry (presumably used in other configurations --
 * confirm). */
static inline void set_queue(struct sk_buff *skb, unsigned int queue,
				const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}
180
181static int push_tx_frames(struct cxgbi_sock *, int);
182
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets: the
 * payload plus the fw_ofld_tx_data_wr header must fit in
 * MAX_IMM_TX_PKT_LEN bytes.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= (MAX_IMM_TX_PKT_LEN -
			sizeof(struct fw_ofld_tx_data_wr));
}
195
/*
 * send_act_open_req - build and send CPL_ACT_OPEN_REQ for @csk through the
 * L2T entry @e, initiating the hardware TCP connect. The atid and rss queue
 * id are packed together into the opcode tid field.
 */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
				struct l2t_entry *e)
{
	struct cpl_act_open_req *req;
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	/* atid in the low bits, rss queue id above it */
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE(1) |
		WND_SCALE(wscale) |
		MSS_IDX(csk->mss_idx) |
		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN(csk->tx_chan) |
		SMAC_SEL(csk->smac_idx) |
		ULP_MODE(ULP_MODE_ISCSI) |
		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);	/* in 1KB units */
	opt2 = RX_CHANNEL(0) |
		RSS_QUEUE_VALID |
		(1 << 20) | (1 << 22) |	/* NOTE(review): undocumented opt2
					 * bits -- confirm against T4 spec */
		RSS_QUEUE(csk->rss_qid);

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	req = (struct cpl_act_open_req *)skb->head;

	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					qid_atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
		csk, &req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
241
/*
 * send_close_req - queue CPL_CLOSE_CON_REQ on the connection's write queue
 * using the skb pre-allocated by alloc_cpls (csk->cpl_close, consumed here),
 * and push it out if the connection is at least established.
 * Installed as cdev->csk_send_close_req.
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u.\n",
		csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;	/* single use; ownership moves to the queue */
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
261
/*
 * abort_arp_failure - arp failure handler for an abort request: resend the
 * abort without requesting an RST, directly via the offload queue.
 * NOTE(review): the request is read via skb->data here while other senders
 * use skb->head -- presumably equivalent for these WR skbs; confirm.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
274
/*
 * send_abort_req - move the connection to ABORTING, drop any queued tx
 * data, and send CPL_ABORT_REQ (with RST) using the pre-allocated skb.
 * No-op if already aborting or the skb/cdev is gone.
 * Installed as cdev->csk_send_abort_req.
 */
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* aborting: any pending tx data will never be sent */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;	/* single use */
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
303
/*
 * send_abort_rpl - reply to a peer abort with CPL_ABORT_RPL carrying
 * @rst_status, using the skb pre-allocated in alloc_cpls (consumed here).
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;	/* single use */
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
320
321/*
322 * CPL connection rx data ack: host ->
323 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
324 * credits sent.
325 */
/*
 * send_rx_credits - return rx credits to hw via a CPL_RX_DATA_ACK.
 * @csk: the connection
 * @credits: number of rx credits to return
 *
 * Returns the number of credits sent, or 0 if the work request could not
 * be allocated.  RX_FORCE_ACK(1) makes hw ack the peer immediately.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
350
/*
 * sgl_len - number of flits needed by an SGL with the given capacity
 * @n: the number of SGL entries
 *
 * The first entry shares a flit with the list header; after that, each
 * pair of entries packs into three flits, plus a half-flit of padding
 * when the remaining count is odd.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	unsigned int rest = n - 1;

	return 2 + 3 * rest / 2 + (rest & 1);
}
362
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	/* small packets are sent as immediate data within the WR itself */
	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	/* flits occupied by the WR headers up to the transport header */
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	/*
	 * One extra SGL entry when linear data extends past the transport
	 * header (i.e. payload lives in the skb head as well as in frags).
	 */
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits + sgl_len(cnt);
}
383
/*
 * send_tx_flowc_wr - send the one-time FW_FLOWC_WR that programs the
 * firmware's per-flow tx parameters (channel, queue, sequence numbers,
 * send buffer and MSS) before the first tx data WR on this connection.
 */
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int flowclen, i;

	/* 80 bytes covers the WR header plus 9 mnemonic/value slots */
	flowclen = 80;
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	/*
	 * NOTE(review): alloc_wr() can return NULL under GFP_ATOMIC and the
	 * result is dereferenced unchecked below -- confirm whether alloc_wr
	 * guarantees success here or a NULL check is needed.
	 */
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
				FW_WR_FLOWID(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	/* slot 8 is an unused terminator, zeroed */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	/* clear the reserved pad bytes of every slot */
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}
	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
431
432static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
433 int dlen, int len, u32 credits, int compl)
434{
435 struct fw_ofld_tx_data_wr *req;
436 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
437 unsigned int wr_ulp_mode = 0;
438
439 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
440
441 if (is_ofld_imm(skb)) {
442 req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
443 FW_WR_COMPL(1) |
444 FW_WR_IMMDLEN(dlen));
445 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
446 FW_WR_LEN16(credits));
447 } else {
448 req->op_to_immdlen =
449 cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
450 FW_WR_COMPL(1) |
451 FW_WR_IMMDLEN(0));
452 req->flowid_len16 =
453 cpu_to_be32(FW_WR_FLOWID(csk->tid) |
454 FW_WR_LEN16(credits));
455 }
456 if (submode)
457 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
458 FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
459 req->tunnel_to_proxy = htonl(wr_ulp_mode) |
460 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
461 req->plen = htonl(len);
462 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
463 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
464}
465
/*
 * arp_failure_skb_discard - ARP-failure handler that simply drops the skb.
 * Used for tx data WRs, where retransmission is left to the connection's
 * normal error handling.
 */
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
470
/*
 * push_tx_frames - drain the connection's write queue to hardware.
 * @csk: the connection
 * @req_completion: passed through to make_tx_data_wr()
 *
 * Sends as many queued skbs as the available tx credits allow, charging
 * each skb its WR credit cost.  The first data transmission is preceded
 * by a FLOWC WR.  Returns the total truesize of the skbs handed to hw.
 * Caller must hold the connection lock.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	/* nothing to do unless the connection is fully established */
	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;

		skb_reset_transport_header(skb);
		/* credits are in 16-byte units, WR header included */
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen +
					sizeof(struct fw_ofld_tx_data_wr), 16);
		else
			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
					+ sizeof(struct fw_ofld_tx_data_wr),
					16);

		/* stop (do not dequeue) if this skb does not fit */
		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
				csk, skb->len, skb->data_len,
				credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		/* skb->csum is repurposed to remember this skb's credit cost */
		skb->csum = credits_needed;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		/* track the in-flight WR until hw acks it (do_fw4_ack) */
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			csk, skb->len, skb->data_len, credits_needed,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			/* first data on this tid: send the FLOWC WR and
			 * charge its 5 credits to this skb */
			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
				send_tx_flowc_wr(csk);
				skb->csum += 5;
				csk->wr_cred -= 5;
				csk->wr_una_cred += 5;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
542
/*
 * free_atid - release the active-open tid and its socket reference.
 * Safe to call unconditionally: only acts if CTPF_HAS_ATID is set.
 */
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		/* drop the reference taken when the atid was allocated */
		cxgbi_sock_put(csk);
	}
}
553
/*
 * do_act_establish - handle CPL_ACT_ESTABLISH: our active open succeeded.
 *
 * Transitions the connection from its atid to the hw-assigned tid,
 * initializes the receive sequence state and the advertised MSS, and
 * kicks off any tx data that was queued while connecting.  Consumes @skb.
 */
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
		csk, csk->state, csk->flags, tid, atid, rcv_isn);

	/* take a reference for the tid table entry, then drop the atid */
	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	/* a pending CONN_EXIST retry is no longer needed */
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	/* MSS from the negotiated mtu index, less 40 bytes
	 * (presumably IP + TCP header overhead -- per Chelsio convention) */
	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;	/* TCP timestamp option overhead */
	if (csk->advmss < 128)
		csk->advmss = 128;	/* enforce a sane minimum */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, mss_idx %u, advmss %u.\n",
		csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	/* a close was requested while we were still connecting */
	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
630
631static int act_open_rpl_status_to_errno(int status)
632{
633 switch (status) {
634 case CPL_ERR_CONN_RESET:
635 return -ECONNREFUSED;
636 case CPL_ERR_ARP_MISS:
637 return -EHOSTUNREACH;
638 case CPL_ERR_CONN_TIMEDOUT:
639 return -ETIMEDOUT;
640 case CPL_ERR_TCAM_FULL:
641 return -ENOMEM;
642 case CPL_ERR_CONN_EXIST:
643 return -EADDRINUSE;
644 default:
645 return -EIO;
646 }
647}
648
/*
 * csk_act_open_retry_timer - deferred retry of a failed active open.
 *
 * Armed by do_act_open_rpl() when the hw reports CPL_ERR_CONN_EXIST;
 * re-sends the CPL_ACT_OPEN_REQ, or fails the open with -ENOMEM if the
 * work request cannot be allocated.
 */
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				cxgbi_sock_act_open_req_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
672
/*
 * do_act_open_rpl - handle CPL_ACT_OPEN_RPL: the hw's answer to our
 * active-open request (only delivered on failure or retry conditions).
 *
 * CPL_ERR_CONN_EXIST triggers a delayed retry via csk_act_open_retry_timer;
 * all other statuses fail the open with the mapped errno.  Consumes @skb.
 */
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
		atid, tid, status, csk, csk->state, csk->flags);

	/*
	 * For hard failures the hw allocated a tid that must be released;
	 * TCAM_FULL/CONN_EXIST/ARP_MISS never got that far.
	 */
	if (status && status != CPL_ERR_TCAM_FULL &&
		status != CPL_ERR_CONN_EXIST &&
		status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
		csk->retry_timer.function != csk_act_open_retry_timer) {
		/* four-tuple still in use: retry the open after ~0.5s */
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
716
717static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
718{
719 struct cxgbi_sock *csk;
720 struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
721 unsigned int tid = GET_TID(req);
722 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
723 struct tid_info *t = lldi->tids;
724
725 csk = lookup_tid(t, tid);
726 if (unlikely(!csk)) {
727 pr_err("can't find connection for tid %u.\n", tid);
728 goto rel_skb;
729 }
730 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
731 "csk 0x%p,%u,0x%lx,%u.\n",
732 csk, csk->state, csk->flags, csk->tid);
733 cxgbi_sock_rcv_peer_close(csk);
734rel_skb:
735 __kfree_skb(skb);
736}
737
738static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
739{
740 struct cxgbi_sock *csk;
741 struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
742 unsigned int tid = GET_TID(rpl);
743 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
744 struct tid_info *t = lldi->tids;
745
746 csk = lookup_tid(t, tid);
747 if (unlikely(!csk)) {
748 pr_err("can't find connection for tid %u.\n", tid);
749 goto rel_skb;
750 }
751 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
752 "csk 0x%p,%u,0x%lx,%u.\n",
753 csk, csk->state, csk->flags, csk->tid);
754 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
755rel_skb:
756 __kfree_skb(skb);
757}
758
759static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
760 int *need_rst)
761{
762 switch (abort_reason) {
763 case CPL_ERR_BAD_SYN: /* fall through */
764 case CPL_ERR_CONN_RESET:
765 return csk->state > CTP_ESTABLISHED ?
766 -EPIPE : -ECONNRESET;
767 case CPL_ERR_XMIT_TIMEDOUT:
768 case CPL_ERR_PERSIST_TIMEDOUT:
769 case CPL_ERR_FINWAIT2_TIMEDOUT:
770 case CPL_ERR_KEEPALIVE_TIMEDOUT:
771 return -ETIMEDOUT;
772 default:
773 return -EIO;
774 }
775}
776
/*
 * do_abort_req_rss - handle CPL_ABORT_REQ_RSS: peer/hw initiated abort.
 *
 * The hw delivers the abort in two messages: the first pass only marks
 * CTPF_ABORT_REQ_RCVD and enters CTP_ABORTING; the second pass sends the
 * abort reply and, unless our own abort is still pending a reply, records
 * the errno and closes the socket.  Negative-advice statuses (transient
 * retransmit/persist conditions) are ignored.  Consumes @skb.
 */
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* first of the two abort messages: just record it */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto done;
	}

	/* second message: acknowledge and tear down */
	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
822
/*
 * do_abort_rpl_rss - handle CPL_ABORT_RPL_RSS: hw acknowledged the abort
 * we sent.  CPL_ERR_ABORT_FAILED replies are ignored (nothing to abort).
 * Consumes @skb.
 */
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	/* csk is non-NULL here; the ternaries below are defensive only */
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
847
/*
 * do_rx_iscsi_hdr - handle CPL_ISCSI_HDR: a received iSCSI PDU segment.
 *
 * The hw splits a PDU across CPLs: the first carries the BHS (and becomes
 * csk->skb_ulp_lhdr, the "leading header" skb the rest of the PDU is
 * accounted against); subsequent CPLs carry payload and are flagged as
 * data for that leading skb.  Each segment is queued on the receive queue
 * for cxgbi_conn_pdu_ready() (driven later by do_rx_data_ddp).
 * Sequence or length mismatches abort the connection.
 */
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* strip the CPL header and trim to the advertised length */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen;

		/* first segment of a new PDU: this skb carries the BHS */
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		/* DataSegmentLength: 24-bit field at BHS offset 4 */
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		/* cross-check against the hw-reported pdu length
		 * (40 presumably IP+TCP header overhead -- hw convention) */
		if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
				hlen, dlen, cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		/* pdu length rounded up to 4-byte padding, plus data digest */
		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			csk, skb, *bhs, hlen, dlen,
			ntohl(*((unsigned int *)(bhs + 16))),
			ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		/* payload segment belonging to the pending leading header */
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
947
/*
 * do_rx_data_ddp - handle CPL_RX_DATA_DDP: hw's completion/status for a
 * received PDU (possibly placed directly by DDP).
 *
 * Transfers the hw digest/DDP status bits onto the PDU's leading-header
 * skb (csk->skb_ulp_lhdr, set by do_rx_iscsi_hdr), marks the PDU complete
 * with SKCBF_RX_STATUS and notifies libcxgbi that a PDU is ready.
 * Consumes @skb; a missing leading header aborts the connection.
 */
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	/* a status without a preceding CPL_ISCSI_HDR is a protocol error */
	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	/* translate hw status bits into skb control-block flags */
	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	/* DDP'ed only if no payload CPL was seen for this PDU */
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
		!cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1034
/*
 * do_fw4_ack - handle CPL_FW4_ACK: hw acknowledged completed tx WRs.
 * Returns the acked credits and snd_una to the generic WR-ack path,
 * which releases the tracked skbs and may resume transmission.
 * Consumes @skb.
 */
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
					rpl->seq_vld);
	}
	__kfree_skb(skb);
}
1055
1056static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1057{
1058 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1059 unsigned int tid = GET_TID(rpl);
1060 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1061 struct tid_info *t = lldi->tids;
1062 struct cxgbi_sock *csk;
1063
1064 csk = lookup_tid(t, tid);
1065 if (!csk)
1066 pr_err("can't find conn. for tid %u.\n", tid);
1067
1068 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1069 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1070 csk, csk->state, csk->flags, csk->tid, rpl->status);
1071
1072 if (rpl->status != CPL_ERR_NONE)
1073 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1074 csk, tid, rpl->status);
1075
1076 __kfree_skb(skb);
1077}
1078
/*
 * alloc_cpls - pre-allocate the close/abort control skbs for a connection.
 *
 * These are allocated up front so that teardown paths never fail for lack
 * of memory.  Returns 0 on success or -ENOMEM (with any partial
 * allocations freed).
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}
1101
/*
 * l2t_put - release the connection's L2T entry and its socket reference.
 * Safe to call when no entry is held.
 */
static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		/* drop the reference taken when the l2t entry was acquired */
		cxgbi_sock_put(csk);
	}
}
1110
/*
 * release_offload_resources - free everything the hw offload holds for
 * this connection: cached control skbs, unacked WRs, the L2T entry and
 * the atid/tid mapping, then detach the connection from the device.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	/* outstanding (unacked) WRs imply entries on the wr list */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		/* drop the reference taken when the tid was inserted */
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
1137
/*
 * init_act_open - start an active open (connect) on the offload device.
 *
 * Allocates an atid and an L2T entry, derives the per-connection tx/rx
 * queue and MSS parameters from the lower-level driver info, then sends
 * the CPL_ACT_OPEN_REQ.  Returns 0 on success, -EINVAL on failure.
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct port_info *pi = netdev_priv(ndev);
	struct sk_buff *skb = NULL;
	unsigned int step;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	/*
	 * NOTE(review): if csk->atid is an unsigned field this `< 0` check
	 * can never fire -- confirm the field type, or test the return
	 * value of cxgb4_alloc_atid() in a signed local instead.
	 */
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		return -EINVAL;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);	/* reference held by the atid mapping */

	csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);	/* reference held by the l2t entry */

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	/* spread connections across the port's tx/rx queue sets */
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
		csk, pi->port_id, ndev->name, csk->tx_chan,
		csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
		csk->smac_idx);

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	/*
	 * NOTE(review): this path does not undo the atid/l2t allocations or
	 * their cxgbi_sock_get() references taken above -- confirm whether
	 * the caller's teardown (release_offload_resources) covers them.
	 */
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
1201
/*
 * Dispatch table: CPL opcode -> handler.  Every handler consumes (frees)
 * the skb it is given.  Unlisted opcodes are left NULL.
 */
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
};
1214
/*
 * cxgb4i_ofld_init - wire this driver's offload callbacks into a
 * cxgbi_device and create its source-port map.
 * Returns 0 on success or the portmap-creation error.
 */
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	/* clamp the module parameter to the hw connection limit */
	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
1238
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
/*
 * ulp_mem_io_set_hdr - fill in the ULP_TX memory-write WR header plus the
 * immediate-data sub-command that carries @dlen bytes to adapter memory
 * at @pm_addr.  Lengths and addresses are in 32-byte units as the hw
 * expects (hence the >> 5 shifts).
 */
static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
1258
/*
 * ddp_ppod_write_idata - write @npods pagepods to adapter memory via a
 * single ULP_TX immediate-data work request.
 * @gl_pidx: starting page index within @gl for the first pagepod
 *
 * With both @hdr and @gl NULL the pagepods are cleared instead of set.
 * Returns 0 on success, -ENOMEM if the WR cannot be allocated.
 */
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	/* WR header + immediate sub-command + payload, 16-byte aligned */
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
1299
/*
 * ddp_set_map - program @npods pagepods for @gl, in chunks that fit the
 * immediate-data limit of one WR.  Returns 0 or the first write error.
 */
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		/* 4 * i: gather-list page index advances 4 pages per pod
		 * (presumably PPOD_PAGES_MAX == 4 -- keep in sync) */
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}
1318
/*
 * ddp_clear_map - clear @npods pagepods (NULL hdr/gl means clear), in
 * chunks that fit one WR.  Errors are only used to stop early; the tag
 * itself is released by the caller.
 */
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}
1335
/*
 * Program the DDP page-size index for a connection by writing the
 * page-size field (bits 9:8, mask 0x3 << 8) of its TCB via
 * CPL_SET_TCB_FIELD.  @tid is accepted for the csk_ddp_setup_pgidx
 * hook signature, but the TCB is addressed through csk->tid.
 * @reply requests a hardware completion for the update.
 *
 * Returns 0 on success (or when pg_idx needs no update), -ENOMEM on
 * allocation failure.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	/* index 0 is the default; out-of-range values are ignored */
	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
1365
/*
 * Enable header/data digest offload for a connection by updating the
 * ULP submode field (bits 7:4, mask 0x3 << 4) of its TCB via
 * CPL_SET_TCB_FIELD, and record the resulting digest lengths in the
 * csk.  @reply requests a hardware completion for the update.
 *
 * Returns 0 on success (or if neither digest is enabled), -ENOMEM on
 * allocation failure.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* each enabled CRC digest occupies 4 bytes on the wire */
	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
1398
/*
 * Per-adapter DDP initialization: create the device's cxgbi_ddp_info
 * over the adapter's iSCSI memory region, program the tag mask and
 * page-size factors into hardware, and wire up the DDP callbacks.
 *
 * Returns 0 on success, -EALREADY if DDP was already set up (a new
 * reference is taken on the existing state), or a negative errno from
 * cxgbi_ddp_init().
 */
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	/* T4 builds ppod WRs inline; no pre-allocated gl skbs needed */
	cdev->csk_ddp_free_gl_skb = NULL;
	cdev->csk_ddp_alloc_gl_skb = NULL;
	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		" %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
1445
1446static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1447{
1448 struct cxgbi_device *cdev;
1449 struct port_info *pi;
1450 int i, rc;
1451
1452 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
1453 if (!cdev) {
1454 pr_info("t4 device 0x%p, register failed.\n", lldi);
1455 return NULL;
1456 }
1457 pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
1458 cdev, lldi->adapter_type, lldi->nports,
1459 lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
1460 lldi->nrxq, lldi->wr_cred);
1461 for (i = 0; i < lldi->nrxq; i++)
1462 log_debug(1 << CXGBI_DBG_DEV,
1463 "t4 0x%p, rxq id #%d: %u.\n",
1464 cdev, i, lldi->rxq_ids[i]);
1465
1466 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
1467 cdev->flags = CXGBI_FLAG_DEV_T4;
1468 cdev->pdev = lldi->pdev;
1469 cdev->ports = lldi->ports;
1470 cdev->nports = lldi->nports;
1471 cdev->mtus = lldi->mtus;
1472 cdev->nmtus = NMTUS;
1473 cdev->snd_win = cxgb4i_snd_win;
1474 cdev->rcv_win = cxgb4i_rcv_win;
1475 cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
1476 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1477 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1478 cdev->itp = &cxgb4i_iscsi_transport;
1479
1480 cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
1481 pr_info("cdev 0x%p,%s, pfvf %u.\n",
1482 cdev, lldi->ports[0]->name, cdev->pfvf);
1483
1484 rc = cxgb4i_ddp_init(cdev);
1485 if (rc) {
1486 pr_info("t4 0x%p ddp init failed.\n", cdev);
1487 goto err_out;
1488 }
1489 rc = cxgb4i_ofld_init(cdev);
1490 if (rc) {
1491 pr_info("t4 0x%p ofld init failed.\n", cdev);
1492 goto err_out;
1493 }
1494
1495 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
1496 &cxgb4i_host_template, cxgb4i_stt);
1497 if (rc)
1498 goto err_out;
1499
1500 for (i = 0; i < cdev->nports; i++) {
1501 pi = netdev_priv(lldi->ports[i]);
1502 cdev->hbas[i]->port_id = pi->port_id;
1503 }
1504 return cdev;
1505
1506err_out:
1507 cxgbi_device_unregister(cdev);
1508 return ERR_PTR(-ENOMEM);
1509}
1510
#define RX_PULL_LEN 128
/*
 * cxgb4 ULD receive callback: repackage an ingress CPL message into an
 * skb and hand it to the matching entry in cxgb4i_cplhandlers[].
 *
 * @rsp points at the RSS/response-queue entry; @pgl is the free-list
 * gather list, or NULL when the message arrived inline in the response
 * entry itself.  Returns 0 when the message was consumed, 1 to report
 * an out-of-memory condition to the caller.
 */
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		/* inline message: copy the CPL out of the response entry */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		/* first byte (opcode) of RSS entry and packet must agree */
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	/* all CPLs start with a union opcode_tid; act_establish is generic */
	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
1556
1557static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1558{
1559 struct cxgbi_device *cdev = handle;
1560
1561 switch (state) {
1562 case CXGB4_STATE_UP:
1563 pr_info("cdev 0x%p, UP.\n", cdev);
1564 /* re-initialize */
1565 break;
1566 case CXGB4_STATE_START_RECOVERY:
1567 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1568 /* close all connections */
1569 break;
1570 case CXGB4_STATE_DOWN:
1571 pr_info("cdev 0x%p, DOWN.\n", cdev);
1572 break;
1573 case CXGB4_STATE_DETACH:
1574 pr_info("cdev 0x%p, DETACH.\n", cdev);
1575 break;
1576 default:
1577 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1578 break;
1579 }
1580 return 0;
1581}
1582
1583static int __init cxgb4i_init_module(void)
1584{
1585 int rc;
1586
1587 printk(KERN_INFO "%s", version);
1588
1589 rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1590 if (rc < 0)
1591 return rc;
1592 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
1593 return 0;
1594}
1595
/*
 * Module unload.  Order matters: detach from cxgb4 first so no new
 * devices or messages arrive, then tear down all T4 cxgbi devices, and
 * finally unregister the iSCSI transport.
 */
static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}
1602
1603module_init(cxgb4i_init_module);
1604module_exit(cxgb4i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
new file mode 100644
index 000000000000..1096026ba241
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -0,0 +1,43 @@
1/*
2 * cxgb4i.h: Chelsio T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#ifndef __CXGB4I_H__
15#define __CXGB4I_H__
16
17#define CXGB4I_SCSI_HOST_QDEPTH 1024
18#define CXGB4I_MAX_CONN 16384
19#define CXGB4I_MAX_TARGET CXGB4I_MAX_CONN
20#define CXGB4I_MAX_LUN 0x1000
21
22/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
23#define CXGB4I_TX_HEADER_LEN \
24 (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
25
/* ULP_TX immediate-data header: precedes inline payload in a work request. */
struct ulptx_idata {
	__be32 cmd_more;	/* command word plus "more" flag bits */
	__be32 len;		/* length of the immediate data that follows */
};
30
/* CPL message reporting received iSCSI data handled by the DDP engine. */
struct cpl_rx_data_ddp {
	union opcode_tid ot;
	__be16 urg;
	__be16 len;
	__be32 seq;
	union {
		__be32 nxt_seq;
		__be32 ddp_report;	/* direct-data placement report */
	};
	__be32 ulp_crc;
	__be32 ddpvld;			/* DDP validity/status flags */
};
43#endif /* __CXGB4I_H__ */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
new file mode 100644
index 000000000000..be5661707dfa
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -0,0 +1,2612 @@
1/*
2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16#include <linux/skbuff.h>
17#include <linux/crypto.h>
18#include <linux/scatterlist.h>
19#include <linux/pci.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_host.h>
23#include <linux/if_vlan.h>
24#include <linux/inet.h>
25#include <net/dst.h>
26#include <net/route.h>
27#include <linux/inetdevice.h> /* ip_dev_find */
28#include <net/tcp.h>
29
30static unsigned int dbg_level;
31
32#include "libcxgbi.h"
33
34#define DRV_MODULE_NAME "libcxgbi"
35#define DRV_MODULE_DESC "Chelsio iSCSI driver library"
36#define DRV_MODULE_VERSION "0.9.0"
37#define DRV_MODULE_RELDATE "Jun. 2010"
38
39MODULE_AUTHOR("Chelsio Communications, Inc.");
40MODULE_DESCRIPTION(DRV_MODULE_DESC);
41MODULE_VERSION(DRV_MODULE_VERSION);
42MODULE_LICENSE("GPL");
43
44module_param(dbg_level, uint, 0644);
45MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
46
47
48/*
49 * cxgbi device management
50 * maintains a list of the cxgbi devices
51 */
52static LIST_HEAD(cdev_list);
53static DEFINE_MUTEX(cdev_mutex);
54
55int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
56 unsigned int max_conn)
57{
58 struct cxgbi_ports_map *pmap = &cdev->pmap;
59
60 pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
61 sizeof(struct cxgbi_sock *),
62 GFP_KERNEL);
63 if (!pmap->port_csk) {
64 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
65 return -ENOMEM;
66 }
67
68 pmap->max_connect = max_conn;
69 pmap->sport_base = base;
70 spin_lock_init(&pmap->lock);
71 return 0;
72}
73EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
74
/*
 * Tear down every connection still tracked in the device's source-port
 * map: mark each csk offload-down, force it closed, and drop the map's
 * reference.  The slots are cleared without taking pmap->lock;
 * NOTE(review): presumably no concurrent port allocation can happen at
 * this point (device teardown) — confirm against callers.
 */
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			/* drop the reference held by the port map */
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
97
/*
 * Final teardown of a cxgbi_device: remove its iSCSI hosts, close all
 * port-mapped connections, release DDP state, and free the device.
 * NOTE(review): when dev_ddp_cleanup is unset, cxgbi_ddp_cleanup() may
 * run twice (once in the else branch, again under "if (cdev->ddp)") —
 * confirm cxgbi_ddp_cleanup() is idempotent or clears cdev->ddp.
 */
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
114
115struct cxgbi_device *cxgbi_device_register(unsigned int extra,
116 unsigned int nports)
117{
118 struct cxgbi_device *cdev;
119
120 cdev = kzalloc(sizeof(*cdev) + extra + nports *
121 (sizeof(struct cxgbi_hba *) +
122 sizeof(struct net_device *)),
123 GFP_KERNEL);
124 if (!cdev) {
125 pr_warn("nport %d, OOM.\n", nports);
126 return NULL;
127 }
128 cdev->ports = (struct net_device **)(cdev + 1);
129 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
130 sizeof(struct net_device *));
131 if (extra)
132 cdev->dd_data = ((char *)cdev->hbas) +
133 nports * sizeof(struct cxgbi_hba *);
134 spin_lock_init(&cdev->pmap.lock);
135
136 mutex_lock(&cdev_mutex);
137 list_add_tail(&cdev->list_head, &cdev_list);
138 mutex_unlock(&cdev_mutex);
139
140 log_debug(1 << CXGBI_DBG_DEV,
141 "cdev 0x%p, p# %u.\n", cdev, nports);
142 return cdev;
143}
144EXPORT_SYMBOL_GPL(cxgbi_device_register);
145
/*
 * Remove @cdev from the global device list and destroy it (hosts,
 * port map, DDP state, and the structure itself).
 */
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
157
/*
 * Unregister and destroy every device whose flags contain all bits in
 * @flag (e.g. CXGBI_FLAG_DEV_T4).  Uses the _safe list iterator since
 * entries are removed while walking; destruction happens with
 * cdev_mutex held.
 */
void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			log_debug(1 << CXGBI_DBG_DEV,
				"cdev 0x%p, p# %u,%s.\n",
				cdev, cdev->nports, cdev->nports ?
				 cdev->ports[0]->name : "");
			list_del(&cdev->list_head);
			cxgbi_device_destroy(cdev);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
176
177struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
178{
179 struct cxgbi_device *cdev, *tmp;
180
181 mutex_lock(&cdev_mutex);
182 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
183 if (cdev->lldev == lldev) {
184 mutex_unlock(&cdev_mutex);
185 return cdev;
186 }
187 }
188 mutex_unlock(&cdev_mutex);
189 log_debug(1 << CXGBI_DBG_DEV,
190 "lldev 0x%p, NO match found.\n", lldev);
191 return NULL;
192}
193EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
194
/*
 * Map a net_device (possibly a VLAN device) to the cxgbi device and
 * port that own it.  For a VLAN device the lookup uses the underlying
 * real device, while the VLAN device itself is remembered in the
 * matching hba (chba->vdev).  On success *port (if non-NULL) receives
 * the port index; returns NULL when no registered device owns @ndev.
 */
static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
							int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
226
/*
 * Remove and free every iSCSI Scsi_Host previously created by
 * cxgbi_hbas_add(), dropping the pci_dev reference that was taken
 * for each host.
 */
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
246
/*
 * Create and register one iSCSI Scsi_Host per adapter port, recording
 * each host in cdev->hbas[].  A pci_dev reference is taken per host and
 * released either in the failure path here or by cxgbi_hbas_remove().
 *
 * Returns 0 on success or a negative errno; on failure, all hosts
 * added so far are removed again.
 */
int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		/* hold the pci device while the host references it */
		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
301
302/*
303 * iSCSI offload
304 *
305 * - source port management
306 * To find a free source port in the port allocation map we use a very simple
307 * rotor scheme to look for the next free port.
308 *
309 * If a source port has been specified make sure that it doesn't collide with
310 * our normal source port allocation map. If it's outside the range of our
311 * allocation/deallocation scheme just let them use it.
312 *
313 * If the source port is outside our allocation range, the caller is
314 * responsible for keeping track of their port usage.
315 */
/*
 * Allocate a source port for @csk from the device's port map using a
 * simple rotor: scan forward from pmap->next for a free slot, wrapping
 * around once.  On success the slot holds the csk, a socket reference
 * is taken, and csk->saddr.sin_port is set to sport_base + slot index.
 *
 * Returns 0 on success, -EADDRNOTAVAIL if there is no port map or all
 * ports are in use, -EADDRINUSE if the csk already has a source port.
 */
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	/* rotor: start just past the last slot handed out */
	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			/* reference held by the port map slot */
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
371
372static void sock_put_port(struct cxgbi_sock *csk)
373{
374 struct cxgbi_device *cdev = csk->cdev;
375 struct cxgbi_ports_map *pmap = &cdev->pmap;
376
377 if (csk->saddr.sin_port) {
378 int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;
379
380 csk->saddr.sin_port = 0;
381 if (idx < 0 || idx >= pmap->max_connect) {
382 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
383 cdev, csk->port_id,
384 cdev->ports[csk->port_id]->name,
385 ntohs(csk->saddr.sin_port));
386 return;
387 }
388
389 spin_lock_bh(&pmap->lock);
390 pmap->port_csk[idx] = NULL;
391 pmap->used--;
392 spin_unlock_bh(&pmap->lock);
393
394 log_debug(1 << CXGBI_DBG_SOCK,
395 "cdev 0x%p, p#%u %s, release %u.\n",
396 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
397 pmap->sport_base + idx);
398
399 cxgbi_sock_put(csk);
400 }
401}
402
403/*
404 * iscsi tcp connection
405 */
406void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
407{
408 if (csk->cpl_close) {
409 kfree_skb(csk->cpl_close);
410 csk->cpl_close = NULL;
411 }
412 if (csk->cpl_abort_req) {
413 kfree_skb(csk->cpl_abort_req);
414 csk->cpl_abort_req = NULL;
415 }
416 if (csk->cpl_abort_rpl) {
417 kfree_skb(csk->cpl_abort_rpl);
418 csk->cpl_abort_rpl = NULL;
419 }
420}
421EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
422
/*
 * Allocate and initialize an offload socket for @cdev: zeroed state in
 * CTP_CLOSED, one state-machine reference, empty rx/tx queues, and the
 * driver's pre-allocated control-plane skbs (via csk_alloc_cpls).
 * Returns the new csk, or NULL on allocation failure.
 */
static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	/* timer function is filled in later by the retry path */
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}
452
/*
 * Look up an IPv4 route for the given source/destination address and
 * port pair plus ToS, using the legacy struct flowi layout.
 * Returns the routing entry, or NULL if no route exists; the caller
 * must release the route with ip_rt_put().
 */
static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = tos }
		},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport }
		}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;

	return rt;
}
478
/*
 * Resolve @dst_addr (IPv4 only) to the cxgbi device/port that can
 * reach it and create a cxgbi_sock bound to that device.  Fills in the
 * csk's addressing (route, daddr, saddr, port index) but does not
 * allocate a source port yet.
 *
 * Returns the new csk, or ERR_PTR(-errno) on failure.
 */
static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	ndev = dst->neighbour->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		/* NOTE(review): ip_dev_find() can return NULL and holds a
		 * device reference that is never released here — confirm.
		 */
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst->neighbour->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			   port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;		/* stays 0 unless the loopback path set it */
	csk->dst = dst;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->saddr.sin_addr.s_addr = rt->rt_src;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
557
/*
 * Transition @csk to CTP_ESTABLISHED once the hardware reports the
 * connection up, seeding all send sequence counters from the initial
 * send sequence number (@opt is currently unused).  The smp_mb()
 * orders the sequence-number stores before the state change.
 */
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);
567
568static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
569{
570 log_debug(1 << CXGBI_DBG_SOCK,
571 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
572 csk, csk->state, csk->flags, csk->user_data);
573
574 if (csk->state != CTP_ESTABLISHED) {
575 read_lock_bh(&csk->callback_lock);
576 if (csk->user_data)
577 iscsi_conn_failure(csk->user_data,
578 ISCSI_ERR_CONN_FAILED);
579 read_unlock_bh(&csk->callback_lock);
580 }
581}
582
/*
 * Move @csk to CTP_CLOSED: release its source port, route and offload
 * resources, notify the iSCSI layer, and drop the state-machine
 * reference.  Sockets still in CTP_ACTIVE_OPEN (or already CLOSED) are
 * only flagged CTPF_ACTIVE_CLOSE_NEEDED and handled later.
 */
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
600
/*
 * Start an active close of @csk: purge any undelivered receive data
 * and, depending on the state, either flag the close for later (still
 * connecting) or issue it now — an abort if rx data would be lost,
 * a graceful close request otherwise.
 */
static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	/* any rx data still queued will never reach the application */
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}
632
/*
 * Fail an active-open attempt with @errno and tear the socket down.
 * The state is first forced to CTP_CONNECTING so cxgbi_sock_closed()
 * does not take its CTP_ACTIVE_OPEN early-return path and actually
 * releases the resources.
 */
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
646
/*
 * ARP resolution for an active-open request failed: fail the connect
 * with -EHOSTUNREACH (if still in CTP_ACTIVE_OPEN) and free the unsent
 * request skb, which carries the csk in skb->sk.
 */
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
662
/*
 * Handle an abort reply for @csk.  Teardown needs two arrivals while
 * CTPF_ABORT_RPL_PENDING is set: the first one only records
 * CTPF_ABORT_RPL_RCVD, the second clears both flags and closes the
 * socket.
 */
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
683
/*
 * The peer sent a FIN: advance the close state machine accordingly
 * (ESTABLISHED -> PASSIVE_CLOSE, ACTIVE_CLOSE -> CLOSE_WAIT_2,
 * CLOSE_WAIT_1 -> closed) and notify the iSCSI layer.  Ignored while
 * an abort reply is pending.
 */
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
716
/*
 * Our close request was acknowledged: record the peer-acked sequence
 * (snd_una = snd_nxt - 1) and advance the close state machine
 * (ACTIVE_CLOSE -> CLOSE_WAIT_1; either CLOSE_WAIT -> closed).
 * Ignored while an abort reply is pending.
 */
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
747
/*
 * cxgbi_sock_rcv_wr_ack - process a tx write-request ack from the h/w
 * @csk:     offloaded iscsi connection
 * @credits: number of WR credits returned by the hardware
 * @snd_una: updated un-acked send sequence number (used only if @seq_chk)
 * @seq_chk: nonzero if @snd_una is valid and should be applied
 *
 * Replenishes the connection's WR credit pool, retires completed write
 * requests from the pending-WR queue (the driver reuses each skb's csum
 * field to record how many credits that WR consumed), optionally advances
 * snd_una, and kicks the tx path when more data can now be pushed.
 */
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	/* un-acked credits can never exceed what is not currently free */
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	/* retire pending WRs covered by the returned credits */
	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			/* credits returned with no WR outstanding - h/w and
			 * s/w accounting have diverged; stop here.
			 */
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			/* partial ack of the head WR: reduce its remaining
			 * credit count and wait for the rest.
			 */
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			/* stale/out-of-order snd_una update - ignore it */
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			/* forward progress confirms the route is alive */
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
811
812static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
813 unsigned short mtu)
814{
815 int i = 0;
816
817 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
818 ++i;
819
820 return i;
821}
822
823unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
824{
825 unsigned int idx;
826 struct dst_entry *dst = csk->dst;
827
828 csk->advmss = dst_metric(dst, RTAX_ADVMSS);
829
830 if (csk->advmss > pmtu - 40)
831 csk->advmss = pmtu - 40;
832 if (csk->advmss < csk->cdev->mtus[0] - 40)
833 csk->advmss = csk->cdev->mtus[0] - 40;
834 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
835
836 return idx;
837}
838EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
839
/*
 * Stamp @skb with the connection's current tx sequence number and append
 * it to the pending-transmit queue.  Callers in this file invoke this
 * with csk->lock held (see cxgbi_sock_send_pdus).
 */
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
846
/*
 * Drop and free every write-request skb still tracked for @csk.
 */
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = cxgbi_sock_dequeue_wr(csk);

	while (skb) {
		kfree_skb(skb);
		skb = cxgbi_sock_dequeue_wr(csk);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
855
856void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
857{
858 int pending = cxgbi_sock_count_pending_wrs(csk);
859
860 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
861 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
862 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
863}
864EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
865
/*
 * cxgbi_sock_send_pdus - queue a chain of PDU skbs for transmit
 * @csk: offloaded iscsi connection
 * @skb: head of a singly-linked (skb->next) chain of PDU skbs
 *
 * Validates the connection state and each skb (headroom for the h/w WR
 * header, fragment count), entails the chain onto the write queue, and
 * pushes tx frames to the hardware.  Returns the number of payload bytes
 * queued, or a negative errno if nothing could be queued.
 */
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	/* send-window full: back off until acks free up space */
	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		/* +1 fragment if any payload lives in the linear area */
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		/* advance write_seq past payload plus ULP digest overhead */
		csk->write_seq += skb->len +
			cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	/* push anything queued (including data queued before an error) */
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	/* NOTE(review): when some skbs were already queued (copied > 0) the
	 * partial byte count is overwritten by the error code in the else
	 * branch - confirm this is the intended contract with callers.
	 */
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
940
941/*
942 * Direct Data Placement -
943 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
944 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
945 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
946 * The host memory address is programmed into h/w in the format of pagepod
947 * entries.
948 * The location of the pagepod entry is encoded into ddp tag which is used as
949 * the base for ITT/TTT.
950 */
951
952static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
953static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
954static unsigned char page_idx = DDP_PGIDX_MAX;
955
956static unsigned char sw_tag_idx_bits;
957static unsigned char sw_tag_age_bits;
958
959/*
960 * Direct-Data Placement page size adjustment
961 */
962static int ddp_adjust_page_table(void)
963{
964 int i;
965 unsigned int base_order, order;
966
967 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
968 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
969 PAGE_SIZE, 1UL << ddp_page_shift[0]);
970 return -EINVAL;
971 }
972
973 base_order = get_order(1UL << ddp_page_shift[0]);
974 order = get_order(1UL << PAGE_SHIFT);
975
976 for (i = 0; i < DDP_PGIDX_MAX; i++) {
977 /* first is the kernel page size, then just doubling */
978 ddp_page_order[i] = order - base_order + i;
979 ddp_page_shift[i] = PAGE_SHIFT + i;
980 }
981 return 0;
982}
983
984static int ddp_find_page_index(unsigned long pgsz)
985{
986 int i;
987
988 for (i = 0; i < DDP_PGIDX_MAX; i++) {
989 if (pgsz == (1UL << ddp_page_shift[i]))
990 return i;
991 }
992 pr_info("ddp page size %lu not supported.\n", pgsz);
993 return DDP_PGIDX_MAX;
994}
995
996static void ddp_setup_host_page_size(void)
997{
998 if (page_idx == DDP_PGIDX_MAX) {
999 page_idx = ddp_find_page_index(PAGE_SIZE);
1000
1001 if (page_idx == DDP_PGIDX_MAX) {
1002 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
1003 if (ddp_adjust_page_table() < 0) {
1004 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
1005 return;
1006 }
1007 page_idx = ddp_find_page_index(PAGE_SIZE);
1008 }
1009 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
1010 }
1011}
1012
1013void cxgbi_ddp_page_size_factor(int *pgsz_factor)
1014{
1015 int i;
1016
1017 for (i = 0; i < DDP_PGIDX_MAX; i++)
1018 pgsz_factor[i] = ddp_page_order[i];
1019}
1020EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
1021
1022/*
1023 * DDP setup & teardown
1024 */
1025
1026void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
1027 struct cxgbi_pagepod_hdr *hdr,
1028 struct cxgbi_gather_list *gl, unsigned int gidx)
1029{
1030 int i;
1031
1032 memcpy(ppod, hdr, sizeof(*hdr));
1033 for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
1034 ppod->addr[i] = gidx < gl->nelem ?
1035 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
1036 }
1037}
1038EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);
1039
/*
 * Zero an entire pagepod entry (header and all address slots).
 */
void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
1045
/*
 * ddp_find_unused_entries - claim @count consecutive free map slots
 * @ddp:   ddp info with the gl_map slot array
 * @start: first slot index to consider
 * @max:   one past the last slot index to consider
 * @count: number of consecutive slots needed
 * @gl:    gather list to record in each claimed slot
 *
 * Scans [start, max) under the map lock for a run of @count empty slots;
 * on success marks them all with @gl and returns the first index.
 * Returns -EBUSY when no suitable run exists.
 */
static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					  unsigned int start, unsigned int max,
					  unsigned int count,
					  struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	/* only start positions where a full run still fits are valid */
	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		/* check whether slots [i, i+count) are all free */
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		/* skip past the occupied slot that broke the run */
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}
1080
1081static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
1082 int start, int count)
1083{
1084 spin_lock(&ddp->map_lock);
1085 memset(&ddp->gl_map[start], 0,
1086 count * sizeof(struct cxgbi_gather_list *));
1087 spin_unlock(&ddp->map_lock);
1088}
1089
1090static inline void ddp_gl_unmap(struct pci_dev *pdev,
1091 struct cxgbi_gather_list *gl)
1092{
1093 int i;
1094
1095 for (i = 0; i < gl->nelem; i++)
1096 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
1097 PCI_DMA_FROMDEVICE);
1098}
1099
/*
 * DMA-map every page of the gather list for device-to-host transfers.
 * Returns the number of pages mapped (== gl->nelem) on success; on a
 * mapping failure unmaps the pages already done and returns -EINVAL.
 */
static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;
unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		/* temporarily shrink nelem so ddp_gl_unmap only touches
		 * the i pages that were successfully mapped, then restore
		 */
		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}
1127
/*
 * Tear down a gather list: unmap its DMA pages first, then free the
 * list structure itself (pages/phys_addr arrays are allocated inline).
 */
static void ddp_release_gl(struct cxgbi_gather_list *gl,
			  struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
1134
/*
 * ddp_make_gl - build a page gather list from a scatterlist
 * @xferlen: total transfer length in bytes
 * @sgl:     source scatterlist
 * @sgcnt:   number of scatterlist entries
 * @pdev:    pci device used for DMA mapping
 * @gfp:     allocation flags
 *
 * Coalesces adjacent sg entries that fall on the same page, validates
 * that the layout is DDP-able (only the first segment may have a leading
 * offset; every middle page must be fully used), DMA-maps the pages and
 * returns the gather list.  Returns NULL if the transfer is below the
 * DDP threshold, on allocation failure, or if the sgl layout/mapping is
 * unsuitable.
 */
static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
				    struct scatterlist *sgl,
				    unsigned int sgcnt,
				    struct pci_dev *pdev,
				    gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	/* worst-case page count including the leading offset */
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
				PAGE_SHIFT;
	/* NOTE(review): these initializers are dead stores - both i and j
	 * are re-initialized by the for loop below.
	 */
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	/* single allocation: struct + phys_addr[] + pages[] */
	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
		     sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			/* contiguous with the current run - extend it */
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j++;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	/* shrink nelem to the number of pages actually collected */
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
1218
/*
 * ddp_tag_release - release the ddp resources behind a ddp tag
 * @chba: hba the tag belongs to
 * @tag:  ddp tag encoding the pagepod index
 *
 * Decodes the pagepod index from the tag, clears the h/w pagepod entries,
 * frees the map slots, and releases the gather list (DMA unmap + kfree).
 * Bad indices or empty map slots are logged and ignored.
 */
static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		/* pods needed: PPOD_PAGES_MAX pages per pod, rounded up */
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}
1245
/*
 * ddp_tag_reserve - allocate pagepods for a gather list and build its tag
 * @csk:    offloaded connection
 * @tid:    connection tid programmed into the pagepod header
 * @sw_tag: software tag to embed in the ddp tag
 * @tagp:   out: the resulting ddp tag
 * @gl:     gather list to program into the pagepods
 * @gfp:    allocation flags for the optional gl skb allocation
 *
 * Finds a free run of pagepod map slots (searching from just past the
 * last allocation and wrapping to the front), programs the h/w pagepods,
 * and composes the ddp tag from @sw_tag and the pod index.  Returns 0 on
 * success and stores the tag through @tagp; negative errno on failure.
 */
static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
							npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
							ddp->nppods, npods,
							gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			/* wrap around and retry from the beginning */
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
							npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	/* optional per-driver pre-allocation of pagepod-write skbs */
	if (cdev->csk_ddp_alloc_gl_skb) {
		err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
		if (err < 0)
			goto unmark_entries;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0) {
		if (cdev->csk_ddp_free_gl_skb)
			cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
		goto unmark_entries;
	}

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
1314
1315int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
1316 unsigned int sw_tag, unsigned int xferlen,
1317 struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
1318{
1319 struct cxgbi_device *cdev = csk->cdev;
1320 struct cxgbi_tag_format *tformat = &cdev->tag_format;
1321 struct cxgbi_gather_list *gl;
1322 int err;
1323
1324 if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
1325 xferlen < DDP_THRESHOLD) {
1326 log_debug(1 << CXGBI_DBG_DDP,
1327 "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
1328 return -EINVAL;
1329 }
1330
1331 if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
1332 log_debug(1 << CXGBI_DBG_DDP,
1333 "sw_tag 0x%x NOT usable.\n", sw_tag);
1334 return -EINVAL;
1335 }
1336
1337 gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
1338 if (!gl)
1339 return -ENOMEM;
1340
1341 err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
1342 if (err < 0)
1343 ddp_release_gl(gl, cdev->pdev);
1344
1345 return err;
1346}
1347
/*
 * Final kref release for the ddp info: walk the pagepod map, free any
 * gather lists still registered (stepping by the pod count each one
 * occupies) along with their pre-allocated skbs, then free the ddp
 * structure itself.
 */
static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			/* NOTE(review): gl is freed without ddp_gl_unmap();
			 * presumably DMA mappings are gone by teardown time -
			 * confirm against the drivers' shutdown paths.
			 */
			kfree(gl);
			if (cdev->csk_ddp_free_gl_skb)
				cdev->csk_ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}
1374
1375int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
1376{
1377 struct cxgbi_ddp_info *ddp = cdev->ddp;
1378
1379 log_debug(1 << CXGBI_DBG_DDP,
1380 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
1381 cdev->ddp = NULL;
1382 if (ddp)
1383 return kref_put(&ddp->refcnt, ddp_destroy);
1384 return 0;
1385}
1386EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
1387
/*
 * cxgbi_ddp_init - allocate and initialize the device's ddp state
 * @cdev:     cxgbi device
 * @llimit:   lower bound of the h/w pagepod memory region
 * @ulimit:   upper bound of the h/w pagepod memory region
 * @max_txsz: h/w tx size limit
 * @max_rxsz: h/w rx size limit
 *
 * Sizes the pagepod map from the memory window, allocates the ddp info
 * with its trailing gl_map/gl_skb arrays, and derives the tag format
 * (reserved bits hold the pod index) plus the max PDU payload sizes.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	/* NOTE(review): this caps the usable pods at (1 << (bits-1)) - 1,
	 * i.e. roughly half the index space - confirm intentional.
	 */
	ppmax = (1 << (bits - 1)) - 1;

	/* single allocation: struct + gl_map[] + gl_skb[] */
	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
				ppmax * sizeof(struct cxgbi_gather_list *));
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	/* idx_last == nppods means "no allocation yet" (see ddp_tag_reserve) */
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
1451
1452/*
1453 * APIs interacting with open-iscsi libraries
1454 */
1455
1456static unsigned char padding[4];
1457
1458static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
1459{
1460 struct scsi_cmnd *sc = task->sc;
1461 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
1462 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1463 struct cxgbi_hba *chba = cconn->chba;
1464 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
1465 u32 tag = ntohl((__force u32)hdr_itt);
1466
1467 log_debug(1 << CXGBI_DBG_DDP,
1468 "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
1469 if (sc &&
1470 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
1471 cxgbi_is_ddp_tag(tformat, tag))
1472 ddp_tag_release(chba, tag);
1473}
1474
/*
 * task_reserve_itt - pick the on-wire ITT for a task, with ddp if possible
 * @task:    iscsi task
 * @hdr_itt: out: big-endian ITT to place in the PDU header
 *
 * Builds a software tag from the session age and task index, tries to
 * reserve ddp resources for read-direction commands, and falls back to a
 * plain non-ddp tag when ddp is unavailable.  Always returns 0.
 */
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	/* ddp only pays off when the target writes data back to us */
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
1512
1513void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
1514{
1515 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1516 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1517 struct cxgbi_device *cdev = cconn->chba->cdev;
1518 u32 tag = ntohl((__force u32) itt);
1519 u32 sw_bits;
1520
1521 sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
1522 if (idx)
1523 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
1524 if (age)
1525 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
1526
1527 log_debug(1 << CXGBI_DBG_DDP,
1528 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1529 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
1530 age ? *age : 0xFF);
1531}
1532EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
1533
1534void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1535{
1536 struct iscsi_conn *conn = csk->user_data;
1537
1538 if (conn) {
1539 log_debug(1 << CXGBI_DBG_SOCK,
1540 "csk 0x%p, cid %d.\n", csk, conn->id);
1541 iscsi_conn_queue_work(conn);
1542 }
1543}
1544EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
1545
1546/*
1547 * pdu receive, interact with libiscsi_tcp
1548 */
/*
 * read_pdu_skb - feed one skb into libiscsi_tcp's PDU state machine
 * @conn:      iscsi connection
 * @skb:       received skb
 * @offset:    byte offset into the skb to start reading from
 * @offloaded: nonzero if the payload was already DDP'ed to its buffer
 *
 * Translates the iscsi_tcp_recv_skb() status into either the number of
 * bytes consumed or a negative errno.
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			  skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
1589
/*
 * skb_read_pdu_bhs - parse the PDU basic header segment from an skb
 * @conn: iscsi connection
 * @skb:  skb whose data starts with the BHS
 *
 * Verifies the receive state machine expects a header and that the h/w
 * did not flag a header-CRC error, then hands the skb to libiscsi_tcp.
 * Returns bytes consumed or negative errno; failures also trigger an
 * iscsi connection failure.
 */
static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	/* header digest checked by the h/w; error reported via skb flag */
	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	return read_pdu_skb(conn, skb, 0, 0);
}
1613
1614static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
1615 struct sk_buff *skb, unsigned int offset)
1616{
1617 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1618 bool offloaded = 0;
1619 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
1620
1621 log_debug(1 << CXGBI_DBG_PDU_RX,
1622 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1623 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1624
1625 if (conn->datadgst_en &&
1626 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
1627 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1628 conn, lskb, cxgbi_skcb_flags(lskb));
1629 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1630 return -EIO;
1631 }
1632
1633 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
1634 return 0;
1635
1636 /* coalesced, add header digest length */
1637 if (lskb == skb && conn->hdrdgst_en)
1638 offset += ISCSI_DIGEST_SIZE;
1639
1640 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
1641 offloaded = 1;
1642
1643 if (opcode == ISCSI_OP_SCSI_DATA_IN)
1644 log_debug(1 << CXGBI_DBG_PDU_RX,
1645 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1646 skb, opcode, ntohl(tcp_conn->in.hdr->itt),
1647 tcp_conn->in.datalen, offloaded ? "is" : "not");
1648
1649 return read_pdu_skb(conn, skb, offset, offloaded);
1650}
1651
1652static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1653{
1654 struct cxgbi_device *cdev = csk->cdev;
1655 int must_send;
1656 u32 credits;
1657
1658 log_debug(1 << CXGBI_DBG_PDU_RX,
1659 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
1660 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1661 csk->rcv_wup, cdev->rx_credit_thres,
1662 cdev->rcv_win);
1663
1664 if (csk->state != CTP_ESTABLISHED)
1665 return;
1666
1667 credits = csk->copied_seq - csk->rcv_wup;
1668 if (unlikely(!credits))
1669 return;
1670 if (unlikely(cdev->rx_credit_thres == 0))
1671 return;
1672
1673 must_send = credits + 16384 >= cdev->rcv_win;
1674 if (must_send || credits >= cdev->rx_credit_thres)
1675 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
1676}
1677
/*
 * cxgbi_conn_pdu_ready - drain completed PDUs from the rx queue
 * @csk: offloaded connection
 *
 * Pops skbs off the receive queue as long as the head skb carries a
 * completion status, dispatching each as either a coalesced header+data
 * skb or a header skb optionally followed by a separate data skb.
 * Accumulated pdu bytes advance copied_seq, return rx credits to the
 * hardware, and update the connection's rx byte count; any parse error
 * fails the iscsi connection.
 */
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		/* stop at the first skb without a completion status */
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			/* header and data share one skb; the data starts
			 * past the header bytes consumed plus the per-h/w
			 * extra rx bytes.
			 */
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				/* data arrives in the next queued skb */
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);

		if (err < 0)
			break;
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
1793
1794static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
1795 unsigned int offset, unsigned int *off,
1796 struct scatterlist **sgp)
1797{
1798 int i;
1799 struct scatterlist *sg;
1800
1801 for_each_sg(sgl, sg, sgcnt, i) {
1802 if (offset < sg->length) {
1803 *off = offset;
1804 *sgp = sg;
1805 return 0;
1806 }
1807 offset -= sg->length;
1808 }
1809 return -EFAULT;
1810}
1811
/*
 * sgl_read_to_frags - describe @dlen bytes of a scatterlist as skb frags
 * @sg:       starting scatterlist entry
 * @sgoffset: byte offset into @sg to start from
 * @dlen:     number of bytes to cover
 * @frags:    output fragment array
 * @frag_max: capacity of @frags
 *
 * Walks the sgl, merging ranges that continue on the same page directly
 * after the previous fragment, and fills @frags.  Returns the number of
 * fragments used, or -EINVAL if the sgl runs out or @frags overflows.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			/* current entry exhausted - move to the next one */
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* extend the previous frag when this range is contiguous
		 * on the same page
		 */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
1861
/*
 * cxgbi_conn_alloc_pdu - allocate the tx skb for a task's PDU
 * @task:   iscsi task
 * @opcode: iscsi opcode of the PDU being built
 *
 * Allocates an skb with headroom for the h/w tx WR plus the BHS/AHS, and
 * for write-direction commands also room in the linear area for payload
 * when that fits within the skb head.  Reserves an ITT (except for
 * data-out, which reuses the command's ITT).  Returns 0 or -ENOMEM.
 */
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could goes into skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
			cdev->skb_tx_rsvd, headroom, opcode);
		return -ENOMEM;
	}

	/* reserve the WR header area; the PDU header follows it */
	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
1908
1909static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
1910{
1911 u8 submode = 0;
1912
1913 if (hcrc)
1914 submode |= 1;
1915 if (dcrc)
1916 submode |= 2;
1917 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
1918}
1919
/*
 * cxgbi_conn_init_pdu - attach @count bytes of payload to the pdu skb
 * @task:	iscsi task whose pdu is being built
 * @offset:	byte offset of this burst within the task's data buffer
 * @count:	number of payload bytes to attach
 *
 * The header was already placed in the skb head by cxgbi_conn_alloc_pdu().
 * Payload is either copied into the skb head (when it needs more frags
 * than the skb can hold) or referenced via page frags with extra page
 * references taken.  Pad bytes bring the payload to a 4-byte boundary.
 *
 * Returns 0 on success or a negative errno from the sgl walk.
 */
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	/* commit the header bytes; enable data digest only if there is data */
	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		/* locate the sg entry covering @offset */
		err = sgl_seek_offset(
			sdb->table.sgl, sdb->table.nents,
			tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		/* build the page-frag list for @count bytes from @sg */
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		/*
		 * Too many frags for the skb (or no frag slot left for the
		 * pad page): copy everything into the skb head instead.
		 * alloc_pdu() reserved enough headroom for this case.
		 */
		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			/* pad inline and zero padlen so the frag-pad
			 * path below is skipped */
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits into the frag list; hold page refs */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		/* non-SCSI (login/text/etc.) data lives in task->data */
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	/* attach the shared zero-filled pad page for the remaining pad */
	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
2021
2022int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2023{
2024 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2025 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2026 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2027 struct sk_buff *skb = tdata->skb;
2028 unsigned int datalen;
2029 int err;
2030
2031 if (!skb) {
2032 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2033 "task 0x%p, skb NULL.\n", task);
2034 return 0;
2035 }
2036
2037 datalen = skb->data_len;
2038 tdata->skb = NULL;
2039 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
2040 if (err > 0) {
2041 int pdulen = err;
2042
2043 log_debug(1 << CXGBI_DBG_PDU_TX,
2044 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2045 task, task->sc, skb, skb->len, skb->data_len, err);
2046
2047 if (task->conn->hdrdgst_en)
2048 pdulen += ISCSI_DIGEST_SIZE;
2049
2050 if (datalen && task->conn->datadgst_en)
2051 pdulen += ISCSI_DIGEST_SIZE;
2052
2053 task->conn->txdata_octets += pdulen;
2054 return 0;
2055 }
2056
2057 if (err == -EAGAIN || err == -ENOBUFS) {
2058 log_debug(1 << CXGBI_DBG_PDU_TX,
2059 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2060 task, skb, skb->len, skb->data_len, err);
2061 /* reset skb to send when we are called again */
2062 tdata->skb = skb;
2063 return err;
2064 }
2065
2066 kfree_skb(skb);
2067 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2068 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2069 task->itt, skb, skb->len, skb->data_len, err);
2070 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2071 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
2072 return err;
2073}
2074EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
2075
/*
 * cxgbi_cleanup_task - release per-task transmit state
 * @task:	iscsi task being torn down
 *
 * Frees any pdu skb that never reached the xmit callout, clears the
 * per-task data, releases the itt and delegates the rest to libiscsi_tcp.
 */
void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
2093
2094void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
2095 struct iscsi_stats *stats)
2096{
2097 struct iscsi_conn *conn = cls_conn->dd_data;
2098
2099 stats->txdata_octets = conn->txdata_octets;
2100 stats->rxdata_octets = conn->rxdata_octets;
2101 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
2102 stats->dataout_pdus = conn->dataout_pdus_cnt;
2103 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
2104 stats->datain_pdus = conn->datain_pdus_cnt;
2105 stats->r2t_pdus = conn->r2t_pdus_cnt;
2106 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
2107 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
2108 stats->digest_err = 0;
2109 stats->timeout_err = 0;
2110 stats->custom_length = 1;
2111 strcpy(stats->custom[0].desc, "eh_abort_cnt");
2112 stats->custom[0].value = conn->eh_abort_cnt;
2113}
2114EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
2115
/*
 * cxgbi_conn_max_xmit_dlength - clamp MaxXmitDataSegmentLength
 * @conn:	iscsi connection whose limit is being negotiated
 *
 * The upper bound is the larger of the skb head capacity and the paged
 * frag capacity (512 * MAX_SKB_FRAGS), further capped by the adapter's
 * tx_max_size, then aligned down to a multiple of 512.
 */
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	/* note: local "max" uses the kernel max() macro on the same line */
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
2134
2135static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
2136{
2137 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2138 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2139 unsigned int max = cconn->chba->cdev->rx_max_size;
2140
2141 cxgbi_align_pdu_size(max);
2142
2143 if (conn->max_recv_dlength) {
2144 if (conn->max_recv_dlength > max) {
2145 pr_err("MaxRecvDataSegmentLength %u > %u.\n",
2146 conn->max_recv_dlength, max);
2147 return -EINVAL;
2148 }
2149 conn->max_recv_dlength = min(conn->max_recv_dlength, max);
2150 cxgbi_align_pdu_size(conn->max_recv_dlength);
2151 } else
2152 conn->max_recv_dlength = max;
2153
2154 return 0;
2155}
2156
2157int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2158 enum iscsi_param param, char *buf, int buflen)
2159{
2160 struct iscsi_conn *conn = cls_conn->dd_data;
2161 struct iscsi_session *session = conn->session;
2162 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2163 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2164 struct cxgbi_sock *csk = cconn->cep->csk;
2165 int value, err = 0;
2166
2167 log_debug(1 << CXGBI_DBG_ISCSI,
2168 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2169 cls_conn, param, buflen, buf);
2170
2171 switch (param) {
2172 case ISCSI_PARAM_HDRDGST_EN:
2173 err = iscsi_set_param(cls_conn, param, buf, buflen);
2174 if (!err && conn->hdrdgst_en)
2175 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2176 conn->hdrdgst_en,
2177 conn->datadgst_en, 0);
2178 break;
2179 case ISCSI_PARAM_DATADGST_EN:
2180 err = iscsi_set_param(cls_conn, param, buf, buflen);
2181 if (!err && conn->datadgst_en)
2182 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2183 conn->hdrdgst_en,
2184 conn->datadgst_en, 0);
2185 break;
2186 case ISCSI_PARAM_MAX_R2T:
2187 sscanf(buf, "%d", &value);
2188 if (value <= 0 || !is_power_of_2(value))
2189 return -EINVAL;
2190 if (session->max_r2t == value)
2191 break;
2192 iscsi_tcp_r2tpool_free(session);
2193 err = iscsi_set_param(cls_conn, param, buf, buflen);
2194 if (!err && iscsi_tcp_r2tpool_alloc(session))
2195 return -ENOMEM;
2196 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2197 err = iscsi_set_param(cls_conn, param, buf, buflen);
2198 if (!err)
2199 err = cxgbi_conn_max_recv_dlength(conn);
2200 break;
2201 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2202 err = iscsi_set_param(cls_conn, param, buf, buflen);
2203 if (!err)
2204 err = cxgbi_conn_max_xmit_dlength(conn);
2205 break;
2206 default:
2207 return iscsi_set_param(cls_conn, param, buf, buflen);
2208 }
2209 return err;
2210}
2211EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
2212
2213int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
2214 enum iscsi_param param, char *buf)
2215{
2216 struct iscsi_conn *iconn = cls_conn->dd_data;
2217 int len;
2218
2219 log_debug(1 << CXGBI_DBG_ISCSI,
2220 "cls_conn 0x%p, param %d.\n", cls_conn, param);
2221
2222 switch (param) {
2223 case ISCSI_PARAM_CONN_PORT:
2224 spin_lock_bh(&iconn->session->lock);
2225 len = sprintf(buf, "%hu\n", iconn->portal_port);
2226 spin_unlock_bh(&iconn->session->lock);
2227 break;
2228 case ISCSI_PARAM_CONN_ADDRESS:
2229 spin_lock_bh(&iconn->session->lock);
2230 len = sprintf(buf, "%s\n", iconn->portal_address);
2231 spin_unlock_bh(&iconn->session->lock);
2232 break;
2233 default:
2234 return iscsi_conn_get_param(cls_conn, param, buf);
2235 }
2236 return len;
2237}
2238EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
2239
2240struct iscsi_cls_conn *
2241cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
2242{
2243 struct iscsi_cls_conn *cls_conn;
2244 struct iscsi_conn *conn;
2245 struct iscsi_tcp_conn *tcp_conn;
2246 struct cxgbi_conn *cconn;
2247
2248 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
2249 if (!cls_conn)
2250 return NULL;
2251
2252 conn = cls_conn->dd_data;
2253 tcp_conn = conn->dd_data;
2254 cconn = tcp_conn->dd_data;
2255 cconn->iconn = conn;
2256
2257 log_debug(1 << CXGBI_DBG_ISCSI,
2258 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2259 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2260
2261 return cls_conn;
2262}
2263EXPORT_SYMBOL_GPL(cxgbi_create_conn);
2264
/*
 * cxgbi_bind_conn - bind an iscsi connection to an offloaded endpoint
 * @cls_session:	iscsi class session
 * @cls_conn:		iscsi class connection
 * @transport_eph:	endpoint handle from cxgbi_ep_connect()
 * @is_leading:		leading-connection flag for libiscsi
 *
 * Programs the ddp page index on the socket, performs the libiscsi bind,
 * then cross-links connection, endpoint, and socket under the socket's
 * callback lock and records the portal address from the socket's
 * destination.  Returns 0 on success or a negative errno.
 */
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
				struct iscsi_cls_conn *cls_conn,
				u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	/* NOTE(review): the original errno from iscsi_conn_bind is
	 * collapsed to -EINVAL here */
	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	/* link socket and connection under the callback lock so the
	 * rx callbacks never see a half-initialized pairing */
	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	spin_lock_bh(&conn->session->lock);
	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
	conn->portal_port = ntohs(csk->daddr.sin_port);
	spin_unlock_bh(&conn->session->lock);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);
	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2319
/*
 * cxgbi_create_session - create an iscsi session on an offloaded endpoint
 * @ep:			endpoint from cxgbi_ep_connect(); must not be NULL
 * @cmds_max:		max outstanding commands
 * @qdepth:		requested queue depth (unused here)
 * @initial_cmdsn:	initial command sequence number
 *
 * Sets up a libiscsi session sized for iscsi_tcp_task + cxgbi_task_data
 * per task and allocates the r2t pool.  Returns the class session, or
 * NULL on any failure (session is torn down if the pool allocation
 * fails).
 */
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);
2362
2363void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
2364{
2365 log_debug(1 << CXGBI_DBG_ISCSI,
2366 "cls sess 0x%p.\n", cls_session);
2367
2368 iscsi_tcp_r2tpool_free(cls_session->dd_data);
2369 iscsi_session_teardown(cls_session);
2370}
2371EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
2372
2373int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2374 char *buf, int buflen)
2375{
2376 struct cxgbi_hba *chba = iscsi_host_priv(shost);
2377
2378 if (!chba->ndev) {
2379 shost_printk(KERN_ERR, shost, "Could not get host param. "
2380 "netdev for host not set.\n");
2381 return -ENODEV;
2382 }
2383
2384 log_debug(1 << CXGBI_DBG_ISCSI,
2385 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
2386 shost, chba, chba->ndev->name, param, buflen, buf);
2387
2388 switch (param) {
2389 case ISCSI_HOST_PARAM_IPADDRESS:
2390 {
2391 __be32 addr = in_aton(buf);
2392 log_debug(1 << CXGBI_DBG_ISCSI,
2393 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
2394 cxgbi_set_iscsi_ipv4(chba, addr);
2395 return 0;
2396 }
2397 case ISCSI_HOST_PARAM_HWADDRESS:
2398 case ISCSI_HOST_PARAM_NETDEV_NAME:
2399 return 0;
2400 default:
2401 return iscsi_host_set_param(shost, param, buf, buflen);
2402 }
2403}
2404EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
2405
2406int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2407 char *buf)
2408{
2409 struct cxgbi_hba *chba = iscsi_host_priv(shost);
2410 int len = 0;
2411
2412 if (!chba->ndev) {
2413 shost_printk(KERN_ERR, shost, "Could not get host param. "
2414 "netdev for host not set.\n");
2415 return -ENODEV;
2416 }
2417
2418 log_debug(1 << CXGBI_DBG_ISCSI,
2419 "shost 0x%p, hba 0x%p,%s, param %d.\n",
2420 shost, chba, chba->ndev->name, param);
2421
2422 switch (param) {
2423 case ISCSI_HOST_PARAM_HWADDRESS:
2424 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
2425 break;
2426 case ISCSI_HOST_PARAM_NETDEV_NAME:
2427 len = sprintf(buf, "%s\n", chba->ndev->name);
2428 break;
2429 case ISCSI_HOST_PARAM_IPADDRESS:
2430 {
2431 __be32 addr;
2432
2433 addr = cxgbi_get_iscsi_ipv4(chba);
2434 len = sprintf(buf, "%pI4", &addr);
2435 log_debug(1 << CXGBI_DBG_ISCSI,
2436 "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
2437 break;
2438 }
2439 default:
2440 return iscsi_host_get_param(shost, param, buf);
2441 }
2442
2443 return len;
2444}
2445EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
2446
/*
 * cxgbi_ep_connect - open an offloaded tcp connection for iscsi
 * @shost:	optional scsi host the caller wants to connect through
 * @dst_addr:	target's address
 * @non_blocking: libiscsi flag (connection completion is polled via
 *		cxgbi_ep_poll() either way)
 *
 * Routes to the target, starts the active open on the offload device,
 * and wraps the socket in an iscsi endpoint.  If @shost is given it must
 * map to the same hba the route resolved to.  Returns the endpoint or an
 * ERR_PTR().  On any failure after the route lookup the socket reference
 * taken here is dropped and the socket is closed.
 */
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
						struct sockaddr *dst_addr,
						int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		/* NOTE(review): format string lacks a separator between
		 * the host number and "hba" */
		pr_info("Could not connect through requested host %u"
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	/* the open may already have failed asynchronously */
	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2523
2524int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2525{
2526 struct cxgbi_endpoint *cep = ep->dd_data;
2527 struct cxgbi_sock *csk = cep->csk;
2528
2529 if (!cxgbi_sock_is_established(csk))
2530 return 0;
2531 return 1;
2532}
2533EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
2534
/*
 * cxgbi_ep_disconnect - tear down an endpoint and its offloaded socket
 * @ep:	endpoint from cxgbi_ep_connect()
 *
 * Unhooks the connection from the socket under the callback lock,
 * destroys the endpoint, then closes the socket (gracefully if it ever
 * reached ESTABLISHED, otherwise immediately) and drops the reference
 * taken in cxgbi_ep_connect().
 */
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		/* stop tx and detach the conn so rx callbacks see NULL */
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2562
2563int cxgbi_iscsi_init(struct iscsi_transport *itp,
2564 struct scsi_transport_template **stt)
2565{
2566 *stt = iscsi_register_transport(itp);
2567 if (*stt == NULL) {
2568 pr_err("unable to register %s transport 0x%p.\n",
2569 itp->name, itp);
2570 return -ENODEV;
2571 }
2572 log_debug(1 << CXGBI_DBG_ISCSI,
2573 "%s, registered iscsi transport 0x%p.\n",
2574 itp->name, stt);
2575 return 0;
2576}
2577EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
2578
2579void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
2580 struct scsi_transport_template **stt)
2581{
2582 if (*stt) {
2583 log_debug(1 << CXGBI_DBG_ISCSI,
2584 "de-register transport 0x%p, %s, stt 0x%p.\n",
2585 itp, itp->name, *stt);
2586 *stt = NULL;
2587 iscsi_unregister_transport(itp);
2588 }
2589}
2590EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
2591
/*
 * Module init: derive the sw tag itt/age bit widths from the libiscsi
 * masks and probe the host page size for ddp.  Always succeeds.
 */
static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}
2604
2605static void __exit libcxgbi_exit_module(void)
2606{
2607 cxgbi_device_unregister_all(0xFF);
2608 return;
2609}
2610
2611module_init(libcxgbi_init_module);
2612module_exit(libcxgbi_exit_module);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
new file mode 100644
index 000000000000..c57d59db000c
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -0,0 +1,745 @@
1/*
2 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#ifndef __LIBCXGBI_H__
15#define __LIBCXGBI_H__
16
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/debugfs.h>
21#include <linux/list.h>
22#include <linux/netdevice.h>
23#include <linux/if_vlan.h>
24#include <linux/scatterlist.h>
25#include <linux/skbuff.h>
26#include <linux/vmalloc.h>
27#include <scsi/scsi_device.h>
28#include <scsi/libiscsi_tcp.h>
29
30enum cxgbi_dbg_flag {
31 CXGBI_DBG_ISCSI,
32 CXGBI_DBG_DDP,
33 CXGBI_DBG_TOE,
34 CXGBI_DBG_SOCK,
35
36 CXGBI_DBG_PDU_TX,
37 CXGBI_DBG_PDU_RX,
38 CXGBI_DBG_DEV,
39};
40
41#define log_debug(level, fmt, ...) \
42 do { \
43 if (dbg_level & (level)) \
44 pr_info(fmt, ##__VA_ARGS__); \
45 } while (0)
46
47/* max. connections per adapter */
48#define CXGBI_MAX_CONN 16384
49
50/* always allocate rooms for AHS */
51#define SKB_TX_ISCSI_PDU_HEADER_MAX \
52 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
53
54#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/
55
56/*
57 * align pdu size to multiple of 512 for better performance
58 */
59#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
60
61#define ULP2_MODE_ISCSI 2
62
63#define ULP2_MAX_PKT_SIZE 16224
64#define ULP2_MAX_PDU_PAYLOAD \
65 (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
66
67/*
68 * For iscsi connections HW may inserts digest bytes into the pdu. Those digest
69 * bytes are not sent by the host but are part of the TCP payload and therefore
70 * consume TCP sequence space.
71 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };

/* Digest bytes HW inserts for @submode (bit 0 = hcrc, bit 1 = dcrc). */
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	unsigned int idx = (unsigned int)submode & 3;

	return ulp2_extra_len[idx];
}
77
78/*
79 * struct pagepod_hdr, pagepod - pagepod format
80 */
81
82#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
83#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
84#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
85#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
86
87struct cxgbi_pagepod_hdr {
88 u32 vld_tid;
89 u32 pgsz_tag_clr;
90 u32 max_offset;
91 u32 page_offset;
92 u64 rsvd;
93};
94
95#define PPOD_PAGES_MAX 4
96struct cxgbi_pagepod {
97 struct cxgbi_pagepod_hdr hdr;
98 u64 addr[PPOD_PAGES_MAX + 1];
99};
100
101struct cxgbi_tag_format {
102 unsigned char sw_bits;
103 unsigned char rsvd_bits;
104 unsigned char rsvd_shift;
105 unsigned char filler[1];
106 u32 rsvd_mask;
107};
108
109struct cxgbi_gather_list {
110 unsigned int tag;
111 unsigned int length;
112 unsigned int offset;
113 unsigned int nelem;
114 struct page **pages;
115 dma_addr_t phys_addr[0];
116};
117
118struct cxgbi_ddp_info {
119 struct kref refcnt;
120 struct cxgbi_device *cdev;
121 struct pci_dev *pdev;
122 unsigned int max_txsz;
123 unsigned int max_rxsz;
124 unsigned int llimit;
125 unsigned int ulimit;
126 unsigned int nppods;
127 unsigned int idx_last;
128 unsigned char idx_bits;
129 unsigned char filler[3];
130 unsigned int idx_mask;
131 unsigned int rsvd_tag_mask;
132 spinlock_t map_lock;
133 struct cxgbi_gather_list **gl_map;
134 struct sk_buff **gl_skb;
135};
136
137#define DDP_PGIDX_MAX 4
138#define DDP_THRESHOLD 2048
139
140#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
141
142#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
143#define PPOD_SIZE_SHIFT 6
144
145#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
146#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
147#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
148
149#define PPOD_COLOR_SHIFT 0
150#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
151
152#define PPOD_IDX_SHIFT 6
153#define PPOD_IDX_MAX_SIZE 24
154
155#define PPOD_TID_SHIFT 0
156#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
157
158#define PPOD_TAG_SHIFT 6
159#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
160
161#define PPOD_VALID_SHIFT 24
162#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
163#define PPOD_VALID_FLAG PPOD_VALID(1U)
164
165/*
166 * sge_opaque_hdr -
167 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
168 * and for which we must reserve space.
169 */
170struct sge_opaque_hdr {
171 void *dev;
172 dma_addr_t addr[MAX_SKB_FRAGS + 1];
173};
174
175struct cxgbi_sock {
176 struct cxgbi_device *cdev;
177
178 int tid;
179 int atid;
180 unsigned long flags;
181 unsigned int mtu;
182 unsigned short rss_qid;
183 unsigned short txq_idx;
184 unsigned short advmss;
185 unsigned int tx_chan;
186 unsigned int rx_chan;
187 unsigned int mss_idx;
188 unsigned int smac_idx;
189 unsigned char port_id;
190 int wr_max_cred;
191 int wr_cred;
192 int wr_una_cred;
193 unsigned char hcrc_len;
194 unsigned char dcrc_len;
195
196 void *l2t;
197 struct sk_buff *wr_pending_head;
198 struct sk_buff *wr_pending_tail;
199 struct sk_buff *cpl_close;
200 struct sk_buff *cpl_abort_req;
201 struct sk_buff *cpl_abort_rpl;
202 struct sk_buff *skb_ulp_lhdr;
203 spinlock_t lock;
204 struct kref refcnt;
205 unsigned int state;
206 struct sockaddr_in saddr;
207 struct sockaddr_in daddr;
208 struct dst_entry *dst;
209 struct sk_buff_head receive_queue;
210 struct sk_buff_head write_queue;
211 struct timer_list retry_timer;
212 int err;
213 rwlock_t callback_lock;
214 void *user_data;
215
216 u32 rcv_nxt;
217 u32 copied_seq;
218 u32 rcv_wup;
219 u32 snd_nxt;
220 u32 snd_una;
221 u32 write_seq;
222};
223
224/*
225 * connection states
226 */
227enum cxgbi_sock_states{
228 CTP_CLOSED,
229 CTP_CONNECTING,
230 CTP_ACTIVE_OPEN,
231 CTP_ESTABLISHED,
232 CTP_ACTIVE_CLOSE,
233 CTP_PASSIVE_CLOSE,
234 CTP_CLOSE_WAIT_1,
235 CTP_CLOSE_WAIT_2,
236 CTP_ABORTING,
237};
238
239/*
240 * Connection flags -- many to track some close related events.
241 */
242enum cxgbi_sock_flags {
243 CTPF_ABORT_RPL_RCVD, /*received one ABORT_RPL_RSS message */
244 CTPF_ABORT_REQ_RCVD, /*received one ABORT_REQ_RSS message */
245 CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */
246 CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */
247 CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */
248 CTPF_HAS_ATID, /* reserved atid */
249 CTPF_HAS_TID, /* reserved hw tid */
250 CTPF_OFFLOAD_DOWN, /* offload function off */
251};
252
253struct cxgbi_skb_rx_cb {
254 __u32 ddigest;
255 __u32 pdulen;
256};
257
258struct cxgbi_skb_tx_cb {
259 void *l2t;
260 struct sk_buff *wr_next;
261};
262
263enum cxgbi_skcb_flags {
264 SKCBF_TX_NEED_HDR, /* packet needs a header */
265 SKCBF_RX_COALESCED, /* received whole pdu */
266 SKCBF_RX_HDR, /* recieved pdu header */
267 SKCBF_RX_DATA, /* recieved pdu payload */
268 SKCBF_RX_STATUS, /* recieved ddp status */
269 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
270 SKCBF_RX_HCRC_ERR, /* header digest error */
271 SKCBF_RX_DCRC_ERR, /* data digest error */
272 SKCBF_RX_PAD_ERR, /* padding byte error */
273};
274
275struct cxgbi_skb_cb {
276 unsigned char ulp_mode;
277 unsigned long flags;
278 unsigned int seq;
279 union {
280 struct cxgbi_skb_rx_cb rx;
281 struct cxgbi_skb_tx_cb tx;
282 };
283};
284
285#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
286#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags)
287#define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode)
288#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq)
289#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest)
290#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen)
291#define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next)
292
293static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
294 enum cxgbi_skcb_flags flag)
295{
296 __set_bit(flag, &(cxgbi_skcb_flags(skb)));
297}
298
299static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
300 enum cxgbi_skcb_flags flag)
301{
302 __clear_bit(flag, &(cxgbi_skcb_flags(skb)));
303}
304
305static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
306 enum cxgbi_skcb_flags flag)
307{
308 return test_bit(flag, &(cxgbi_skcb_flags(skb)));
309}
310
311static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
312 enum cxgbi_sock_flags flag)
313{
314 __set_bit(flag, &csk->flags);
315 log_debug(1 << CXGBI_DBG_SOCK,
316 "csk 0x%p,%u,0x%lx, bit %d.\n",
317 csk, csk->state, csk->flags, flag);
318}
319
320static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
321 enum cxgbi_sock_flags flag)
322{
323 __clear_bit(flag, &csk->flags);
324 log_debug(1 << CXGBI_DBG_SOCK,
325 "csk 0x%p,%u,0x%lx, bit %d.\n",
326 csk, csk->state, csk->flags, flag);
327}
328
329static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
330 enum cxgbi_sock_flags flag)
331{
332 if (csk == NULL)
333 return 0;
334 return test_bit(flag, &csk->flags);
335}
336
337static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
338{
339 log_debug(1 << CXGBI_DBG_SOCK,
340 "csk 0x%p,%u,0x%lx, state -> %u.\n",
341 csk, csk->state, csk->flags, state);
342 csk->state = state;
343}
344
/*
 * kref release callback: free the cxgbi_sock once its last reference is
 * dropped (via cxgbi_sock_put()).
 */
static inline void cxgbi_sock_free(struct kref *kref)
{
	struct cxgbi_sock *csk = container_of(kref,
						struct cxgbi_sock,
						refcnt);
	/* NOTE(review): container_of() of an embedded member cannot yield
	 * NULL, so this check always passes */
	if (csk) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"free csk 0x%p, state %u, flags 0x%lx\n",
			csk, csk->state, csk->flags);
		kfree(csk);
	}
}
357
/*
 * Drop a reference on @csk, tracing the caller (@fn); frees the socket
 * via cxgbi_sock_free() when the count reaches zero.
 */
static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, put csk 0x%p, ref %u-1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)
366
/* Take a reference on @csk, tracing the caller (@fn). */
static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, get csk 0x%p, ref %u+1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)
375
376static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
377{
378 return csk->state >= CTP_ACTIVE_CLOSE;
379}
380
381static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
382{
383 return csk->state == CTP_ESTABLISHED;
384}
385
386static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
387{
388 struct sk_buff *skb;
389
390 while ((skb = __skb_dequeue(&csk->write_queue)))
391 __kfree_skb(skb);
392}
393
/*
 * Compute the TCP window-scale shift for a desired window of @win
 * bytes: the smallest shift such that 65535 << shift covers the
 * window, capped at 14 (the maximum window scale, per RFC 1323).
 */
static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale;

	for (wscale = 0; wscale < 14; wscale++) {
		if ((65535 << wscale) >= win)
			break;
	}
	return wscale;
}
402
/*
 * Allocate an skb for a firmware work request: @wrlen bytes of WR
 * header plus room for @dlen bytes of payload.  The WR header is
 * reserved with __skb_put() and the full wrlen+dlen region is zeroed
 * up front (skb->head == skb->data here since no headroom was
 * reserved), including the payload area the caller fills in later.
 * Returns NULL (after logging) on allocation failure.
 */
static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}
414
415
416/*
417 * The number of WRs needed for an skb depends on the number of fragments
418 * in the skb and whether it has any payload in its main body. This maps the
419 * length of the gather list represented by an skb into the # of necessary WRs.
420 * The extra two fragments are for iscsi bhs and payload padding.
421 */
422#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
423
424static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
425{
426 csk->wr_pending_head = csk->wr_pending_tail = NULL;
427}
428
/*
 * Append @skb to csk's pending-WR list (singly linked through the
 * cxgbi_skcb_tx_wr_next() control-block field).
 */
static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
447
/*
 * Sum the per-skb counts of all skbs on the pending-WR list.
 * NOTE(review): skb->csum is repurposed here to carry a per-skb WR
 * count/credit — presumably set when the WR skb was built; confirm
 * against the lower-level driver's tx path.
 */
static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
{
	int n = 0;
	const struct sk_buff *skb = csk->wr_pending_head;

	while (skb) {
		n += skb->csum;
		skb = cxgbi_skcb_tx_wr_next(skb);
	}
	return n;
}
459
460static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
461{
462 return csk->wr_pending_head;
463}
464
465static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
466{
467 struct sk_buff *skb = csk->wr_pending_head;
468
469 if (likely(skb)) {
470 csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
471 cxgbi_skcb_tx_wr_next(skb) = NULL;
472 }
473 return skb;
474}
475
476void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
477void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
478void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
479void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
480void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
481void cxgbi_sock_closed(struct cxgbi_sock *);
482void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
483void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
484void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
485void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
486void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
487 int);
488unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
489void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);
490
/* Per-port iSCSI host: ties a Scsi_Host to its net_device and adapter. */
struct cxgbi_hba {
	struct net_device *ndev;
	struct net_device *vdev;	/* vlan dev */
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;	/* owning adapter */
	__be32 ipv4addr;		/* iSCSI source address, network order */
	unsigned char port_id;
};
499
/*
 * Source-port allocation map for offloaded connections; presumably
 * port_csk[i] holds the csk bound to source port sport_base + i
 * (see cxgbi_device_portmap_create()) — confirm against the .c file.
 */
struct cxgbi_ports_map {
	unsigned int max_connect;	/* number of port_csk[] slots */
	unsigned int used;		/* slots currently in use */
	unsigned short sport_base;	/* first source port of the range */
	spinlock_t lock;
	unsigned int next;		/* next slot to try on allocation */
	struct cxgbi_sock **port_csk;
};
508
509#define CXGBI_FLAG_DEV_T3 0x1
510#define CXGBI_FLAG_DEV_T4 0x2
511#define CXGBI_FLAG_ADAPTER_RESET 0x4
512#define CXGBI_FLAG_IPV4_SET 0x10
/*
 * Per-adapter state shared by the T3/T4 lower-level drivers (see
 * CXGBI_FLAG_DEV_T3/T4).  The csk_* function pointers are the
 * hardware-specific connection operations the lower-level driver
 * fills in.
 */
struct cxgbi_device {
	struct list_head list_head;	/* global adapter-list linkage */
	unsigned int flags;		/* CXGBI_FLAG_* */
	struct net_device **ports;	/* one net_device per port */
	void *lldev;			/* lower-level driver handle */
	struct cxgbi_hba **hbas;	/* one hba per port */
	const unsigned short *mtus;	/* adapter MTU table */
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;

	unsigned int pfvf;
	unsigned int snd_win;		/* TCP send window, bytes */
	unsigned int rcv_win;		/* TCP receive window, bytes */
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	struct cxgbi_ports_map pmap;
	struct cxgbi_tag_format tag_format;
	struct cxgbi_ddp_info *ddp;

	/* DDP (direct data placement) hooks */
	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
	int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
	int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
				unsigned int, unsigned int,
				struct cxgbi_gather_list *);
	void (*csk_ddp_clear)(struct cxgbi_hba *,
				unsigned int, unsigned int, unsigned int);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				unsigned int, int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				unsigned int, int, bool);

	/* connection life-cycle hooks */
	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;			/* lower-level driver private data */
};
562#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data)
563
/* Per-connection glue between libiscsi's iscsi_conn and the endpoint. */
struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;	/* presumably itt bits reserved for the
				 * task index — see cxgbi_parse_pdu_itt() */
};
570
/* Connect-time endpoint: ties an offloaded socket to its conn and hba. */
struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};
576
577#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
/*
 * Per-task transmit state; lives in the task's dd_data right after
 * the iscsi_tcp_task (see iscsi_task_cxgbi_data()).
 */
struct cxgbi_task_data {
	unsigned short nr_frags;	/* frags[] entries in use */
	skb_frag_t frags[MAX_PDU_FRAGS];
	struct sk_buff *skb;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;	/* presumably offset into the task's
				 * scatterlist — confirm in the .c file */
};
586#define iscsi_task_cxgbi_data(task) \
587 ((task)->dd_data + sizeof(struct iscsi_tcp_task))
588
589static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag)
590{
591 return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
592}
593
594static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat,
595 u32 sw_tag)
596{
597 sw_tag >>= (32 - tformat->rsvd_bits);
598 return !sw_tag;
599}
600
/*
 * Mark @sw_tag as a non-DDP tag by setting the top reserved bit
 * (bit @shift).  If sw_tag already has bits at or above that
 * position, the upper bits are shifted up one place to make room.
 */
static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat,
					u32 sw_tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & ((1 << shift) - 1);
		/*
		 * NOTE(review): shifting down by (shift - 1) but back up by
		 * shift keeps bit (shift - 1) in both halves; verify against
		 * cxgbi_tag_nonrsvd_bits(), which reverses this mapping.
		 */
		u32 v2 = (sw_tag >> (shift - 1)) << shift;

		return v2 | v1 | 1 << shift;
	}

	return sw_tag | 1 << shift;
}
616
/*
 * Build the base of a DDP tag from @sw_tag: the low rsvd_shift bits
 * stay in place and any higher sw bits are relocated above the
 * rsvd_bits-wide reserved field, leaving that field clear.
 */
static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat,
					u32 sw_tag)
{
	u32 mask = (1 << tformat->rsvd_shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & mask;
		u32 v2 = sw_tag >> tformat->rsvd_shift;

		v2 <<= tformat->rsvd_bits + tformat->rsvd_shift;

		return v2 | v1;
	}

	return sw_tag;
}
633
634static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat,
635 u32 tag)
636{
637 if (cxgbi_is_ddp_tag(tformat, tag))
638 return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
639
640 return 0;
641}
642
/*
 * Recover the software (non-reserved) bits from @tag, reversing
 * either cxgbi_ddp_tag_base() (DDP tags) or cxgbi_set_non_ddp_tag()
 * (non-DDP tags).
 */
static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
					u32 tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgbi_is_ddp_tag(tformat, tag)) {
		/* low rsvd_shift bits stayed in place; upper sw bits sit
		 * above the reserved field */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		u32 mask = (1 << shift) - 1;
		/* drop the non-DDP marker bit, then undo the one-bit
		 * up-shift of the upper half */
		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
660
661static inline void *cxgbi_alloc_big_mem(unsigned int size,
662 gfp_t gfp)
663{
664 void *p = kmalloc(size, gfp);
665 if (!p)
666 p = vmalloc(size);
667 if (p)
668 memset(p, 0, size);
669 return p;
670}
671
/* Free memory from cxgbi_alloc_big_mem(), whichever allocator backed it. */
static inline void cxgbi_free_big_mem(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
		return;
	}
	kfree(addr);
}
679
680static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
681{
682 if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
683 chba->ipv4addr = ipaddr;
684 else
685 pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
686 chba->ndev->name);
687}
688
689static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba)
690{
691 return chba->ipv4addr;
692}
693
694struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
695void cxgbi_device_unregister(struct cxgbi_device *);
696void cxgbi_device_unregister_all(unsigned int flag);
697struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
698int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int,
699 struct scsi_host_template *,
700 struct scsi_transport_template *);
701void cxgbi_hbas_remove(struct cxgbi_device *);
702
703int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
704 unsigned int max_conn);
705void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);
706
707void cxgbi_conn_tx_open(struct cxgbi_sock *);
708void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
709int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
710int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int);
711int cxgbi_conn_xmit_pdu(struct iscsi_task *);
712
713void cxgbi_cleanup_task(struct iscsi_task *task);
714
715void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
716int cxgbi_set_conn_param(struct iscsi_cls_conn *,
717 enum iscsi_param, char *, int);
718int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
719struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
720int cxgbi_bind_conn(struct iscsi_cls_session *,
721 struct iscsi_cls_conn *, u64, int);
722void cxgbi_destroy_session(struct iscsi_cls_session *);
723struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
724 u16, u16, u32);
725int cxgbi_set_host_param(struct Scsi_Host *,
726 enum iscsi_host_param, char *, int);
727int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
728struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
729 struct sockaddr *, int);
730int cxgbi_ep_poll(struct iscsi_endpoint *, int);
731void cxgbi_ep_disconnect(struct iscsi_endpoint *);
732
733int cxgbi_iscsi_init(struct iscsi_transport *,
734 struct scsi_transport_template **);
735void cxgbi_iscsi_cleanup(struct iscsi_transport *,
736 struct scsi_transport_template **);
737void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
738int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
739 unsigned int, unsigned int);
740int cxgbi_ddp_cleanup(struct cxgbi_device *);
741void cxgbi_ddp_page_size_factor(int *);
742void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *);
743void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *,
744 struct cxgbi_gather_list *, unsigned int);
745#endif /*__LIBCXGBI_H__*/
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1a970a76b1b9..6b729324b8d3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic SCSI-3 ALUA SCSI Device Handler 2 * Generic SCSI-3 ALUA SCSI Device Handler
3 * 3 *
4 * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH. 4 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -20,17 +20,19 @@
20 * 20 *
21 */ 21 */
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/delay.h>
23#include <scsi/scsi.h> 24#include <scsi/scsi.h>
24#include <scsi/scsi_eh.h> 25#include <scsi/scsi_eh.h>
25#include <scsi/scsi_dh.h> 26#include <scsi/scsi_dh.h>
26 27
27#define ALUA_DH_NAME "alua" 28#define ALUA_DH_NAME "alua"
28#define ALUA_DH_VER "1.2" 29#define ALUA_DH_VER "1.3"
29 30
30#define TPGS_STATE_OPTIMIZED 0x0 31#define TPGS_STATE_OPTIMIZED 0x0
31#define TPGS_STATE_NONOPTIMIZED 0x1 32#define TPGS_STATE_NONOPTIMIZED 0x1
32#define TPGS_STATE_STANDBY 0x2 33#define TPGS_STATE_STANDBY 0x2
33#define TPGS_STATE_UNAVAILABLE 0x3 34#define TPGS_STATE_UNAVAILABLE 0x3
35#define TPGS_STATE_LBA_DEPENDENT 0x4
34#define TPGS_STATE_OFFLINE 0xe 36#define TPGS_STATE_OFFLINE 0xe
35#define TPGS_STATE_TRANSITIONING 0xf 37#define TPGS_STATE_TRANSITIONING 0xf
36 38
@@ -39,6 +41,7 @@
39#define TPGS_SUPPORT_NONOPTIMIZED 0x02 41#define TPGS_SUPPORT_NONOPTIMIZED 0x02
40#define TPGS_SUPPORT_STANDBY 0x04 42#define TPGS_SUPPORT_STANDBY 0x04
41#define TPGS_SUPPORT_UNAVAILABLE 0x08 43#define TPGS_SUPPORT_UNAVAILABLE 0x08
44#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
42#define TPGS_SUPPORT_OFFLINE 0x40 45#define TPGS_SUPPORT_OFFLINE 0x40
43#define TPGS_SUPPORT_TRANSITION 0x80 46#define TPGS_SUPPORT_TRANSITION 0x80
44 47
@@ -460,6 +463,8 @@ static char print_alua_state(int state)
460 return 'S'; 463 return 'S';
461 case TPGS_STATE_UNAVAILABLE: 464 case TPGS_STATE_UNAVAILABLE:
462 return 'U'; 465 return 'U';
466 case TPGS_STATE_LBA_DEPENDENT:
467 return 'L';
463 case TPGS_STATE_OFFLINE: 468 case TPGS_STATE_OFFLINE:
464 return 'O'; 469 return 'O';
465 case TPGS_STATE_TRANSITIONING: 470 case TPGS_STATE_TRANSITIONING:
@@ -542,7 +547,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
542 int len, k, off, valid_states = 0; 547 int len, k, off, valid_states = 0;
543 char *ucp; 548 char *ucp;
544 unsigned err; 549 unsigned err;
550 unsigned long expiry, interval = 10;
545 551
552 expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
546 retry: 553 retry:
547 err = submit_rtpg(sdev, h); 554 err = submit_rtpg(sdev, h);
548 555
@@ -553,7 +560,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
553 return SCSI_DH_IO; 560 return SCSI_DH_IO;
554 561
555 err = alua_check_sense(sdev, &sense_hdr); 562 err = alua_check_sense(sdev, &sense_hdr);
556 if (err == ADD_TO_MLQUEUE) 563 if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
557 goto retry; 564 goto retry;
558 sdev_printk(KERN_INFO, sdev, 565 sdev_printk(KERN_INFO, sdev,
559 "%s: rtpg sense code %02x/%02x/%02x\n", 566 "%s: rtpg sense code %02x/%02x/%02x\n",
@@ -587,38 +594,37 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
587 } 594 }
588 595
589 sdev_printk(KERN_INFO, sdev, 596 sdev_printk(KERN_INFO, sdev,
590 "%s: port group %02x state %c supports %c%c%c%c%c%c\n", 597 "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
591 ALUA_DH_NAME, h->group_id, print_alua_state(h->state), 598 ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
592 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', 599 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
593 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', 600 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
601 valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
594 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', 602 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
595 valid_states&TPGS_SUPPORT_STANDBY?'S':'s', 603 valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
596 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', 604 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
597 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); 605 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
598 606
599 if (h->tpgs & TPGS_MODE_EXPLICIT) { 607 switch (h->state) {
600 switch (h->state) { 608 case TPGS_STATE_TRANSITIONING:
601 case TPGS_STATE_TRANSITIONING: 609 if (time_before(jiffies, expiry)) {
602 /* State transition, retry */ 610 /* State transition, retry */
611 interval *= 10;
612 msleep(interval);
603 goto retry; 613 goto retry;
604 break;
605 case TPGS_STATE_OFFLINE:
606 /* Path is offline, fail */
607 err = SCSI_DH_DEV_OFFLINED;
608 break;
609 default:
610 break;
611 } 614 }
612 } else { 615 /* Transitioning time exceeded, set port to standby */
613 /* Only Implicit ALUA support */ 616 err = SCSI_DH_RETRY;
614 if (h->state == TPGS_STATE_OPTIMIZED || 617 h->state = TPGS_STATE_STANDBY;
615 h->state == TPGS_STATE_NONOPTIMIZED || 618 break;
616 h->state == TPGS_STATE_STANDBY) 619 case TPGS_STATE_OFFLINE:
617 /* Useable path if active */ 620 case TPGS_STATE_UNAVAILABLE:
618 err = SCSI_DH_OK; 621 /* Path unuseable for unavailable/offline */
619 else 622 err = SCSI_DH_DEV_OFFLINED;
620 /* Path unuseable for unavailable/offline */ 623 break;
621 err = SCSI_DH_DEV_OFFLINED; 624 default:
625 /* Useable path if active */
626 err = SCSI_DH_OK;
627 break;
622 } 628 }
623 return err; 629 return err;
624} 630}
@@ -672,7 +678,9 @@ static int alua_activate(struct scsi_device *sdev,
672 goto out; 678 goto out;
673 } 679 }
674 680
675 if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { 681 if (h->tpgs & TPGS_MODE_EXPLICIT &&
682 h->state != TPGS_STATE_OPTIMIZED &&
683 h->state != TPGS_STATE_LBA_DEPENDENT) {
676 h->callback_fn = fn; 684 h->callback_fn = fn;
677 h->callback_data = data; 685 h->callback_data = data;
678 err = submit_stpg(h); 686 err = submit_stpg(h);
@@ -698,8 +706,11 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
698 struct alua_dh_data *h = get_alua_data(sdev); 706 struct alua_dh_data *h = get_alua_data(sdev);
699 int ret = BLKPREP_OK; 707 int ret = BLKPREP_OK;
700 708
701 if (h->state != TPGS_STATE_OPTIMIZED && 709 if (h->state == TPGS_STATE_TRANSITIONING)
702 h->state != TPGS_STATE_NONOPTIMIZED) { 710 ret = BLKPREP_DEFER;
711 else if (h->state != TPGS_STATE_OPTIMIZED &&
712 h->state != TPGS_STATE_NONOPTIMIZED &&
713 h->state != TPGS_STATE_LBA_DEPENDENT) {
703 ret = BLKPREP_KILL; 714 ret = BLKPREP_KILL;
704 req->cmd_flags |= REQ_QUIET; 715 req->cmd_flags |= REQ_QUIET;
705 } 716 }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 9eb7a9ebccae..bb63f1a1f808 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -80,8 +80,6 @@ static struct libfc_function_template fnic_transport_template = {
80static int fnic_slave_alloc(struct scsi_device *sdev) 80static int fnic_slave_alloc(struct scsi_device *sdev)
81{ 81{
82 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 82 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
83 struct fc_lport *lp = shost_priv(sdev->host);
84 struct fnic *fnic = lport_priv(lp);
85 83
86 sdev->tagged_supported = 1; 84 sdev->tagged_supported = 1;
87 85
@@ -89,8 +87,6 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
89 return -ENXIO; 87 return -ENXIO;
90 88
91 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); 89 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
92 rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
93
94 return 0; 90 return 0;
95} 91}
96 92
@@ -113,6 +109,15 @@ static struct scsi_host_template fnic_host_template = {
113 .shost_attrs = fnic_attrs, 109 .shost_attrs = fnic_attrs,
114}; 110};
115 111
112static void
113fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
114{
115 if (timeout)
116 rport->dev_loss_tmo = timeout;
117 else
118 rport->dev_loss_tmo = 1;
119}
120
116static void fnic_get_host_speed(struct Scsi_Host *shost); 121static void fnic_get_host_speed(struct Scsi_Host *shost);
117static struct scsi_transport_template *fnic_fc_transport; 122static struct scsi_transport_template *fnic_fc_transport;
118static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); 123static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
@@ -140,6 +145,7 @@ static struct fc_function_template fnic_fc_functions = {
140 .show_starget_port_name = 1, 145 .show_starget_port_name = 1,
141 .show_starget_port_id = 1, 146 .show_starget_port_id = 1,
142 .show_rport_dev_loss_tmo = 1, 147 .show_rport_dev_loss_tmo = 1,
148 .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
143 .issue_fc_host_lip = fnic_reset, 149 .issue_fc_host_lip = fnic_reset,
144 .get_fc_host_stats = fnic_get_stats, 150 .get_fc_host_stats = fnic_get_stats,
145 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 151 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
@@ -706,6 +712,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
706 goto err_out_free_exch_mgr; 712 goto err_out_free_exch_mgr;
707 } 713 }
708 fc_host_maxframe_size(lp->host) = lp->mfs; 714 fc_host_maxframe_size(lp->host) = lp->mfs;
715 fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
709 716
710 sprintf(fc_host_symbolic_name(lp->host), 717 sprintf(fc_host_symbolic_name(lp->host),
711 DRV_NAME " v" DRV_VERSION " over %s", fnic->name); 718 DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8a8f803439e1..4f7a5829ea4c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -376,6 +376,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
376 shost->this_id = sht->this_id; 376 shost->this_id = sht->this_id;
377 shost->can_queue = sht->can_queue; 377 shost->can_queue = sht->can_queue;
378 shost->sg_tablesize = sht->sg_tablesize; 378 shost->sg_tablesize = sht->sg_tablesize;
379 shost->sg_prot_tablesize = sht->sg_prot_tablesize;
379 shost->cmd_per_lun = sht->cmd_per_lun; 380 shost->cmd_per_lun = sht->cmd_per_lun;
380 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 381 shost->unchecked_isa_dma = sht->unchecked_isa_dma;
381 shost->use_clustering = sht->use_clustering; 382 shost->use_clustering = sht->use_clustering;
@@ -411,9 +412,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
411 412
412 device_initialize(&shost->shost_gendev); 413 device_initialize(&shost->shost_gendev);
413 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); 414 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
414#ifndef CONFIG_SYSFS_DEPRECATED
415 shost->shost_gendev.bus = &scsi_bus_type; 415 shost->shost_gendev.bus = &scsi_bus_type;
416#endif
417 shost->shost_gendev.type = &scsi_host_type; 416 shost->shost_gendev.type = &scsi_host_type;
418 417
419 device_initialize(&shost->shost_dev); 418 device_initialize(&shost->shost_dev);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 9f75a6d519a2..00d08b25425f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -50,7 +50,6 @@ static unsigned int max_lun = IBMVFC_MAX_LUN;
50static unsigned int max_targets = IBMVFC_MAX_TARGETS; 50static unsigned int max_targets = IBMVFC_MAX_TARGETS;
51static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; 51static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; 52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
53static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
54static unsigned int ibmvfc_debug = IBMVFC_DEBUG; 53static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
55static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; 54static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
56static LIST_HEAD(ibmvfc_head); 55static LIST_HEAD(ibmvfc_head);
@@ -84,11 +83,6 @@ MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
84module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); 83module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
85MODULE_PARM_DESC(debug, "Enable driver debug information. " 84MODULE_PARM_DESC(debug, "Enable driver debug information. "
86 "[Default=" __stringify(IBMVFC_DEBUG) "]"); 85 "[Default=" __stringify(IBMVFC_DEBUG) "]");
87module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
89 "transport should insulate the loss of a remote port. Once this "
90 "value is exceeded, the scsi target is removed. "
91 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
92module_param_named(log_level, log_level, uint, 0); 86module_param_named(log_level, log_level, uint, 0);
93MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " 87MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
94 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); 88 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
@@ -2496,41 +2490,66 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2496 LEAVE; 2490 LEAVE;
2497} 2491}
2498 2492
2499static const struct { 2493static const struct ibmvfc_async_desc ae_desc [] = {
2500 enum ibmvfc_async_event ae; 2494 { IBMVFC_AE_ELS_PLOGI, "PLOGI", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2501 const char *desc; 2495 { IBMVFC_AE_ELS_LOGO, "LOGO", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2502} ae_desc [] = { 2496 { IBMVFC_AE_ELS_PRLO, "PRLO", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2503 { IBMVFC_AE_ELS_PLOGI, "PLOGI" }, 2497 { IBMVFC_AE_SCN_NPORT, "N-Port SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2504 { IBMVFC_AE_ELS_LOGO, "LOGO" }, 2498 { IBMVFC_AE_SCN_GROUP, "Group SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2505 { IBMVFC_AE_ELS_PRLO, "PRLO" }, 2499 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN", IBMVFC_DEFAULT_LOG_LEVEL },
2506 { IBMVFC_AE_SCN_NPORT, "N-Port SCN" }, 2500 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN", IBMVFC_DEFAULT_LOG_LEVEL },
2507 { IBMVFC_AE_SCN_GROUP, "Group SCN" }, 2501 { IBMVFC_AE_LINK_UP, "Link Up", IBMVFC_DEFAULT_LOG_LEVEL },
2508 { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" }, 2502 { IBMVFC_AE_LINK_DOWN, "Link Down", IBMVFC_DEFAULT_LOG_LEVEL },
2509 { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" }, 2503 { IBMVFC_AE_LINK_DEAD, "Link Dead", IBMVFC_DEFAULT_LOG_LEVEL },
2510 { IBMVFC_AE_LINK_UP, "Link Up" }, 2504 { IBMVFC_AE_HALT, "Halt", IBMVFC_DEFAULT_LOG_LEVEL },
2511 { IBMVFC_AE_LINK_DOWN, "Link Down" }, 2505 { IBMVFC_AE_RESUME, "Resume", IBMVFC_DEFAULT_LOG_LEVEL },
2512 { IBMVFC_AE_LINK_DEAD, "Link Dead" }, 2506 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed", IBMVFC_DEFAULT_LOG_LEVEL },
2513 { IBMVFC_AE_HALT, "Halt" },
2514 { IBMVFC_AE_RESUME, "Resume" },
2515 { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
2516}; 2507};
2517 2508
2518static const char *unknown_ae = "Unknown async"; 2509static const struct ibmvfc_async_desc unknown_ae = {
2510 0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL
2511};
2519 2512
2520/** 2513/**
2521 * ibmvfc_get_ae_desc - Get text description for async event 2514 * ibmvfc_get_ae_desc - Get text description for async event
2522 * @ae: async event 2515 * @ae: async event
2523 * 2516 *
2524 **/ 2517 **/
2525static const char *ibmvfc_get_ae_desc(u64 ae) 2518static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2526{ 2519{
2527 int i; 2520 int i;
2528 2521
2529 for (i = 0; i < ARRAY_SIZE(ae_desc); i++) 2522 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2530 if (ae_desc[i].ae == ae) 2523 if (ae_desc[i].ae == ae)
2531 return ae_desc[i].desc; 2524 return &ae_desc[i];
2525
2526 return &unknown_ae;
2527}
2528
2529static const struct {
2530 enum ibmvfc_ae_link_state state;
2531 const char *desc;
2532} link_desc [] = {
2533 { IBMVFC_AE_LS_LINK_UP, " link up" },
2534 { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
2535 { IBMVFC_AE_LS_LINK_DOWN, " link down" },
2536 { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
2537};
2532 2538
2533 return unknown_ae; 2539/**
2540 * ibmvfc_get_link_state - Get text description for link state
2541 * @state: link state
2542 *
2543 **/
2544static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2545{
2546 int i;
2547
2548 for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2549 if (link_desc[i].state == state)
2550 return link_desc[i].desc;
2551
2552 return "";
2534} 2553}
2535 2554
2536/** 2555/**
@@ -2542,11 +2561,12 @@ static const char *ibmvfc_get_ae_desc(u64 ae)
2542static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, 2561static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2543 struct ibmvfc_host *vhost) 2562 struct ibmvfc_host *vhost)
2544{ 2563{
2545 const char *desc = ibmvfc_get_ae_desc(crq->event); 2564 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event);
2546 struct ibmvfc_target *tgt; 2565 struct ibmvfc_target *tgt;
2547 2566
2548 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2567 ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
2549 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2568 " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
2569 ibmvfc_get_link_state(crq->link_state));
2550 2570
2551 switch (crq->event) { 2571 switch (crq->event) {
2552 case IBMVFC_AE_RESUME: 2572 case IBMVFC_AE_RESUME:
@@ -2788,7 +2808,6 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
2788static int ibmvfc_slave_configure(struct scsi_device *sdev) 2808static int ibmvfc_slave_configure(struct scsi_device *sdev)
2789{ 2809{
2790 struct Scsi_Host *shost = sdev->host; 2810 struct Scsi_Host *shost = sdev->host;
2791 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2792 unsigned long flags = 0; 2811 unsigned long flags = 0;
2793 2812
2794 spin_lock_irqsave(shost->host_lock, flags); 2813 spin_lock_irqsave(shost->host_lock, flags);
@@ -2800,8 +2819,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
2800 scsi_activate_tcq(sdev, sdev->queue_depth); 2819 scsi_activate_tcq(sdev, sdev->queue_depth);
2801 } else 2820 } else
2802 scsi_deactivate_tcq(sdev, sdev->queue_depth); 2821 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2803
2804 rport->dev_loss_tmo = dev_loss_tmo;
2805 spin_unlock_irqrestore(shost->host_lock, flags); 2822 spin_unlock_irqrestore(shost->host_lock, flags);
2806 return 0; 2823 return 0;
2807} 2824}
@@ -4285,8 +4302,10 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4285 spin_unlock_irqrestore(vhost->host->host_lock, flags); 4302 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4286 rc = ibmvfc_reset_crq(vhost); 4303 rc = ibmvfc_reset_crq(vhost);
4287 spin_lock_irqsave(vhost->host->host_lock, flags); 4304 spin_lock_irqsave(vhost->host->host_lock, flags);
4288 if (rc || (rc = ibmvfc_send_crq_init(vhost)) || 4305 if (rc == H_CLOSED)
4289 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { 4306 vio_enable_interrupts(to_vio_dev(vhost->dev));
4307 else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4308 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4290 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4309 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4291 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); 4310 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4292 } 4311 }
@@ -4744,6 +4763,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4744 if ((rc = scsi_add_host(shost, dev))) 4763 if ((rc = scsi_add_host(shost, dev)))
4745 goto release_event_pool; 4764 goto release_event_pool;
4746 4765
4766 fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
4767
4747 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, 4768 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
4748 &ibmvfc_trace_attr))) { 4769 &ibmvfc_trace_attr))) {
4749 dev_err(dev, "Failed to create trace file. rc=%d\n", rc); 4770 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 608af394c8cf..ef663e7c9bbc 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -541,6 +541,12 @@ enum ibmvfc_async_event {
541 IBMVFC_AE_ADAPTER_FAILED = 0x1000, 541 IBMVFC_AE_ADAPTER_FAILED = 0x1000,
542}; 542};
543 543
544struct ibmvfc_async_desc {
545 enum ibmvfc_async_event ae;
546 const char *desc;
547 int log_level;
548};
549
544struct ibmvfc_crq { 550struct ibmvfc_crq {
545 volatile u8 valid; 551 volatile u8 valid;
546 volatile u8 format; 552 volatile u8 format;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 52568588039f..df9a12c8b373 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1096,6 +1096,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1096 res->bus = cfgtew->u.cfgte->res_addr.bus; 1096 res->bus = cfgtew->u.cfgte->res_addr.bus;
1097 res->target = cfgtew->u.cfgte->res_addr.target; 1097 res->target = cfgtew->u.cfgte->res_addr.target;
1098 res->lun = cfgtew->u.cfgte->res_addr.lun; 1098 res->lun = cfgtew->u.cfgte->res_addr.lun;
1099 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1099 } 1100 }
1100 1101
1101 ipr_update_ata_class(res, proto); 1102 ipr_update_ata_class(res, proto);
@@ -1142,7 +1143,7 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1142 int i; 1143 int i;
1143 char *p = buffer; 1144 char *p = buffer;
1144 1145
1145 res_path[0] = '\0'; 1146 *p = '\0';
1146 p += snprintf(p, buffer + len - p, "%02X", res_path[0]); 1147 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1147 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++) 1148 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1148 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]); 1149 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
@@ -1670,7 +1671,7 @@ static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1670 1671
1671 array_entry = error->array_member; 1672 array_entry = error->array_member;
1672 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1673 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1673 sizeof(error->array_member)); 1674 ARRAY_SIZE(error->array_member));
1674 1675
1675 for (i = 0; i < num_entries; i++, array_entry++) { 1676 for (i = 0; i < num_entries; i++, array_entry++) {
1676 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1677 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
@@ -2151,8 +2152,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2151 ipr_err_separator; 2152 ipr_err_separator;
2152 2153
2153 array_entry = error->array_member; 2154 array_entry = error->array_member;
2154 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 2155 num_entries = min_t(u32, error->num_entries,
2155 sizeof(error->array_member)); 2156 ARRAY_SIZE(error->array_member));
2156 2157
2157 for (i = 0; i < num_entries; i++, array_entry++) { 2158 for (i = 0; i < num_entries; i++, array_entry++) {
2158 2159
@@ -2166,10 +2167,10 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2166 2167
2167 ipr_err("Array Member %d:\n", i); 2168 ipr_err("Array Member %d:\n", i);
2168 ipr_log_ext_vpd(&array_entry->vpd); 2169 ipr_log_ext_vpd(&array_entry->vpd);
2169 ipr_err("Current Location: %s", 2170 ipr_err("Current Location: %s\n",
2170 ipr_format_res_path(array_entry->res_path, buffer, 2171 ipr_format_res_path(array_entry->res_path, buffer,
2171 sizeof(buffer))); 2172 sizeof(buffer)));
2172 ipr_err("Expected Location: %s", 2173 ipr_err("Expected Location: %s\n",
2173 ipr_format_res_path(array_entry->expected_res_path, 2174 ipr_format_res_path(array_entry->expected_res_path,
2174 buffer, sizeof(buffer))); 2175 buffer, sizeof(buffer)));
2175 2176
@@ -4089,6 +4090,7 @@ static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4089/** 4090/**
4090 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4091 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4091 * @dev: device struct 4092 * @dev: device struct
4093 * @attr: device attribute structure
4092 * @buf: buffer 4094 * @buf: buffer
4093 * 4095 *
4094 * Return value: 4096 * Return value:
@@ -4122,6 +4124,7 @@ static struct device_attribute ipr_adapter_handle_attr = {
4122 * ipr_show_resource_path - Show the resource path or the resource address for 4124 * ipr_show_resource_path - Show the resource path or the resource address for
4123 * this device. 4125 * this device.
4124 * @dev: device struct 4126 * @dev: device struct
4127 * @attr: device attribute structure
4125 * @buf: buffer 4128 * @buf: buffer
4126 * 4129 *
4127 * Return value: 4130 * Return value:
@@ -4159,8 +4162,45 @@ static struct device_attribute ipr_resource_path_attr = {
4159}; 4162};
4160 4163
4161/** 4164/**
4165 * ipr_show_device_id - Show the device_id for this device.
4166 * @dev: device struct
4167 * @attr: device attribute structure
4168 * @buf: buffer
4169 *
4170 * Return value:
4171 * number of bytes printed to buffer
4172 **/
4173static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4174{
4175 struct scsi_device *sdev = to_scsi_device(dev);
4176 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4177 struct ipr_resource_entry *res;
4178 unsigned long lock_flags = 0;
4179 ssize_t len = -ENXIO;
4180
4181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4182 res = (struct ipr_resource_entry *)sdev->hostdata;
4183 if (res && ioa_cfg->sis64)
4184 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4185 else if (res)
4186 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4187
4188 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4189 return len;
4190}
4191
4192static struct device_attribute ipr_device_id_attr = {
4193 .attr = {
4194 .name = "device_id",
4195 .mode = S_IRUGO,
4196 },
4197 .show = ipr_show_device_id
4198};
4199
4200/**
4162 * ipr_show_resource_type - Show the resource type for this device. 4201 * ipr_show_resource_type - Show the resource type for this device.
4163 * @dev: device struct 4202 * @dev: device struct
4203 * @attr: device attribute structure
4164 * @buf: buffer 4204 * @buf: buffer
4165 * 4205 *
4166 * Return value: 4206 * Return value:
@@ -4195,6 +4235,7 @@ static struct device_attribute ipr_resource_type_attr = {
4195static struct device_attribute *ipr_dev_attrs[] = { 4235static struct device_attribute *ipr_dev_attrs[] = {
4196 &ipr_adapter_handle_attr, 4236 &ipr_adapter_handle_attr,
4197 &ipr_resource_path_attr, 4237 &ipr_resource_path_attr,
4238 &ipr_device_id_attr,
4198 &ipr_resource_type_attr, 4239 &ipr_resource_type_attr,
4199 NULL, 4240 NULL,
4200}; 4241};
@@ -4898,39 +4939,15 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4898/** 4939/**
4899 * ipr_handle_other_interrupt - Handle "other" interrupts 4940 * ipr_handle_other_interrupt - Handle "other" interrupts
4900 * @ioa_cfg: ioa config struct 4941 * @ioa_cfg: ioa config struct
4942 * @int_reg: interrupt register
4901 * 4943 *
4902 * Return value: 4944 * Return value:
4903 * IRQ_NONE / IRQ_HANDLED 4945 * IRQ_NONE / IRQ_HANDLED
4904 **/ 4946 **/
4905static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) 4947static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4948 volatile u32 int_reg)
4906{ 4949{
4907 irqreturn_t rc = IRQ_HANDLED; 4950 irqreturn_t rc = IRQ_HANDLED;
4908 volatile u32 int_reg, int_mask_reg;
4909
4910 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4911 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4912
4913 /* If an interrupt on the adapter did not occur, ignore it.
4914 * Or in the case of SIS 64, check for a stage change interrupt.
4915 */
4916 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4917 if (ioa_cfg->sis64) {
4918 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4919 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4920 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4921
4922 /* clear stage change */
4923 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4924 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4925 list_del(&ioa_cfg->reset_cmd->queue);
4926 del_timer(&ioa_cfg->reset_cmd->timer);
4927 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4928 return IRQ_HANDLED;
4929 }
4930 }
4931
4932 return IRQ_NONE;
4933 }
4934 4951
4935 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 4952 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4936 /* Mask the interrupt */ 4953 /* Mask the interrupt */
@@ -4991,7 +5008,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4991{ 5008{
4992 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 5009 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4993 unsigned long lock_flags = 0; 5010 unsigned long lock_flags = 0;
4994 volatile u32 int_reg; 5011 volatile u32 int_reg, int_mask_reg;
4995 u32 ioasc; 5012 u32 ioasc;
4996 u16 cmd_index; 5013 u16 cmd_index;
4997 int num_hrrq = 0; 5014 int num_hrrq = 0;
@@ -5006,6 +5023,33 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5006 return IRQ_NONE; 5023 return IRQ_NONE;
5007 } 5024 }
5008 5025
5026 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5027 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5028
5029 /* If an interrupt on the adapter did not occur, ignore it.
5030 * Or in the case of SIS 64, check for a stage change interrupt.
5031 */
5032 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
5033 if (ioa_cfg->sis64) {
5034 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5035 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5036 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5037
5038 /* clear stage change */
5039 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5040 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5041 list_del(&ioa_cfg->reset_cmd->queue);
5042 del_timer(&ioa_cfg->reset_cmd->timer);
5043 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5045 return IRQ_HANDLED;
5046 }
5047 }
5048
5049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5050 return IRQ_NONE;
5051 }
5052
5009 while (1) { 5053 while (1) {
5010 ipr_cmd = NULL; 5054 ipr_cmd = NULL;
5011 5055
@@ -5045,7 +5089,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5045 /* Clear the PCI interrupt */ 5089 /* Clear the PCI interrupt */
5046 do { 5090 do {
5047 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5091 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5048 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5092 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5049 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5093 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5050 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5094 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5051 5095
@@ -5060,7 +5104,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5060 } 5104 }
5061 5105
5062 if (unlikely(rc == IRQ_NONE)) 5106 if (unlikely(rc == IRQ_NONE))
5063 rc = ipr_handle_other_interrupt(ioa_cfg); 5107 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5064 5108
5065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5066 return rc; 5110 return rc;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4d31625ab9cf..aa8bb2f2c6ee 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -26,6 +26,7 @@
26#ifndef _IPR_H 26#ifndef _IPR_H
27#define _IPR_H 27#define _IPR_H
28 28
29#include <asm/unaligned.h>
29#include <linux/types.h> 30#include <linux/types.h>
30#include <linux/completion.h> 31#include <linux/completion.h>
31#include <linux/libata.h> 32#include <linux/libata.h>
@@ -37,8 +38,8 @@
37/* 38/*
38 * Literals 39 * Literals
39 */ 40 */
40#define IPR_DRIVER_VERSION "2.5.0" 41#define IPR_DRIVER_VERSION "2.5.1"
41#define IPR_DRIVER_DATE "(February 11, 2010)" 42#define IPR_DRIVER_DATE "(August 10, 2010)"
42 43
43/* 44/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -318,6 +319,11 @@ struct ipr_ext_vpd {
318 __be32 wwid[2]; 319 __be32 wwid[2];
319}__attribute__((packed)); 320}__attribute__((packed));
320 321
322struct ipr_ext_vpd64 {
323 struct ipr_vpd vpd;
324 __be32 wwid[4];
325}__attribute__((packed));
326
321struct ipr_std_inq_data { 327struct ipr_std_inq_data {
322 u8 peri_qual_dev_type; 328 u8 peri_qual_dev_type;
323#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) 329#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5)
@@ -372,7 +378,7 @@ struct ipr_config_table_entry {
372 378
373 struct ipr_res_addr res_addr; 379 struct ipr_res_addr res_addr;
374 __be32 res_handle; 380 __be32 res_handle;
375 __be32 reserved4[2]; 381 __be32 lun_wwn[2];
376 struct ipr_std_inq_data std_inq_data; 382 struct ipr_std_inq_data std_inq_data;
377}__attribute__ ((packed, aligned (4))); 383}__attribute__ ((packed, aligned (4)));
378 384
@@ -394,7 +400,7 @@ struct ipr_config_table_entry64 {
394 __be64 res_path; 400 __be64 res_path;
395 struct ipr_std_inq_data std_inq_data; 401 struct ipr_std_inq_data std_inq_data;
396 u8 reserved2[4]; 402 u8 reserved2[4];
397 __be64 reserved3[2]; // description text 403 __be64 reserved3[2];
398 u8 reserved4[8]; 404 u8 reserved4[8];
399}__attribute__ ((packed, aligned (8))); 405}__attribute__ ((packed, aligned (8)));
400 406
@@ -913,7 +919,7 @@ struct ipr_hostrcb_type_24_error {
913 u8 array_id; 919 u8 array_id;
914 u8 last_res_path[8]; 920 u8 last_res_path[8];
915 u8 protection_level[8]; 921 u8 protection_level[8];
916 struct ipr_ext_vpd array_vpd; 922 struct ipr_ext_vpd64 array_vpd;
917 u8 description[16]; 923 u8 description[16];
918 u8 reserved2[3]; 924 u8 reserved2[3];
919 u8 num_entries; 925 u8 num_entries;
@@ -1210,6 +1216,7 @@ struct ipr_resource_entry {
1210 1216
1211 __be32 res_handle; 1217 __be32 res_handle;
1212 __be64 dev_id; 1218 __be64 dev_id;
1219 __be64 lun_wwn;
1213 struct scsi_lun dev_lun; 1220 struct scsi_lun dev_lun;
1214 u8 res_path[8]; 1221 u8 res_path[8];
1215 1222
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 042153cbbde1..e1a395b438ee 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -162,6 +162,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
162 unsigned int xfer = 0; 162 unsigned int xfer = 0;
163 unsigned int si; 163 unsigned int si;
164 164
165 /* If the device fell off, no sense in issuing commands */
166 if (dev->gone)
167 return AC_ERR_SYSTEM;
168
165 task = sas_alloc_task(GFP_ATOMIC); 169 task = sas_alloc_task(GFP_ATOMIC);
166 if (!task) 170 if (!task)
167 return AC_ERR_SYSTEM; 171 return AC_ERR_SYSTEM;
@@ -347,6 +351,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
347static struct ata_port_operations sas_sata_ops = { 351static struct ata_port_operations sas_sata_ops = {
348 .phy_reset = sas_ata_phy_reset, 352 .phy_reset = sas_ata_phy_reset,
349 .post_internal_cmd = sas_ata_post_internal, 353 .post_internal_cmd = sas_ata_post_internal,
354 .qc_defer = ata_std_qc_defer,
350 .qc_prep = ata_noop_qc_prep, 355 .qc_prep = ata_noop_qc_prep,
351 .qc_issue = sas_ata_qc_issue, 356 .qc_issue = sas_ata_qc_issue,
352 .qc_fill_rtf = sas_ata_qc_fill_rtf, 357 .qc_fill_rtf = sas_ata_qc_fill_rtf,
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 83dd5070a15c..505ffe358293 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -175,10 +175,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
175 switch (resp->result) { 175 switch (resp->result) {
176 case SMP_RESP_PHY_VACANT: 176 case SMP_RESP_PHY_VACANT:
177 phy->phy_state = PHY_VACANT; 177 phy->phy_state = PHY_VACANT;
178 return; 178 break;
179 default: 179 default:
180 phy->phy_state = PHY_NOT_PRESENT; 180 phy->phy_state = PHY_NOT_PRESENT;
181 return; 181 break;
182 case SMP_RESP_FUNC_ACC: 182 case SMP_RESP_FUNC_ACC:
183 phy->phy_state = PHY_EMPTY; /* do not know yet */ 183 phy->phy_state = PHY_EMPTY; /* do not know yet */
184 break; 184 break;
@@ -209,7 +209,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
209 phy->phy->negotiated_linkrate = phy->linkrate; 209 phy->phy->negotiated_linkrate = phy->linkrate;
210 210
211 if (!rediscover) 211 if (!rediscover)
212 sas_phy_add(phy->phy); 212 if (sas_phy_add(phy->phy)) {
213 sas_phy_free(phy->phy);
214 return;
215 }
213 216
214 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", 217 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
215 SAS_ADDR(dev->sas_addr), phy->phy_id, 218 SAS_ADDR(dev->sas_addr), phy->phy_id,
@@ -1724,6 +1727,7 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
1724 struct domain_device *child, *n; 1727 struct domain_device *child, *n;
1725 1728
1726 list_for_each_entry_safe(child, n, &ex->children, siblings) { 1729 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1730 child->gone = 1;
1727 if (child->dev_type == EDGE_DEV || 1731 if (child->dev_type == EDGE_DEV ||
1728 child->dev_type == FANOUT_DEV) 1732 child->dev_type == FANOUT_DEV)
1729 sas_unregister_ex_tree(child); 1733 sas_unregister_ex_tree(child);
@@ -1744,6 +1748,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1744 &ex_dev->children, siblings) { 1748 &ex_dev->children, siblings) {
1745 if (SAS_ADDR(child->sas_addr) == 1749 if (SAS_ADDR(child->sas_addr) ==
1746 SAS_ADDR(phy->attached_sas_addr)) { 1750 SAS_ADDR(phy->attached_sas_addr)) {
1751 child->gone = 1;
1747 if (child->dev_type == EDGE_DEV || 1752 if (child->dev_type == EDGE_DEV ||
1748 child->dev_type == FANOUT_DEV) 1753 child->dev_type == FANOUT_DEV)
1749 sas_unregister_ex_tree(child); 1754 sas_unregister_ex_tree(child);
@@ -1752,6 +1757,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1752 break; 1757 break;
1753 } 1758 }
1754 } 1759 }
1760 parent->gone = 1;
1755 sas_disable_routing(parent, phy->attached_sas_addr); 1761 sas_disable_routing(parent, phy->attached_sas_addr);
1756 } 1762 }
1757 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1763 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f0cfba9a1fc8..55f09e92ab59 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -130,17 +130,6 @@ static void sas_scsi_task_done(struct sas_task *task)
130 sc->scsi_done(sc); 130 sc->scsi_done(sc);
131} 131}
132 132
133static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
134{
135 enum task_attribute ta = TASK_ATTR_SIMPLE;
136 if (cmd->request && blk_rq_tagged(cmd->request)) {
137 if (cmd->device->ordered_tags &&
138 (cmd->request->cmd_flags & REQ_HARDBARRIER))
139 ta = TASK_ATTR_ORDERED;
140 }
141 return ta;
142}
143
144static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, 133static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
145 struct domain_device *dev, 134 struct domain_device *dev,
146 gfp_t gfp_flags) 135 gfp_t gfp_flags)
@@ -160,7 +149,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
160 task->ssp_task.retry_count = 1; 149 task->ssp_task.retry_count = 1;
161 int_to_scsilun(cmd->device->lun, &lun); 150 int_to_scsilun(cmd->device->lun, &lun);
162 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); 151 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
163 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); 152 task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
164 memcpy(task->ssp_task.cdb, cmd->cmnd, 16); 153 memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
165 154
166 task->scatter = scsi_sglist(cmd); 155 task->scatter = scsi_sglist(cmd);
@@ -228,6 +217,13 @@ int sas_queuecommand(struct scsi_cmnd *cmd,
228 goto out; 217 goto out;
229 } 218 }
230 219
220 /* If the device fell off, no sense in issuing commands */
221 if (dev->gone) {
222 cmd->result = DID_BAD_TARGET << 16;
223 scsi_done(cmd);
224 goto out;
225 }
226
231 res = -ENOMEM; 227 res = -ENOMEM;
232 task = sas_create_task(cmd, dev, GFP_ATOMIC); 228 task = sas_create_task(cmd, dev, GFP_ATOMIC);
233 if (!task) 229 if (!task)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 23ce45708335..f681eea57730 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -586,6 +586,11 @@ lpfc_issue_lip(struct Scsi_Host *shost)
586 phba->cfg_link_speed); 586 phba->cfg_link_speed);
587 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 587 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
588 phba->fc_ratov * 2); 588 phba->fc_ratov * 2);
589 if ((mbxstatus == MBX_SUCCESS) &&
590 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
591 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
592 "2859 SLI authentication is required "
593 "for INIT_LINK but has not done yet\n");
589 } 594 }
590 595
591 lpfc_set_loopback_flag(phba); 596 lpfc_set_loopback_flag(phba);
@@ -2159,6 +2164,11 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
2159 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 2164 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
2160 vport->cfg_nodev_tmo = val; 2165 vport->cfg_nodev_tmo = val;
2161 vport->cfg_devloss_tmo = val; 2166 vport->cfg_devloss_tmo = val;
2167 /*
2168 * For compat: set the fc_host dev loss so new rports
2169 * will get the value.
2170 */
2171 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
2162 lpfc_update_rport_devloss_tmo(vport); 2172 lpfc_update_rport_devloss_tmo(vport);
2163 return 0; 2173 return 0;
2164 } 2174 }
@@ -2208,6 +2218,7 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
2208 vport->cfg_nodev_tmo = val; 2218 vport->cfg_nodev_tmo = val;
2209 vport->cfg_devloss_tmo = val; 2219 vport->cfg_devloss_tmo = val;
2210 vport->dev_loss_tmo_changed = 1; 2220 vport->dev_loss_tmo_changed = 1;
2221 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
2211 lpfc_update_rport_devloss_tmo(vport); 2222 lpfc_update_rport_devloss_tmo(vport);
2212 return 0; 2223 return 0;
2213 } 2224 }
@@ -3776,6 +3787,11 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
3776 case MBX_PORT_CAPABILITIES: 3787 case MBX_PORT_CAPABILITIES:
3777 case MBX_PORT_IOV_CONTROL: 3788 case MBX_PORT_IOV_CONTROL:
3778 break; 3789 break;
3790 case MBX_SECURITY_MGMT:
3791 case MBX_AUTH_PORT:
3792 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
3793 return -EPERM;
3794 break;
3779 case MBX_READ_SPARM64: 3795 case MBX_READ_SPARM64:
3780 case MBX_READ_LA: 3796 case MBX_READ_LA:
3781 case MBX_READ_LA64: 3797 case MBX_READ_LA64:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 49d0cf99c24c..f5d60b55f53a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -259,6 +259,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
259 struct bsg_job_data *dd_data; 259 struct bsg_job_data *dd_data;
260 uint32_t creg_val; 260 uint32_t creg_val;
261 int rc = 0; 261 int rc = 0;
262 int iocb_stat;
262 263
263 /* in case no data is transferred */ 264 /* in case no data is transferred */
264 job->reply->reply_payload_rcv_len = 0; 265 job->reply->reply_payload_rcv_len = 0;
@@ -373,14 +374,13 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
373 readl(phba->HCregaddr); /* flush */ 374 readl(phba->HCregaddr); /* flush */
374 } 375 }
375 376
376 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 377 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
377 378 if (iocb_stat == IOCB_SUCCESS)
378 if (rc == IOCB_SUCCESS)
379 return 0; /* done for now */ 379 return 0; /* done for now */
380 else if (rc == IOCB_BUSY) 380 else if (iocb_stat == IOCB_BUSY)
381 rc = EAGAIN; 381 rc = -EAGAIN;
382 else 382 else
383 rc = EIO; 383 rc = -EIO;
384 384
385 385
386 /* iocb failed so cleanup */ 386 /* iocb failed so cleanup */
@@ -631,9 +631,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
631 if (rc == IOCB_SUCCESS) 631 if (rc == IOCB_SUCCESS)
632 return 0; /* done for now */ 632 return 0; /* done for now */
633 else if (rc == IOCB_BUSY) 633 else if (rc == IOCB_BUSY)
634 rc = EAGAIN; 634 rc = -EAGAIN;
635 else 635 else
636 rc = EIO; 636 rc = -EIO;
637 637
638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
639 job->request_payload.sg_cnt, DMA_TO_DEVICE); 639 job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -1299,7 +1299,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1299 /* Allocate buffer for command iocb */ 1299 /* Allocate buffer for command iocb */
1300 ctiocb = lpfc_sli_get_iocbq(phba); 1300 ctiocb = lpfc_sli_get_iocbq(phba);
1301 if (!ctiocb) { 1301 if (!ctiocb) {
1302 rc = ENOMEM; 1302 rc = -ENOMEM;
1303 goto no_ctiocb; 1303 goto no_ctiocb;
1304 } 1304 }
1305 1305
@@ -1518,7 +1518,7 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1518 loopback_mode = (struct diag_mode_set *) 1518 loopback_mode = (struct diag_mode_set *)
1519 job->request->rqst_data.h_vendor.vendor_cmd; 1519 job->request->rqst_data.h_vendor.vendor_cmd;
1520 link_flags = loopback_mode->type; 1520 link_flags = loopback_mode->type;
1521 timeout = loopback_mode->timeout; 1521 timeout = loopback_mode->timeout * 100;
1522 1522
1523 if ((phba->link_state == LPFC_HBA_ERROR) || 1523 if ((phba->link_state == LPFC_HBA_ERROR) ||
1524 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 1524 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -1649,17 +1649,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1649 1649
1650 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1650 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1651 if (!mbox) 1651 if (!mbox)
1652 return ENOMEM; 1652 return -ENOMEM;
1653 1653
1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0); 1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1656 if (status) { 1656 if (status) {
1657 mempool_free(mbox, phba->mbox_mem_pool); 1657 mempool_free(mbox, phba->mbox_mem_pool);
1658 return ENOMEM; 1658 return -ENOMEM;
1659 } 1659 }
1660 1660
1661 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 1661 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1662 mbox->context1 = NULL; 1662 mbox->context1 = NULL;
1663 mbox->context2 = NULL;
1663 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1664 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1664 1665
1665 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1666 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -1667,7 +1668,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1667 kfree(dmabuff); 1668 kfree(dmabuff);
1668 if (status != MBX_TIMEOUT) 1669 if (status != MBX_TIMEOUT)
1669 mempool_free(mbox, phba->mbox_mem_pool); 1670 mempool_free(mbox, phba->mbox_mem_pool);
1670 return ENODEV; 1671 return -ENODEV;
1671 } 1672 }
1672 1673
1673 *rpi = mbox->u.mb.un.varWords[0]; 1674 *rpi = mbox->u.mb.un.varWords[0];
@@ -1693,7 +1694,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1693 /* Allocate mboxq structure */ 1694 /* Allocate mboxq structure */
1694 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1695 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1695 if (mbox == NULL) 1696 if (mbox == NULL)
1696 return ENOMEM; 1697 return -ENOMEM;
1697 1698
1698 lpfc_unreg_login(phba, 0, rpi, mbox); 1699 lpfc_unreg_login(phba, 0, rpi, mbox);
1699 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1700 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
@@ -1701,7 +1702,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1701 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1702 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1702 if (status != MBX_TIMEOUT) 1703 if (status != MBX_TIMEOUT)
1703 mempool_free(mbox, phba->mbox_mem_pool); 1704 mempool_free(mbox, phba->mbox_mem_pool);
1704 return EIO; 1705 return -EIO;
1705 } 1706 }
1706 1707
1707 mempool_free(mbox, phba->mbox_mem_pool); 1708 mempool_free(mbox, phba->mbox_mem_pool);
@@ -1730,6 +1731,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1730 struct ulp_bde64 *bpl = NULL; 1731 struct ulp_bde64 *bpl = NULL;
1731 struct lpfc_sli_ct_request *ctreq = NULL; 1732 struct lpfc_sli_ct_request *ctreq = NULL;
1732 int ret_val = 0; 1733 int ret_val = 0;
1734 int time_left;
1735 int iocb_stat = 0;
1733 unsigned long flags; 1736 unsigned long flags;
1734 1737
1735 *txxri = 0; 1738 *txxri = 0;
@@ -1737,7 +1740,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1737 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 1740 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1738 SLI_CT_ELX_LOOPBACK); 1741 SLI_CT_ELX_LOOPBACK);
1739 if (!evt) 1742 if (!evt)
1740 return ENOMEM; 1743 return -ENOMEM;
1741 1744
1742 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1745 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1743 list_add(&evt->node, &phba->ct_ev_waiters); 1746 list_add(&evt->node, &phba->ct_ev_waiters);
@@ -1770,7 +1773,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1770 if (cmdiocbq == NULL || rspiocbq == NULL || 1773 if (cmdiocbq == NULL || rspiocbq == NULL ||
1771 dmabuf == NULL || bpl == NULL || ctreq == NULL || 1774 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
1772 dmabuf->virt == NULL) { 1775 dmabuf->virt == NULL) {
1773 ret_val = ENOMEM; 1776 ret_val = -ENOMEM;
1774 goto err_get_xri_exit; 1777 goto err_get_xri_exit;
1775 } 1778 }
1776 1779
@@ -1806,24 +1809,24 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1806 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 1809 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1807 cmdiocbq->vport = phba->pport; 1810 cmdiocbq->vport = phba->pport;
1808 1811
1809 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 1812 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1810 rspiocbq, 1813 rspiocbq,
1811 (phba->fc_ratov * 2) 1814 (phba->fc_ratov * 2)
1812 + LPFC_DRVR_TIMEOUT); 1815 + LPFC_DRVR_TIMEOUT);
1813 if (ret_val) 1816 if (iocb_stat) {
1817 ret_val = -EIO;
1814 goto err_get_xri_exit; 1818 goto err_get_xri_exit;
1815 1819 }
1816 *txxri = rsp->ulpContext; 1820 *txxri = rsp->ulpContext;
1817 1821
1818 evt->waiting = 1; 1822 evt->waiting = 1;
1819 evt->wait_time_stamp = jiffies; 1823 evt->wait_time_stamp = jiffies;
1820 ret_val = wait_event_interruptible_timeout( 1824 time_left = wait_event_interruptible_timeout(
1821 evt->wq, !list_empty(&evt->events_to_see), 1825 evt->wq, !list_empty(&evt->events_to_see),
1822 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 1826 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1823 if (list_empty(&evt->events_to_see)) 1827 if (list_empty(&evt->events_to_see))
1824 ret_val = (ret_val) ? EINTR : ETIMEDOUT; 1828 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
1825 else { 1829 else {
1826 ret_val = IOCB_SUCCESS;
1827 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1830 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1828 list_move(evt->events_to_see.prev, &evt->events_to_get); 1831 list_move(evt->events_to_see.prev, &evt->events_to_get);
1829 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1832 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -1845,7 +1848,7 @@ err_get_xri_exit:
1845 kfree(dmabuf); 1848 kfree(dmabuf);
1846 } 1849 }
1847 1850
1848 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) 1851 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
1849 lpfc_sli_release_iocbq(phba, cmdiocbq); 1852 lpfc_sli_release_iocbq(phba, cmdiocbq);
1850 if (rspiocbq) 1853 if (rspiocbq)
1851 lpfc_sli_release_iocbq(phba, rspiocbq); 1854 lpfc_sli_release_iocbq(phba, rspiocbq);
@@ -1959,6 +1962,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1959 uint32_t num_bde; 1962 uint32_t num_bde;
1960 struct lpfc_dmabufext *rxbuffer = NULL; 1963 struct lpfc_dmabufext *rxbuffer = NULL;
1961 int ret_val = 0; 1964 int ret_val = 0;
1965 int iocb_stat;
1962 int i = 0; 1966 int i = 0;
1963 1967
1964 cmdiocbq = lpfc_sli_get_iocbq(phba); 1968 cmdiocbq = lpfc_sli_get_iocbq(phba);
@@ -1973,7 +1977,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1973 } 1977 }
1974 1978
1975 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 1979 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1976 ret_val = ENOMEM; 1980 ret_val = -ENOMEM;
1977 goto err_post_rxbufs_exit; 1981 goto err_post_rxbufs_exit;
1978 } 1982 }
1979 1983
@@ -2022,16 +2026,16 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2022 cmd->ulpClass = CLASS3; 2026 cmd->ulpClass = CLASS3;
2023 cmd->ulpContext = rxxri; 2027 cmd->ulpContext = rxxri;
2024 2028
2025 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 2029 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2026 2030 0);
2027 if (ret_val == IOCB_ERROR) { 2031 if (iocb_stat == IOCB_ERROR) {
2028 diag_cmd_data_free(phba, 2032 diag_cmd_data_free(phba,
2029 (struct lpfc_dmabufext *)mp[0]); 2033 (struct lpfc_dmabufext *)mp[0]);
2030 if (mp[1]) 2034 if (mp[1])
2031 diag_cmd_data_free(phba, 2035 diag_cmd_data_free(phba,
2032 (struct lpfc_dmabufext *)mp[1]); 2036 (struct lpfc_dmabufext *)mp[1]);
2033 dmp = list_entry(next, struct lpfc_dmabuf, list); 2037 dmp = list_entry(next, struct lpfc_dmabuf, list);
2034 ret_val = EIO; 2038 ret_val = -EIO;
2035 goto err_post_rxbufs_exit; 2039 goto err_post_rxbufs_exit;
2036 } 2040 }
2037 2041
@@ -2045,7 +2049,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2045 cmdiocbq = lpfc_sli_get_iocbq(phba); 2049 cmdiocbq = lpfc_sli_get_iocbq(phba);
2046 if (!cmdiocbq) { 2050 if (!cmdiocbq) {
2047 dmp = list_entry(next, struct lpfc_dmabuf, list); 2051 dmp = list_entry(next, struct lpfc_dmabuf, list);
2048 ret_val = EIO; 2052 ret_val = -EIO;
2049 goto err_post_rxbufs_exit; 2053 goto err_post_rxbufs_exit;
2050 } 2054 }
2051 2055
@@ -2111,6 +2115,8 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2111 uint32_t num_bde; 2115 uint32_t num_bde;
2112 uint8_t *ptr = NULL, *rx_databuf = NULL; 2116 uint8_t *ptr = NULL, *rx_databuf = NULL;
2113 int rc = 0; 2117 int rc = 0;
2118 int time_left;
2119 int iocb_stat;
2114 unsigned long flags; 2120 unsigned long flags;
2115 void *dataout = NULL; 2121 void *dataout = NULL;
2116 uint32_t total_mem; 2122 uint32_t total_mem;
@@ -2185,22 +2191,18 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2185 ptr, size); 2191 ptr, size);
2186 2192
2187 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2193 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2188 if (rc) { 2194 if (rc)
2189 rc = -ENOMEM;
2190 goto loopback_test_exit; 2195 goto loopback_test_exit;
2191 }
2192 2196
2193 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 2197 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2194 if (rc) { 2198 if (rc) {
2195 lpfcdiag_loop_self_unreg(phba, rpi); 2199 lpfcdiag_loop_self_unreg(phba, rpi);
2196 rc = -ENOMEM;
2197 goto loopback_test_exit; 2200 goto loopback_test_exit;
2198 } 2201 }
2199 2202
2200 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 2203 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2201 if (rc) { 2204 if (rc) {
2202 lpfcdiag_loop_self_unreg(phba, rpi); 2205 lpfcdiag_loop_self_unreg(phba, rpi);
2203 rc = -ENOMEM;
2204 goto loopback_test_exit; 2206 goto loopback_test_exit;
2205 } 2207 }
2206 2208
@@ -2290,21 +2292,22 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2290 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2292 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2291 cmdiocbq->vport = phba->pport; 2293 cmdiocbq->vport = phba->pport;
2292 2294
2293 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, 2295 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2294 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); 2296 rspiocbq, (phba->fc_ratov * 2) +
2297 LPFC_DRVR_TIMEOUT);
2295 2298
2296 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { 2299 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2297 rc = -EIO; 2300 rc = -EIO;
2298 goto err_loopback_test_exit; 2301 goto err_loopback_test_exit;
2299 } 2302 }
2300 2303
2301 evt->waiting = 1; 2304 evt->waiting = 1;
2302 rc = wait_event_interruptible_timeout( 2305 time_left = wait_event_interruptible_timeout(
2303 evt->wq, !list_empty(&evt->events_to_see), 2306 evt->wq, !list_empty(&evt->events_to_see),
2304 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2307 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2305 evt->waiting = 0; 2308 evt->waiting = 0;
2306 if (list_empty(&evt->events_to_see)) 2309 if (list_empty(&evt->events_to_see))
2307 rc = (rc) ? -EINTR : -ETIMEDOUT; 2310 rc = (time_left) ? -EINTR : -ETIMEDOUT;
2308 else { 2311 else {
2309 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2312 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2310 list_move(evt->events_to_see.prev, &evt->events_to_get); 2313 list_move(evt->events_to_see.prev, &evt->events_to_get);
@@ -2470,6 +2473,17 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2470 to += sizeof(MAILBOX_t); 2473 to += sizeof(MAILBOX_t);
2471 size = pmboxq->u.mb.un.varWords[5]; 2474 size = pmboxq->u.mb.un.varWords[5];
2472 memcpy(to, from, size); 2475 memcpy(to, from, size);
2476 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2477 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2478 struct lpfc_mbx_nembed_cmd *nembed_sge =
2479 (struct lpfc_mbx_nembed_cmd *)
2480 &pmboxq->u.mb.un.varWords[0];
2481
2482 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2483 virt;
2484 to += sizeof(MAILBOX_t);
2485 size = nembed_sge->sge[0].length;
2486 memcpy(to, from, size);
2473 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { 2487 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2474 from = (uint8_t *)dd_data->context_un. 2488 from = (uint8_t *)dd_data->context_un.
2475 mbox.dmp->dma.virt; 2489 mbox.dmp->dma.virt;
@@ -2911,6 +2925,59 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2911 from += sizeof(MAILBOX_t); 2925 from += sizeof(MAILBOX_t);
2912 memcpy((uint8_t *)dmp->dma.virt, from, 2926 memcpy((uint8_t *)dmp->dma.virt, from,
2913 bde->tus.f.bdeSize); 2927 bde->tus.f.bdeSize);
2928 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2929 struct lpfc_mbx_nembed_cmd *nembed_sge;
2930 struct mbox_header *header;
2931 uint32_t receive_length;
2932
2933 /* rebuild the command for sli4 using our own buffers
2934 * like we do for biu diags
2935 */
2936 header = (struct mbox_header *)&pmb->un.varWords[0];
2937 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2938 &pmb->un.varWords[0];
2939 receive_length = nembed_sge->sge[0].length;
2940
2941 /* receive length cannot be greater than mailbox
2942 * extension size
2943 */
2944 if ((receive_length == 0) ||
2945 (receive_length > MAILBOX_EXT_SIZE)) {
2946 rc = -ERANGE;
2947 goto job_done;
2948 }
2949
2950 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2951 if (!rxbmp) {
2952 rc = -ENOMEM;
2953 goto job_done;
2954 }
2955
2956 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2957 if (!rxbmp->virt) {
2958 rc = -ENOMEM;
2959 goto job_done;
2960 }
2961
2962 INIT_LIST_HEAD(&rxbmp->list);
2963 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2964 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2965 0);
2966 if (!dmp) {
2967 rc = -ENOMEM;
2968 goto job_done;
2969 }
2970
2971 INIT_LIST_HEAD(&dmp->dma.list);
2972 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2973 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2974 /* copy the transmit data found in the mailbox
2975 * extension area
2976 */
2977 from = (uint8_t *)mb;
2978 from += sizeof(MAILBOX_t);
2979 memcpy((uint8_t *)dmp->dma.virt, from,
2980 header->cfg_mhdr.payload_length);
2914 } 2981 }
2915 } 2982 }
2916 2983
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8d09191c327e..e6ca12f6c6cb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3250,6 +3250,8 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3250 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 3250 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3251 3251
3252 pmb->context1 = NULL; 3252 pmb->context1 = NULL;
3253 pmb->context2 = NULL;
3254
3253 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3255 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3254 kfree(mp); 3256 kfree(mp);
3255 mempool_free(pmb, phba->mbox_mem_pool); 3257 mempool_free(pmb, phba->mbox_mem_pool);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 1f62ea8c165d..c3d7174e3469 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1015,7 +1015,6 @@ static void
1015lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1015lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1016{ 1016{
1017 struct lpfc_vport *vport = mboxq->vport; 1017 struct lpfc_vport *vport = mboxq->vport;
1018 unsigned long flags;
1019 1018
1020 if (mboxq->u.mb.mbxStatus) { 1019 if (mboxq->u.mb.mbxStatus) {
1021 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1020 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1029,18 +1028,18 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1029 /* Start FCoE discovery by sending a FLOGI. */ 1028 /* Start FCoE discovery by sending a FLOGI. */
1030 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); 1029 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1031 /* Set the FCFI registered flag */ 1030 /* Set the FCFI registered flag */
1032 spin_lock_irqsave(&phba->hbalock, flags); 1031 spin_lock_irq(&phba->hbalock);
1033 phba->fcf.fcf_flag |= FCF_REGISTERED; 1032 phba->fcf.fcf_flag |= FCF_REGISTERED;
1034 spin_unlock_irqrestore(&phba->hbalock, flags); 1033 spin_unlock_irq(&phba->hbalock);
1035 /* If there is a pending FCoE event, restart FCF table scan. */ 1034 /* If there is a pending FCoE event, restart FCF table scan. */
1036 if (lpfc_check_pending_fcoe_event(phba, 1)) { 1035 if (lpfc_check_pending_fcoe_event(phba, 1)) {
1037 mempool_free(mboxq, phba->mbox_mem_pool); 1036 mempool_free(mboxq, phba->mbox_mem_pool);
1038 return; 1037 return;
1039 } 1038 }
1040 spin_lock_irqsave(&phba->hbalock, flags); 1039 spin_lock_irq(&phba->hbalock);
1041 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1040 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1042 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1041 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1043 spin_unlock_irqrestore(&phba->hbalock, flags); 1042 spin_unlock_irq(&phba->hbalock);
1044 if (vport->port_state != LPFC_FLOGI) 1043 if (vport->port_state != LPFC_FLOGI)
1045 lpfc_initial_flogi(vport); 1044 lpfc_initial_flogi(vport);
1046 1045
@@ -1240,14 +1239,13 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1240{ 1239{
1241 LPFC_MBOXQ_t *fcf_mbxq; 1240 LPFC_MBOXQ_t *fcf_mbxq;
1242 int rc; 1241 int rc;
1243 unsigned long flags;
1244 1242
1245 spin_lock_irqsave(&phba->hbalock, flags); 1243 spin_lock_irq(&phba->hbalock);
1246 1244
1247 /* If the FCF is not availabe do nothing. */ 1245 /* If the FCF is not availabe do nothing. */
1248 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1246 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1249 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1247 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1250 spin_unlock_irqrestore(&phba->hbalock, flags); 1248 spin_unlock_irq(&phba->hbalock);
1251 return; 1249 return;
1252 } 1250 }
1253 1251
@@ -1255,19 +1253,19 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1255 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1256 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1257 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1255 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1258 spin_unlock_irqrestore(&phba->hbalock, flags); 1256 spin_unlock_irq(&phba->hbalock);
1259 if (phba->pport->port_state != LPFC_FLOGI) 1257 if (phba->pport->port_state != LPFC_FLOGI)
1260 lpfc_initial_flogi(phba->pport); 1258 lpfc_initial_flogi(phba->pport);
1261 return; 1259 return;
1262 } 1260 }
1263 spin_unlock_irqrestore(&phba->hbalock, flags); 1261 spin_unlock_irq(&phba->hbalock);
1264 1262
1265 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1263 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1266 GFP_KERNEL); 1264 GFP_KERNEL);
1267 if (!fcf_mbxq) { 1265 if (!fcf_mbxq) {
1268 spin_lock_irqsave(&phba->hbalock, flags); 1266 spin_lock_irq(&phba->hbalock);
1269 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1267 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1270 spin_unlock_irqrestore(&phba->hbalock, flags); 1268 spin_unlock_irq(&phba->hbalock);
1271 return; 1269 return;
1272 } 1270 }
1273 1271
@@ -1276,9 +1274,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1276 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1274 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1277 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1278 if (rc == MBX_NOT_FINISHED) { 1276 if (rc == MBX_NOT_FINISHED) {
1279 spin_lock_irqsave(&phba->hbalock, flags); 1277 spin_lock_irq(&phba->hbalock);
1280 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1278 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1281 spin_unlock_irqrestore(&phba->hbalock, flags); 1279 spin_unlock_irq(&phba->hbalock);
1282 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1283 } 1281 }
1284 1282
@@ -2851,6 +2849,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2849 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2852 2850
2853 pmb->context1 = NULL; 2851 pmb->context1 = NULL;
2852 pmb->context2 = NULL;
2854 2853
2855 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 2854 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
2856 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 2855 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3149,6 +3148,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3149 ndlp = (struct lpfc_nodelist *) pmb->context2; 3148 ndlp = (struct lpfc_nodelist *) pmb->context2;
3150 pmb->context1 = NULL; 3149 pmb->context1 = NULL;
3151 pmb->context2 = NULL; 3150 pmb->context2 = NULL;
3151
3152 if (mb->mbxStatus) { 3152 if (mb->mbxStatus) {
3153 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3153 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3154 "0258 Register Fabric login error: 0x%x\n", 3154 "0258 Register Fabric login error: 0x%x\n",
@@ -3218,6 +3218,9 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3218 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3218 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3219 struct lpfc_vport *vport = pmb->vport; 3219 struct lpfc_vport *vport = pmb->vport;
3220 3220
3221 pmb->context1 = NULL;
3222 pmb->context2 = NULL;
3223
3221 if (mb->mbxStatus) { 3224 if (mb->mbxStatus) {
3222out: 3225out:
3223 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3226 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -3249,8 +3252,6 @@ out:
3249 return; 3252 return;
3250 } 3253 }
3251 3254
3252 pmb->context1 = NULL;
3253
3254 ndlp->nlp_rpi = mb->un.varWords[0]; 3255 ndlp->nlp_rpi = mb->un.varWords[0];
3255 ndlp->nlp_flag |= NLP_RPI_VALID; 3256 ndlp->nlp_flag |= NLP_RPI_VALID;
3256 ndlp->nlp_type |= NLP_FABRIC; 3257 ndlp->nlp_type |= NLP_FABRIC;
@@ -4784,6 +4785,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4784 struct lpfc_vport *vport = pmb->vport; 4785 struct lpfc_vport *vport = pmb->vport;
4785 4786
4786 pmb->context1 = NULL; 4787 pmb->context1 = NULL;
4788 pmb->context2 = NULL;
4787 4789
4788 ndlp->nlp_rpi = mb->un.varWords[0]; 4790 ndlp->nlp_rpi = mb->un.varWords[0];
4789 ndlp->nlp_flag |= NLP_RPI_VALID; 4791 ndlp->nlp_flag |= NLP_RPI_VALID;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 1676f61291e7..a631647051d9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1380,6 +1380,9 @@ typedef struct { /* FireFly BIU registers */
1380#define MBX_INIT_VFI 0xA3 1380#define MBX_INIT_VFI 0xA3
1381#define MBX_INIT_VPI 0xA4 1381#define MBX_INIT_VPI 0xA4
1382 1382
1383#define MBX_AUTH_PORT 0xF8
1384#define MBX_SECURITY_MGMT 0xF9
1385
1383/* IOCB Commands */ 1386/* IOCB Commands */
1384 1387
1385#define CMD_RCV_SEQUENCE_CX 0x01 1388#define CMD_RCV_SEQUENCE_CX 0x01
@@ -1502,7 +1505,8 @@ typedef struct { /* FireFly BIU registers */
1502#define MBXERR_DMA_ERROR 15 1505#define MBXERR_DMA_ERROR 15
1503#define MBXERR_ERROR 16 1506#define MBXERR_ERROR 16
1504#define MBXERR_LINK_DOWN 0x33 1507#define MBXERR_LINK_DOWN 0x33
1505#define MBX_NOT_FINISHED 255 1508#define MBXERR_SEC_NO_PERMISSION 0xF02
1509#define MBX_NOT_FINISHED 255
1506 1510
1507#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1511#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
1508#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ 1512#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index da9ba06ad583..295c7ddb36c1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1076,21 +1076,16 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1076 } else { 1076 } else {
1077 /* 1077 /*
1078 * If heart beat timeout called with hb_outstanding set 1078 * If heart beat timeout called with hb_outstanding set
1079 * we need to take the HBA offline. 1079 * we need to give the hb mailbox cmd a chance to
1080 * complete or TMO.
1080 */ 1081 */
1081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1082 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1082 "0459 Adapter heartbeat failure, " 1083 "0459 Adapter heartbeat still out"
1083 "taking this port offline.\n"); 1084 "standing:last compl time was %d ms.\n",
1084 1085 jiffies_to_msecs(jiffies
1085 spin_lock_irq(&phba->hbalock); 1086 - phba->last_completion_time));
1086 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1087 mod_timer(&phba->hb_tmofunc,
1087 spin_unlock_irq(&phba->hbalock); 1088 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1088
1089 lpfc_offline_prep(phba);
1090 lpfc_offline(phba);
1091 lpfc_unblock_mgmt_io(phba);
1092 phba->link_state = LPFC_HBA_ERROR;
1093 lpfc_hba_down_post(phba);
1094 } 1089 }
1095 } 1090 }
1096} 1091}
@@ -1277,13 +1272,21 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1277 if (phba->hba_flag & DEFER_ERATT) 1272 if (phba->hba_flag & DEFER_ERATT)
1278 lpfc_handle_deferred_eratt(phba); 1273 lpfc_handle_deferred_eratt(phba);
1279 1274
1280 if (phba->work_hs & HS_FFER6) { 1275 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1281 /* Re-establishing Link */ 1276 if (phba->work_hs & HS_FFER6)
1282 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1277 /* Re-establishing Link */
1283 "1301 Re-establishing Link " 1278 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1284 "Data: x%x x%x x%x\n", 1279 "1301 Re-establishing Link "
1285 phba->work_hs, 1280 "Data: x%x x%x x%x\n",
1286 phba->work_status[0], phba->work_status[1]); 1281 phba->work_hs, phba->work_status[0],
1282 phba->work_status[1]);
1283 if (phba->work_hs & HS_FFER8)
1284 /* Device Zeroization */
1285 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1286 "2861 Host Authentication device "
1287 "zeroization Data:x%x x%x x%x\n",
1288 phba->work_hs, phba->work_status[0],
1289 phba->work_status[1]);
1287 1290
1288 spin_lock_irq(&phba->hbalock); 1291 spin_lock_irq(&phba->hbalock);
1289 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1292 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
@@ -2817,6 +2820,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2817 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2820 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2818 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2821 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2819 2822
2823 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2824
2820 /* This value is also unchanging */ 2825 /* This value is also unchanging */
2821 memset(fc_host_active_fc4s(shost), 0, 2826 memset(fc_host_active_fc4s(shost), 0,
2822 sizeof(fc_host_active_fc4s(shost))); 2827 sizeof(fc_host_active_fc4s(shost)));
@@ -2883,65 +2888,6 @@ lpfc_stop_port(struct lpfc_hba *phba)
2883} 2888}
2884 2889
2885/** 2890/**
2886 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2887 * @phba: pointer to lpfc hba data structure.
2888 *
2889 * This routine is invoked to remove the driver default fcf record from
2890 * the port. This routine currently acts on FCF Index 0.
2891 *
2892 **/
2893void
2894lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2895{
2896 int rc = 0;
2897 LPFC_MBOXQ_t *mboxq;
2898 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2899 uint32_t mbox_tmo, req_len;
2900 uint32_t shdr_status, shdr_add_status;
2901
2902 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2903 if (!mboxq) {
2904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2905 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2906 return;
2907 }
2908
2909 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2910 sizeof(struct lpfc_sli4_cfg_mhdr);
2911 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2912 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2913 req_len, LPFC_SLI4_MBX_EMBED);
2914 /*
2915 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2916 * supports multiple FCF indices.
2917 */
2918 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2919 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2920 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2921 phba->fcf.current_rec.fcf_indx);
2922
2923 if (!phba->sli4_hba.intr_enable)
2924 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2925 else {
2926 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2927 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2928 }
2929 /* The IOCTL status is embedded in the mailbox subheader. */
2930 shdr_status = bf_get(lpfc_mbox_hdr_status,
2931 &del_fcf_record->header.cfg_shdr.response);
2932 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2933 &del_fcf_record->header.cfg_shdr.response);
2934 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2935 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2936 "2516 DEL FCF of default FCF Index failed "
2937 "mbx status x%x, status x%x add_status x%x\n",
2938 rc, shdr_status, shdr_add_status);
2939 }
2940 if (rc != MBX_TIMEOUT)
2941 mempool_free(mboxq, phba->mbox_mem_pool);
2942}
2943
2944/**
2945 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2891 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2946 * @phba: Pointer to hba for which this call is being executed. 2892 * @phba: Pointer to hba for which this call is being executed.
2947 * 2893 *
@@ -4283,12 +4229,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4283{ 4229{
4284 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4230 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4285 4231
4286 /* unregister default FCFI from the HBA */
4287 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4288
4289 /* Free the default FCR table */
4290 lpfc_sli_remove_dflt_fcf(phba);
4291
4292 /* Free memory allocated for msi-x interrupt vector entries */ 4232 /* Free memory allocated for msi-x interrupt vector entries */
4293 kfree(phba->sli4_hba.msix_entries); 4233 kfree(phba->sli4_hba.msix_entries);
4294 4234
@@ -4316,9 +4256,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4316 lpfc_sli4_cq_event_release_all(phba); 4256 lpfc_sli4_cq_event_release_all(phba);
4317 lpfc_sli4_cq_event_pool_destroy(phba); 4257 lpfc_sli4_cq_event_pool_destroy(phba);
4318 4258
4319 /* Reset SLI4 HBA FCoE function */
4320 lpfc_pci_function_reset(phba);
4321
4322 /* Free the bsmbx region. */ 4259 /* Free the bsmbx region. */
4323 lpfc_destroy_bootstrap_mbox(phba); 4260 lpfc_destroy_bootstrap_mbox(phba);
4324 4261
@@ -4545,7 +4482,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
4545{ 4482{
4546 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4483 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4547 LIST_HEAD(sglq_list); 4484 LIST_HEAD(sglq_list);
4548 int rc = 0;
4549 4485
4550 spin_lock_irq(&phba->hbalock); 4486 spin_lock_irq(&phba->hbalock);
4551 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4487 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
@@ -4558,11 +4494,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
4558 kfree(sglq_entry); 4494 kfree(sglq_entry);
4559 phba->sli4_hba.total_sglq_bufs--; 4495 phba->sli4_hba.total_sglq_bufs--;
4560 } 4496 }
4561 rc = lpfc_sli4_remove_all_sgl_pages(phba);
4562 if (rc) {
4563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4564 "2005 Unable to deregister pages from HBA: %x\n", rc);
4565 }
4566 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4497 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4567} 4498}
4568 4499
@@ -4725,8 +4656,8 @@ out_free_mem:
4725 * 4656 *
4726 * Return codes 4657 * Return codes
4727 * 0 - successful 4658 * 0 - successful
4728 * ENOMEM - No availble memory 4659 * -ENOMEM - No availble memory
4729 * EIO - The mailbox failed to complete successfully. 4660 * -EIO - The mailbox failed to complete successfully.
4730 **/ 4661 **/
4731int 4662int
4732lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4663lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5419,7 +5350,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5419 * 5350 *
5420 * Return codes 5351 * Return codes
5421 * 0 - successful 5352 * 0 - successful
5422 * ENOMEM - could not allocated memory. 5353 * -ENOMEM - could not allocated memory.
5423 **/ 5354 **/
5424static int 5355static int
5425lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5356lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
@@ -5518,8 +5449,8 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5518 * 5449 *
5519 * Return codes 5450 * Return codes
5520 * 0 - successful 5451 * 0 - successful
5521 * ENOMEM - No availble memory 5452 * -ENOMEM - No availble memory
5522 * EIO - The mailbox failed to complete successfully. 5453 * -EIO - The mailbox failed to complete successfully.
5523 **/ 5454 **/
5524static int 5455static int
5525lpfc_sli4_read_config(struct lpfc_hba *phba) 5456lpfc_sli4_read_config(struct lpfc_hba *phba)
@@ -5622,8 +5553,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5622 * 5553 *
5623 * Return codes 5554 * Return codes
5624 * 0 - successful 5555 * 0 - successful
5625 * ENOMEM - No availble memory 5556 * -ENOMEM - No availble memory
5626 * EIO - The mailbox failed to complete successfully. 5557 * -EIO - The mailbox failed to complete successfully.
5627 **/ 5558 **/
5628static int 5559static int
5629lpfc_setup_endian_order(struct lpfc_hba *phba) 5560lpfc_setup_endian_order(struct lpfc_hba *phba)
@@ -5671,8 +5602,8 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
5671 * 5602 *
5672 * Return codes 5603 * Return codes
5673 * 0 - successful 5604 * 0 - successful
5674 * ENOMEM - No availble memory 5605 * -ENOMEM - No availble memory
5675 * EIO - The mailbox failed to complete successfully. 5606 * -EIO - The mailbox failed to complete successfully.
5676 **/ 5607 **/
5677static int 5608static int
5678lpfc_sli4_queue_create(struct lpfc_hba *phba) 5609lpfc_sli4_queue_create(struct lpfc_hba *phba)
@@ -5966,8 +5897,8 @@ out_error:
5966 * 5897 *
5967 * Return codes 5898 * Return codes
5968 * 0 - successful 5899 * 0 - successful
5969 * ENOMEM - No availble memory 5900 * -ENOMEM - No availble memory
5970 * EIO - The mailbox failed to complete successfully. 5901 * -EIO - The mailbox failed to complete successfully.
5971 **/ 5902 **/
5972static void 5903static void
5973lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5904lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
@@ -6030,8 +5961,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6030 * 5961 *
6031 * Return codes 5962 * Return codes
6032 * 0 - successful 5963 * 0 - successful
6033 * ENOMEM - No availble memory 5964 * -ENOMEM - No availble memory
6034 * EIO - The mailbox failed to complete successfully. 5965 * -EIO - The mailbox failed to complete successfully.
6035 **/ 5966 **/
6036int 5967int
6037lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5968lpfc_sli4_queue_setup(struct lpfc_hba *phba)
@@ -6275,8 +6206,8 @@ out_error:
6275 * 6206 *
6276 * Return codes 6207 * Return codes
6277 * 0 - successful 6208 * 0 - successful
6278 * ENOMEM - No availble memory 6209 * -ENOMEM - No availble memory
6279 * EIO - The mailbox failed to complete successfully. 6210 * -EIO - The mailbox failed to complete successfully.
6280 **/ 6211 **/
6281void 6212void
6282lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6213lpfc_sli4_queue_unset(struct lpfc_hba *phba)
@@ -6481,8 +6412,8 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6481 * 6412 *
6482 * Return codes 6413 * Return codes
6483 * 0 - successful 6414 * 0 - successful
6484 * ENOMEM - No availble memory 6415 * -ENOMEM - No availble memory
6485 * EIO - The mailbox failed to complete successfully. 6416 * -EIO - The mailbox failed to complete successfully.
6486 **/ 6417 **/
6487int 6418int
6488lpfc_pci_function_reset(struct lpfc_hba *phba) 6419lpfc_pci_function_reset(struct lpfc_hba *phba)
@@ -6592,50 +6523,6 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6592} 6523}
6593 6524
6594/** 6525/**
6595 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
6596 * @phba: pointer to lpfc hba data structure.
6597 * @fcfi: fcf index.
6598 *
6599 * This routine is invoked to unregister a FCFI from device.
6600 **/
6601void
6602lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6603{
6604 LPFC_MBOXQ_t *mbox;
6605 uint32_t mbox_tmo;
6606 int rc;
6607 unsigned long flags;
6608
6609 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6610
6611 if (!mbox)
6612 return;
6613
6614 lpfc_unreg_fcfi(mbox, fcfi);
6615
6616 if (!phba->sli4_hba.intr_enable)
6617 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6618 else {
6619 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6620 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6621 }
6622 if (rc != MBX_TIMEOUT)
6623 mempool_free(mbox, phba->mbox_mem_pool);
6624 if (rc != MBX_SUCCESS)
6625 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6626 "2517 Unregister FCFI command failed "
6627 "status %d, mbxStatus x%x\n", rc,
6628 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6629 else {
6630 spin_lock_irqsave(&phba->hbalock, flags);
6631 /* Mark the FCFI is no longer registered */
6632 phba->fcf.fcf_flag &=
6633 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6634 spin_unlock_irqrestore(&phba->hbalock, flags);
6635 }
6636}
6637
6638/**
6639 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6526 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6640 * @phba: pointer to lpfc hba data structure. 6527 * @phba: pointer to lpfc hba data structure.
6641 * 6528 *
@@ -7372,10 +7259,14 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7372 7259
7373 phba->pport->work_port_events = 0; 7260 phba->pport->work_port_events = 0;
7374 7261
7375 lpfc_sli4_hba_down(phba); 7262 /* Stop the SLI4 device port */
7263 lpfc_stop_port(phba);
7376 7264
7377 lpfc_sli4_disable_intr(phba); 7265 lpfc_sli4_disable_intr(phba);
7378 7266
7267 /* Reset SLI4 HBA FCoE function */
7268 lpfc_pci_function_reset(phba);
7269
7379 return; 7270 return;
7380} 7271}
7381 7272
@@ -7424,15 +7315,15 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7424 spin_unlock_irq(&phba->hbalock); 7315 spin_unlock_irq(&phba->hbalock);
7425 } 7316 }
7426 7317
7427 /* Tear down the queues in the HBA */
7428 lpfc_sli4_queue_unset(phba);
7429
7430 /* Disable PCI subsystem interrupt */ 7318 /* Disable PCI subsystem interrupt */
7431 lpfc_sli4_disable_intr(phba); 7319 lpfc_sli4_disable_intr(phba);
7432 7320
7433 /* Stop kthread signal shall trigger work_done one more time */ 7321 /* Stop kthread signal shall trigger work_done one more time */
7434 kthread_stop(phba->worker_thread); 7322 kthread_stop(phba->worker_thread);
7435 7323
7324 /* Reset SLI4 HBA FCoE function */
7325 lpfc_pci_function_reset(phba);
7326
7436 /* Stop the SLI4 device port */ 7327 /* Stop the SLI4 device port */
7437 phba->pport->work_port_events = 0; 7328 phba->pport->work_port_events = 0;
7438} 7329}
@@ -8368,7 +8259,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8368 list_del_init(&vport->listentry); 8259 list_del_init(&vport->listentry);
8369 spin_unlock_irq(&phba->hbalock); 8260 spin_unlock_irq(&phba->hbalock);
8370 8261
8371 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 8262 /* Perform scsi free before driver resource_unset since scsi
8372 * buffers are released to their corresponding pools here. 8263 * buffers are released to their corresponding pools here.
8373 */ 8264 */
8374 lpfc_scsi_free(phba); 8265 lpfc_scsi_free(phba);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e51aa6b45b3..3a658953486c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -599,6 +599,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
599 iocb->ulpClass = CLASS3; 599 iocb->ulpClass = CLASS3;
600 psb->status = IOSTAT_SUCCESS; 600 psb->status = IOSTAT_SUCCESS;
601 /* Put it back into the SCSI buffer list */ 601 /* Put it back into the SCSI buffer list */
602 psb->cur_iocbq.context1 = psb;
602 lpfc_release_scsi_buf_s3(phba, psb); 603 lpfc_release_scsi_buf_s3(phba, psb);
603 604
604 } 605 }
@@ -849,6 +850,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
849 iocb->ulpBdeCount = 1; 850 iocb->ulpBdeCount = 1;
850 iocb->ulpLe = 1; 851 iocb->ulpLe = 1;
851 iocb->ulpClass = CLASS3; 852 iocb->ulpClass = CLASS3;
853 psb->cur_iocbq.context1 = psb;
852 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 854 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
853 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; 855 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
854 else 856 else
@@ -2276,15 +2278,24 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2276 * Check SLI validation that all the transfer was actually done 2278 * Check SLI validation that all the transfer was actually done
2277 * (fcpi_parm should be zero). Apply check only to reads. 2279 * (fcpi_parm should be zero). Apply check only to reads.
2278 */ 2280 */
2279 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 2281 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2280 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2281 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 2282 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2282 "9029 FCP Read Check Error Data: " 2283 "9029 FCP Read Check Error Data: "
2283 "x%x x%x x%x x%x\n", 2284 "x%x x%x x%x x%x x%x\n",
2284 be32_to_cpu(fcpcmd->fcpDl), 2285 be32_to_cpu(fcpcmd->fcpDl),
2285 be32_to_cpu(fcprsp->rspResId), 2286 be32_to_cpu(fcprsp->rspResId),
2286 fcpi_parm, cmnd->cmnd[0]); 2287 fcpi_parm, cmnd->cmnd[0], scsi_status);
2287 host_status = DID_ERROR; 2288 switch (scsi_status) {
2289 case SAM_STAT_GOOD:
2290 case SAM_STAT_CHECK_CONDITION:
2291 /* Fabric dropped a data frame. Fail any successful
2292 * command in which we detected dropped frames.
2293 * A status of good or some check conditions could
2294 * be considered a successful command.
2295 */
2296 host_status = DID_ERROR;
2297 break;
2298 }
2288 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 2299 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2289 } 2300 }
2290 2301
@@ -3072,7 +3083,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3072 if (ret) 3083 if (ret)
3073 return ret; 3084 return ret;
3074 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 3085 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3075 BUG_ON(!lpfc_cmd); 3086 if (!lpfc_cmd) {
3087 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3088 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3089 "x%x ID %d "
3090 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3091 cmnd->device->lun, cmnd->serial_number);
3092 return SUCCESS;
3093 }
3076 3094
3077 /* 3095 /*
3078 * If pCmd field of the corresponding lpfc_scsi_buf structure 3096 * If pCmd field of the corresponding lpfc_scsi_buf structure
@@ -3656,7 +3674,6 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3656 * 3674 *
3657 * This routine configures following items 3675 * This routine configures following items
3658 * - Tag command queuing support for @sdev if supported. 3676 * - Tag command queuing support for @sdev if supported.
3659 * - Dev loss time out value of fc_rport.
3660 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 3677 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
3661 * 3678 *
3662 * Return codes: 3679 * Return codes:
@@ -3667,21 +3684,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
3667{ 3684{
3668 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3685 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3669 struct lpfc_hba *phba = vport->phba; 3686 struct lpfc_hba *phba = vport->phba;
3670 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
3671 3687
3672 if (sdev->tagged_supported) 3688 if (sdev->tagged_supported)
3673 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); 3689 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3674 else 3690 else
3675 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); 3691 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3676 3692
3677 /*
3678 * Initialize the fc transport attributes for the target
3679 * containing this scsi device. Also note that the driver's
3680 * target pointer is stored in the starget_data for the
3681 * driver's sysfs entry point functions.
3682 */
3683 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3684
3685 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3693 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3686 lpfc_sli_handle_fast_ring_event(phba, 3694 lpfc_sli_handle_fast_ring_event(phba,
3687 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3695 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fb8905f893f5..0d1e187b005d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1677,6 +1677,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1677 case MBX_RESUME_RPI: 1677 case MBX_RESUME_RPI:
1678 case MBX_READ_EVENT_LOG_STATUS: 1678 case MBX_READ_EVENT_LOG_STATUS:
1679 case MBX_READ_EVENT_LOG: 1679 case MBX_READ_EVENT_LOG:
1680 case MBX_SECURITY_MGMT:
1681 case MBX_AUTH_PORT:
1680 ret = mbxCommand; 1682 ret = mbxCommand;
1681 break; 1683 break;
1682 default: 1684 default:
@@ -1730,10 +1732,11 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1730void 1732void
1731lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1733lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1732{ 1734{
1735 struct lpfc_vport *vport = pmb->vport;
1733 struct lpfc_dmabuf *mp; 1736 struct lpfc_dmabuf *mp;
1737 struct lpfc_nodelist *ndlp;
1734 uint16_t rpi, vpi; 1738 uint16_t rpi, vpi;
1735 int rc; 1739 int rc;
1736 struct lpfc_vport *vport = pmb->vport;
1737 1740
1738 mp = (struct lpfc_dmabuf *) (pmb->context1); 1741 mp = (struct lpfc_dmabuf *) (pmb->context1);
1739 1742
@@ -1774,6 +1777,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1774 return; 1777 return;
1775 } 1778 }
1776 1779
1780 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
1781 ndlp = (struct lpfc_nodelist *)pmb->context2;
1782 lpfc_nlp_put(ndlp);
1783 pmb->context2 = NULL;
1784 }
1785
1786 /* Check security permission status on INIT_LINK mailbox command */
1787 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
1788 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1789 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1790 "2860 SLI authentication is required "
1791 "for INIT_LINK but has not done yet\n");
1792
1777 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1793 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1778 lpfc_sli4_mbox_cmd_free(phba, pmb); 1794 lpfc_sli4_mbox_cmd_free(phba, pmb);
1779 else 1795 else
@@ -3651,11 +3667,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
3651 i = 0; 3667 i = 0;
3652 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 3668 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
3653 3669
3654 /* Check every 100ms for 5 retries, then every 500ms for 5, then 3670 /* Check every 10ms for 10 retries, then every 100ms for 90
3655 * every 2.5 sec for 5, then reset board and every 2.5 sec for 3671 * retries, then every 1 sec for 50 retires for a total of
3656 * 4. 3672 * ~60 seconds before reset the board again and check every
3673 * 1 sec for 50 retries. The up to 60 seconds before the
3674 * board ready is required by the Falcon FIPS zeroization
3675 * complete, and any reset the board in between shall cause
3676 * restart of zeroization, further delay the board ready.
3657 */ 3677 */
3658 if (i++ >= 20) { 3678 if (i++ >= 200) {
3659 /* Adapter failed to init, timeout, status reg 3679 /* Adapter failed to init, timeout, status reg
3660 <status> */ 3680 <status> */
3661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3683,16 +3703,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
3683 return -EIO; 3703 return -EIO;
3684 } 3704 }
3685 3705
3686 if (i <= 5) { 3706 if (i <= 10)
3687 msleep(10); 3707 msleep(10);
3688 } else if (i <= 10) { 3708 else if (i <= 100)
3689 msleep(500); 3709 msleep(100);
3690 } else { 3710 else
3691 msleep(2500); 3711 msleep(1000);
3692 }
3693 3712
3694 if (i == 15) { 3713 if (i == 150) {
3695 /* Do post */ 3714 /* Do post */
3696 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3715 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3697 lpfc_sli_brdrestart(phba); 3716 lpfc_sli_brdrestart(phba);
3698 } 3717 }
@@ -4186,7 +4205,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4186 * 4205 *
4187 * Return codes 4206 * Return codes
4188 * 0 - successful 4207 * 0 - successful
4189 * ENOMEM - could not allocated memory. 4208 * -ENOMEM - could not allocated memory.
4190 **/ 4209 **/
4191static int 4210static int
4192lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4211lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
@@ -5943,6 +5962,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5943 uint8_t command_type = ELS_COMMAND_NON_FIP; 5962 uint8_t command_type = ELS_COMMAND_NON_FIP;
5944 uint8_t cmnd; 5963 uint8_t cmnd;
5945 uint16_t xritag; 5964 uint16_t xritag;
5965 uint16_t abrt_iotag;
5966 struct lpfc_iocbq *abrtiocbq;
5946 struct ulp_bde64 *bpl = NULL; 5967 struct ulp_bde64 *bpl = NULL;
5947 uint32_t els_id = ELS_ID_DEFAULT; 5968 uint32_t els_id = ELS_ID_DEFAULT;
5948 int numBdes, i; 5969 int numBdes, i;
@@ -6155,9 +6176,17 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6155 case CMD_ABORT_XRI_CX: 6176 case CMD_ABORT_XRI_CX:
6156 /* words 0-2 memcpy should be 0 rserved */ 6177 /* words 0-2 memcpy should be 0 rserved */
6157 /* port will send abts */ 6178 /* port will send abts */
6158 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6179 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
6180 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
6181 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
6182 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
6183 } else
6184 fip = 0;
6185
6186 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
6159 /* 6187 /*
6160 * The link is down so the fw does not need to send abts 6188 * The link is down, or the command was ELS_FIP
6189 * so the fw does not need to send abts
6161 * on the wire. 6190 * on the wire.
6162 */ 6191 */
6163 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 6192 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
@@ -6901,37 +6930,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
6901} 6930}
6902 6931
6903/** 6932/**
6904 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
6905 * @phba: Pointer to HBA context object.
6906 *
6907 * This function cleans up all queues, iocb, buffers, mailbox commands while
6908 * shutting down the SLI4 HBA FCoE function. This function is called with no
6909 * lock held and always returns 1.
6910 *
6911 * This function does the following to cleanup driver FCoE function resources:
6912 * - Free discovery resources for each virtual port
6913 * - Cleanup any pending fabric iocbs
6914 * - Iterate through the iocb txq and free each entry in the list.
6915 * - Free up any buffer posted to the HBA.
6916 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6917 * - Free mailbox commands in the mailbox queue.
6918 **/
6919int
6920lpfc_sli4_hba_down(struct lpfc_hba *phba)
6921{
6922 /* Stop the SLI4 device port */
6923 lpfc_stop_port(phba);
6924
6925 /* Tear down the queues in the HBA */
6926 lpfc_sli4_queue_unset(phba);
6927
6928 /* unregister default FCFI from the HBA */
6929 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
6930
6931 return 1;
6932}
6933
6934/**
6935 * lpfc_sli_pcimem_bcopy - SLI memory copy function 6933 * lpfc_sli_pcimem_bcopy - SLI memory copy function
6936 * @srcp: Source memory pointer. 6934 * @srcp: Source memory pointer.
6937 * @destp: Destination memory pointer. 6935 * @destp: Destination memory pointer.
@@ -7888,7 +7886,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
7888 /* Check if there is a deferred error condition is active */ 7886 /* Check if there is a deferred error condition is active */
7889 if ((HS_FFER1 & phba->work_hs) && 7887 if ((HS_FFER1 & phba->work_hs) &&
7890 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 7888 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7891 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 7889 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
7892 phba->hba_flag |= DEFER_ERATT; 7890 phba->hba_flag |= DEFER_ERATT;
7893 /* Clear all interrupt enable conditions */ 7891 /* Clear all interrupt enable conditions */
7894 writel(0, phba->HCregaddr); 7892 writel(0, phba->HCregaddr);
@@ -8204,7 +8202,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8204 */ 8202 */
8205 if ((HS_FFER1 & phba->work_hs) && 8203 if ((HS_FFER1 & phba->work_hs) &&
8206 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8204 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
8207 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 8205 HS_FFER6 | HS_FFER7 | HS_FFER8) &
8206 phba->work_hs)) {
8208 phba->hba_flag |= DEFER_ERATT; 8207 phba->hba_flag |= DEFER_ERATT;
8209 /* Clear all interrupt enable conditions */ 8208 /* Clear all interrupt enable conditions */
8210 writel(0, phba->HCregaddr); 8209 writel(0, phba->HCregaddr);
@@ -8476,7 +8475,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8476 * If there is deferred error attention, do not check for any interrupt. 8475 * If there is deferred error attention, do not check for any interrupt.
8477 */ 8476 */
8478 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8477 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8479 spin_unlock_irq(&phba->hbalock); 8478 spin_unlock(&phba->hbalock);
8480 return IRQ_NONE; 8479 return IRQ_NONE;
8481 } 8480 }
8482 8481
@@ -9724,8 +9723,8 @@ out_fail:
9724 * command to finish before continuing. 9723 * command to finish before continuing.
9725 * 9724 *
9726 * On success this function will return a zero. If unable to allocate enough 9725 * On success this function will return a zero. If unable to allocate enough
9727 * memory this function will return ENOMEM. If the queue create mailbox command 9726 * memory this function will return -ENOMEM. If the queue create mailbox command
9728 * fails this function will return ENXIO. 9727 * fails this function will return -ENXIO.
9729 **/ 9728 **/
9730uint32_t 9729uint32_t
9731lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 9730lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
@@ -9840,8 +9839,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9840 * command to finish before continuing. 9839 * command to finish before continuing.
9841 * 9840 *
9842 * On success this function will return a zero. If unable to allocate enough 9841 * On success this function will return a zero. If unable to allocate enough
9843 * memory this function will return ENOMEM. If the queue create mailbox command 9842 * memory this function will return -ENOMEM. If the queue create mailbox command
9844 * fails this function will return ENXIO. 9843 * fails this function will return -ENXIO.
9845 **/ 9844 **/
9846uint32_t 9845uint32_t
9847lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 9846lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
@@ -10011,8 +10010,8 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
10011 * command to finish before continuing. 10010 * command to finish before continuing.
10012 * 10011 *
10013 * On success this function will return a zero. If unable to allocate enough 10012 * On success this function will return a zero. If unable to allocate enough
10014 * memory this function will return ENOMEM. If the queue create mailbox command 10013 * memory this function will return -ENOMEM. If the queue create mailbox command
10015 * fails this function will return ENXIO. 10014 * fails this function will return -ENXIO.
10016 **/ 10015 **/
10017int32_t 10016int32_t
10018lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 10017lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
@@ -10146,8 +10145,8 @@ out:
10146 * command to finish before continuing. 10145 * command to finish before continuing.
10147 * 10146 *
10148 * On success this function will return a zero. If unable to allocate enough 10147 * On success this function will return a zero. If unable to allocate enough
10149 * memory this function will return ENOMEM. If the queue create mailbox command 10148 * memory this function will return -ENOMEM. If the queue create mailbox command
10150 * fails this function will return ENXIO. 10149 * fails this function will return -ENXIO.
10151 **/ 10150 **/
10152uint32_t 10151uint32_t
10153lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 10152lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
@@ -10234,8 +10233,8 @@ out:
10234 * mailbox command to finish before continuing. 10233 * mailbox command to finish before continuing.
10235 * 10234 *
10236 * On success this function will return a zero. If unable to allocate enough 10235 * On success this function will return a zero. If unable to allocate enough
10237 * memory this function will return ENOMEM. If the queue create mailbox command 10236 * memory this function will return -ENOMEM. If the queue create mailbox command
10238 * fails this function will return ENXIO. 10237 * fails this function will return -ENXIO.
10239 **/ 10238 **/
10240uint32_t 10239uint32_t
10241lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10240lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -10403,7 +10402,7 @@ out:
10403 * The @eq struct is used to get the queue ID of the queue to destroy. 10402 * The @eq struct is used to get the queue ID of the queue to destroy.
10404 * 10403 *
10405 * On success this function will return a zero. If the queue destroy mailbox 10404 * On success this function will return a zero. If the queue destroy mailbox
10406 * command fails this function will return ENXIO. 10405 * command fails this function will return -ENXIO.
10407 **/ 10406 **/
10408uint32_t 10407uint32_t
10409lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 10408lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
@@ -10458,7 +10457,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
10458 * The @cq struct is used to get the queue ID of the queue to destroy. 10457 * The @cq struct is used to get the queue ID of the queue to destroy.
10459 * 10458 *
10460 * On success this function will return a zero. If the queue destroy mailbox 10459 * On success this function will return a zero. If the queue destroy mailbox
10461 * command fails this function will return ENXIO. 10460 * command fails this function will return -ENXIO.
10462 **/ 10461 **/
10463uint32_t 10462uint32_t
10464lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 10463lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
@@ -10511,7 +10510,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10511 * The @mq struct is used to get the queue ID of the queue to destroy. 10510 * The @mq struct is used to get the queue ID of the queue to destroy.
10512 * 10511 *
10513 * On success this function will return a zero. If the queue destroy mailbox 10512 * On success this function will return a zero. If the queue destroy mailbox
10514 * command fails this function will return ENXIO. 10513 * command fails this function will return -ENXIO.
10515 **/ 10514 **/
10516uint32_t 10515uint32_t
10517lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 10516lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
@@ -10564,7 +10563,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10564 * The @wq struct is used to get the queue ID of the queue to destroy. 10563 * The @wq struct is used to get the queue ID of the queue to destroy.
10565 * 10564 *
10566 * On success this function will return a zero. If the queue destroy mailbox 10565 * On success this function will return a zero. If the queue destroy mailbox
10567 * command fails this function will return ENXIO. 10566 * command fails this function will return -ENXIO.
10568 **/ 10567 **/
10569uint32_t 10568uint32_t
10570lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 10569lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
@@ -10616,7 +10615,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10616 * The @rq struct is used to get the queue ID of the queue to destroy. 10615 * The @rq struct is used to get the queue ID of the queue to destroy.
10617 * 10616 *
10618 * On success this function will return a zero. If the queue destroy mailbox 10617 * On success this function will return a zero. If the queue destroy mailbox
10619 * command fails this function will return ENXIO. 10618 * command fails this function will return -ENXIO.
10620 **/ 10619 **/
10621uint32_t 10620uint32_t
10622lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10621lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -10758,51 +10757,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10758 } 10757 }
10759 return 0; 10758 return 0;
10760} 10759}
10761/**
10762 * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA
10763 * @phba: The virtual port for which this call being executed.
10764 *
10765 * This routine will remove all of the sgl pages registered with the hba.
10766 *
10767 * Return codes:
10768 * 0 - Success
10769 * -ENXIO, -ENOMEM - Failure
10770 **/
10771int
10772lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10773{
10774 LPFC_MBOXQ_t *mbox;
10775 int rc;
10776 uint32_t shdr_status, shdr_add_status;
10777 union lpfc_sli4_cfg_shdr *shdr;
10778
10779 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10780 if (!mbox)
10781 return -ENOMEM;
10782
10783 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10784 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10785 LPFC_SLI4_MBX_EMBED);
10786 if (!phba->sli4_hba.intr_enable)
10787 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10788 else
10789 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10790 /* The IOCTL status is embedded in the mailbox subheader. */
10791 shdr = (union lpfc_sli4_cfg_shdr *)
10792 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10793 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10794 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10795 if (rc != MBX_TIMEOUT)
10796 mempool_free(mbox, phba->mbox_mem_pool);
10797 if (shdr_status || shdr_add_status || rc) {
10798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10799 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10800 "status x%x add_status x%x, mbx status x%x\n",
10801 shdr_status, shdr_add_status, rc);
10802 rc = -ENXIO;
10803 }
10804 return rc;
10805}
10806 10760
10807/** 10761/**
10808 * lpfc_sli4_next_xritag - Get an xritag for the io 10762 * lpfc_sli4_next_xritag - Get an xritag for the io
@@ -11819,7 +11773,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11819 * 11773 *
11820 * Return codes 11774 * Return codes
11821 * 0 - successful 11775 * 0 - successful
11822 * EIO - The mailbox failed to complete successfully. 11776 * -EIO - The mailbox failed to complete successfully.
11823 * When this error occurs, the driver is not guaranteed 11777 * When this error occurs, the driver is not guaranteed
11824 * to have any rpi regions posted to the device and 11778 * to have any rpi regions posted to the device and
11825 * must either attempt to repost the regions or take a 11779 * must either attempt to repost the regions or take a
@@ -11857,8 +11811,8 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11857 * 11811 *
11858 * Return codes 11812 * Return codes
11859 * 0 - successful 11813 * 0 - successful
11860 * ENOMEM - No available memory 11814 * -ENOMEM - No available memory
11861 * EIO - The mailbox failed to complete successfully. 11815 * -EIO - The mailbox failed to complete successfully.
11862 **/ 11816 **/
11863int 11817int
11864lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 11818lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
@@ -12805,8 +12759,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12805 LPFC_MBOXQ_t *mb, *nextmb; 12759 LPFC_MBOXQ_t *mb, *nextmb;
12806 struct lpfc_dmabuf *mp; 12760 struct lpfc_dmabuf *mp;
12807 struct lpfc_nodelist *ndlp; 12761 struct lpfc_nodelist *ndlp;
12762 struct lpfc_nodelist *act_mbx_ndlp = NULL;
12808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 12763 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
12764 LIST_HEAD(mbox_cmd_list);
12809 12765
12766 /* Clean up internally queued mailbox commands with the vport */
12810 spin_lock_irq(&phba->hbalock); 12767 spin_lock_irq(&phba->hbalock);
12811 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12768 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12812 if (mb->vport != vport) 12769 if (mb->vport != vport)
@@ -12816,6 +12773,28 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12816 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 12773 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12817 continue; 12774 continue;
12818 12775
12776 list_del(&mb->list);
12777 list_add_tail(&mb->list, &mbox_cmd_list);
12778 }
12779 /* Clean up active mailbox command with the vport */
12780 mb = phba->sli.mbox_active;
12781 if (mb && (mb->vport == vport)) {
12782 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12783 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12784 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12785 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12786 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
12787 /* Put reference count for delayed processing */
12788 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
12789 /* Unregister the RPI when mailbox complete */
12790 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12791 }
12792 }
12793 spin_unlock_irq(&phba->hbalock);
12794
12795 /* Release the cleaned-up mailbox commands */
12796 while (!list_empty(&mbox_cmd_list)) {
12797 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
12819 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12798 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12820 if (phba->sli_rev == LPFC_SLI_REV4) 12799 if (phba->sli_rev == LPFC_SLI_REV4)
12821 __lpfc_sli4_free_rpi(phba, 12800 __lpfc_sli4_free_rpi(phba,
@@ -12826,36 +12805,24 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12826 kfree(mp); 12805 kfree(mp);
12827 } 12806 }
12828 ndlp = (struct lpfc_nodelist *) mb->context2; 12807 ndlp = (struct lpfc_nodelist *) mb->context2;
12808 mb->context2 = NULL;
12829 if (ndlp) { 12809 if (ndlp) {
12830 spin_lock_irq(shost->host_lock); 12810 spin_lock(shost->host_lock);
12831 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12811 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12832 spin_unlock_irq(shost->host_lock); 12812 spin_unlock(shost->host_lock);
12833 lpfc_nlp_put(ndlp); 12813 lpfc_nlp_put(ndlp);
12834 mb->context2 = NULL;
12835 } 12814 }
12836 } 12815 }
12837 list_del(&mb->list);
12838 mempool_free(mb, phba->mbox_mem_pool); 12816 mempool_free(mb, phba->mbox_mem_pool);
12839 } 12817 }
12840 mb = phba->sli.mbox_active; 12818
12841 if (mb && (mb->vport == vport)) { 12819 /* Release the ndlp with the cleaned-up active mailbox command */
12842 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 12820 if (act_mbx_ndlp) {
12843 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 12821 spin_lock(shost->host_lock);
12844 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12822 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12845 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12823 spin_unlock(shost->host_lock);
12846 ndlp = (struct lpfc_nodelist *) mb->context2; 12824 lpfc_nlp_put(act_mbx_ndlp);
12847 if (ndlp) {
12848 spin_lock_irq(shost->host_lock);
12849 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
12850 spin_unlock_irq(shost->host_lock);
12851 lpfc_nlp_put(ndlp);
12852 mb->context2 = NULL;
12853 }
12854 /* Unregister the RPI when mailbox complete */
12855 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12856 }
12857 } 12825 }
12858 spin_unlock_irq(&phba->hbalock);
12859} 12826}
12860 12827
12861/** 12828/**
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a3b24d99a2a7..a0ca572ec28b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -481,7 +481,6 @@ struct lpfc_rpi_hdr {
481 */ 481 */
482int lpfc_pci_function_reset(struct lpfc_hba *); 482int lpfc_pci_function_reset(struct lpfc_hba *);
483int lpfc_sli4_hba_setup(struct lpfc_hba *); 483int lpfc_sli4_hba_setup(struct lpfc_hba *);
484int lpfc_sli4_hba_down(struct lpfc_hba *);
485int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, 484int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
486 uint8_t, uint32_t, bool); 485 uint8_t, uint32_t, bool);
487void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); 486void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
@@ -514,7 +513,6 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *);
514void lpfc_sli4_queue_unset(struct lpfc_hba *); 513void lpfc_sli4_queue_unset(struct lpfc_hba *);
515int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); 514int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
516int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 515int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
517int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
518uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 516uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
519int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 517int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
520int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 518int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 61afb3420a96..f93120e4c796 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.16" 21#define LPFC_DRIVER_VERSION "8.3.17"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1655507a682c..a5281ce893d0 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -580,7 +580,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
580 "static vport.\n"); 580 "static vport.\n");
581 return VPORT_ERROR; 581 return VPORT_ERROR;
582 } 582 }
583 583 spin_lock_irq(&phba->hbalock);
584 vport->load_flag |= FC_UNLOADING;
585 spin_unlock_irq(&phba->hbalock);
584 /* 586 /*
585 * If we are not unloading the driver then prevent the vport_delete 587 * If we are not unloading the driver then prevent the vport_delete
586 * from happening until after this vport's discovery is finished. 588 * from happening until after this vport's discovery is finished.
@@ -618,10 +620,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
618 scsi_host_put(shost); 620 scsi_host_put(shost);
619 return VPORT_INVAL; 621 return VPORT_INVAL;
620 } 622 }
621 spin_lock_irq(&phba->hbalock);
622 vport->load_flag |= FC_UNLOADING;
623 spin_unlock_irq(&phba->hbalock);
624
625 lpfc_free_sysfs_attr(vport); 623 lpfc_free_sysfs_attr(vport);
626 624
627 lpfc_debugfs_terminate(vport); 625 lpfc_debugfs_terminate(vport);
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 51e2579a743a..d3c9cdee292b 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -61,6 +61,11 @@ MODULE_VERSION(MEGASAS_VERSION);
61MODULE_AUTHOR("megaraidlinux@lsi.com"); 61MODULE_AUTHOR("megaraidlinux@lsi.com");
62MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); 62MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
63 63
64static int megasas_transition_to_ready(struct megasas_instance *instance);
65static int megasas_get_pd_list(struct megasas_instance *instance);
66static int megasas_issue_init_mfi(struct megasas_instance *instance);
67static int megasas_register_aen(struct megasas_instance *instance,
68 u32 seq_num, u32 class_locale_word);
64/* 69/*
65 * PCI ID table for all supported controllers 70 * PCI ID table for all supported controllers
66 */ 71 */
@@ -163,7 +168,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
163static inline void 168static inline void
164megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) 169megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
165{ 170{
166 writel(1, &(regs)->outbound_intr_mask); 171 writel(0, &(regs)->outbound_intr_mask);
167 172
168 /* Dummy readl to force pci flush */ 173 /* Dummy readl to force pci flush */
169 readl(&regs->outbound_intr_mask); 174 readl(&regs->outbound_intr_mask);
@@ -199,24 +204,27 @@ static int
199megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) 204megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
200{ 205{
201 u32 status; 206 u32 status;
207 u32 mfiStatus = 0;
202 /* 208 /*
203 * Check if it is our interrupt 209 * Check if it is our interrupt
204 */ 210 */
205 status = readl(&regs->outbound_intr_status); 211 status = readl(&regs->outbound_intr_status);
206 212
207 if (!(status & MFI_OB_INTR_STATUS_MASK)) { 213 if (status & MFI_OB_INTR_STATUS_MASK)
208 return 1; 214 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
209 } 215 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
216 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
210 217
211 /* 218 /*
212 * Clear the interrupt by writing back the same value 219 * Clear the interrupt by writing back the same value
213 */ 220 */
214 writel(status, &regs->outbound_intr_status); 221 if (mfiStatus)
222 writel(status, &regs->outbound_intr_status);
215 223
216 /* Dummy readl to force pci flush */ 224 /* Dummy readl to force pci flush */
217 readl(&regs->outbound_intr_status); 225 readl(&regs->outbound_intr_status);
218 226
219 return 0; 227 return mfiStatus;
220} 228}
221 229
222/** 230/**
@@ -231,8 +239,69 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance,
231 u32 frame_count, 239 u32 frame_count,
232 struct megasas_register_set __iomem *regs) 240 struct megasas_register_set __iomem *regs)
233{ 241{
242 unsigned long flags;
243 spin_lock_irqsave(&instance->hba_lock, flags);
234 writel((frame_phys_addr >> 3)|(frame_count), 244 writel((frame_phys_addr >> 3)|(frame_count),
235 &(regs)->inbound_queue_port); 245 &(regs)->inbound_queue_port);
246 spin_unlock_irqrestore(&instance->hba_lock, flags);
247}
248
249/**
250 * megasas_adp_reset_xscale - For controller reset
251 * @regs: MFI register set
252 */
253static int
254megasas_adp_reset_xscale(struct megasas_instance *instance,
255 struct megasas_register_set __iomem *regs)
256{
257 u32 i;
258 u32 pcidata;
259 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
260
261 for (i = 0; i < 3; i++)
262 msleep(1000); /* sleep for 3 secs */
263 pcidata = 0;
264 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
265 printk(KERN_NOTICE "pcidata = %x\n", pcidata);
266 if (pcidata & 0x2) {
267 printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
268 pcidata &= ~0x2;
269 pci_write_config_dword(instance->pdev,
270 MFI_1068_PCSR_OFFSET, pcidata);
271
272 for (i = 0; i < 2; i++)
273 msleep(1000); /* need to wait 2 secs again */
274
275 pcidata = 0;
276 pci_read_config_dword(instance->pdev,
277 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
278 printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
279 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
280 printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
281 pcidata = 0;
282 pci_write_config_dword(instance->pdev,
283 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
284 }
285 }
286 return 0;
287}
288
289/**
290 * megasas_check_reset_xscale - For controller reset check
291 * @regs: MFI register set
292 */
293static int
294megasas_check_reset_xscale(struct megasas_instance *instance,
295 struct megasas_register_set __iomem *regs)
296{
297 u32 consumer;
298 consumer = *instance->consumer;
299
300 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
301 (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
302 return 1;
303 }
304 return 0;
236} 305}
237 306
238static struct megasas_instance_template megasas_instance_template_xscale = { 307static struct megasas_instance_template megasas_instance_template_xscale = {
@@ -242,6 +311,8 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
242 .disable_intr = megasas_disable_intr_xscale, 311 .disable_intr = megasas_disable_intr_xscale,
243 .clear_intr = megasas_clear_intr_xscale, 312 .clear_intr = megasas_clear_intr_xscale,
244 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 313 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
314 .adp_reset = megasas_adp_reset_xscale,
315 .check_reset = megasas_check_reset_xscale,
245}; 316};
246 317
247/** 318/**
@@ -263,7 +334,7 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
263{ 334{
264 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 335 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
265 336
266 writel(~0x80000004, &(regs)->outbound_intr_mask); 337 writel(~0x80000000, &(regs)->outbound_intr_mask);
267 338
268 /* Dummy readl to force pci flush */ 339 /* Dummy readl to force pci flush */
269 readl(&regs->outbound_intr_mask); 340 readl(&regs->outbound_intr_mask);
@@ -306,7 +377,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
306 status = readl(&regs->outbound_intr_status); 377 status = readl(&regs->outbound_intr_status);
307 378
308 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 379 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
309 return 1; 380 return 0;
310 } 381 }
311 382
312 /* 383 /*
@@ -317,7 +388,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
317 /* Dummy readl to force pci flush */ 388 /* Dummy readl to force pci flush */
318 readl(&regs->outbound_doorbell_clear); 389 readl(&regs->outbound_doorbell_clear);
319 390
320 return 0; 391 return 1;
321} 392}
322/** 393/**
323 * megasas_fire_cmd_ppc - Sends command to the FW 394 * megasas_fire_cmd_ppc - Sends command to the FW
@@ -331,10 +402,34 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
331 u32 frame_count, 402 u32 frame_count,
332 struct megasas_register_set __iomem *regs) 403 struct megasas_register_set __iomem *regs)
333{ 404{
405 unsigned long flags;
406 spin_lock_irqsave(&instance->hba_lock, flags);
334 writel((frame_phys_addr | (frame_count<<1))|1, 407 writel((frame_phys_addr | (frame_count<<1))|1,
335 &(regs)->inbound_queue_port); 408 &(regs)->inbound_queue_port);
409 spin_unlock_irqrestore(&instance->hba_lock, flags);
410}
411
412/**
413 * megasas_adp_reset_ppc - For controller reset
414 * @regs: MFI register set
415 */
416static int
417megasas_adp_reset_ppc(struct megasas_instance *instance,
418 struct megasas_register_set __iomem *regs)
419{
420 return 0;
336} 421}
337 422
423/**
424 * megasas_check_reset_ppc - For controller reset check
425 * @regs: MFI register set
426 */
427static int
428megasas_check_reset_ppc(struct megasas_instance *instance,
429 struct megasas_register_set __iomem *regs)
430{
431 return 0;
432}
338static struct megasas_instance_template megasas_instance_template_ppc = { 433static struct megasas_instance_template megasas_instance_template_ppc = {
339 434
340 .fire_cmd = megasas_fire_cmd_ppc, 435 .fire_cmd = megasas_fire_cmd_ppc,
@@ -342,6 +437,8 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
342 .disable_intr = megasas_disable_intr_ppc, 437 .disable_intr = megasas_disable_intr_ppc,
343 .clear_intr = megasas_clear_intr_ppc, 438 .clear_intr = megasas_clear_intr_ppc,
344 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 439 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
440 .adp_reset = megasas_adp_reset_ppc,
441 .check_reset = megasas_check_reset_ppc,
345}; 442};
346 443
347/** 444/**
@@ -396,7 +493,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
396 status = readl(&regs->outbound_intr_status); 493 status = readl(&regs->outbound_intr_status);
397 494
398 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { 495 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
399 return 1; 496 return 0;
400 } 497 }
401 498
402 /* 499 /*
@@ -409,7 +506,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
409 */ 506 */
410 readl(&regs->outbound_intr_status); 507 readl(&regs->outbound_intr_status);
411 508
412 return 0; 509 return 1;
413} 510}
414 511
415/** 512/**
@@ -425,11 +522,33 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
425 struct megasas_register_set __iomem *regs) 522 struct megasas_register_set __iomem *regs)
426{ 523{
427 unsigned long flags; 524 unsigned long flags;
428 spin_lock_irqsave(&instance->fire_lock, flags); 525 spin_lock_irqsave(&instance->hba_lock, flags);
429 writel(0, &(regs)->inbound_high_queue_port); 526 writel(0, &(regs)->inbound_high_queue_port);
430 writel((frame_phys_addr | (frame_count<<1))|1, 527 writel((frame_phys_addr | (frame_count<<1))|1,
431 &(regs)->inbound_low_queue_port); 528 &(regs)->inbound_low_queue_port);
432 spin_unlock_irqrestore(&instance->fire_lock, flags); 529 spin_unlock_irqrestore(&instance->hba_lock, flags);
530}
531
532/**
533 * megasas_adp_reset_skinny - For controller reset
534 * @regs: MFI register set
535 */
536static int
537megasas_adp_reset_skinny(struct megasas_instance *instance,
538 struct megasas_register_set __iomem *regs)
539{
540 return 0;
541}
542
543/**
544 * megasas_check_reset_skinny - For controller reset check
545 * @regs: MFI register set
546 */
547static int
548megasas_check_reset_skinny(struct megasas_instance *instance,
549 struct megasas_register_set __iomem *regs)
550{
551 return 0;
433} 552}
434 553
435static struct megasas_instance_template megasas_instance_template_skinny = { 554static struct megasas_instance_template megasas_instance_template_skinny = {
@@ -439,6 +558,8 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
439 .disable_intr = megasas_disable_intr_skinny, 558 .disable_intr = megasas_disable_intr_skinny,
440 .clear_intr = megasas_clear_intr_skinny, 559 .clear_intr = megasas_clear_intr_skinny,
441 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 560 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
561 .adp_reset = megasas_adp_reset_skinny,
562 .check_reset = megasas_check_reset_skinny,
442}; 563};
443 564
444 565
@@ -494,23 +615,29 @@ static int
494megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) 615megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
495{ 616{
496 u32 status; 617 u32 status;
618 u32 mfiStatus = 0;
497 /* 619 /*
498 * Check if it is our interrupt 620 * Check if it is our interrupt
499 */ 621 */
500 status = readl(&regs->outbound_intr_status); 622 status = readl(&regs->outbound_intr_status);
501 623
502 if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) 624 if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
503 return 1; 625 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
626 }
627 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
628 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
629 }
504 630
505 /* 631 /*
506 * Clear the interrupt by writing back the same value 632 * Clear the interrupt by writing back the same value
507 */ 633 */
508 writel(status, &regs->outbound_doorbell_clear); 634 if (mfiStatus)
635 writel(status, &regs->outbound_doorbell_clear);
509 636
510 /* Dummy readl to force pci flush */ 637 /* Dummy readl to force pci flush */
511 readl(&regs->outbound_intr_status); 638 readl(&regs->outbound_intr_status);
512 639
513 return 0; 640 return mfiStatus;
514} 641}
515/** 642/**
516 * megasas_fire_cmd_gen2 - Sends command to the FW 643 * megasas_fire_cmd_gen2 - Sends command to the FW
@@ -524,8 +651,74 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance,
524 u32 frame_count, 651 u32 frame_count,
525 struct megasas_register_set __iomem *regs) 652 struct megasas_register_set __iomem *regs)
526{ 653{
654 unsigned long flags;
655 spin_lock_irqsave(&instance->hba_lock, flags);
527 writel((frame_phys_addr | (frame_count<<1))|1, 656 writel((frame_phys_addr | (frame_count<<1))|1,
528 &(regs)->inbound_queue_port); 657 &(regs)->inbound_queue_port);
658 spin_unlock_irqrestore(&instance->hba_lock, flags);
659}
660
661/**
662 * megasas_adp_reset_gen2 - For controller reset
663 * @regs: MFI register set
664 */
665static int
666megasas_adp_reset_gen2(struct megasas_instance *instance,
667 struct megasas_register_set __iomem *reg_set)
668{
669 u32 retry = 0 ;
670 u32 HostDiag;
671
672 writel(0, &reg_set->seq_offset);
673 writel(4, &reg_set->seq_offset);
674 writel(0xb, &reg_set->seq_offset);
675 writel(2, &reg_set->seq_offset);
676 writel(7, &reg_set->seq_offset);
677 writel(0xd, &reg_set->seq_offset);
678 msleep(1000);
679
680 HostDiag = (u32)readl(&reg_set->host_diag);
681
682 while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
683 msleep(100);
684 HostDiag = (u32)readl(&reg_set->host_diag);
685 printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
686 retry, HostDiag);
687
688 if (retry++ >= 100)
689 return 1;
690
691 }
692
693 printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
694
695 writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
696
697 ssleep(10);
698
699 HostDiag = (u32)readl(&reg_set->host_diag);
700 while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
701 msleep(100);
702 HostDiag = (u32)readl(&reg_set->host_diag);
703 printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
704 retry, HostDiag);
705
706 if (retry++ >= 1000)
707 return 1;
708
709 }
710 return 0;
711}
712
713/**
714 * megasas_check_reset_gen2 - For controller reset check
715 * @regs: MFI register set
716 */
717static int
718megasas_check_reset_gen2(struct megasas_instance *instance,
719 struct megasas_register_set __iomem *regs)
720{
721 return 0;
529} 722}
530 723
531static struct megasas_instance_template megasas_instance_template_gen2 = { 724static struct megasas_instance_template megasas_instance_template_gen2 = {
@@ -535,11 +728,13 @@ static struct megasas_instance_template megasas_instance_template_gen2 = {
535 .disable_intr = megasas_disable_intr_gen2, 728 .disable_intr = megasas_disable_intr_gen2,
536 .clear_intr = megasas_clear_intr_gen2, 729 .clear_intr = megasas_clear_intr_gen2,
537 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 730 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
731 .adp_reset = megasas_adp_reset_gen2,
732 .check_reset = megasas_check_reset_gen2,
538}; 733};
539 734
540/** 735/**
541* This is the end of set of functions & definitions 736* This is the end of set of functions & definitions
542* specific to ppc (deviceid : 0x60) controllers 737* specific to gen2 (deviceid : 0x78, 0x79) controllers
543*/ 738*/
544 739
545/** 740/**
@@ -598,8 +793,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
598 instance->instancet->fire_cmd(instance, 793 instance->instancet->fire_cmd(instance,
599 cmd->frame_phys_addr, 0, instance->reg_set); 794 cmd->frame_phys_addr, 0, instance->reg_set);
600 795
601 wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), 796 wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
602 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
603 797
604 return 0; 798 return 0;
605} 799}
@@ -647,8 +841,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
647 /* 841 /*
648 * Wait for this cmd to complete 842 * Wait for this cmd to complete
649 */ 843 */
650 wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF), 844 wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
651 MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); 845 cmd->sync_cmd = 0;
652 846
653 megasas_return_cmd(instance, cmd); 847 megasas_return_cmd(instance, cmd);
654 return 0; 848 return 0;
@@ -1130,14 +1324,22 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
1130 u32 frame_count; 1324 u32 frame_count;
1131 struct megasas_cmd *cmd; 1325 struct megasas_cmd *cmd;
1132 struct megasas_instance *instance; 1326 struct megasas_instance *instance;
1327 unsigned long flags;
1133 1328
1134 instance = (struct megasas_instance *) 1329 instance = (struct megasas_instance *)
1135 scmd->device->host->hostdata; 1330 scmd->device->host->hostdata;
1136 1331
1137 /* Don't process if we have already declared adapter dead */ 1332 if (instance->issuepend_done == 0)
1138 if (instance->hw_crit_error)
1139 return SCSI_MLQUEUE_HOST_BUSY; 1333 return SCSI_MLQUEUE_HOST_BUSY;
1140 1334
1335 spin_lock_irqsave(&instance->hba_lock, flags);
1336 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
1337 spin_unlock_irqrestore(&instance->hba_lock, flags);
1338 return SCSI_MLQUEUE_HOST_BUSY;
1339 }
1340
1341 spin_unlock_irqrestore(&instance->hba_lock, flags);
1342
1141 scmd->scsi_done = done; 1343 scmd->scsi_done = done;
1142 scmd->result = 0; 1344 scmd->result = 0;
1143 1345
@@ -1273,6 +1475,18 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1273 return 0; 1475 return 0;
1274} 1476}
1275 1477
1478static void megaraid_sas_kill_hba(struct megasas_instance *instance)
1479{
1480 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1481 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
1482 writel(MFI_STOP_ADP,
1483 &instance->reg_set->reserved_0[0]);
1484 } else {
1485 writel(MFI_STOP_ADP,
1486 &instance->reg_set->inbound_doorbell);
1487 }
1488}
1489
1276/** 1490/**
1277 * megasas_complete_cmd_dpc - Returns FW's controller structure 1491 * megasas_complete_cmd_dpc - Returns FW's controller structure
1278 * @instance_addr: Address of adapter soft state 1492 * @instance_addr: Address of adapter soft state
@@ -1290,7 +1504,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1290 unsigned long flags; 1504 unsigned long flags;
1291 1505
1292 /* If we have already declared adapter dead, donot complete cmds */ 1506 /* If we have already declared adapter dead, donot complete cmds */
1293 if (instance->hw_crit_error) 1507 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
1294 return; 1508 return;
1295 1509
1296 spin_lock_irqsave(&instance->completion_lock, flags); 1510 spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1300,6 +1514,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1300 1514
1301 while (consumer != producer) { 1515 while (consumer != producer) {
1302 context = instance->reply_queue[consumer]; 1516 context = instance->reply_queue[consumer];
1517 if (context >= instance->max_fw_cmds) {
1518 printk(KERN_ERR "Unexpected context value %x\n",
1519 context);
1520 BUG();
1521 }
1303 1522
1304 cmd = instance->cmd_list[context]; 1523 cmd = instance->cmd_list[context];
1305 1524
@@ -1349,7 +1568,76 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1349static int megasas_wait_for_outstanding(struct megasas_instance *instance) 1568static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1350{ 1569{
1351 int i; 1570 int i;
1571 u32 reset_index;
1352 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 1572 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
1573 u8 adprecovery;
1574 unsigned long flags;
1575 struct list_head clist_local;
1576 struct megasas_cmd *reset_cmd;
1577
1578 spin_lock_irqsave(&instance->hba_lock, flags);
1579 adprecovery = instance->adprecovery;
1580 spin_unlock_irqrestore(&instance->hba_lock, flags);
1581
1582 if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
1583
1584 INIT_LIST_HEAD(&clist_local);
1585 spin_lock_irqsave(&instance->hba_lock, flags);
1586 list_splice_init(&instance->internal_reset_pending_q,
1587 &clist_local);
1588 spin_unlock_irqrestore(&instance->hba_lock, flags);
1589
1590 printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
1591 for (i = 0; i < wait_time; i++) {
1592 msleep(1000);
1593 spin_lock_irqsave(&instance->hba_lock, flags);
1594 adprecovery = instance->adprecovery;
1595 spin_unlock_irqrestore(&instance->hba_lock, flags);
1596 if (adprecovery == MEGASAS_HBA_OPERATIONAL)
1597 break;
1598 }
1599
1600 if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
1601 printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
1602 spin_lock_irqsave(&instance->hba_lock, flags);
1603 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
1604 spin_unlock_irqrestore(&instance->hba_lock, flags);
1605 return FAILED;
1606 }
1607
1608 reset_index = 0;
1609 while (!list_empty(&clist_local)) {
1610 reset_cmd = list_entry((&clist_local)->next,
1611 struct megasas_cmd, list);
1612 list_del_init(&reset_cmd->list);
1613 if (reset_cmd->scmd) {
1614 reset_cmd->scmd->result = DID_RESET << 16;
1615 printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n",
1616 reset_index, reset_cmd,
1617 reset_cmd->scmd->cmnd[0],
1618 reset_cmd->scmd->serial_number);
1619
1620 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
1621 megasas_return_cmd(instance, reset_cmd);
1622 } else if (reset_cmd->sync_cmd) {
1623 printk(KERN_NOTICE "megasas:%p synch cmds"
1624 "reset queue\n",
1625 reset_cmd);
1626
1627 reset_cmd->cmd_status = ENODATA;
1628 instance->instancet->fire_cmd(instance,
1629 reset_cmd->frame_phys_addr,
1630 0, instance->reg_set);
1631 } else {
1632 printk(KERN_NOTICE "megasas: %p unexpected"
1633 "cmds lst\n",
1634 reset_cmd);
1635 }
1636 reset_index++;
1637 }
1638
1639 return SUCCESS;
1640 }
1353 1641
1354 for (i = 0; i < wait_time; i++) { 1642 for (i = 0; i < wait_time; i++) {
1355 1643
@@ -1372,6 +1660,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1372 } 1660 }
1373 1661
1374 if (atomic_read(&instance->fw_outstanding)) { 1662 if (atomic_read(&instance->fw_outstanding)) {
1663 printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
1375 /* 1664 /*
1376 * Send signal to FW to stop processing any pending cmds. 1665 * Send signal to FW to stop processing any pending cmds.
1377 * The controller will be taken offline by the OS now. 1666 * The controller will be taken offline by the OS now.
@@ -1387,10 +1676,14 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1387 &instance->reg_set->inbound_doorbell); 1676 &instance->reg_set->inbound_doorbell);
1388 } 1677 }
1389 megasas_dump_pending_frames(instance); 1678 megasas_dump_pending_frames(instance);
1390 instance->hw_crit_error = 1; 1679 spin_lock_irqsave(&instance->hba_lock, flags);
1680 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
1681 spin_unlock_irqrestore(&instance->hba_lock, flags);
1391 return FAILED; 1682 return FAILED;
1392 } 1683 }
1393 1684
1685 printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
1686
1394 return SUCCESS; 1687 return SUCCESS;
1395} 1688}
1396 1689
@@ -1412,7 +1705,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1412 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", 1705 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
1413 scmd->serial_number, scmd->cmnd[0], scmd->retries); 1706 scmd->serial_number, scmd->cmnd[0], scmd->retries);
1414 1707
1415 if (instance->hw_crit_error) { 1708 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1416 printk(KERN_ERR "megasas: cannot recover from previous reset " 1709 printk(KERN_ERR "megasas: cannot recover from previous reset "
1417 "failures\n"); 1710 "failures\n");
1418 return FAILED; 1711 return FAILED;
@@ -1567,7 +1860,8 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
1567 instance->aen_cmd = NULL; 1860 instance->aen_cmd = NULL;
1568 megasas_return_cmd(instance, cmd); 1861 megasas_return_cmd(instance, cmd);
1569 1862
1570 if (instance->unload == 0) { 1863 if ((instance->unload == 0) &&
1864 ((instance->issuepend_done == 1))) {
1571 struct megasas_aen_event *ev; 1865 struct megasas_aen_event *ev;
1572 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 1866 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
1573 if (!ev) { 1867 if (!ev) {
@@ -1662,6 +1956,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1662 struct megasas_header *hdr = &cmd->frame->hdr; 1956 struct megasas_header *hdr = &cmd->frame->hdr;
1663 unsigned long flags; 1957 unsigned long flags;
1664 1958
1959 /* flag for the retry reset */
1960 cmd->retry_for_fw_reset = 0;
1961
1665 if (cmd->scmd) 1962 if (cmd->scmd)
1666 cmd->scmd->SCp.ptr = NULL; 1963 cmd->scmd->SCp.ptr = NULL;
1667 1964
@@ -1782,39 +2079,301 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1782} 2079}
1783 2080
1784/** 2081/**
2082 * megasas_issue_pending_cmds_again - issue all pending cmds
2083 * in FW again because of the fw reset
2084 * @instance: Adapter soft state
2085 */
2086static inline void
2087megasas_issue_pending_cmds_again(struct megasas_instance *instance)
2088{
2089 struct megasas_cmd *cmd;
2090 struct list_head clist_local;
2091 union megasas_evt_class_locale class_locale;
2092 unsigned long flags;
2093 u32 seq_num;
2094
2095 INIT_LIST_HEAD(&clist_local);
2096 spin_lock_irqsave(&instance->hba_lock, flags);
2097 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
2098 spin_unlock_irqrestore(&instance->hba_lock, flags);
2099
2100 while (!list_empty(&clist_local)) {
2101 cmd = list_entry((&clist_local)->next,
2102 struct megasas_cmd, list);
2103 list_del_init(&cmd->list);
2104
2105 if (cmd->sync_cmd || cmd->scmd) {
2106 printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
2107 "detected to be pending while HBA reset.\n",
2108 cmd, cmd->scmd, cmd->sync_cmd);
2109
2110 cmd->retry_for_fw_reset++;
2111
2112 if (cmd->retry_for_fw_reset == 3) {
2113 printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
2114 "was tried multiple times during reset."
2115 "Shutting down the HBA\n",
2116 cmd, cmd->scmd, cmd->sync_cmd);
2117 megaraid_sas_kill_hba(instance);
2118
2119 instance->adprecovery =
2120 MEGASAS_HW_CRITICAL_ERROR;
2121 return;
2122 }
2123 }
2124
2125 if (cmd->sync_cmd == 1) {
2126 if (cmd->scmd) {
2127 printk(KERN_NOTICE "megaraid_sas: unexpected"
2128 "cmd attached to internal command!\n");
2129 }
2130 printk(KERN_NOTICE "megasas: %p synchronous cmd"
2131 "on the internal reset queue,"
2132 "issue it again.\n", cmd);
2133 cmd->cmd_status = ENODATA;
2134 instance->instancet->fire_cmd(instance,
2135 cmd->frame_phys_addr ,
2136 0, instance->reg_set);
2137 } else if (cmd->scmd) {
2138 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx"
2139 "detected on the internal queue, issue again.\n",
2140 cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number);
2141
2142 atomic_inc(&instance->fw_outstanding);
2143 instance->instancet->fire_cmd(instance,
2144 cmd->frame_phys_addr,
2145 cmd->frame_count-1, instance->reg_set);
2146 } else {
2147 printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
2148 "internal reset defer list while re-issue!!\n",
2149 cmd);
2150 }
2151 }
2152
2153 if (instance->aen_cmd) {
2154 printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
2155 megasas_return_cmd(instance, instance->aen_cmd);
2156
2157 instance->aen_cmd = NULL;
2158 }
2159
2160 /*
2161 * Initiate AEN (Asynchronous Event Notification)
2162 */
2163 seq_num = instance->last_seq_num;
2164 class_locale.members.reserved = 0;
2165 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2166 class_locale.members.class = MR_EVT_CLASS_DEBUG;
2167
2168 megasas_register_aen(instance, seq_num, class_locale.word);
2169}
2170
2171/**
2172 * Move the internal reset pending commands to a deferred queue.
2173 *
2174 * We move the commands pending at internal reset time to a
2175 * pending queue. This queue would be flushed after successful
2176 * completion of the internal reset sequence. if the internal reset
2177 * did not complete in time, the kernel reset handler would flush
2178 * these commands.
2179 **/
2180static void
2181megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
2182{
2183 struct megasas_cmd *cmd;
2184 int i;
2185 u32 max_cmd = instance->max_fw_cmds;
2186 u32 defer_index;
2187 unsigned long flags;
2188
2189 defer_index = 0;
2190 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
2191 for (i = 0; i < max_cmd; i++) {
2192 cmd = instance->cmd_list[i];
2193 if (cmd->sync_cmd == 1 || cmd->scmd) {
2194 printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
2195 "on the defer queue as internal\n",
2196 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
2197
2198 if (!list_empty(&cmd->list)) {
2199 printk(KERN_NOTICE "megaraid_sas: ERROR while"
2200 " moving this cmd:%p, %d %p, it was"
2201 "discovered on some list?\n",
2202 cmd, cmd->sync_cmd, cmd->scmd);
2203
2204 list_del_init(&cmd->list);
2205 }
2206 defer_index++;
2207 list_add_tail(&cmd->list,
2208 &instance->internal_reset_pending_q);
2209 }
2210 }
2211 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
2212}
2213
2214
2215static void
2216process_fw_state_change_wq(struct work_struct *work)
2217{
2218 struct megasas_instance *instance =
2219 container_of(work, struct megasas_instance, work_init);
2220 u32 wait;
2221 unsigned long flags;
2222
2223 if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
2224 printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
2225 instance->adprecovery);
2226 return ;
2227 }
2228
2229 if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
2230 printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
2231 "state, restarting it...\n");
2232
2233 instance->instancet->disable_intr(instance->reg_set);
2234 atomic_set(&instance->fw_outstanding, 0);
2235
2236 atomic_set(&instance->fw_reset_no_pci_access, 1);
2237 instance->instancet->adp_reset(instance, instance->reg_set);
2238 atomic_set(&instance->fw_reset_no_pci_access, 0 );
2239
2240 printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
2241 "initiating next stage...\n");
2242
2243 printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
2244 "state 2 starting...\n");
2245
2246 /*waitting for about 20 second before start the second init*/
2247 for (wait = 0; wait < 30; wait++) {
2248 msleep(1000);
2249 }
2250
2251 if (megasas_transition_to_ready(instance)) {
2252 printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
2253
2254 megaraid_sas_kill_hba(instance);
2255 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
2256 return ;
2257 }
2258
2259 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2260 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2261 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
2262 ) {
2263 *instance->consumer = *instance->producer;
2264 } else {
2265 *instance->consumer = 0;
2266 *instance->producer = 0;
2267 }
2268
2269 megasas_issue_init_mfi(instance);
2270
2271 spin_lock_irqsave(&instance->hba_lock, flags);
2272 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2273 spin_unlock_irqrestore(&instance->hba_lock, flags);
2274 instance->instancet->enable_intr(instance->reg_set);
2275
2276 megasas_issue_pending_cmds_again(instance);
2277 instance->issuepend_done = 1;
2278 }
2279 return ;
2280}
2281
2282/**
1785 * megasas_deplete_reply_queue - Processes all completed commands 2283 * megasas_deplete_reply_queue - Processes all completed commands
1786 * @instance: Adapter soft state 2284 * @instance: Adapter soft state
1787 * @alt_status: Alternate status to be returned to 2285 * @alt_status: Alternate status to be returned to
1788 * SCSI mid-layer instead of the status 2286 * SCSI mid-layer instead of the status
1789 * returned by the FW 2287 * returned by the FW
2288 * Note: this must be called with hba lock held
1790 */ 2289 */
1791static int 2290static int
1792megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) 2291megasas_deplete_reply_queue(struct megasas_instance *instance,
2292 u8 alt_status)
1793{ 2293{
1794 /* 2294 u32 mfiStatus;
1795 * Check if it is our interrupt 2295 u32 fw_state;
1796 * Clear the interrupt 2296
1797 */ 2297 if ((mfiStatus = instance->instancet->check_reset(instance,
1798 if(instance->instancet->clear_intr(instance->reg_set)) 2298 instance->reg_set)) == 1) {
2299 return IRQ_HANDLED;
2300 }
2301
2302 if ((mfiStatus = instance->instancet->clear_intr(
2303 instance->reg_set)
2304 ) == 0) {
1799 return IRQ_NONE; 2305 return IRQ_NONE;
2306 }
2307
2308 instance->mfiStatus = mfiStatus;
2309
2310 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
2311 fw_state = instance->instancet->read_fw_status_reg(
2312 instance->reg_set) & MFI_STATE_MASK;
2313
2314 if (fw_state != MFI_STATE_FAULT) {
2315 printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
2316 fw_state);
2317 }
2318
2319 if ((fw_state == MFI_STATE_FAULT) &&
2320 (instance->disableOnlineCtrlReset == 0)) {
2321 printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
2322
2323 if ((instance->pdev->device ==
2324 PCI_DEVICE_ID_LSI_SAS1064R) ||
2325 (instance->pdev->device ==
2326 PCI_DEVICE_ID_DELL_PERC5) ||
2327 (instance->pdev->device ==
2328 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2329
2330 *instance->consumer =
2331 MEGASAS_ADPRESET_INPROG_SIGN;
2332 }
2333
2334
2335 instance->instancet->disable_intr(instance->reg_set);
2336 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2337 instance->issuepend_done = 0;
2338
2339 atomic_set(&instance->fw_outstanding, 0);
2340 megasas_internal_reset_defer_cmds(instance);
2341
2342 printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
2343 fw_state, instance->adprecovery);
2344
2345 schedule_work(&instance->work_init);
2346 return IRQ_HANDLED;
2347
2348 } else {
2349 printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
2350 fw_state, instance->disableOnlineCtrlReset);
2351 }
2352 }
1800 2353
1801 if (instance->hw_crit_error)
1802 goto out_done;
1803 /*
1804 * Schedule the tasklet for cmd completion
1805 */
1806 tasklet_schedule(&instance->isr_tasklet); 2354 tasklet_schedule(&instance->isr_tasklet);
1807out_done:
1808 return IRQ_HANDLED; 2355 return IRQ_HANDLED;
1809} 2356}
1810
1811/** 2357/**
1812 * megasas_isr - isr entry point 2358 * megasas_isr - isr entry point
1813 */ 2359 */
1814static irqreturn_t megasas_isr(int irq, void *devp) 2360static irqreturn_t megasas_isr(int irq, void *devp)
1815{ 2361{
1816 return megasas_deplete_reply_queue((struct megasas_instance *)devp, 2362 struct megasas_instance *instance;
1817 DID_OK); 2363 unsigned long flags;
2364 irqreturn_t rc;
2365
2366 if (atomic_read(
2367 &(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
2368 return IRQ_HANDLED;
2369
2370 instance = (struct megasas_instance *)devp;
2371
2372 spin_lock_irqsave(&instance->hba_lock, flags);
2373 rc = megasas_deplete_reply_queue(instance, DID_OK);
2374 spin_unlock_irqrestore(&instance->hba_lock, flags);
2375
2376 return rc;
1818} 2377}
1819 2378
1820/** 2379/**
@@ -1971,7 +2530,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1971 "in %d secs\n", fw_state, max_wait); 2530 "in %d secs\n", fw_state, max_wait);
1972 return -ENODEV; 2531 return -ENODEV;
1973 } 2532 }
1974 }; 2533 }
1975 printk(KERN_INFO "megasas: FW now in Ready state\n"); 2534 printk(KERN_INFO "megasas: FW now in Ready state\n");
1976 2535
1977 return 0; 2536 return 0;
@@ -2053,6 +2612,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2053 */ 2612 */
2054 sgl_sz = sge_sz * instance->max_num_sge; 2613 sgl_sz = sge_sz * instance->max_num_sge;
2055 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; 2614 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
2615 frame_count = 15;
2056 2616
2057 /* 2617 /*
2058 * We need one extra frame for the MFI command 2618 * We need one extra frame for the MFI command
@@ -2200,6 +2760,7 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
2200 cmd = instance->cmd_list[i]; 2760 cmd = instance->cmd_list[i];
2201 memset(cmd, 0, sizeof(struct megasas_cmd)); 2761 memset(cmd, 0, sizeof(struct megasas_cmd));
2202 cmd->index = i; 2762 cmd->index = i;
2763 cmd->scmd = NULL;
2203 cmd->instance = instance; 2764 cmd->instance = instance;
2204 2765
2205 list_add_tail(&cmd->list, &instance->cmd_pool); 2766 list_add_tail(&cmd->list, &instance->cmd_pool);
@@ -2367,7 +2928,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
2367 2928
2368 /* the following function will get the instance PD LIST */ 2929 /* the following function will get the instance PD LIST */
2369 2930
2370 if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) { 2931 if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
2371 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2932 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2372 2933
2373 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { 2934 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
@@ -2681,6 +3242,21 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2681 if (megasas_issue_init_mfi(instance)) 3242 if (megasas_issue_init_mfi(instance))
2682 goto fail_fw_init; 3243 goto fail_fw_init;
2683 3244
3245 instance->fw_support_ieee = 0;
3246 instance->fw_support_ieee =
3247 (instance->instancet->read_fw_status_reg(reg_set) &
3248 0x04000000);
3249
3250 printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
3251 instance->fw_support_ieee);
3252
3253 if (instance->fw_support_ieee)
3254 instance->flag_ieee = 1;
3255
3256 /** for passthrough
3257 * the following function will get the PD LIST.
3258 */
3259
2684 memset(instance->pd_list, 0 , 3260 memset(instance->pd_list, 0 ,
2685 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 3261 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
2686 megasas_get_pd_list(instance); 3262 megasas_get_pd_list(instance);
@@ -2707,6 +3283,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2707 max_sectors_2 = ctrl_info->max_request_size; 3283 max_sectors_2 = ctrl_info->max_request_size;
2708 3284
2709 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 3285 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
3286 instance->disableOnlineCtrlReset =
3287 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2710 } 3288 }
2711 3289
2712 instance->max_sectors_per_req = instance->max_num_sge * 3290 instance->max_sectors_per_req = instance->max_num_sge *
@@ -2928,6 +3506,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
2928 dcmd->flags = MFI_FRAME_DIR_READ; 3506 dcmd->flags = MFI_FRAME_DIR_READ;
2929 dcmd->timeout = 0; 3507 dcmd->timeout = 0;
2930 dcmd->pad_0 = 0; 3508 dcmd->pad_0 = 0;
3509 instance->last_seq_num = seq_num;
2931 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 3510 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
2932 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 3511 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
2933 dcmd->mbox.w[0] = seq_num; 3512 dcmd->mbox.w[0] = seq_num;
@@ -3096,6 +3675,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3096 3675
3097 instance = (struct megasas_instance *)host->hostdata; 3676 instance = (struct megasas_instance *)host->hostdata;
3098 memset(instance, 0, sizeof(*instance)); 3677 memset(instance, 0, sizeof(*instance));
3678 atomic_set( &instance->fw_reset_no_pci_access, 0 );
3099 3679
3100 instance->producer = pci_alloc_consistent(pdev, sizeof(u32), 3680 instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
3101 &instance->producer_h); 3681 &instance->producer_h);
@@ -3113,6 +3693,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3113 megasas_poll_wait_aen = 0; 3693 megasas_poll_wait_aen = 0;
3114 instance->flag_ieee = 0; 3694 instance->flag_ieee = 0;
3115 instance->ev = NULL; 3695 instance->ev = NULL;
3696 instance->issuepend_done = 1;
3697 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
3698 megasas_poll_wait_aen = 0;
3116 3699
3117 instance->evt_detail = pci_alloc_consistent(pdev, 3700 instance->evt_detail = pci_alloc_consistent(pdev,
3118 sizeof(struct 3701 sizeof(struct
@@ -3129,6 +3712,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3129 * Initialize locks and queues 3712 * Initialize locks and queues
3130 */ 3713 */
3131 INIT_LIST_HEAD(&instance->cmd_pool); 3714 INIT_LIST_HEAD(&instance->cmd_pool);
3715 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
3132 3716
3133 atomic_set(&instance->fw_outstanding,0); 3717 atomic_set(&instance->fw_outstanding,0);
3134 3718
@@ -3136,7 +3720,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3136 init_waitqueue_head(&instance->abort_cmd_wait_q); 3720 init_waitqueue_head(&instance->abort_cmd_wait_q);
3137 3721
3138 spin_lock_init(&instance->cmd_pool_lock); 3722 spin_lock_init(&instance->cmd_pool_lock);
3139 spin_lock_init(&instance->fire_lock); 3723 spin_lock_init(&instance->hba_lock);
3140 spin_lock_init(&instance->completion_lock); 3724 spin_lock_init(&instance->completion_lock);
3141 spin_lock_init(&poll_aen_lock); 3725 spin_lock_init(&poll_aen_lock);
3142 3726
@@ -3161,6 +3745,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3161 instance->flag = 0; 3745 instance->flag = 0;
3162 instance->unload = 1; 3746 instance->unload = 1;
3163 instance->last_time = 0; 3747 instance->last_time = 0;
3748 instance->disableOnlineCtrlReset = 1;
3749
3750 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
3164 3751
3165 /* 3752 /*
3166 * Initialize MFI Firmware 3753 * Initialize MFI Firmware
@@ -3252,6 +3839,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)
3252 struct megasas_cmd *cmd; 3839 struct megasas_cmd *cmd;
3253 struct megasas_dcmd_frame *dcmd; 3840 struct megasas_dcmd_frame *dcmd;
3254 3841
3842 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
3843 return;
3844
3255 cmd = megasas_get_cmd(instance); 3845 cmd = megasas_get_cmd(instance);
3256 3846
3257 if (!cmd) 3847 if (!cmd)
@@ -3289,6 +3879,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
3289 struct megasas_cmd *cmd; 3879 struct megasas_cmd *cmd;
3290 struct megasas_dcmd_frame *dcmd; 3880 struct megasas_dcmd_frame *dcmd;
3291 3881
3882 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
3883 return;
3884
3292 cmd = megasas_get_cmd(instance); 3885 cmd = megasas_get_cmd(instance);
3293 3886
3294 if (!cmd) 3887 if (!cmd)
@@ -3779,6 +4372,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3779 struct megasas_iocpacket *ioc; 4372 struct megasas_iocpacket *ioc;
3780 struct megasas_instance *instance; 4373 struct megasas_instance *instance;
3781 int error; 4374 int error;
4375 int i;
4376 unsigned long flags;
4377 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
3782 4378
3783 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 4379 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
3784 if (!ioc) 4380 if (!ioc)
@@ -3795,8 +4391,8 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3795 goto out_kfree_ioc; 4391 goto out_kfree_ioc;
3796 } 4392 }
3797 4393
3798 if (instance->hw_crit_error == 1) { 4394 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
3799 printk(KERN_DEBUG "Controller in Crit ERROR\n"); 4395 printk(KERN_ERR "Controller in crit error\n");
3800 error = -ENODEV; 4396 error = -ENODEV;
3801 goto out_kfree_ioc; 4397 goto out_kfree_ioc;
3802 } 4398 }
@@ -3813,6 +4409,35 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
3813 error = -ERESTARTSYS; 4409 error = -ERESTARTSYS;
3814 goto out_kfree_ioc; 4410 goto out_kfree_ioc;
3815 } 4411 }
4412
4413 for (i = 0; i < wait_time; i++) {
4414
4415 spin_lock_irqsave(&instance->hba_lock, flags);
4416 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
4417 spin_unlock_irqrestore(&instance->hba_lock, flags);
4418 break;
4419 }
4420 spin_unlock_irqrestore(&instance->hba_lock, flags);
4421
4422 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4423 printk(KERN_NOTICE "megasas: waiting"
4424 "for controller reset to finish\n");
4425 }
4426
4427 msleep(1000);
4428 }
4429
4430 spin_lock_irqsave(&instance->hba_lock, flags);
4431 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
4432 spin_unlock_irqrestore(&instance->hba_lock, flags);
4433
4434 printk(KERN_ERR "megaraid_sas: timed out while"
4435 "waiting for HBA to recover\n");
4436 error = -ENODEV;
4437 goto out_kfree_ioc;
4438 }
4439 spin_unlock_irqrestore(&instance->hba_lock, flags);
4440
3816 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 4441 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
3817 up(&instance->ioctl_sem); 4442 up(&instance->ioctl_sem);
3818 4443
@@ -3826,6 +4451,9 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
3826 struct megasas_instance *instance; 4451 struct megasas_instance *instance;
3827 struct megasas_aen aen; 4452 struct megasas_aen aen;
3828 int error; 4453 int error;
4454 int i;
4455 unsigned long flags;
4456 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
3829 4457
3830 if (file->private_data != file) { 4458 if (file->private_data != file) {
3831 printk(KERN_DEBUG "megasas: fasync_helper was not " 4459 printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -3841,14 +4469,42 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
3841 if (!instance) 4469 if (!instance)
3842 return -ENODEV; 4470 return -ENODEV;
3843 4471
3844 if (instance->hw_crit_error == 1) { 4472 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
3845 error = -ENODEV; 4473 return -ENODEV;
3846 } 4474 }
3847 4475
3848 if (instance->unload == 1) { 4476 if (instance->unload == 1) {
3849 return -ENODEV; 4477 return -ENODEV;
3850 } 4478 }
3851 4479
4480 for (i = 0; i < wait_time; i++) {
4481
4482 spin_lock_irqsave(&instance->hba_lock, flags);
4483 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
4484 spin_unlock_irqrestore(&instance->hba_lock,
4485 flags);
4486 break;
4487 }
4488
4489 spin_unlock_irqrestore(&instance->hba_lock, flags);
4490
4491 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4492 printk(KERN_NOTICE "megasas: waiting for"
4493 "controller reset to finish\n");
4494 }
4495
4496 msleep(1000);
4497 }
4498
4499 spin_lock_irqsave(&instance->hba_lock, flags);
4500 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
4501 spin_unlock_irqrestore(&instance->hba_lock, flags);
4502 printk(KERN_ERR "megaraid_sas: timed out while waiting"
4503 "for HBA to recover.\n");
4504 return -ENODEV;
4505 }
4506 spin_unlock_irqrestore(&instance->hba_lock, flags);
4507
3852 mutex_lock(&instance->aen_mutex); 4508 mutex_lock(&instance->aen_mutex);
3853 error = megasas_register_aen(instance, aen.seq_num, 4509 error = megasas_register_aen(instance, aen.seq_num,
3854 aen.class_locale_word); 4510 aen.class_locale_word);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 9d8b6bf605aa..16a4f68a34b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -60,6 +60,7 @@
60#define MFI_STATE_READY 0xB0000000 60#define MFI_STATE_READY 0xB0000000
61#define MFI_STATE_OPERATIONAL 0xC0000000 61#define MFI_STATE_OPERATIONAL 0xC0000000
62#define MFI_STATE_FAULT 0xF0000000 62#define MFI_STATE_FAULT 0xF0000000
63#define MFI_RESET_REQUIRED 0x00000001
63 64
64#define MEGAMFI_FRAME_SIZE 64 65#define MEGAMFI_FRAME_SIZE 64
65 66
@@ -73,6 +74,12 @@
73 * HOTPLUG : Resume from Hotplug 74 * HOTPLUG : Resume from Hotplug
74 * MFI_STOP_ADP : Send signal to FW to stop processing 75 * MFI_STOP_ADP : Send signal to FW to stop processing
75 */ 76 */
77#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
78#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
79#define DIAG_WRITE_ENABLE (0x00000080)
80#define DIAG_RESET_ADAPTER (0x00000004)
81
82#define MFI_ADP_RESET 0x00000040
76#define MFI_INIT_ABORT 0x00000001 83#define MFI_INIT_ABORT 0x00000001
77#define MFI_INIT_READY 0x00000002 84#define MFI_INIT_READY 0x00000002
78#define MFI_INIT_MFIMODE 0x00000004 85#define MFI_INIT_MFIMODE 0x00000004
@@ -402,8 +409,40 @@ struct megasas_ctrl_prop {
402 u16 ecc_bucket_leak_rate; 409 u16 ecc_bucket_leak_rate;
403 u8 restore_hotspare_on_insertion; 410 u8 restore_hotspare_on_insertion;
404 u8 expose_encl_devices; 411 u8 expose_encl_devices;
405 u8 reserved[38]; 412 u8 maintainPdFailHistory;
413 u8 disallowHostRequestReordering;
414 u8 abortCCOnError;
415 u8 loadBalanceMode;
416 u8 disableAutoDetectBackplane;
417
418 u8 snapVDSpace;
419
420 /*
421 * Add properties that can be controlled by
422 * a bit in the following structure.
423 */
406 424
425 struct {
426 u32 copyBackDisabled : 1;
427 u32 SMARTerEnabled : 1;
428 u32 prCorrectUnconfiguredAreas : 1;
429 u32 useFdeOnly : 1;
430 u32 disableNCQ : 1;
431 u32 SSDSMARTerEnabled : 1;
432 u32 SSDPatrolReadEnabled : 1;
433 u32 enableSpinDownUnconfigured : 1;
434 u32 autoEnhancedImport : 1;
435 u32 enableSecretKeyControl : 1;
436 u32 disableOnlineCtrlReset : 1;
437 u32 allowBootWithPinnedCache : 1;
438 u32 disableSpinDownHS : 1;
439 u32 enableJBOD : 1;
440 u32 reserved :18;
441 } OnOffProperties;
442 u8 autoSnapVDSpace;
443 u8 viewSpace;
444 u16 spinDownTime;
445 u8 reserved[24];
407} __packed; 446} __packed;
408 447
409/* 448/*
@@ -704,6 +743,12 @@ struct megasas_ctrl_info {
704 */ 743 */
705#define IS_DMA64 (sizeof(dma_addr_t) == 8) 744#define IS_DMA64 (sizeof(dma_addr_t) == 8)
706 745
746#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
747
748#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
749#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
750#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
751
707#define MFI_OB_INTR_STATUS_MASK 0x00000002 752#define MFI_OB_INTR_STATUS_MASK 0x00000002
708#define MFI_POLL_TIMEOUT_SECS 60 753#define MFI_POLL_TIMEOUT_SECS 60
709#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) 754#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
@@ -714,6 +759,9 @@ struct megasas_ctrl_info {
714#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 759#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
715#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) 760#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
716 761
762#define MFI_1068_PCSR_OFFSET 0x84
763#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
764#define MFI_1068_FW_READY 0xDDDD0000
717/* 765/*
718* register set for both 1068 and 1078 controllers 766* register set for both 1068 and 1078 controllers
719* structure extended for 1078 registers 767* structure extended for 1078 registers
@@ -755,8 +803,10 @@ struct megasas_register_set {
755 u32 inbound_high_queue_port ; /*00C4h*/ 803 u32 inbound_high_queue_port ; /*00C4h*/
756 804
757 u32 reserved_5; /*00C8h*/ 805 u32 reserved_5; /*00C8h*/
758 u32 index_registers[820]; /*00CCh*/ 806 u32 res_6[11]; /*CCh*/
759 807 u32 host_diag;
808 u32 seq_offset;
809 u32 index_registers[807]; /*00CCh*/
760} __attribute__ ((packed)); 810} __attribute__ ((packed));
761 811
762struct megasas_sge32 { 812struct megasas_sge32 {
@@ -1226,11 +1276,12 @@ struct megasas_instance {
1226 1276
1227 struct megasas_cmd **cmd_list; 1277 struct megasas_cmd **cmd_list;
1228 struct list_head cmd_pool; 1278 struct list_head cmd_pool;
1279 /* used to sync fire the cmd to fw */
1229 spinlock_t cmd_pool_lock; 1280 spinlock_t cmd_pool_lock;
1281 /* used to sync fire the cmd to fw */
1282 spinlock_t hba_lock;
1230 /* used to synch producer, consumer ptrs in dpc */ 1283 /* used to synch producer, consumer ptrs in dpc */
1231 spinlock_t completion_lock; 1284 spinlock_t completion_lock;
1232 /* used to sync fire the cmd to fw */
1233 spinlock_t fire_lock;
1234 struct dma_pool *frame_dma_pool; 1285 struct dma_pool *frame_dma_pool;
1235 struct dma_pool *sense_dma_pool; 1286 struct dma_pool *sense_dma_pool;
1236 1287
@@ -1247,19 +1298,36 @@ struct megasas_instance {
1247 1298
1248 struct pci_dev *pdev; 1299 struct pci_dev *pdev;
1249 u32 unique_id; 1300 u32 unique_id;
1301 u32 fw_support_ieee;
1250 1302
1251 atomic_t fw_outstanding; 1303 atomic_t fw_outstanding;
1252 u32 hw_crit_error; 1304 atomic_t fw_reset_no_pci_access;
1253 1305
1254 struct megasas_instance_template *instancet; 1306 struct megasas_instance_template *instancet;
1255 struct tasklet_struct isr_tasklet; 1307 struct tasklet_struct isr_tasklet;
1308 struct work_struct work_init;
1256 1309
1257 u8 flag; 1310 u8 flag;
1258 u8 unload; 1311 u8 unload;
1259 u8 flag_ieee; 1312 u8 flag_ieee;
1313 u8 issuepend_done;
1314 u8 disableOnlineCtrlReset;
1315 u8 adprecovery;
1260 unsigned long last_time; 1316 unsigned long last_time;
1317 u32 mfiStatus;
1318 u32 last_seq_num;
1261 1319
1262 struct timer_list io_completion_timer; 1320 struct timer_list io_completion_timer;
1321 struct list_head internal_reset_pending_q;
1322};
1323
1324enum {
1325 MEGASAS_HBA_OPERATIONAL = 0,
1326 MEGASAS_ADPRESET_SM_INFAULT = 1,
1327 MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
1328 MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
1329 MEGASAS_HW_CRITICAL_ERROR = 4,
1330 MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
1263}; 1331};
1264 1332
1265struct megasas_instance_template { 1333struct megasas_instance_template {
@@ -1272,6 +1340,10 @@ struct megasas_instance_template {
1272 int (*clear_intr)(struct megasas_register_set __iomem *); 1340 int (*clear_intr)(struct megasas_register_set __iomem *);
1273 1341
1274 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); 1342 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
1343 int (*adp_reset)(struct megasas_instance *, \
1344 struct megasas_register_set __iomem *);
1345 int (*check_reset)(struct megasas_instance *, \
1346 struct megasas_register_set __iomem *);
1275}; 1347};
1276 1348
1277#define MEGASAS_IS_LOGICAL(scp) \ 1349#define MEGASAS_IS_LOGICAL(scp) \
@@ -1291,7 +1363,9 @@ struct megasas_cmd {
1291 u32 index; 1363 u32 index;
1292 u8 sync_cmd; 1364 u8 sync_cmd;
1293 u8 cmd_status; 1365 u8 cmd_status;
1294 u16 abort_aen; 1366 u8 abort_aen;
1367 u8 retry_for_fw_reset;
1368
1295 1369
1296 struct list_head list; 1370 struct list_head list;
1297 struct scsi_cmnd *scmd; 1371 struct scsi_cmnd *scmd;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 57bcd5c9dcff..12faf64f91b0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -534,7 +534,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
534 if (event_data->DiscoveryStatus) 534 if (event_data->DiscoveryStatus)
535 printk("discovery_status(0x%08x)", 535 printk("discovery_status(0x%08x)",
536 le32_to_cpu(event_data->DiscoveryStatus)); 536 le32_to_cpu(event_data->DiscoveryStatus));
537 printk("\n"); 537 printk("\n");
538 return; 538 return;
539 } 539 }
540 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 540 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 53857c6b6d4d..ecc855c550aa 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -11,7 +11,6 @@ if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m
11 11
12config PCMCIA_AHA152X 12config PCMCIA_AHA152X
13 tristate "Adaptec AHA152X PCMCIA support" 13 tristate "Adaptec AHA152X PCMCIA support"
14 depends on !64BIT
15 select SCSI_SPI_ATTRS 14 select SCSI_SPI_ATTRS
16 help 15 help
17 Say Y here if you intend to attach this type of PCMCIA SCSI host 16 Say Y here if you intend to attach this type of PCMCIA SCSI host
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9793aa6afb10..d8db0137c0c7 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4194,6 +4194,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4194 4194
4195 nvmd_type = ioctl_payload->minor_function; 4195 nvmd_type = ioctl_payload->minor_function;
4196 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4196 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4197 if (!fw_control_context)
4198 return -ENOMEM;
4197 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; 4199 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
4198 fw_control_context->len = ioctl_payload->length; 4200 fw_control_context->len = ioctl_payload->length;
4199 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4201 circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4272,6 +4274,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4272 4274
4273 nvmd_type = ioctl_payload->minor_function; 4275 nvmd_type = ioctl_payload->minor_function;
4274 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4276 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4277 if (!fw_control_context)
4278 return -ENOMEM;
4275 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4279 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4276 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, 4280 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4277 ioctl_payload->func_specific, 4281 ioctl_payload->func_specific,
@@ -4381,6 +4385,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4381 struct pm8001_ioctl_payload *ioctl_payload = payload; 4385 struct pm8001_ioctl_payload *ioctl_payload = payload;
4382 4386
4383 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4387 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4388 if (!fw_control_context)
4389 return -ENOMEM;
4384 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; 4390 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
4385 if (fw_control->len != 0) { 4391 if (fw_control->len != 0) {
4386 if (pm8001_mem_alloc(pm8001_ha->pdev, 4392 if (pm8001_mem_alloc(pm8001_ha->pdev,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 114bc5a81171..2ff4342ae362 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1538,22 +1538,22 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1538 if (!fcport) 1538 if (!fcport)
1539 return; 1539 return;
1540 1540
1541 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1542 return;
1543
1544 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1545 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1546 return;
1547 }
1548
1549 /* 1541 /*
1550 * Transport has effectively 'deleted' the rport, clear 1542 * Transport has effectively 'deleted' the rport, clear
1551 * all local references. 1543 * all local references.
1552 */ 1544 */
1553 spin_lock_irq(host->host_lock); 1545 spin_lock_irq(host->host_lock);
1554 fcport->rport = NULL; 1546 fcport->rport = fcport->drport = NULL;
1555 *((fc_port_t **)rport->dd_data) = NULL; 1547 *((fc_port_t **)rport->dd_data) = NULL;
1556 spin_unlock_irq(host->host_lock); 1548 spin_unlock_irq(host->host_lock);
1549
1550 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1551 return;
1552
1553 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1554 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1555 return;
1556 }
1557} 1557}
1558 1558
1559static void 1559static void
@@ -1676,14 +1676,14 @@ static void
1676qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1676qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1677{ 1677{
1678 scsi_qla_host_t *vha = shost_priv(shost); 1678 scsi_qla_host_t *vha = shost_priv(shost);
1679 u64 node_name; 1679 uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
1680 0xFF, 0xFF, 0xFF, 0xFF};
1681 u64 fabric_name = wwn_to_u64(node_name);
1680 1682
1681 if (vha->device_flags & SWITCH_FOUND) 1683 if (vha->device_flags & SWITCH_FOUND)
1682 node_name = wwn_to_u64(vha->fabric_node_name); 1684 fabric_name = wwn_to_u64(vha->fabric_node_name);
1683 else
1684 node_name = wwn_to_u64(vha->node_name);
1685 1685
1686 fc_host_fabric_name(shost) = node_name; 1686 fc_host_fabric_name(shost) = fabric_name;
1687} 1687}
1688 1688
1689static void 1689static void
@@ -1776,6 +1776,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1776 } 1776 }
1777 1777
1778 /* initialize attributes */ 1778 /* initialize attributes */
1779 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1779 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1780 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1780 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1781 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1781 fc_host_supported_classes(vha->host) = 1782 fc_host_supported_classes(vha->host) =
@@ -1984,6 +1985,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1984 struct qla_hw_data *ha = vha->hw; 1985 struct qla_hw_data *ha = vha->hw;
1985 u32 speed = FC_PORTSPEED_UNKNOWN; 1986 u32 speed = FC_PORTSPEED_UNKNOWN;
1986 1987
1988 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1987 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1989 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1988 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1990 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1989 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 1991 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9067629817ea..fdfbf83a6330 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1254,10 +1254,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1254 return -EINVAL; 1254 return -EINVAL;
1255 } 1255 }
1256 1256
1257 if (fcport->loop_id == FC_NO_LOOP_ID) { 1257 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, " 1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
1259 "loop_id = 0x%x\n", 1259 __func__, vha->host_no));
1260 __func__, vha->host_no, fcport->loop_id));
1261 return -EINVAL; 1260 return -EINVAL;
1262 } 1261 }
1263 1262
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index d2a4e1530708..e1d3ad40a946 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -706,6 +706,11 @@ typedef struct {
706#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ 706#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
707#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ 707#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
708 708
709/*
710 * ISP81xx mailbox commands
711 */
712#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */
713
709/* Firmware return data sizes */ 714/* Firmware return data sizes */
710#define FCAL_MAP_SIZE 128 715#define FCAL_MAP_SIZE 128
711 716
@@ -2860,6 +2865,7 @@ typedef struct scsi_qla_host {
2860#define NPIV_CONFIG_NEEDED 16 2865#define NPIV_CONFIG_NEEDED 16
2861#define ISP_UNRECOVERABLE 17 2866#define ISP_UNRECOVERABLE 17
2862#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2867#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2868#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2863 2869
2864 uint32_t device_flags; 2870 uint32_t device_flags;
2865#define SWITCH_FOUND BIT_0 2871#define SWITCH_FOUND BIT_0
@@ -3003,6 +3009,8 @@ typedef struct scsi_qla_host {
3003 3009
3004#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 3010#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
3005 3011
3012#define QLA_SG_ALL 1024
3013
3006enum nexus_wait_type { 3014enum nexus_wait_type {
3007 WAIT_HOST = 0, 3015 WAIT_HOST = 0,
3008 WAIT_TARGET, 3016 WAIT_TARGET,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1a1b281cea33..c33dec827e1e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -352,6 +352,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
352extern int 352extern int
353qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); 353qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
354 354
355extern int
356qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *);
355extern int qla2x00_get_data_rate(scsi_qla_host_t *); 357extern int qla2x00_get_data_rate(scsi_qla_host_t *);
356extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t, 358extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
357 uint16_t *); 359 uint16_t *);
@@ -501,7 +503,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
501/* PCI related functions */ 503/* PCI related functions */
502extern int qla82xx_pci_config(struct scsi_qla_host *); 504extern int qla82xx_pci_config(struct scsi_qla_host *);
503extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); 505extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
504extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
505extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *); 506extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
506extern int qla82xx_pci_region_offset(struct pci_dev *, int); 507extern int qla82xx_pci_region_offset(struct pci_dev *, int);
507extern int qla82xx_iospace_config(struct qla_hw_data *); 508extern int qla82xx_iospace_config(struct qla_hw_data *);
@@ -509,8 +510,8 @@ extern int qla82xx_iospace_config(struct qla_hw_data *);
509/* Initialization related functions */ 510/* Initialization related functions */
510extern void qla82xx_reset_chip(struct scsi_qla_host *); 511extern void qla82xx_reset_chip(struct scsi_qla_host *);
511extern void qla82xx_config_rings(struct scsi_qla_host *); 512extern void qla82xx_config_rings(struct scsi_qla_host *);
512extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
513extern void qla82xx_watchdog(scsi_qla_host_t *); 513extern void qla82xx_watchdog(scsi_qla_host_t *);
514extern int qla82xx_start_firmware(scsi_qla_host_t *);
514 515
515/* Firmware and flash related functions */ 516/* Firmware and flash related functions */
516extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); 517extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
@@ -533,25 +534,17 @@ extern irqreturn_t qla82xx_msix_default(int, void *);
533extern irqreturn_t qla82xx_msix_rsp_q(int, void *); 534extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
534extern void qla82xx_enable_intrs(struct qla_hw_data *); 535extern void qla82xx_enable_intrs(struct qla_hw_data *);
535extern void qla82xx_disable_intrs(struct qla_hw_data *); 536extern void qla82xx_disable_intrs(struct qla_hw_data *);
536extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
537extern void qla82xx_poll(int, void *); 537extern void qla82xx_poll(int, void *);
538extern void qla82xx_init_flags(struct qla_hw_data *); 538extern void qla82xx_init_flags(struct qla_hw_data *);
539 539
540/* ISP 8021 hardware related */ 540/* ISP 8021 hardware related */
541extern int qla82xx_crb_win_lock(struct qla_hw_data *); 541extern void qla82xx_set_drv_active(scsi_qla_host_t *);
542extern void qla82xx_crb_win_unlock(struct qla_hw_data *); 542extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
543extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
544extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); 543extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
545extern int qla82xx_rd_32(struct qla_hw_data *, ulong); 544extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
546extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); 545extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
547extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); 546extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
548extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
549extern int qla82xx_load_fw(scsi_qla_host_t *);
550extern int qla82xx_rom_lock(struct qla_hw_data *);
551extern void qla82xx_rom_unlock(struct qla_hw_data *); 547extern void qla82xx_rom_unlock(struct qla_hw_data *);
552extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
553extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
554extern unsigned long qla82xx_decode_crb_addr(unsigned long);
555 548
556/* ISP 8021 IDC */ 549/* ISP 8021 IDC */
557extern void qla82xx_clear_drv_active(struct qla_hw_data *); 550extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9c383baebe27..3cafbef40737 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -954,6 +954,19 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
954} 954}
955 955
956/** 956/**
957 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
958 *
959 * Returns 0 on success.
960 */
961int
962qla81xx_reset_mpi(scsi_qla_host_t *vha)
963{
964 uint16_t mb[4] = {0x1010, 0, 1, 0};
965
966 return qla81xx_write_mpi_register(vha, mb);
967}
968
969/**
957 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 970 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
958 * @ha: HA context 971 * @ha: HA context
959 * 972 *
@@ -967,6 +980,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
967 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 980 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
968 uint32_t cnt, d2; 981 uint32_t cnt, d2;
969 uint16_t wd; 982 uint16_t wd;
983 static int abts_cnt; /* ISP abort retry counts */
970 984
971 spin_lock_irqsave(&ha->hardware_lock, flags); 985 spin_lock_irqsave(&ha->hardware_lock, flags);
972 986
@@ -1000,6 +1014,23 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1000 barrier(); 1014 barrier();
1001 } 1015 }
1002 1016
1017 /* If required, do an MPI FW reset now */
1018 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1019 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1020 if (++abts_cnt < 5) {
1021 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1022 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1023 } else {
1024 /*
1025 * We exhausted the ISP abort retries. We have to
1026 * set the board offline.
1027 */
1028 abts_cnt = 0;
1029 vha->flags.online = 0;
1030 }
1031 }
1032 }
1033
1003 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 1034 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1004 RD_REG_DWORD(&reg->hccr); 1035 RD_REG_DWORD(&reg->hccr);
1005 1036
@@ -2799,6 +2830,9 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2799 if (!IS_IIDMA_CAPABLE(ha)) 2830 if (!IS_IIDMA_CAPABLE(ha))
2800 return; 2831 return;
2801 2832
2833 if (atomic_read(&fcport->state) != FCS_ONLINE)
2834 return;
2835
2802 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 2836 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2803 fcport->fp_speed > ha->link_data_rate) 2837 fcport->fp_speed > ha->link_data_rate)
2804 return; 2838 return;
@@ -3878,17 +3912,19 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3878 LOOP_DOWN_TIME); 3912 LOOP_DOWN_TIME);
3879 } 3913 }
3880 3914
3881 /* Make sure for ISP 82XX IO DMA is complete */ 3915 if (!ha->flags.eeh_busy) {
3882 if (IS_QLA82XX(ha)) { 3916 /* Make sure for ISP 82XX IO DMA is complete */
3883 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3917 if (IS_QLA82XX(ha)) {
3884 WAIT_HOST) == QLA_SUCCESS) { 3918 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3885 DEBUG2(qla_printk(KERN_INFO, ha, 3919 WAIT_HOST) == QLA_SUCCESS) {
3886 "Done wait for pending commands\n")); 3920 DEBUG2(qla_printk(KERN_INFO, ha,
3921 "Done wait for pending commands\n"));
3922 }
3887 } 3923 }
3888 }
3889 3924
3890 /* Requeue all commands in outstanding command list. */ 3925 /* Requeue all commands in outstanding command list. */
3891 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3926 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3927 }
3892} 3928}
3893 3929
3894/* 3930/*
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 28f65be19dad..e0e43d9e7ed1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -412,8 +412,14 @@ skip_rio:
412 "Unrecoverable Hardware Error: adapter " 412 "Unrecoverable Hardware Error: adapter "
413 "marked OFFLINE!\n"); 413 "marked OFFLINE!\n");
414 vha->flags.online = 0; 414 vha->flags.online = 0;
415 } else 415 } else {
416 /* Check to see if MPI timeout occured */
417 if ((mbx & MBX_3) && (ha->flags.port0))
418 set_bit(MPI_RESET_NEEDED,
419 &vha->dpc_flags);
420
416 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 421 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
422 }
417 } else if (mb[1] == 0) { 423 } else if (mb[1] == 0) {
418 qla_printk(KERN_INFO, ha, 424 qla_printk(KERN_INFO, ha,
419 "Unrecoverable Hardware Error: adapter marked " 425 "Unrecoverable Hardware Error: adapter marked "
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a595ec8264f8..effd8a1403d9 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3828,8 +3828,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3828 3828
3829 /* Copy mailbox information */ 3829 /* Copy mailbox information */
3830 memcpy( mresp, mcp->mb, 64); 3830 memcpy( mresp, mcp->mb, 64);
3831 mresp[3] = mcp->mb[18];
3832 mresp[4] = mcp->mb[19];
3833 return rval; 3831 return rval;
3834} 3832}
3835 3833
@@ -3890,9 +3888,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3890 } 3888 }
3891 3889
3892 /* Copy mailbox information */ 3890 /* Copy mailbox information */
3893 memcpy( mresp, mcp->mb, 32); 3891 memcpy(mresp, mcp->mb, 64);
3894 return rval; 3892 return rval;
3895} 3893}
3894
3896int 3895int
3897qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3896qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3898{ 3897{
@@ -3953,6 +3952,67 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3953} 3952}
3954 3953
3955int 3954int
3955qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3956{
3957 int rval;
3958 uint32_t stat, timer;
3959 uint16_t mb0 = 0;
3960 struct qla_hw_data *ha = vha->hw;
3961 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3962
3963 rval = QLA_SUCCESS;
3964
3965 DEBUG11(qla_printk(KERN_INFO, ha,
3966 "%s(%ld): entered.\n", __func__, vha->host_no));
3967
3968 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3969
3970 /* Write the MBC data to the registers */
3971 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
3972 WRT_REG_WORD(&reg->mailbox1, mb[0]);
3973 WRT_REG_WORD(&reg->mailbox2, mb[1]);
3974 WRT_REG_WORD(&reg->mailbox3, mb[2]);
3975 WRT_REG_WORD(&reg->mailbox4, mb[3]);
3976
3977 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
3978
3979 /* Poll for MBC interrupt */
3980 for (timer = 6000000; timer; timer--) {
3981 /* Check for pending interrupts. */
3982 stat = RD_REG_DWORD(&reg->host_status);
3983 if (stat & HSRX_RISC_INT) {
3984 stat &= 0xff;
3985
3986 if (stat == 0x1 || stat == 0x2 ||
3987 stat == 0x10 || stat == 0x11) {
3988 set_bit(MBX_INTERRUPT,
3989 &ha->mbx_cmd_flags);
3990 mb0 = RD_REG_WORD(&reg->mailbox0);
3991 WRT_REG_DWORD(&reg->hccr,
3992 HCCRX_CLR_RISC_INT);
3993 RD_REG_DWORD(&reg->hccr);
3994 break;
3995 }
3996 }
3997 udelay(5);
3998 }
3999
4000 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4001 rval = mb0 & MBS_MASK;
4002 else
4003 rval = QLA_FUNCTION_FAILED;
4004
4005 if (rval != QLA_SUCCESS) {
4006 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
4007 __func__, vha->host_no, rval, mb[0]));
4008 } else {
4009 DEBUG11(printk(KERN_INFO
4010 "%s(%ld): done.\n", __func__, vha->host_no));
4011 }
4012
4013 return rval;
4014}
4015int
3956qla2x00_get_data_rate(scsi_qla_host_t *vha) 4016qla2x00_get_data_rate(scsi_qla_host_t *vha)
3957{ 4017{
3958 int rval; 4018 int rval;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0a71cc71eab2..8d9edfb39803 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -403,6 +403,54 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
403 return off; 403 return off;
404} 404}
405 405
406static int
407qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
408{
409 struct crb_128M_2M_sub_block_map *m;
410
411 if (*off >= QLA82XX_CRB_MAX)
412 return -1;
413
414 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
415 *off = (*off - QLA82XX_PCI_CAMQM) +
416 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
417 return 0;
418 }
419
420 if (*off < QLA82XX_PCI_CRBSPACE)
421 return -1;
422
423 *off -= QLA82XX_PCI_CRBSPACE;
424
425 /* Try direct map */
426 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
427
428 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
429 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
430 return 0;
431 }
432 /* Not in direct map, use crb window */
433 return 1;
434}
435
436#define CRB_WIN_LOCK_TIMEOUT 100000000
437static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
438{
439 int done = 0, timeout = 0;
440
441 while (!done) {
442 /* acquire semaphore3 from PCI HW block */
443 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
444 if (done == 1)
445 break;
446 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
447 return -1;
448 timeout++;
449 }
450 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
451 return 0;
452}
453
406int 454int
407qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) 455qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
408{ 456{
@@ -453,24 +501,6 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
453 return data; 501 return data;
454} 502}
455 503
456#define CRB_WIN_LOCK_TIMEOUT 100000000
457int qla82xx_crb_win_lock(struct qla_hw_data *ha)
458{
459 int done = 0, timeout = 0;
460
461 while (!done) {
462 /* acquire semaphore3 from PCI HW block */
463 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
464 if (done == 1)
465 break;
466 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
467 return -1;
468 timeout++;
469 }
470 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
471 return 0;
472}
473
474#define IDC_LOCK_TIMEOUT 100000000 504#define IDC_LOCK_TIMEOUT 100000000
475int qla82xx_idc_lock(struct qla_hw_data *ha) 505int qla82xx_idc_lock(struct qla_hw_data *ha)
476{ 506{
@@ -504,36 +534,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha)
504 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); 534 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
505} 535}
506 536
507int
508qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
509{
510 struct crb_128M_2M_sub_block_map *m;
511
512 if (*off >= QLA82XX_CRB_MAX)
513 return -1;
514
515 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
516 *off = (*off - QLA82XX_PCI_CAMQM) +
517 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
518 return 0;
519 }
520
521 if (*off < QLA82XX_PCI_CRBSPACE)
522 return -1;
523
524 *off -= QLA82XX_PCI_CRBSPACE;
525
526 /* Try direct map */
527 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
528
529 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
530 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
531 return 0;
532 }
533 /* Not in direct map, use crb window */
534 return 1;
535}
536
537/* PCI Windowing for DDR regions. */ 537/* PCI Windowing for DDR regions. */
538#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ 538#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
539 (((addr) <= (high)) && ((addr) >= (low))) 539 (((addr) <= (high)) && ((addr) >= (low)))
@@ -557,7 +557,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
557 557
558int qla82xx_pci_set_window_warning_count; 558int qla82xx_pci_set_window_warning_count;
559 559
560unsigned long 560static unsigned long
561qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) 561qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
562{ 562{
563 int window; 563 int window;
@@ -798,7 +798,8 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
798} 798}
799 799
800#define MTU_FUDGE_FACTOR 100 800#define MTU_FUDGE_FACTOR 100
801unsigned long qla82xx_decode_crb_addr(unsigned long addr) 801static unsigned long
802qla82xx_decode_crb_addr(unsigned long addr)
802{ 803{
803 int i; 804 int i;
804 unsigned long base_addr, offset, pci_base; 805 unsigned long base_addr, offset, pci_base;
@@ -824,7 +825,7 @@ unsigned long qla82xx_decode_crb_addr(unsigned long addr)
824static long rom_max_timeout = 100; 825static long rom_max_timeout = 100;
825static long qla82xx_rom_lock_timeout = 100; 826static long qla82xx_rom_lock_timeout = 100;
826 827
827int 828static int
828qla82xx_rom_lock(struct qla_hw_data *ha) 829qla82xx_rom_lock(struct qla_hw_data *ha)
829{ 830{
830 int done = 0, timeout = 0; 831 int done = 0, timeout = 0;
@@ -842,7 +843,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
842 return 0; 843 return 0;
843} 844}
844 845
845int 846static int
846qla82xx_wait_rom_busy(struct qla_hw_data *ha) 847qla82xx_wait_rom_busy(struct qla_hw_data *ha)
847{ 848{
848 long timeout = 0; 849 long timeout = 0;
@@ -862,7 +863,7 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
862 return 0; 863 return 0;
863} 864}
864 865
865int 866static int
866qla82xx_wait_rom_done(struct qla_hw_data *ha) 867qla82xx_wait_rom_done(struct qla_hw_data *ha)
867{ 868{
868 long timeout = 0; 869 long timeout = 0;
@@ -882,7 +883,7 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
882 return 0; 883 return 0;
883} 884}
884 885
885int 886static int
886qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 887qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
887{ 888{
888 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 889 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
@@ -905,7 +906,7 @@ qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
905 return 0; 906 return 0;
906} 907}
907 908
908int 909static int
909qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 910qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
910{ 911{
911 int ret, loops = 0; 912 int ret, loops = 0;
@@ -926,7 +927,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
926 return ret; 927 return ret;
927} 928}
928 929
929int 930static int
930qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 931qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
931{ 932{
932 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 933 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
@@ -940,7 +941,7 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
940 return 0; 941 return 0;
941} 942}
942 943
943int 944static int
944qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) 945qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
945{ 946{
946 long timeout = 0; 947 long timeout = 0;
@@ -964,7 +965,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 return ret; 965 return ret;
965} 966}
966 967
967int 968static int
968qla82xx_flash_set_write_enable(struct qla_hw_data *ha) 969qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
969{ 970{
970 uint32_t val; 971 uint32_t val;
@@ -981,7 +982,7 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
981 return 0; 982 return 0;
982} 983}
983 984
984int 985static int
985qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 986qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
986{ 987{
987 if (qla82xx_flash_set_write_enable(ha)) 988 if (qla82xx_flash_set_write_enable(ha))
@@ -996,7 +997,7 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
996 return qla82xx_flash_wait_write_finish(ha); 997 return qla82xx_flash_wait_write_finish(ha);
997} 998}
998 999
999int 1000static int
1000qla82xx_write_disable_flash(struct qla_hw_data *ha) 1001qla82xx_write_disable_flash(struct qla_hw_data *ha)
1001{ 1002{
1002 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1003 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
@@ -1008,7 +1009,7 @@ qla82xx_write_disable_flash(struct qla_hw_data *ha)
1008 return 0; 1009 return 0;
1009} 1010}
1010 1011
1011int 1012static int
1012ql82xx_rom_lock_d(struct qla_hw_data *ha) 1013ql82xx_rom_lock_d(struct qla_hw_data *ha)
1013{ 1014{
1014 int loops = 0; 1015 int loops = 0;
@@ -1024,7 +1025,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
1024 return 0;; 1025 return 0;;
1025} 1026}
1026 1027
1027int 1028static int
1028qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, 1029qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1029 uint32_t data) 1030 uint32_t data)
1030{ 1031{
@@ -1061,7 +1062,8 @@ done_write:
1061/* This routine does CRB initialize sequence 1062/* This routine does CRB initialize sequence
1062 * to put the ISP into operational state 1063 * to put the ISP into operational state
1063 */ 1064 */
1064int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) 1065static int
1066qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1065{ 1067{
1066 int addr, val; 1068 int addr, val;
1067 int i ; 1069 int i ;
@@ -1207,7 +1209,8 @@ int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1207 return 0; 1209 return 0;
1208} 1210}
1209 1211
1210int qla82xx_check_for_bad_spd(struct qla_hw_data *ha) 1212static int
1213qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1211{ 1214{
1212 u32 val = 0; 1215 u32 val = 0;
1213 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS); 1216 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
@@ -1225,7 +1228,116 @@ int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1225 return 0; 1228 return 0;
1226} 1229}
1227 1230
1228int 1231static int
1232qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1233 u64 off, void *data, int size)
1234{
1235 int i, j, ret = 0, loop, sz[2], off0;
1236 int scale, shift_amount, startword;
1237 uint32_t temp;
1238 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1239
1240 /*
1241 * If not MN, go check for MS or invalid.
1242 */
1243 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1244 mem_crb = QLA82XX_CRB_QDR_NET;
1245 else {
1246 mem_crb = QLA82XX_CRB_DDR_NET;
1247 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1248 return qla82xx_pci_mem_write_direct(ha,
1249 off, data, size);
1250 }
1251
1252 off0 = off & 0x7;
1253 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1254 sz[1] = size - sz[0];
1255
1256 off8 = off & 0xfffffff0;
1257 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1258 shift_amount = 4;
1259 scale = 2;
1260 startword = (off & 0xf)/8;
1261
1262 for (i = 0; i < loop; i++) {
1263 if (qla82xx_pci_mem_read_2M(ha, off8 +
1264 (i << shift_amount), &word[i * scale], 8))
1265 return -1;
1266 }
1267
1268 switch (size) {
1269 case 1:
1270 tmpw = *((uint8_t *)data);
1271 break;
1272 case 2:
1273 tmpw = *((uint16_t *)data);
1274 break;
1275 case 4:
1276 tmpw = *((uint32_t *)data);
1277 break;
1278 case 8:
1279 default:
1280 tmpw = *((uint64_t *)data);
1281 break;
1282 }
1283
1284 if (sz[0] == 8) {
1285 word[startword] = tmpw;
1286 } else {
1287 word[startword] &=
1288 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1289 word[startword] |= tmpw << (off0 * 8);
1290 }
1291 if (sz[1] != 0) {
1292 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1293 word[startword+1] |= tmpw >> (sz[0] * 8);
1294 }
1295
1296 /*
1297 * don't lock here - write_wx gets the lock if each time
1298 * write_lock_irqsave(&adapter->adapter_lock, flags);
1299 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1300 */
1301 for (i = 0; i < loop; i++) {
1302 temp = off8 + (i << shift_amount);
1303 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1304 temp = 0;
1305 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1306 temp = word[i * scale] & 0xffffffff;
1307 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1308 temp = (word[i * scale] >> 32) & 0xffffffff;
1309 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1310 temp = word[i*scale + 1] & 0xffffffff;
1311 qla82xx_wr_32(ha, mem_crb +
1312 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1313 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1314 qla82xx_wr_32(ha, mem_crb +
1315 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1316
1317 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1318 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1319 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1320 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1321
1322 for (j = 0; j < MAX_CTL_CHECK; j++) {
1323 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1324 if ((temp & MIU_TA_CTL_BUSY) == 0)
1325 break;
1326 }
1327
1328 if (j >= MAX_CTL_CHECK) {
1329 if (printk_ratelimit())
1330 dev_err(&ha->pdev->dev,
1331 "failed to write through agent\n");
1332 ret = -1;
1333 break;
1334 }
1335 }
1336
1337 return ret;
1338}
1339
1340static int
1229qla82xx_fw_load_from_flash(struct qla_hw_data *ha) 1341qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1230{ 1342{
1231 int i; 1343 int i;
@@ -1357,114 +1469,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1357 return 0; 1469 return 0;
1358} 1470}
1359 1471
1360int
1361qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1362 u64 off, void *data, int size)
1363{
1364 int i, j, ret = 0, loop, sz[2], off0;
1365 int scale, shift_amount, startword;
1366 uint32_t temp;
1367 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1368
1369 /*
1370 * If not MN, go check for MS or invalid.
1371 */
1372 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1373 mem_crb = QLA82XX_CRB_QDR_NET;
1374 else {
1375 mem_crb = QLA82XX_CRB_DDR_NET;
1376 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1377 return qla82xx_pci_mem_write_direct(ha,
1378 off, data, size);
1379 }
1380
1381 off0 = off & 0x7;
1382 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1383 sz[1] = size - sz[0];
1384
1385 off8 = off & 0xfffffff0;
1386 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1387 shift_amount = 4;
1388 scale = 2;
1389 startword = (off & 0xf)/8;
1390
1391 for (i = 0; i < loop; i++) {
1392 if (qla82xx_pci_mem_read_2M(ha, off8 +
1393 (i << shift_amount), &word[i * scale], 8))
1394 return -1;
1395 }
1396
1397 switch (size) {
1398 case 1:
1399 tmpw = *((uint8_t *)data);
1400 break;
1401 case 2:
1402 tmpw = *((uint16_t *)data);
1403 break;
1404 case 4:
1405 tmpw = *((uint32_t *)data);
1406 break;
1407 case 8:
1408 default:
1409 tmpw = *((uint64_t *)data);
1410 break;
1411 }
1412
1413 if (sz[0] == 8) {
1414 word[startword] = tmpw;
1415 } else {
1416 word[startword] &=
1417 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1418 word[startword] |= tmpw << (off0 * 8);
1419 }
1420 if (sz[1] != 0) {
1421 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1422 word[startword+1] |= tmpw >> (sz[0] * 8);
1423 }
1424
1425 /*
1426 * don't lock here - write_wx gets the lock if each time
1427 * write_lock_irqsave(&adapter->adapter_lock, flags);
1428 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1429 */
1430 for (i = 0; i < loop; i++) {
1431 temp = off8 + (i << shift_amount);
1432 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1433 temp = 0;
1434 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1435 temp = word[i * scale] & 0xffffffff;
1436 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1437 temp = (word[i * scale] >> 32) & 0xffffffff;
1438 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1439 temp = word[i*scale + 1] & 0xffffffff;
1440 qla82xx_wr_32(ha, mem_crb +
1441 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1442 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1443 qla82xx_wr_32(ha, mem_crb +
1444 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1445
1446 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1447 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1448 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1449 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1450
1451 for (j = 0; j < MAX_CTL_CHECK; j++) {
1452 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1453 if ((temp & MIU_TA_CTL_BUSY) == 0)
1454 break;
1455 }
1456
1457 if (j >= MAX_CTL_CHECK) {
1458 if (printk_ratelimit())
1459 dev_err(&ha->pdev->dev,
1460 "failed to write through agent\n");
1461 ret = -1;
1462 break;
1463 }
1464 }
1465
1466 return ret;
1467}
1468 1472
1469static struct qla82xx_uri_table_desc * 1473static struct qla82xx_uri_table_desc *
1470qla82xx_get_table_desc(const u8 *unirom, int section) 1474qla82xx_get_table_desc(const u8 *unirom, int section)
@@ -1725,7 +1729,8 @@ void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1725 ha->isp_ops->disable_intrs(ha); 1729 ha->isp_ops->disable_intrs(ha);
1726} 1730}
1727 1731
1728int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) 1732static int
1733qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1729{ 1734{
1730 u64 *ptr64; 1735 u64 *ptr64;
1731 u32 i, flashaddr, size; 1736 u32 i, flashaddr, size;
@@ -1836,7 +1841,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1836 return 0; 1841 return 0;
1837} 1842}
1838 1843
1839int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) 1844static int
1845qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1840{ 1846{
1841 u32 val = 0; 1847 u32 val = 0;
1842 int retries = 60; 1848 int retries = 60;
@@ -1874,7 +1880,8 @@ int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1874 return QLA_FUNCTION_FAILED; 1880 return QLA_FUNCTION_FAILED;
1875} 1881}
1876 1882
1877int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) 1883static int
1884qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1878{ 1885{
1879 u32 val = 0; 1886 u32 val = 0;
1880 int retries = 60; 1887 int retries = 60;
@@ -1933,7 +1940,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
1933 * @ha: SCSI driver HA context 1940 * @ha: SCSI driver HA context
1934 * @mb0: Mailbox0 register 1941 * @mb0: Mailbox0 register
1935 */ 1942 */
1936void 1943static void
1937qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 1944qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1938{ 1945{
1939 uint16_t cnt; 1946 uint16_t cnt;
@@ -2257,7 +2264,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
2257 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 2264 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2258} 2265}
2259 2266
2260static inline void 2267inline void
2261qla82xx_set_drv_active(scsi_qla_host_t *vha) 2268qla82xx_set_drv_active(scsi_qla_host_t *vha)
2262{ 2269{
2263 uint32_t drv_active; 2270 uint32_t drv_active;
@@ -2267,10 +2274,11 @@ qla82xx_set_drv_active(scsi_qla_host_t *vha)
2267 2274
2268 /* If reset value is all FF's, initialize DRV_ACTIVE */ 2275 /* If reset value is all FF's, initialize DRV_ACTIVE */
2269 if (drv_active == 0xffffffff) { 2276 if (drv_active == 0xffffffff) {
2270 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0); 2277 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2278 QLA82XX_DRV_NOT_ACTIVE);
2271 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2279 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2272 } 2280 }
2273 drv_active |= (1 << (ha->portnum * 4)); 2281 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2274 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2282 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2275} 2283}
2276 2284
@@ -2280,7 +2288,7 @@ qla82xx_clear_drv_active(struct qla_hw_data *ha)
2280 uint32_t drv_active; 2288 uint32_t drv_active;
2281 2289
2282 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2290 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2283 drv_active &= ~(1 << (ha->portnum * 4)); 2291 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2284 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2292 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2285} 2293}
2286 2294
@@ -2291,7 +2299,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2291 int rval; 2299 int rval;
2292 2300
2293 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2301 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2294 rval = drv_state & (1 << (ha->portnum * 4)); 2302 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2295 return rval; 2303 return rval;
2296} 2304}
2297 2305
@@ -2305,7 +2313,7 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2305 2313
2306 /* If reset value is all FF's, initialize DRV_STATE */ 2314 /* If reset value is all FF's, initialize DRV_STATE */
2307 if (drv_state == 0xffffffff) { 2315 if (drv_state == 0xffffffff) {
2308 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 2316 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2309 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2317 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2310 } 2318 }
2311 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2319 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
@@ -2335,7 +2343,8 @@ qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2335 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2343 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2336} 2344}
2337 2345
2338int qla82xx_load_fw(scsi_qla_host_t *vha) 2346static int
2347qla82xx_load_fw(scsi_qla_host_t *vha)
2339{ 2348{
2340 int rst; 2349 int rst;
2341 struct fw_blob *blob; 2350 struct fw_blob *blob;
@@ -2411,7 +2420,7 @@ fw_load_failed:
2411 return QLA_FUNCTION_FAILED; 2420 return QLA_FUNCTION_FAILED;
2412} 2421}
2413 2422
2414static int 2423int
2415qla82xx_start_firmware(scsi_qla_host_t *vha) 2424qla82xx_start_firmware(scsi_qla_host_t *vha)
2416{ 2425{
2417 int pcie_cap; 2426 int pcie_cap;
@@ -2419,7 +2428,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2419 struct qla_hw_data *ha = vha->hw; 2428 struct qla_hw_data *ha = vha->hw;
2420 2429
2421 /* scrub dma mask expansion register */ 2430 /* scrub dma mask expansion register */
2422 qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); 2431 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2423 2432
2424 /* Put both the PEG CMD and RCV PEG to default state 2433 /* Put both the PEG CMD and RCV PEG to default state
2425 * of 0 before resetting the hardware 2434 * of 0 before resetting the hardware
@@ -2882,7 +2891,7 @@ queuing_error:
2882 return QLA_FUNCTION_FAILED; 2891 return QLA_FUNCTION_FAILED;
2883} 2892}
2884 2893
2885uint32_t * 2894static uint32_t *
2886qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 2895qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2887 uint32_t length) 2896 uint32_t length)
2888{ 2897{
@@ -2903,7 +2912,7 @@ done_read:
2903 return dwptr; 2912 return dwptr;
2904} 2913}
2905 2914
2906int 2915static int
2907qla82xx_unprotect_flash(struct qla_hw_data *ha) 2916qla82xx_unprotect_flash(struct qla_hw_data *ha)
2908{ 2917{
2909 int ret; 2918 int ret;
@@ -2934,7 +2943,7 @@ done_unprotect:
2934 return ret; 2943 return ret;
2935} 2944}
2936 2945
2937int 2946static int
2938qla82xx_protect_flash(struct qla_hw_data *ha) 2947qla82xx_protect_flash(struct qla_hw_data *ha)
2939{ 2948{
2940 int ret; 2949 int ret;
@@ -2963,7 +2972,7 @@ done_protect:
2963 return ret; 2972 return ret;
2964} 2973}
2965 2974
2966int 2975static int
2967qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 2976qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2968{ 2977{
2969 int ret = 0; 2978 int ret = 0;
@@ -3156,6 +3165,20 @@ qla82xx_start_iocbs(srb_t *sp)
3156 } 3165 }
3157} 3166}
3158 3167
3168void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3169{
3170 if (qla82xx_rom_lock(ha))
3171 /* Someone else is holding the lock. */
3172 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3173
3174 /*
3175 * Either we got the lock, or someone
3176 * else died while holding it.
3177 * In either case, unlock.
3178 */
3179 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3180}
3181
3159/* 3182/*
3160 * qla82xx_device_bootstrap 3183 * qla82xx_device_bootstrap
3161 * Initialize device, set DEV_READY, start fw 3184 * Initialize device, set DEV_READY, start fw
@@ -3170,12 +3193,13 @@ qla82xx_start_iocbs(srb_t *sp)
3170static int 3193static int
3171qla82xx_device_bootstrap(scsi_qla_host_t *vha) 3194qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3172{ 3195{
3173 int rval, i, timeout; 3196 int rval = QLA_SUCCESS;
3197 int i, timeout;
3174 uint32_t old_count, count; 3198 uint32_t old_count, count;
3175 struct qla_hw_data *ha = vha->hw; 3199 struct qla_hw_data *ha = vha->hw;
3200 int need_reset = 0, peg_stuck = 1;
3176 3201
3177 if (qla82xx_need_reset(ha)) 3202 need_reset = qla82xx_need_reset(ha);
3178 goto dev_initialize;
3179 3203
3180 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3204 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3181 3205
@@ -3189,9 +3213,27 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3189 3213
3190 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3214 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3191 if (count != old_count) 3215 if (count != old_count)
3216 peg_stuck = 0;
3217 }
3218
3219 if (need_reset) {
3220 /* We are trying to perform a recovery here. */
3221 if (peg_stuck)
3222 qla82xx_rom_lock_recovery(ha);
3223 goto dev_initialize;
3224 } else {
3225 /* Start of day for this ha context. */
3226 if (peg_stuck) {
3227 /* Either we are the first or recovery in progress. */
3228 qla82xx_rom_lock_recovery(ha);
3229 goto dev_initialize;
3230 } else
3231 /* Firmware already running. */
3192 goto dev_ready; 3232 goto dev_ready;
3193 } 3233 }
3194 3234
3235 return rval;
3236
3195dev_initialize: 3237dev_initialize:
3196 /* set to DEV_INITIALIZING */ 3238 /* set to DEV_INITIALIZING */
3197 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3239 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
@@ -3304,6 +3346,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3304 struct qla_hw_data *ha = vha->hw; 3346 struct qla_hw_data *ha = vha->hw;
3305 3347
3306 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 3348 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3349 /* all 0xff, assume AER/EEH in progress, ignore */
3350 if (fw_heartbeat_counter == 0xffffffff)
3351 return;
3307 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3352 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3308 vha->seconds_since_last_heartbeat++; 3353 vha->seconds_since_last_heartbeat++;
3309 /* FW not alive after 2 seconds */ 3354 /* FW not alive after 2 seconds */
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 15559cab39f8..51ec0c5380e8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
29 30
30#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 31#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
31#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E 32#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
@@ -583,6 +584,10 @@
583#define QLA82XX_DRVST_RST_RDY 1 584#define QLA82XX_DRVST_RST_RDY 1
584#define QLA82XX_DRVST_QSNT_RDY 2 585#define QLA82XX_DRVST_QSNT_RDY 2
585 586
587/* Different drive active state */
588#define QLA82XX_DRV_NOT_ACTIVE 0
589#define QLA82XX_DRV_ACTIVE 1
590
586/* 591/*
587 * The PCI VendorID and DeviceID for our board. 592 * The PCI VendorID and DeviceID for our board.
588 */ 593 */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9946fac54255..800ea9269752 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1295,17 +1295,12 @@ static int
1295qla2xxx_slave_configure(struct scsi_device *sdev) 1295qla2xxx_slave_configure(struct scsi_device *sdev)
1296{ 1296{
1297 scsi_qla_host_t *vha = shost_priv(sdev->host); 1297 scsi_qla_host_t *vha = shost_priv(sdev->host);
1298 struct qla_hw_data *ha = vha->hw;
1299 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1300 struct req_que *req = vha->req; 1298 struct req_que *req = vha->req;
1301 1299
1302 if (sdev->tagged_supported) 1300 if (sdev->tagged_supported)
1303 scsi_activate_tcq(sdev, req->max_q_depth); 1301 scsi_activate_tcq(sdev, req->max_q_depth);
1304 else 1302 else
1305 scsi_deactivate_tcq(sdev, req->max_q_depth); 1303 scsi_deactivate_tcq(sdev, req->max_q_depth);
1306
1307 rport->dev_loss_tmo = ha->port_down_retry_count;
1308
1309 return 0; 1304 return 0;
1310} 1305}
1311 1306
@@ -2141,8 +2136,16 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2141 else 2136 else
2142 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 2137 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2143 base_vha->vp_idx; 2138 base_vha->vp_idx;
2144 if (IS_QLA2100(ha)) 2139
2145 host->sg_tablesize = 32; 2140 /* Set the SG table size based on ISP type */
2141 if (!IS_FWI2_CAPABLE(ha)) {
2142 if (IS_QLA2100(ha))
2143 host->sg_tablesize = 32;
2144 } else {
2145 if (!IS_QLA82XX(ha))
2146 host->sg_tablesize = QLA_SG_ALL;
2147 }
2148
2146 host->max_id = max_id; 2149 host->max_id = max_id;
2147 host->this_id = 255; 2150 host->this_id = 255;
2148 host->cmd_per_lun = 3; 2151 host->cmd_per_lun = 3;
@@ -3553,6 +3556,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3553 struct qla_hw_data *ha = vha->hw; 3556 struct qla_hw_data *ha = vha->hw;
3554 struct req_que *req; 3557 struct req_que *req;
3555 3558
3559 if (ha->flags.eeh_busy) {
3560 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3561 return;
3562 }
3563
3556 if (IS_QLA82XX(ha)) 3564 if (IS_QLA82XX(ha))
3557 qla82xx_watchdog(vha); 3565 qla82xx_watchdog(vha);
3558 3566
@@ -3782,8 +3790,21 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3782 return PCI_ERS_RESULT_CAN_RECOVER; 3790 return PCI_ERS_RESULT_CAN_RECOVER;
3783 case pci_channel_io_frozen: 3791 case pci_channel_io_frozen:
3784 ha->flags.eeh_busy = 1; 3792 ha->flags.eeh_busy = 1;
3793 /* For ISP82XX complete any pending mailbox cmd */
3794 if (IS_QLA82XX(ha)) {
3795 ha->flags.fw_hung = 1;
3796 if (ha->flags.mbox_busy) {
3797 ha->flags.mbox_int = 1;
3798 DEBUG2(qla_printk(KERN_ERR, ha,
3799 "Due to pci channel io frozen, doing premature "
3800 "completion of mbx command\n"));
3801 complete(&ha->mbx_intr_comp);
3802 }
3803 }
3785 qla2x00_free_irqs(vha); 3804 qla2x00_free_irqs(vha);
3786 pci_disable_device(pdev); 3805 pci_disable_device(pdev);
3806 /* Return back all IOs */
3807 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3787 return PCI_ERS_RESULT_NEED_RESET; 3808 return PCI_ERS_RESULT_NEED_RESET;
3788 case pci_channel_io_perm_failure: 3809 case pci_channel_io_perm_failure:
3789 ha->flags.pci_channel_io_perm_failure = 1; 3810 ha->flags.pci_channel_io_perm_failure = 1;
@@ -3804,6 +3825,9 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3804 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3825 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3805 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 3826 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3806 3827
3828 if (IS_QLA82XX(ha))
3829 return PCI_ERS_RESULT_RECOVERED;
3830
3807 spin_lock_irqsave(&ha->hardware_lock, flags); 3831 spin_lock_irqsave(&ha->hardware_lock, flags);
3808 if (IS_QLA2100(ha) || IS_QLA2200(ha)){ 3832 if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3809 stat = RD_REG_DWORD(&reg->hccr); 3833 stat = RD_REG_DWORD(&reg->hccr);
@@ -3830,6 +3854,109 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3830 return PCI_ERS_RESULT_RECOVERED; 3854 return PCI_ERS_RESULT_RECOVERED;
3831} 3855}
3832 3856
3857uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3858{
3859 uint32_t rval = QLA_FUNCTION_FAILED;
3860 uint32_t drv_active = 0;
3861 struct qla_hw_data *ha = base_vha->hw;
3862 int fn;
3863 struct pci_dev *other_pdev = NULL;
3864
3865 DEBUG17(qla_printk(KERN_INFO, ha,
3866 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
3867
3868 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3869
3870 if (base_vha->flags.online) {
3871 /* Abort all outstanding commands,
3872 * so as to be requeued later */
3873 qla2x00_abort_isp_cleanup(base_vha);
3874 }
3875
3876
3877 fn = PCI_FUNC(ha->pdev->devfn);
3878 while (fn > 0) {
3879 fn--;
3880 DEBUG17(qla_printk(KERN_INFO, ha,
3881 "Finding pci device at function = 0x%x\n", fn));
3882 other_pdev =
3883 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3884 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3885 fn));
3886
3887 if (!other_pdev)
3888 continue;
3889 if (atomic_read(&other_pdev->enable_cnt)) {
3890 DEBUG17(qla_printk(KERN_INFO, ha,
3891 "Found PCI func availabe and enabled at 0x%x\n",
3892 fn));
3893 pci_dev_put(other_pdev);
3894 break;
3895 }
3896 pci_dev_put(other_pdev);
3897 }
3898
3899 if (!fn) {
3900 /* Reset owner */
3901 DEBUG17(qla_printk(KERN_INFO, ha,
3902 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
3903 qla82xx_idc_lock(ha);
3904
3905 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3906 QLA82XX_DEV_INITIALIZING);
3907
3908 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
3909 QLA82XX_IDC_VERSION);
3910
3911 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3912 DEBUG17(qla_printk(KERN_INFO, ha,
3913 "drv_active = 0x%x\n", drv_active));
3914
3915 qla82xx_idc_unlock(ha);
3916 /* Reset if device is not already reset
3917 * drv_active would be 0 if a reset has already been done
3918 */
3919 if (drv_active)
3920 rval = qla82xx_start_firmware(base_vha);
3921 else
3922 rval = QLA_SUCCESS;
3923 qla82xx_idc_lock(ha);
3924
3925 if (rval != QLA_SUCCESS) {
3926 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3927 qla82xx_clear_drv_active(ha);
3928 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3929 QLA82XX_DEV_FAILED);
3930 } else {
3931 qla_printk(KERN_INFO, ha, "HW State: READY\n");
3932 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3933 QLA82XX_DEV_READY);
3934 qla82xx_idc_unlock(ha);
3935 ha->flags.fw_hung = 0;
3936 rval = qla82xx_restart_isp(base_vha);
3937 qla82xx_idc_lock(ha);
3938 /* Clear driver state register */
3939 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
3940 qla82xx_set_drv_active(base_vha);
3941 }
3942 qla82xx_idc_unlock(ha);
3943 } else {
3944 DEBUG17(qla_printk(KERN_INFO, ha,
3945 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
3946 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
3947 QLA82XX_DEV_READY)) {
3948 ha->flags.fw_hung = 0;
3949 rval = qla82xx_restart_isp(base_vha);
3950 qla82xx_idc_lock(ha);
3951 qla82xx_set_drv_active(base_vha);
3952 qla82xx_idc_unlock(ha);
3953 }
3954 }
3955 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3956
3957 return rval;
3958}
3959
3833static pci_ers_result_t 3960static pci_ers_result_t
3834qla2xxx_pci_slot_reset(struct pci_dev *pdev) 3961qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3835{ 3962{
@@ -3862,15 +3989,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3862 if (rc) { 3989 if (rc) {
3863 qla_printk(KERN_WARNING, ha, 3990 qla_printk(KERN_WARNING, ha,
3864 "Can't re-enable PCI device after reset.\n"); 3991 "Can't re-enable PCI device after reset.\n");
3865 return ret; 3992 goto exit_slot_reset;
3866 } 3993 }
3867 3994
3868 rsp = ha->rsp_q_map[0]; 3995 rsp = ha->rsp_q_map[0];
3869 if (qla2x00_request_irqs(ha, rsp)) 3996 if (qla2x00_request_irqs(ha, rsp))
3870 return ret; 3997 goto exit_slot_reset;
3871 3998
3872 if (ha->isp_ops->pci_config(base_vha)) 3999 if (ha->isp_ops->pci_config(base_vha))
3873 return ret; 4000 goto exit_slot_reset;
4001
4002 if (IS_QLA82XX(ha)) {
4003 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
4004 ret = PCI_ERS_RESULT_RECOVERED;
4005 goto exit_slot_reset;
4006 } else
4007 goto exit_slot_reset;
4008 }
3874 4009
3875 while (ha->flags.mbox_busy && retries--) 4010 while (ha->flags.mbox_busy && retries--)
3876 msleep(1000); 4011 msleep(1000);
@@ -3881,6 +4016,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3881 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4016 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3882 4017
3883 4018
4019exit_slot_reset:
3884 DEBUG17(qla_printk(KERN_WARNING, ha, 4020 DEBUG17(qla_printk(KERN_WARNING, ha,
3885 "slot_reset-return:ret=%x\n", ret)); 4021 "slot_reset-return:ret=%x\n", ret));
3886 4022
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 5d4a3822382d..449256f2c5f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/io.h>
8#include <linux/pci.h> 9#include <linux/pci.h>
9#include "ql4_def.h" 10#include "ql4_def.h"
10#include "ql4_glbl.h" 11#include "ql4_glbl.h"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b02bdc6c2cd1..2c36bae3bd4b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -109,10 +109,12 @@ static const char * scsi_debug_version_date = "20100324";
109#define DEF_PHYSBLK_EXP 0 109#define DEF_PHYSBLK_EXP 0
110#define DEF_LOWEST_ALIGNED 0 110#define DEF_LOWEST_ALIGNED 0
111#define DEF_OPT_BLKS 64 111#define DEF_OPT_BLKS 64
112#define DEF_UNMAP_MAX_BLOCKS 0 112#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
113#define DEF_UNMAP_MAX_DESC 0 113#define DEF_UNMAP_MAX_DESC 256
114#define DEF_UNMAP_GRANULARITY 0 114#define DEF_UNMAP_GRANULARITY 1
115#define DEF_UNMAP_ALIGNMENT 0 115#define DEF_UNMAP_ALIGNMENT 0
116#define DEF_TPWS 0
117#define DEF_TPU 0
116 118
117/* bit mask values for scsi_debug_opts */ 119/* bit mask values for scsi_debug_opts */
118#define SCSI_DEBUG_OPT_NOISE 1 120#define SCSI_DEBUG_OPT_NOISE 1
@@ -177,10 +179,12 @@ static int scsi_debug_ato = DEF_ATO;
177static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 179static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
178static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 180static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
179static int scsi_debug_opt_blks = DEF_OPT_BLKS; 181static int scsi_debug_opt_blks = DEF_OPT_BLKS;
180static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 182static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
181static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 183static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
182static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; 184static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
183static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; 185static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
186static unsigned int scsi_debug_tpws = DEF_TPWS;
187static unsigned int scsi_debug_tpu = DEF_TPU;
184 188
185static int scsi_debug_cmnd_count = 0; 189static int scsi_debug_cmnd_count = 0;
186 190
@@ -723,16 +727,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
723 /* Optimal Transfer Length */ 727 /* Optimal Transfer Length */
724 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); 728 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
725 729
726 if (scsi_debug_unmap_max_desc) { 730 if (scsi_debug_tpu) {
727 unsigned int blocks;
728
729 if (scsi_debug_unmap_max_blocks)
730 blocks = scsi_debug_unmap_max_blocks;
731 else
732 blocks = 0xffffffff;
733
734 /* Maximum Unmap LBA Count */ 731 /* Maximum Unmap LBA Count */
735 put_unaligned_be32(blocks, &arr[16]); 732 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
736 733
737 /* Maximum Unmap Block Descriptor Count */ 734 /* Maximum Unmap Block Descriptor Count */
738 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); 735 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
@@ -745,10 +742,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
745 } 742 }
746 743
747 /* Optimal Unmap Granularity */ 744 /* Optimal Unmap Granularity */
748 if (scsi_debug_unmap_granularity) { 745 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
749 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); 746
750 return 0x3c; /* Mandatory page length for thin provisioning */ 747 return 0x3c; /* Mandatory page length for thin provisioning */
751 }
752 748
753 return sizeof(vpdb0_data); 749 return sizeof(vpdb0_data);
754} 750}
@@ -765,6 +761,21 @@ static int inquiry_evpd_b1(unsigned char *arr)
765 return 0x3c; 761 return 0x3c;
766} 762}
767 763
764/* Thin provisioning VPD page (SBC-3) */
765static int inquiry_evpd_b2(unsigned char *arr)
766{
767 memset(arr, 0, 0x8);
768 arr[0] = 0; /* threshold exponent */
769
770 if (scsi_debug_tpu)
771 arr[1] = 1 << 7;
772
773 if (scsi_debug_tpws)
774 arr[1] |= 1 << 6;
775
776 return 0x8;
777}
778
768#define SDEBUG_LONG_INQ_SZ 96 779#define SDEBUG_LONG_INQ_SZ 96
769#define SDEBUG_MAX_INQ_ARR_SZ 584 780#define SDEBUG_MAX_INQ_ARR_SZ 584
770 781
@@ -820,6 +831,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
820 arr[n++] = 0x89; /* ATA information */ 831 arr[n++] = 0x89; /* ATA information */
821 arr[n++] = 0xb0; /* Block limits (SBC) */ 832 arr[n++] = 0xb0; /* Block limits (SBC) */
822 arr[n++] = 0xb1; /* Block characteristics (SBC) */ 833 arr[n++] = 0xb1; /* Block characteristics (SBC) */
834 arr[n++] = 0xb2; /* Thin provisioning (SBC) */
823 arr[3] = n - 4; /* number of supported VPD pages */ 835 arr[3] = n - 4; /* number of supported VPD pages */
824 } else if (0x80 == cmd[2]) { /* unit serial number */ 836 } else if (0x80 == cmd[2]) { /* unit serial number */
825 arr[1] = cmd[2]; /*sanity */ 837 arr[1] = cmd[2]; /*sanity */
@@ -867,6 +879,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
867 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ 879 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
868 arr[1] = cmd[2]; /*sanity */ 880 arr[1] = cmd[2]; /*sanity */
869 arr[3] = inquiry_evpd_b1(&arr[4]); 881 arr[3] = inquiry_evpd_b1(&arr[4]);
882 } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
883 arr[1] = cmd[2]; /*sanity */
884 arr[3] = inquiry_evpd_b2(&arr[4]);
870 } else { 885 } else {
871 /* Illegal request, invalid field in cdb */ 886 /* Illegal request, invalid field in cdb */
872 mk_sense_buffer(devip, ILLEGAL_REQUEST, 887 mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -1038,7 +1053,7 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1038 arr[13] = scsi_debug_physblk_exp & 0xf; 1053 arr[13] = scsi_debug_physblk_exp & 0xf;
1039 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 1054 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1040 1055
1041 if (scsi_debug_unmap_granularity) 1056 if (scsi_debug_tpu || scsi_debug_tpws)
1042 arr[14] |= 0x80; /* TPE */ 1057 arr[14] |= 0x80; /* TPE */
1043 1058
1044 arr[15] = scsi_debug_lowest_aligned & 0xff; 1059 arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -2708,6 +2723,8 @@ module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2708module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); 2723module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2709module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); 2724module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2710module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); 2725module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2726module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
2727module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
2711 2728
2712MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2729MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2713MODULE_DESCRIPTION("SCSI debug adapter driver"); 2730MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2739,10 +2756,12 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2739MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2756MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2740MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 2757MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2741MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 2758MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2742MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)"); 2759MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2743MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)"); 2760MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2744MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)"); 2761MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2745MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 2762MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2763MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
2764MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
2746 2765
2747static char sdebug_info[256]; 2766static char sdebug_info[256];
2748 2767
@@ -3130,7 +3149,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3130{ 3149{
3131 ssize_t count; 3150 ssize_t count;
3132 3151
3133 if (scsi_debug_unmap_granularity == 0) 3152 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
3134 return scnprintf(buf, PAGE_SIZE, "0-%u\n", 3153 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3135 sdebug_store_sectors); 3154 sdebug_store_sectors);
3136 3155
@@ -3207,16 +3226,7 @@ static void do_remove_driverfs_files(void)
3207 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); 3226 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3208} 3227}
3209 3228
3210static void pseudo_0_release(struct device *dev) 3229struct device *pseudo_primary;
3211{
3212 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3213 printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
3214}
3215
3216static struct device pseudo_primary = {
3217 .init_name = "pseudo_0",
3218 .release = pseudo_0_release,
3219};
3220 3230
3221static int __init scsi_debug_init(void) 3231static int __init scsi_debug_init(void)
3222{ 3232{
@@ -3322,10 +3332,21 @@ static int __init scsi_debug_init(void)
3322 memset(dif_storep, 0xff, dif_size); 3332 memset(dif_storep, 0xff, dif_size);
3323 } 3333 }
3324 3334
3325 if (scsi_debug_unmap_granularity) { 3335 /* Thin Provisioning */
3336 if (scsi_debug_tpu || scsi_debug_tpws) {
3326 unsigned int map_bytes; 3337 unsigned int map_bytes;
3327 3338
3328 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { 3339 scsi_debug_unmap_max_blocks =
3340 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3341
3342 scsi_debug_unmap_max_desc =
3343 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3344
3345 scsi_debug_unmap_granularity =
3346 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3347
3348 if (scsi_debug_unmap_alignment &&
3349 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3329 printk(KERN_ERR 3350 printk(KERN_ERR
3330 "%s: ERR: unmap_granularity < unmap_alignment\n", 3351 "%s: ERR: unmap_granularity < unmap_alignment\n",
3331 __func__); 3352 __func__);
@@ -3352,10 +3373,10 @@ static int __init scsi_debug_init(void)
3352 map_region(0, 2); 3373 map_region(0, 2);
3353 } 3374 }
3354 3375
3355 ret = device_register(&pseudo_primary); 3376 pseudo_primary = root_device_register("pseudo_0");
3356 if (ret < 0) { 3377 if (IS_ERR(pseudo_primary)) {
3357 printk(KERN_WARNING "scsi_debug: device_register error: %d\n", 3378 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3358 ret); 3379 ret = PTR_ERR(pseudo_primary);
3359 goto free_vm; 3380 goto free_vm;
3360 } 3381 }
3361 ret = bus_register(&pseudo_lld_bus); 3382 ret = bus_register(&pseudo_lld_bus);
@@ -3402,7 +3423,7 @@ del_files:
3402bus_unreg: 3423bus_unreg:
3403 bus_unregister(&pseudo_lld_bus); 3424 bus_unregister(&pseudo_lld_bus);
3404dev_unreg: 3425dev_unreg:
3405 device_unregister(&pseudo_primary); 3426 root_device_unregister(pseudo_primary);
3406free_vm: 3427free_vm:
3407 if (map_storep) 3428 if (map_storep)
3408 vfree(map_storep); 3429 vfree(map_storep);
@@ -3423,7 +3444,7 @@ static void __exit scsi_debug_exit(void)
3423 do_remove_driverfs_files(); 3444 do_remove_driverfs_files();
3424 driver_unregister(&sdebug_driverfs_driver); 3445 driver_unregister(&sdebug_driverfs_driver);
3425 bus_unregister(&pseudo_lld_bus); 3446 bus_unregister(&pseudo_lld_bus);
3426 device_unregister(&pseudo_primary); 3447 root_device_unregister(pseudo_primary);
3427 3448
3428 if (dif_storep) 3449 if (dif_storep)
3429 vfree(dif_storep); 3450 vfree(dif_storep);
@@ -3474,7 +3495,7 @@ static int sdebug_add_adapter(void)
3474 spin_unlock(&sdebug_host_list_lock); 3495 spin_unlock(&sdebug_host_list_lock);
3475 3496
3476 sdbg_host->dev.bus = &pseudo_lld_bus; 3497 sdbg_host->dev.bus = &pseudo_lld_bus;
3477 sdbg_host->dev.parent = &pseudo_primary; 3498 sdbg_host->dev.parent = pseudo_primary;
3478 sdbg_host->dev.release = &sdebug_release_adapter; 3499 sdbg_host->dev.release = &sdebug_release_adapter;
3479 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); 3500 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3480 3501
@@ -3642,7 +3663,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3642 errsts = resp_readcap16(SCpnt, devip); 3663 errsts = resp_readcap16(SCpnt, devip);
3643 else if (cmd[1] == SAI_GET_LBA_STATUS) { 3664 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3644 3665
3645 if (scsi_debug_unmap_max_desc == 0) { 3666 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
3646 mk_sense_buffer(devip, ILLEGAL_REQUEST, 3667 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3647 INVALID_COMMAND_OPCODE, 0); 3668 INVALID_COMMAND_OPCODE, 0);
3648 errsts = check_condition_result; 3669 errsts = check_condition_result;
@@ -3753,8 +3774,16 @@ write:
3753 } 3774 }
3754 break; 3775 break;
3755 case WRITE_SAME_16: 3776 case WRITE_SAME_16:
3756 if (cmd[1] & 0x8) 3777 if (cmd[1] & 0x8) {
3757 unmap = 1; 3778 if (scsi_debug_tpws == 0) {
3779 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3780 INVALID_FIELD_IN_CDB, 0);
3781 errsts = check_condition_result;
3782 } else
3783 unmap = 1;
3784 }
3785 if (errsts)
3786 break;
3758 /* fall through */ 3787 /* fall through */
3759 case WRITE_SAME: 3788 case WRITE_SAME:
3760 errsts = check_readiness(SCpnt, 0, devip); 3789 errsts = check_readiness(SCpnt, 0, devip);
@@ -3768,7 +3797,7 @@ write:
3768 if (errsts) 3797 if (errsts)
3769 break; 3798 break;
3770 3799
3771 if (scsi_debug_unmap_max_desc == 0) { 3800 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
3772 mk_sense_buffer(devip, ILLEGAL_REQUEST, 3801 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3773 INVALID_COMMAND_OPCODE, 0); 3802 INVALID_COMMAND_OPCODE, 0);
3774 errsts = check_condition_result; 3803 errsts = check_condition_result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ee02d3838a0a..8041fe1ab179 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -968,11 +968,13 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
968 */ 968 */
969int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 969int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
970{ 970{
971 int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask); 971 struct request *rq = cmd->request;
972
973 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
972 if (error) 974 if (error)
973 goto err_exit; 975 goto err_exit;
974 976
975 if (blk_bidi_rq(cmd->request)) { 977 if (blk_bidi_rq(rq)) {
976 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 978 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
977 scsi_sdb_cache, GFP_ATOMIC); 979 scsi_sdb_cache, GFP_ATOMIC);
978 if (!bidi_sdb) { 980 if (!bidi_sdb) {
@@ -980,28 +982,28 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
980 goto err_exit; 982 goto err_exit;
981 } 983 }
982 984
983 cmd->request->next_rq->special = bidi_sdb; 985 rq->next_rq->special = bidi_sdb;
984 error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb, 986 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
985 GFP_ATOMIC);
986 if (error) 987 if (error)
987 goto err_exit; 988 goto err_exit;
988 } 989 }
989 990
990 if (blk_integrity_rq(cmd->request)) { 991 if (blk_integrity_rq(rq)) {
991 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 992 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
992 int ivecs, count; 993 int ivecs, count;
993 994
994 BUG_ON(prot_sdb == NULL); 995 BUG_ON(prot_sdb == NULL);
995 ivecs = blk_rq_count_integrity_sg(cmd->request); 996 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
996 997
997 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { 998 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
998 error = BLKPREP_DEFER; 999 error = BLKPREP_DEFER;
999 goto err_exit; 1000 goto err_exit;
1000 } 1001 }
1001 1002
1002 count = blk_rq_map_integrity_sg(cmd->request, 1003 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1003 prot_sdb->table.sgl); 1004 prot_sdb->table.sgl);
1004 BUG_ON(unlikely(count > ivecs)); 1005 BUG_ON(unlikely(count > ivecs));
1006 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1005 1007
1006 cmd->prot_sdb = prot_sdb; 1008 cmd->prot_sdb = prot_sdb;
1007 cmd->prot_sdb->table.nents = count; 1009 cmd->prot_sdb->table.nents = count;
@@ -1625,6 +1627,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1625 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 1627 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1626 SCSI_MAX_SG_CHAIN_SEGMENTS)); 1628 SCSI_MAX_SG_CHAIN_SEGMENTS));
1627 1629
1630 if (scsi_host_prot_dma(shost)) {
1631 shost->sg_prot_tablesize =
1632 min_not_zero(shost->sg_prot_tablesize,
1633 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1634 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1635 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1636 }
1637
1628 blk_queue_max_hw_sectors(q, shost->max_sectors); 1638 blk_queue_max_hw_sectors(q, shost->max_sectors);
1629 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1639 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1630 blk_queue_segment_boundary(q, shost->dma_boundary); 1640 blk_queue_segment_boundary(q, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3d0a1e6e9c48..087821fac8fe 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -417,9 +417,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
417 starget->reap_ref = 1; 417 starget->reap_ref = 1;
418 dev->parent = get_device(parent); 418 dev->parent = get_device(parent);
419 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); 419 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
420#ifndef CONFIG_SYSFS_DEPRECATED
421 dev->bus = &scsi_bus_type; 420 dev->bus = &scsi_bus_type;
422#endif
423 dev->type = &scsi_target_type; 421 dev->type = &scsi_target_type;
424 starget->id = id; 422 starget->id = id;
425 starget->channel = channel; 423 starget->channel = channel;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index c3f67373a4f8..20ad59dff730 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -251,6 +251,7 @@ shost_rd_attr(host_busy, "%hu\n");
251shost_rd_attr(cmd_per_lun, "%hd\n"); 251shost_rd_attr(cmd_per_lun, "%hd\n");
252shost_rd_attr(can_queue, "%hd\n"); 252shost_rd_attr(can_queue, "%hd\n");
253shost_rd_attr(sg_tablesize, "%hu\n"); 253shost_rd_attr(sg_tablesize, "%hu\n");
254shost_rd_attr(sg_prot_tablesize, "%hu\n");
254shost_rd_attr(unchecked_isa_dma, "%d\n"); 255shost_rd_attr(unchecked_isa_dma, "%d\n");
255shost_rd_attr(prot_capabilities, "%u\n"); 256shost_rd_attr(prot_capabilities, "%u\n");
256shost_rd_attr(prot_guard_type, "%hd\n"); 257shost_rd_attr(prot_guard_type, "%hd\n");
@@ -262,6 +263,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
262 &dev_attr_cmd_per_lun.attr, 263 &dev_attr_cmd_per_lun.attr,
263 &dev_attr_can_queue.attr, 264 &dev_attr_can_queue.attr,
264 &dev_attr_sg_tablesize.attr, 265 &dev_attr_sg_tablesize.attr,
266 &dev_attr_sg_prot_tablesize.attr,
265 &dev_attr_unchecked_isa_dma.attr, 267 &dev_attr_unchecked_isa_dma.attr,
266 &dev_attr_proc_name.attr, 268 &dev_attr_proc_name.attr,
267 &dev_attr_scan.attr, 269 &dev_attr_scan.attr,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d7e470a06180..998c01be3234 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -53,6 +53,25 @@ static void fc_bsg_remove(struct request_queue *);
53static void fc_bsg_goose_queue(struct fc_rport *); 53static void fc_bsg_goose_queue(struct fc_rport *);
54 54
55/* 55/*
56 * Module Parameters
57 */
58
59/*
60 * dev_loss_tmo: the default number of seconds that the FC transport
61 * should insulate the loss of a remote port.
62 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
63 */
64static unsigned int fc_dev_loss_tmo = 60; /* seconds */
65
66module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
67MODULE_PARM_DESC(dev_loss_tmo,
68 "Maximum number of seconds that the FC transport should"
69 " insulate the loss of a remote port. Once this value is"
70 " exceeded, the scsi target is removed. Value should be"
71 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
72 " fast_io_fail_tmo is not set.");
73
74/*
56 * Redefine so that we can have same named attributes in the 75 * Redefine so that we can have same named attributes in the
57 * sdev/starget/host objects. 76 * sdev/starget/host objects.
58 */ 77 */
@@ -408,6 +427,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
408 if (!fc_host->work_q) 427 if (!fc_host->work_q)
409 return -ENOMEM; 428 return -ENOMEM;
410 429
430 fc_host->dev_loss_tmo = fc_dev_loss_tmo;
411 snprintf(fc_host->devloss_work_q_name, 431 snprintf(fc_host->devloss_work_q_name,
412 sizeof(fc_host->devloss_work_q_name), 432 sizeof(fc_host->devloss_work_q_name),
413 "fc_dl_%d", shost->host_no); 433 "fc_dl_%d", shost->host_no);
@@ -462,25 +482,6 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
462 NULL); 482 NULL);
463 483
464/* 484/*
465 * Module Parameters
466 */
467
468/*
469 * dev_loss_tmo: the default number of seconds that the FC transport
470 * should insulate the loss of a remote port.
471 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
472 */
473static unsigned int fc_dev_loss_tmo = 60; /* seconds */
474
475module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
476MODULE_PARM_DESC(dev_loss_tmo,
477 "Maximum number of seconds that the FC transport should"
478 " insulate the loss of a remote port. Once this value is"
479 " exceeded, the scsi target is removed. Value should be"
480 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
481 " fast_io_fail_tmo is not set.");
482
483/*
484 * Netlink Infrastructure 485 * Netlink Infrastructure
485 */ 486 */
486 487
@@ -830,24 +831,32 @@ static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
830/* 831/*
831 * dev_loss_tmo attribute 832 * dev_loss_tmo attribute
832 */ 833 */
833fc_rport_show_function(dev_loss_tmo, "%d\n", 20, ) 834static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
834static ssize_t 835{
835store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, 836 char *cp;
836 const char *buf, size_t count) 837
838 *val = simple_strtoul(buf, &cp, 0);
839 if ((*cp && (*cp != '\n')) || (*val < 0))
840 return -EINVAL;
841 /*
842 * Check for overflow; dev_loss_tmo is u32
843 */
844 if (*val > UINT_MAX)
845 return -EINVAL;
846
847 return 0;
848}
849
850static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
851 unsigned long val)
837{ 852{
838 unsigned long val;
839 struct fc_rport *rport = transport_class_to_rport(dev);
840 struct Scsi_Host *shost = rport_to_shost(rport); 853 struct Scsi_Host *shost = rport_to_shost(rport);
841 struct fc_internal *i = to_fc_internal(shost->transportt); 854 struct fc_internal *i = to_fc_internal(shost->transportt);
842 char *cp; 855
843 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || 856 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
844 (rport->port_state == FC_PORTSTATE_DELETED) || 857 (rport->port_state == FC_PORTSTATE_DELETED) ||
845 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 858 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
846 return -EBUSY; 859 return -EBUSY;
847 val = simple_strtoul(buf, &cp, 0);
848 if ((*cp && (*cp != '\n')) || (val < 0))
849 return -EINVAL;
850
851 /* 860 /*
852 * Check for overflow; dev_loss_tmo is u32 861 * Check for overflow; dev_loss_tmo is u32
853 */ 862 */
@@ -863,6 +872,25 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
863 return -EINVAL; 872 return -EINVAL;
864 873
865 i->f->set_rport_dev_loss_tmo(rport, val); 874 i->f->set_rport_dev_loss_tmo(rport, val);
875 return 0;
876}
877
878fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
879static ssize_t
880store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
881 const char *buf, size_t count)
882{
883 struct fc_rport *rport = transport_class_to_rport(dev);
884 unsigned long val;
885 int rc;
886
887 rc = fc_str_to_dev_loss(buf, &val);
888 if (rc)
889 return rc;
890
891 rc = fc_rport_set_dev_loss_tmo(rport, val);
892 if (rc)
893 return rc;
866 return count; 894 return count;
867} 895}
868static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR, 896static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
@@ -1608,8 +1636,35 @@ store_fc_private_host_issue_lip(struct device *dev,
1608static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, 1636static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1609 store_fc_private_host_issue_lip); 1637 store_fc_private_host_issue_lip);
1610 1638
1611fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); 1639static ssize_t
1640store_fc_private_host_dev_loss_tmo(struct device *dev,
1641 struct device_attribute *attr,
1642 const char *buf, size_t count)
1643{
1644 struct Scsi_Host *shost = transport_class_to_shost(dev);
1645 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1646 struct fc_rport *rport;
1647 unsigned long val, flags;
1648 int rc;
1612 1649
1650 rc = fc_str_to_dev_loss(buf, &val);
1651 if (rc)
1652 return rc;
1653
1654 fc_host_dev_loss_tmo(shost) = val;
1655 spin_lock_irqsave(shost->host_lock, flags);
1656 list_for_each_entry(rport, &fc_host->rports, peers)
1657 fc_rport_set_dev_loss_tmo(rport, val);
1658 spin_unlock_irqrestore(shost->host_lock, flags);
1659 return count;
1660}
1661
1662fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
1663static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
1664 show_fc_host_dev_loss_tmo,
1665 store_fc_private_host_dev_loss_tmo);
1666
1667fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1613 1668
1614/* 1669/*
1615 * Host Statistics Management 1670 * Host Statistics Management
@@ -2165,6 +2220,7 @@ fc_attach_transport(struct fc_function_template *ft)
2165 SETUP_HOST_ATTRIBUTE_RW(system_hostname); 2220 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2166 2221
2167 /* Transport-managed attributes */ 2222 /* Transport-managed attributes */
2223 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2168 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 2224 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2169 if (ft->issue_fc_host_lip) 2225 if (ft->issue_fc_host_lip)
2170 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); 2226 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
@@ -2525,7 +2581,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2525 2581
2526 rport->maxframe_size = -1; 2582 rport->maxframe_size = -1;
2527 rport->supported_classes = FC_COS_UNSPECIFIED; 2583 rport->supported_classes = FC_COS_UNSPECIFIED;
2528 rport->dev_loss_tmo = fc_dev_loss_tmo; 2584 rport->dev_loss_tmo = fc_host->dev_loss_tmo;
2529 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); 2585 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2530 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); 2586 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2531 rport->port_id = ids->port_id; 2587 rport->port_id = ids->port_id;
@@ -4044,11 +4100,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4044/** 4100/**
4045 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports 4101 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4046 * @q: the request_queue that is to be torn down. 4102 * @q: the request_queue that is to be torn down.
4103 *
4104 * Notes:
4105 * Before unregistering the queue empty any requests that are blocked
4106 *
4107 *
4047 */ 4108 */
4048static void 4109static void
4049fc_bsg_remove(struct request_queue *q) 4110fc_bsg_remove(struct request_queue *q)
4050{ 4111{
4112 struct request *req; /* block request */
4113 int counts; /* totals for request_list count and starved */
4114
4051 if (q) { 4115 if (q) {
4116 /* Stop taking in new requests */
4117 spin_lock_irq(q->queue_lock);
4118 blk_stop_queue(q);
4119
4120 /* drain all requests in the queue */
4121 while (1) {
4122 /* need the lock to fetch a request
4123 * this may fetch the same reqeust as the previous pass
4124 */
4125 req = blk_fetch_request(q);
4126 /* save requests in use and starved */
4127 counts = q->rq.count[0] + q->rq.count[1] +
4128 q->rq.starved[0] + q->rq.starved[1];
4129 spin_unlock_irq(q->queue_lock);
4130 /* any requests still outstanding? */
4131 if (counts == 0)
4132 break;
4133
4134 /* This may be the same req as the previous iteration,
4135 * always send the blk_end_request_all after a prefetch.
4136 * It is not okay to not end the request because the
4137 * prefetch started the request.
4138 */
4139 if (req) {
4140 /* return -ENXIO to indicate that this queue is
4141 * going away
4142 */
4143 req->errors = -ENXIO;
4144 blk_end_request_all(req, -ENXIO);
4145 }
4146
4147 msleep(200); /* allow bsg to possibly finish */
4148 spin_lock_irq(q->queue_lock);
4149 }
4150
4052 bsg_unregister_queue(q); 4151 bsg_unregister_queue(q);
4053 blk_cleanup_queue(q); 4152 blk_cleanup_queue(q);
4054 } 4153 }
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e84026def1f4..332387a6bc25 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -537,7 +537,7 @@ static void iscsi_scan_session(struct work_struct *work)
537 537
538/** 538/**
539 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned 539 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned
540 * cmd: scsi cmd passed to scsi eh handler 540 * @cmd: scsi cmd passed to scsi eh handler
541 * 541 *
542 * If the session is down this function will wait for the recovery 542 * If the session is down this function will wait for the recovery
543 * timer to fire or for the session to be logged back in. If the 543 * timer to fire or for the session to be logged back in. If the
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ffa0689ee840..57d1e3e1bd44 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -477,7 +477,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
477 477
478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) 478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
479{ 479{
480 rq->timeout = SD_TIMEOUT; 480 rq->timeout = SD_FLUSH_TIMEOUT;
481 rq->retries = SD_MAX_RETRIES; 481 rq->retries = SD_MAX_RETRIES;
482 rq->cmd[0] = SYNCHRONIZE_CACHE; 482 rq->cmd[0] = SYNCHRONIZE_CACHE;
483 rq->cmd_len = 10; 483 rq->cmd_len = 10;
@@ -1072,7 +1072,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1072 * flush everything. 1072 * flush everything.
1073 */ 1073 */
1074 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 1074 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1075 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 1075 SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
1076 if (res == 0) 1076 if (res == 0)
1077 break; 1077 break;
1078 } 1078 }
@@ -1498,6 +1498,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1498 unsigned long long lba; 1498 unsigned long long lba;
1499 unsigned sector_size; 1499 unsigned sector_size;
1500 1500
1501 if (sdp->no_read_capacity_16)
1502 return -EINVAL;
1503
1501 do { 1504 do {
1502 memset(cmd, 0, 16); 1505 memset(cmd, 0, 16);
1503 cmd[0] = SERVICE_ACTION_IN; 1506 cmd[0] = SERVICE_ACTION_IN;
@@ -1554,7 +1557,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1554 } 1557 }
1555 1558
1556 /* Logical blocks per physical block exponent */ 1559 /* Logical blocks per physical block exponent */
1557 sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; 1560 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
1558 1561
1559 /* Lowest aligned logical block */ 1562 /* Lowest aligned logical block */
1560 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 1563 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
@@ -1567,7 +1570,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1567 struct request_queue *q = sdp->request_queue; 1570 struct request_queue *q = sdp->request_queue;
1568 1571
1569 sdkp->thin_provisioning = 1; 1572 sdkp->thin_provisioning = 1;
1570 q->limits.discard_granularity = sdkp->hw_sector_size; 1573 q->limits.discard_granularity = sdkp->physical_block_size;
1571 q->limits.max_discard_sectors = 0xffffffff; 1574 q->limits.max_discard_sectors = 0xffffffff;
1572 1575
1573 if (buffer[14] & 0x40) /* TPRZ */ 1576 if (buffer[14] & 0x40) /* TPRZ */
@@ -1626,6 +1629,15 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1626 sector_size = get_unaligned_be32(&buffer[4]); 1629 sector_size = get_unaligned_be32(&buffer[4]);
1627 lba = get_unaligned_be32(&buffer[0]); 1630 lba = get_unaligned_be32(&buffer[0]);
1628 1631
1632 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
1633 /* Some buggy (usb cardreader) devices return an lba of
1634 0xffffffff when the want to report a size of 0 (with
1635 which they really mean no media is present) */
1636 sdkp->capacity = 0;
1637 sdkp->physical_block_size = sector_size;
1638 return sector_size;
1639 }
1640
1629 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { 1641 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
1630 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 1642 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1631 "kernel compiled with support for large block " 1643 "kernel compiled with support for large block "
@@ -1635,7 +1647,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1635 } 1647 }
1636 1648
1637 sdkp->capacity = lba + 1; 1649 sdkp->capacity = lba + 1;
1638 sdkp->hw_sector_size = sector_size; 1650 sdkp->physical_block_size = sector_size;
1639 return sector_size; 1651 return sector_size;
1640} 1652}
1641 1653
@@ -1756,10 +1768,10 @@ got_data:
1756 (unsigned long long)sdkp->capacity, 1768 (unsigned long long)sdkp->capacity,
1757 sector_size, cap_str_10, cap_str_2); 1769 sector_size, cap_str_10, cap_str_2);
1758 1770
1759 if (sdkp->hw_sector_size != sector_size) 1771 if (sdkp->physical_block_size != sector_size)
1760 sd_printk(KERN_NOTICE, sdkp, 1772 sd_printk(KERN_NOTICE, sdkp,
1761 "%u-byte physical blocks\n", 1773 "%u-byte physical blocks\n",
1762 sdkp->hw_sector_size); 1774 sdkp->physical_block_size);
1763 } 1775 }
1764 } 1776 }
1765 1777
@@ -1773,7 +1785,8 @@ got_data:
1773 else if (sector_size == 256) 1785 else if (sector_size == 256)
1774 sdkp->capacity >>= 1; 1786 sdkp->capacity >>= 1;
1775 1787
1776 blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); 1788 blk_queue_physical_block_size(sdp->request_queue,
1789 sdkp->physical_block_size);
1777 sdkp->device->sector_size = sector_size; 1790 sdkp->device->sector_size = sector_size;
1778} 1791}
1779 1792
@@ -2039,14 +2052,24 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2039 lba_count = get_unaligned_be32(&buffer[20]); 2052 lba_count = get_unaligned_be32(&buffer[20]);
2040 desc_count = get_unaligned_be32(&buffer[24]); 2053 desc_count = get_unaligned_be32(&buffer[24]);
2041 2054
2042 if (lba_count) { 2055 if (lba_count && desc_count) {
2043 q->limits.max_discard_sectors = 2056 if (sdkp->tpvpd && !sdkp->tpu)
2044 lba_count * sector_sz >> 9; 2057 sdkp->unmap = 0;
2045 2058 else
2046 if (desc_count)
2047 sdkp->unmap = 1; 2059 sdkp->unmap = 1;
2048 } 2060 }
2049 2061
2062 if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) {
2063 sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \
2064 "enabled but neither TPU, nor TPWS are " \
2065 "set. Disabling discard!\n");
2066 goto out;
2067 }
2068
2069 if (lba_count)
2070 q->limits.max_discard_sectors =
2071 lba_count * sector_sz >> 9;
2072
2050 granularity = get_unaligned_be32(&buffer[28]); 2073 granularity = get_unaligned_be32(&buffer[28]);
2051 2074
2052 if (granularity) 2075 if (granularity)
@@ -2087,6 +2110,31 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2087 kfree(buffer); 2110 kfree(buffer);
2088} 2111}
2089 2112
2113/**
2114 * sd_read_thin_provisioning - Query thin provisioning VPD page
2115 * @disk: disk to query
2116 */
2117static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
2118{
2119 unsigned char *buffer;
2120 const int vpd_len = 8;
2121
2122 if (sdkp->thin_provisioning == 0)
2123 return;
2124
2125 buffer = kmalloc(vpd_len, GFP_KERNEL);
2126
2127 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
2128 goto out;
2129
2130 sdkp->tpvpd = 1;
2131 sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */
2132 sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
2133
2134 out:
2135 kfree(buffer);
2136}
2137
2090static int sd_try_extended_inquiry(struct scsi_device *sdp) 2138static int sd_try_extended_inquiry(struct scsi_device *sdp)
2091{ 2139{
2092 /* 2140 /*
@@ -2109,7 +2157,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2109 struct scsi_disk *sdkp = scsi_disk(disk); 2157 struct scsi_disk *sdkp = scsi_disk(disk);
2110 struct scsi_device *sdp = sdkp->device; 2158 struct scsi_device *sdp = sdkp->device;
2111 unsigned char *buffer; 2159 unsigned char *buffer;
2112 unsigned ordered; 2160 unsigned flush = 0;
2113 2161
2114 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 2162 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2115 "sd_revalidate_disk\n")); 2163 "sd_revalidate_disk\n"));
@@ -2138,6 +2186,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2138 sd_read_capacity(sdkp, buffer); 2186 sd_read_capacity(sdkp, buffer);
2139 2187
2140 if (sd_try_extended_inquiry(sdp)) { 2188 if (sd_try_extended_inquiry(sdp)) {
2189 sd_read_thin_provisioning(sdkp);
2141 sd_read_block_limits(sdkp); 2190 sd_read_block_limits(sdkp);
2142 sd_read_block_characteristics(sdkp); 2191 sd_read_block_characteristics(sdkp);
2143 } 2192 }
@@ -2151,17 +2200,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
2151 2200
2152 /* 2201 /*
2153 * We now have all cache related info, determine how we deal 2202 * We now have all cache related info, determine how we deal
2154 * with ordered requests. Note that as the current SCSI 2203 * with flush requests.
2155 * dispatch function can alter request order, we cannot use
2156 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
2157 */ 2204 */
2158 if (sdkp->WCE) 2205 if (sdkp->WCE) {
2159 ordered = sdkp->DPOFUA 2206 flush |= REQ_FLUSH;
2160 ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH; 2207 if (sdkp->DPOFUA)
2161 else 2208 flush |= REQ_FUA;
2162 ordered = QUEUE_ORDERED_DRAIN; 2209 }
2163 2210
2164 blk_queue_ordered(sdkp->disk->queue, ordered); 2211 blk_queue_flush(sdkp->disk->queue, flush);
2165 2212
2166 set_capacity(disk, sdkp->capacity); 2213 set_capacity(disk, sdkp->capacity);
2167 kfree(buffer); 2214 kfree(buffer);
@@ -2252,11 +2299,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2252 index = sdkp->index; 2299 index = sdkp->index;
2253 dev = &sdp->sdev_gendev; 2300 dev = &sdp->sdev_gendev;
2254 2301
2255 if (index < SD_MAX_DISKS) { 2302 gd->major = sd_major((index & 0xf0) >> 4);
2256 gd->major = sd_major((index & 0xf0) >> 4); 2303 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
2257 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 2304 gd->minors = SD_MINORS;
2258 gd->minors = SD_MINORS; 2305
2259 }
2260 gd->fops = &sd_fops; 2306 gd->fops = &sd_fops;
2261 gd->private_data = &sdkp->driver; 2307 gd->private_data = &sdkp->driver;
2262 gd->queue = sdkp->device->request_queue; 2308 gd->queue = sdkp->device->request_queue;
@@ -2346,6 +2392,12 @@ static int sd_probe(struct device *dev)
2346 if (error) 2392 if (error)
2347 goto out_put; 2393 goto out_put;
2348 2394
2395 if (index >= SD_MAX_DISKS) {
2396 error = -ENODEV;
2397 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
2398 goto out_free_index;
2399 }
2400
2349 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 2401 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
2350 if (error) 2402 if (error)
2351 goto out_free_index; 2403 goto out_free_index;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index f81a9309e6de..55488faf0815 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -19,6 +19,7 @@
19 */ 19 */
20#define SD_TIMEOUT (30 * HZ) 20#define SD_TIMEOUT (30 * HZ)
21#define SD_MOD_TIMEOUT (75 * HZ) 21#define SD_MOD_TIMEOUT (75 * HZ)
22#define SD_FLUSH_TIMEOUT (60 * HZ)
22 23
23/* 24/*
24 * Number of allowed retries 25 * Number of allowed retries
@@ -50,7 +51,7 @@ struct scsi_disk {
50 atomic_t openers; 51 atomic_t openers;
51 sector_t capacity; /* size in 512-byte sectors */ 52 sector_t capacity; /* size in 512-byte sectors */
52 u32 index; 53 u32 index;
53 unsigned short hw_sector_size; 54 unsigned int physical_block_size;
54 u8 media_present; 55 u8 media_present;
55 u8 write_prot; 56 u8 write_prot;
56 u8 protection_type;/* Data Integrity Field */ 57 u8 protection_type;/* Data Integrity Field */
@@ -62,6 +63,9 @@ struct scsi_disk {
62 unsigned first_scan : 1; 63 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1; 64 unsigned thin_provisioning : 1;
64 unsigned unmap : 1; 65 unsigned unmap : 1;
66 unsigned tpws : 1;
67 unsigned tpu : 1;
68 unsigned tpvpd : 1;
65}; 69};
66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 70#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
67 71
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 84be62149c6c..0cb39ff21171 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -375,21 +375,20 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
375 unsigned int i, j; 375 unsigned int i, j;
376 u32 phys, virt; 376 u32 phys, virt;
377 377
378 /* Already remapped? */
379 if (rq->cmd_flags & REQ_INTEGRITY)
380 return 0;
381
382 sdkp = rq->bio->bi_bdev->bd_disk->private_data; 378 sdkp = rq->bio->bi_bdev->bd_disk->private_data;
383 379
384 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION) 380 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
385 return 0; 381 return 0;
386 382
387 rq->cmd_flags |= REQ_INTEGRITY;
388 phys = hw_sector & 0xffffffff; 383 phys = hw_sector & 0xffffffff;
389 384
390 __rq_for_each_bio(bio, rq) { 385 __rq_for_each_bio(bio, rq) {
391 struct bio_vec *iv; 386 struct bio_vec *iv;
392 387
388 /* Already remapped? */
389 if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
390 break;
391
393 virt = bio->bi_integrity->bip_sector & 0xffffffff; 392 virt = bio->bi_integrity->bip_sector & 0xffffffff;
394 393
395 bip_for_each_vec(iv, bio->bi_integrity, i) { 394 bip_for_each_vec(iv, bio->bi_integrity, i) {
@@ -408,6 +407,8 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
408 407
409 kunmap_atomic(sdt, KM_USER0); 408 kunmap_atomic(sdt, KM_USER0);
410 } 409 }
410
411 bio->bi_flags |= BIO_MAPPED_INTEGRITY;
411 } 412 }
412 413
413 return 0; 414 return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 58ec8f4efcc2..5428d53f5a13 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1660,7 +1660,7 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1660 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && 1660 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1661 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && 1661 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1662 !sfp->parentdp->device->host->unchecked_isa_dma && 1662 !sfp->parentdp->device->host->unchecked_isa_dma &&
1663 blk_rq_aligned(q, hp->dxferp, dxfer_len)) 1663 blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1664 md = NULL; 1664 md = NULL;
1665 else 1665 else
1666 md = &map_data; 1666 md = &map_data;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e148341079b5..d7b383c96d5d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -862,10 +862,16 @@ static void get_capabilities(struct scsi_cd *cd)
862static int sr_packet(struct cdrom_device_info *cdi, 862static int sr_packet(struct cdrom_device_info *cdi,
863 struct packet_command *cgc) 863 struct packet_command *cgc)
864{ 864{
865 struct scsi_cd *cd = cdi->handle;
866 struct scsi_device *sdev = cd->device;
867
868 if (cgc->cmd[0] == GPCMD_READ_DISC_INFO && sdev->no_read_disc_info)
869 return -EDRIVE_CANT_DO_THIS;
870
865 if (cgc->timeout <= 0) 871 if (cgc->timeout <= 0)
866 cgc->timeout = IOCTL_TIMEOUT; 872 cgc->timeout = IOCTL_TIMEOUT;
867 873
868 sr_do_ioctl(cdi->handle, cgc); 874 sr_do_ioctl(cd, cgc);
869 875
870 return cgc->stat; 876 return cgc->stat;
871} 877}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afdc3f5d915c..5b7388f1c835 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2008 Kai Makisara 12 Copyright 1992 - 2010 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20081215"; 20static const char *verstr = "20100829";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -2696,18 +2696,21 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2696 } 2696 }
2697 break; 2697 break;
2698 case MTWEOF: 2698 case MTWEOF:
2699 case MTWEOFI:
2699 case MTWSM: 2700 case MTWSM:
2700 if (STp->write_prot) 2701 if (STp->write_prot)
2701 return (-EACCES); 2702 return (-EACCES);
2702 cmd[0] = WRITE_FILEMARKS; 2703 cmd[0] = WRITE_FILEMARKS;
2703 if (cmd_in == MTWSM) 2704 if (cmd_in == MTWSM)
2704 cmd[1] = 2; 2705 cmd[1] = 2;
2706 if (cmd_in == MTWEOFI)
2707 cmd[1] |= 1;
2705 cmd[2] = (arg >> 16); 2708 cmd[2] = (arg >> 16);
2706 cmd[3] = (arg >> 8); 2709 cmd[3] = (arg >> 8);
2707 cmd[4] = arg; 2710 cmd[4] = arg;
2708 timeout = STp->device->request_queue->rq_timeout; 2711 timeout = STp->device->request_queue->rq_timeout;
2709 DEBC( 2712 DEBC(
2710 if (cmd_in == MTWEOF) 2713 if (cmd_in != MTWSM)
2711 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, 2714 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
2712 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); 2715 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
2713 else 2716 else
@@ -2883,8 +2886,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2883 else if (chg_eof) 2886 else if (chg_eof)
2884 STps->eof = ST_NOEOF; 2887 STps->eof = ST_NOEOF;
2885 2888
2886 if (cmd_in == MTWEOF) 2889 if (cmd_in == MTWEOF || cmd_in == MTWEOFI)
2887 STps->rw = ST_IDLE; 2890 STps->rw = ST_IDLE; /* prevent automatic WEOF at close */
2888 } else { /* SCSI command was not completely successful. Don't return 2891 } else { /* SCSI command was not completely successful. Don't return
2889 from this block without releasing the SCSI command block! */ 2892 from this block without releasing the SCSI command block! */
2890 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 2893 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -2901,7 +2904,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2901 else 2904 else
2902 undone = 0; 2905 undone = 0;
2903 2906
2904 if (cmd_in == MTWEOF && 2907 if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) &&
2905 cmdstatp->have_sense && 2908 cmdstatp->have_sense &&
2906 (cmdstatp->flags & SENSE_EOM)) { 2909 (cmdstatp->flags & SENSE_EOM)) {
2907 if (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2910 if (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index 0dff3bbddc8b..88b13356ec10 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -1381,6 +1381,30 @@ static void send_break(ser_info_t *info, unsigned int duration)
1381} 1381}
1382 1382
1383 1383
1384/*
1385 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1386 * Return: write counters to the user passed counter struct
1387 * NB: both 1->0 and 0->1 transitions are counted except for
1388 * RI where only 0->1 is counted.
1389 */
1390static int rs_360_get_icount(struct tty_struct *tty,
1391 struct serial_icounter_struct *icount)
1392{
1393 ser_info_t *info = (ser_info_t *)tty->driver_data;
1394 struct async_icount cnow;
1395
1396 local_irq_disable();
1397 cnow = info->state->icount;
1398 local_irq_enable();
1399
1400 icount->cts = cnow.cts;
1401 icount->dsr = cnow.dsr;
1402 icount->rng = cnow.rng;
1403 icount->dcd = cnow.dcd;
1404
1405 return 0;
1406}
1407
1384static int rs_360_ioctl(struct tty_struct *tty, struct file * file, 1408static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
1385 unsigned int cmd, unsigned long arg) 1409 unsigned int cmd, unsigned long arg)
1386{ 1410{
@@ -1394,7 +1418,7 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
1394 if (serial_paranoia_check(info, tty->name, "rs_ioctl")) 1418 if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
1395 return -ENODEV; 1419 return -ENODEV;
1396 1420
1397 if ((cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 1421 if (cmd != TIOCMIWAIT) {
1398 if (tty->flags & (1 << TTY_IO_ERROR)) 1422 if (tty->flags & (1 << TTY_IO_ERROR))
1399 return -EIO; 1423 return -EIO;
1400 } 1424 }
@@ -1477,31 +1501,6 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
1477 return 0; 1501 return 0;
1478#endif 1502#endif
1479 1503
1480 /*
1481 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1482 * Return: write counters to the user passed counter struct
1483 * NB: both 1->0 and 0->1 transitions are counted except for
1484 * RI where only 0->1 is counted.
1485 */
1486 case TIOCGICOUNT:
1487 local_irq_disable();
1488 cnow = info->state->icount;
1489 local_irq_enable();
1490 p_cuser = (struct serial_icounter_struct *) arg;
1491/* error = put_user(cnow.cts, &p_cuser->cts); */
1492/* if (error) return error; */
1493/* error = put_user(cnow.dsr, &p_cuser->dsr); */
1494/* if (error) return error; */
1495/* error = put_user(cnow.rng, &p_cuser->rng); */
1496/* if (error) return error; */
1497/* error = put_user(cnow.dcd, &p_cuser->dcd); */
1498/* if (error) return error; */
1499
1500 put_user(cnow.cts, &p_cuser->cts);
1501 put_user(cnow.dsr, &p_cuser->dsr);
1502 put_user(cnow.rng, &p_cuser->rng);
1503 put_user(cnow.dcd, &p_cuser->dcd);
1504 return 0;
1505 1504
1506 default: 1505 default:
1507 return -ENOIOCTLCMD; 1506 return -ENOIOCTLCMD;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 24110f6f61e0..167c4a6ccbc3 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -31,6 +31,7 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/tty.h> 33#include <linux/tty.h>
34#include <linux/ratelimit.h>
34#include <linux/tty_flip.h> 35#include <linux/tty_flip.h>
35#include <linux/serial_reg.h> 36#include <linux/serial_reg.h>
36#include <linux/serial_core.h> 37#include <linux/serial_core.h>
@@ -154,12 +155,6 @@ struct uart_8250_port {
154 unsigned char lsr_saved_flags; 155 unsigned char lsr_saved_flags;
155#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA 156#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
156 unsigned char msr_saved_flags; 157 unsigned char msr_saved_flags;
157
158 /*
159 * We provide a per-port pm hook.
160 */
161 void (*pm)(struct uart_port *port,
162 unsigned int state, unsigned int old);
163}; 158};
164 159
165struct irq_info { 160struct irq_info {
@@ -1606,8 +1601,8 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
1606 1601
1607 if (l == i->head && pass_counter++ > PASS_LIMIT) { 1602 if (l == i->head && pass_counter++ > PASS_LIMIT) {
1608 /* If we hit this, we're dead. */ 1603 /* If we hit this, we're dead. */
1609 printk(KERN_ERR "serial8250: too much work for " 1604 printk_ratelimited(KERN_ERR
1610 "irq%d\n", irq); 1605 "serial8250: too much work for irq%d\n", irq);
1611 break; 1606 break;
1612 } 1607 }
1613 } while (l != end); 1608 } while (l != end);
@@ -1722,12 +1717,6 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
1722 mutex_unlock(&hash_mutex); 1717 mutex_unlock(&hash_mutex);
1723} 1718}
1724 1719
1725/* Base timer interval for polling */
1726static inline int poll_timeout(int timeout)
1727{
1728 return timeout > 6 ? (timeout / 2 - 2) : 1;
1729}
1730
1731/* 1720/*
1732 * This function is used to handle ports that do not have an 1721 * This function is used to handle ports that do not have an
1733 * interrupt. This doesn't work very well for 16450's, but gives 1722 * interrupt. This doesn't work very well for 16450's, but gives
@@ -1742,7 +1731,7 @@ static void serial8250_timeout(unsigned long data)
1742 iir = serial_in(up, UART_IIR); 1731 iir = serial_in(up, UART_IIR);
1743 if (!(iir & UART_IIR_NO_INT)) 1732 if (!(iir & UART_IIR_NO_INT))
1744 serial8250_handle_port(up); 1733 serial8250_handle_port(up);
1745 mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout)); 1734 mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
1746} 1735}
1747 1736
1748static void serial8250_backup_timeout(unsigned long data) 1737static void serial8250_backup_timeout(unsigned long data)
@@ -1787,7 +1776,7 @@ static void serial8250_backup_timeout(unsigned long data)
1787 1776
1788 /* Standard timer interval plus 0.2s to keep the port running */ 1777 /* Standard timer interval plus 0.2s to keep the port running */
1789 mod_timer(&up->timer, 1778 mod_timer(&up->timer,
1790 jiffies + poll_timeout(up->port.timeout) + HZ / 5); 1779 jiffies + uart_poll_timeout(&up->port) + HZ / 5);
1791} 1780}
1792 1781
1793static unsigned int serial8250_tx_empty(struct uart_port *port) 1782static unsigned int serial8250_tx_empty(struct uart_port *port)
@@ -1867,15 +1856,17 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
1867 unsigned int status, tmout = 10000; 1856 unsigned int status, tmout = 10000;
1868 1857
1869 /* Wait up to 10ms for the character(s) to be sent. */ 1858 /* Wait up to 10ms for the character(s) to be sent. */
1870 do { 1859 for (;;) {
1871 status = serial_in(up, UART_LSR); 1860 status = serial_in(up, UART_LSR);
1872 1861
1873 up->lsr_saved_flags |= status & LSR_SAVE_FLAGS; 1862 up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
1874 1863
1864 if ((status & bits) == bits)
1865 break;
1875 if (--tmout == 0) 1866 if (--tmout == 0)
1876 break; 1867 break;
1877 udelay(1); 1868 udelay(1);
1878 } while ((status & bits) != bits); 1869 }
1879 1870
1880 /* Wait up to 1s for flow control if necessary */ 1871 /* Wait up to 1s for flow control if necessary */
1881 if (up->port.flags & UPF_CONS_FLOW) { 1872 if (up->port.flags & UPF_CONS_FLOW) {
@@ -2069,7 +2060,7 @@ static int serial8250_startup(struct uart_port *port)
2069 up->timer.function = serial8250_backup_timeout; 2060 up->timer.function = serial8250_backup_timeout;
2070 up->timer.data = (unsigned long)up; 2061 up->timer.data = (unsigned long)up;
2071 mod_timer(&up->timer, jiffies + 2062 mod_timer(&up->timer, jiffies +
2072 poll_timeout(up->port.timeout) + HZ / 5); 2063 uart_poll_timeout(port) + HZ / 5);
2073 } 2064 }
2074 2065
2075 /* 2066 /*
@@ -2079,7 +2070,7 @@ static int serial8250_startup(struct uart_port *port)
2079 */ 2070 */
2080 if (!is_real_interrupt(up->port.irq)) { 2071 if (!is_real_interrupt(up->port.irq)) {
2081 up->timer.data = (unsigned long)up; 2072 up->timer.data = (unsigned long)up;
2082 mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout)); 2073 mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
2083 } else { 2074 } else {
2084 retval = serial_link_irq_chain(up); 2075 retval = serial_link_irq_chain(up);
2085 if (retval) 2076 if (retval)
@@ -2440,16 +2431,24 @@ serial8250_set_ldisc(struct uart_port *port, int new)
2440 port->flags &= ~UPF_HARDPPS_CD; 2431 port->flags &= ~UPF_HARDPPS_CD;
2441} 2432}
2442 2433
2443static void 2434
2444serial8250_pm(struct uart_port *port, unsigned int state, 2435void serial8250_do_pm(struct uart_port *port, unsigned int state,
2445 unsigned int oldstate) 2436 unsigned int oldstate)
2446{ 2437{
2447 struct uart_8250_port *p = (struct uart_8250_port *)port; 2438 struct uart_8250_port *p = (struct uart_8250_port *)port;
2448 2439
2449 serial8250_set_sleep(p, state != 0); 2440 serial8250_set_sleep(p, state != 0);
2441}
2442EXPORT_SYMBOL(serial8250_do_pm);
2450 2443
2451 if (p->pm) 2444static void
2452 p->pm(port, state, oldstate); 2445serial8250_pm(struct uart_port *port, unsigned int state,
2446 unsigned int oldstate)
2447{
2448 if (port->pm)
2449 port->pm(port, state, oldstate);
2450 else
2451 serial8250_do_pm(port, state, oldstate);
2453} 2452}
2454 2453
2455static unsigned int serial8250_port_size(struct uart_8250_port *pt) 2454static unsigned int serial8250_port_size(struct uart_8250_port *pt)
@@ -2674,6 +2673,16 @@ static struct uart_ops serial8250_pops = {
2674 2673
2675static struct uart_8250_port serial8250_ports[UART_NR]; 2674static struct uart_8250_port serial8250_ports[UART_NR];
2676 2675
2676static void (*serial8250_isa_config)(int port, struct uart_port *up,
2677 unsigned short *capabilities);
2678
2679void serial8250_set_isa_configurator(
2680 void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
2681{
2682 serial8250_isa_config = v;
2683}
2684EXPORT_SYMBOL(serial8250_set_isa_configurator);
2685
2677static void __init serial8250_isa_init_ports(void) 2686static void __init serial8250_isa_init_ports(void)
2678{ 2687{
2679 struct uart_8250_port *up; 2688 struct uart_8250_port *up;
@@ -2719,6 +2728,9 @@ static void __init serial8250_isa_init_ports(void)
2719 up->port.regshift = old_serial_port[i].iomem_reg_shift; 2728 up->port.regshift = old_serial_port[i].iomem_reg_shift;
2720 set_io_from_upio(&up->port); 2729 set_io_from_upio(&up->port);
2721 up->port.irqflags |= irqflag; 2730 up->port.irqflags |= irqflag;
2731 if (serial8250_isa_config != NULL)
2732 serial8250_isa_config(i, &up->port, &up->capabilities);
2733
2722 } 2734 }
2723} 2735}
2724 2736
@@ -3010,6 +3022,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
3010 port.serial_in = p->serial_in; 3022 port.serial_in = p->serial_in;
3011 port.serial_out = p->serial_out; 3023 port.serial_out = p->serial_out;
3012 port.set_termios = p->set_termios; 3024 port.set_termios = p->set_termios;
3025 port.pm = p->pm;
3013 port.dev = &dev->dev; 3026 port.dev = &dev->dev;
3014 port.irqflags |= irqflag; 3027 port.irqflags |= irqflag;
3015 ret = serial8250_register_port(&port); 3028 ret = serial8250_register_port(&port);
@@ -3176,6 +3189,12 @@ int serial8250_register_port(struct uart_port *port)
3176 /* Possibly override set_termios call */ 3189 /* Possibly override set_termios call */
3177 if (port->set_termios) 3190 if (port->set_termios)
3178 uart->port.set_termios = port->set_termios; 3191 uart->port.set_termios = port->set_termios;
3192 if (port->pm)
3193 uart->port.pm = port->pm;
3194
3195 if (serial8250_isa_config != NULL)
3196 serial8250_isa_config(0, &uart->port,
3197 &uart->capabilities);
3179 3198
3180 ret = uart_add_one_port(&serial8250_reg, &uart->port); 3199 ret = uart_add_one_port(&serial8250_reg, &uart->port);
3181 if (ret == 0) 3200 if (ret == 0)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 3198c5335f0b..927816484397 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -718,13 +718,6 @@ config SERIAL_MRST_MAX3110
718 the Intel Moorestown platform. On other systems use the max3100 718 the Intel Moorestown platform. On other systems use the max3100
719 driver. 719 driver.
720 720
721config MRST_MAX3110_IRQ
722 boolean "Enable GPIO IRQ for Max3110 over Moorestown"
723 default n
724 depends on SERIAL_MRST_MAX3110 && GPIO_LANGWELL
725 help
726 This has to be enabled after Moorestown GPIO driver is loaded
727
728config SERIAL_MFD_HSU 721config SERIAL_MFD_HSU
729 tristate "Medfield High Speed UART support" 722 tristate "Medfield High Speed UART support"
730 depends on PCI 723 depends on PCI
diff --git a/drivers/serial/altera_uart.c b/drivers/serial/altera_uart.c
index f8d8a00554da..721216292a50 100644
--- a/drivers/serial/altera_uart.c
+++ b/drivers/serial/altera_uart.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/timer.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/console.h> 21#include <linux/console.h>
@@ -27,6 +28,8 @@
27#include <linux/altera_uart.h> 28#include <linux/altera_uart.h>
28 29
29#define DRV_NAME "altera_uart" 30#define DRV_NAME "altera_uart"
31#define SERIAL_ALTERA_MAJOR 204
32#define SERIAL_ALTERA_MINOR 213
30 33
31/* 34/*
32 * Altera UART register definitions according to the Nios UART datasheet: 35 * Altera UART register definitions according to the Nios UART datasheet:
@@ -76,13 +79,28 @@
76 */ 79 */
77struct altera_uart { 80struct altera_uart {
78 struct uart_port port; 81 struct uart_port port;
82 struct timer_list tmr;
79 unsigned int sigs; /* Local copy of line sigs */ 83 unsigned int sigs; /* Local copy of line sigs */
80 unsigned short imr; /* Local IMR mirror */ 84 unsigned short imr; /* Local IMR mirror */
81}; 85};
82 86
87static u32 altera_uart_readl(struct uart_port *port, int reg)
88{
89 struct altera_uart_platform_uart *platp = port->private_data;
90
91 return readl(port->membase + (reg << platp->bus_shift));
92}
93
94static void altera_uart_writel(struct uart_port *port, u32 dat, int reg)
95{
96 struct altera_uart_platform_uart *platp = port->private_data;
97
98 writel(dat, port->membase + (reg << platp->bus_shift));
99}
100
83static unsigned int altera_uart_tx_empty(struct uart_port *port) 101static unsigned int altera_uart_tx_empty(struct uart_port *port)
84{ 102{
85 return (readl(port->membase + ALTERA_UART_STATUS_REG) & 103 return (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
86 ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0; 104 ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
87} 105}
88 106
@@ -91,8 +109,7 @@ static unsigned int altera_uart_get_mctrl(struct uart_port *port)
91 struct altera_uart *pp = container_of(port, struct altera_uart, port); 109 struct altera_uart *pp = container_of(port, struct altera_uart, port);
92 unsigned int sigs; 110 unsigned int sigs;
93 111
94 sigs = 112 sigs = (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
95 (readl(port->membase + ALTERA_UART_STATUS_REG) &
96 ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0; 113 ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
97 sigs |= (pp->sigs & TIOCM_RTS); 114 sigs |= (pp->sigs & TIOCM_RTS);
98 115
@@ -108,7 +125,7 @@ static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
108 pp->imr |= ALTERA_UART_CONTROL_RTS_MSK; 125 pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
109 else 126 else
110 pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK; 127 pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
111 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 128 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
112} 129}
113 130
114static void altera_uart_start_tx(struct uart_port *port) 131static void altera_uart_start_tx(struct uart_port *port)
@@ -116,7 +133,7 @@ static void altera_uart_start_tx(struct uart_port *port)
116 struct altera_uart *pp = container_of(port, struct altera_uart, port); 133 struct altera_uart *pp = container_of(port, struct altera_uart, port);
117 134
118 pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK; 135 pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
119 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 136 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
120} 137}
121 138
122static void altera_uart_stop_tx(struct uart_port *port) 139static void altera_uart_stop_tx(struct uart_port *port)
@@ -124,7 +141,7 @@ static void altera_uart_stop_tx(struct uart_port *port)
124 struct altera_uart *pp = container_of(port, struct altera_uart, port); 141 struct altera_uart *pp = container_of(port, struct altera_uart, port);
125 142
126 pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; 143 pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
127 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 144 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
128} 145}
129 146
130static void altera_uart_stop_rx(struct uart_port *port) 147static void altera_uart_stop_rx(struct uart_port *port)
@@ -132,7 +149,7 @@ static void altera_uart_stop_rx(struct uart_port *port)
132 struct altera_uart *pp = container_of(port, struct altera_uart, port); 149 struct altera_uart *pp = container_of(port, struct altera_uart, port);
133 150
134 pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK; 151 pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
135 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 152 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
136} 153}
137 154
138static void altera_uart_break_ctl(struct uart_port *port, int break_state) 155static void altera_uart_break_ctl(struct uart_port *port, int break_state)
@@ -145,7 +162,7 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
145 pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK; 162 pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
146 else 163 else
147 pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK; 164 pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
148 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 165 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
149 spin_unlock_irqrestore(&port->lock, flags); 166 spin_unlock_irqrestore(&port->lock, flags);
150} 167}
151 168
@@ -168,7 +185,8 @@ static void altera_uart_set_termios(struct uart_port *port,
168 tty_termios_encode_baud_rate(termios, baud, baud); 185 tty_termios_encode_baud_rate(termios, baud, baud);
169 186
170 spin_lock_irqsave(&port->lock, flags); 187 spin_lock_irqsave(&port->lock, flags);
171 writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG); 188 uart_update_timeout(port, termios->c_cflag, baud);
189 altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
172 spin_unlock_irqrestore(&port->lock, flags); 190 spin_unlock_irqrestore(&port->lock, flags);
173} 191}
174 192
@@ -178,14 +196,15 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
178 unsigned char ch, flag; 196 unsigned char ch, flag;
179 unsigned short status; 197 unsigned short status;
180 198
181 while ((status = readl(port->membase + ALTERA_UART_STATUS_REG)) & 199 while ((status = altera_uart_readl(port, ALTERA_UART_STATUS_REG)) &
182 ALTERA_UART_STATUS_RRDY_MSK) { 200 ALTERA_UART_STATUS_RRDY_MSK) {
183 ch = readl(port->membase + ALTERA_UART_RXDATA_REG); 201 ch = altera_uart_readl(port, ALTERA_UART_RXDATA_REG);
184 flag = TTY_NORMAL; 202 flag = TTY_NORMAL;
185 port->icount.rx++; 203 port->icount.rx++;
186 204
187 if (status & ALTERA_UART_STATUS_E_MSK) { 205 if (status & ALTERA_UART_STATUS_E_MSK) {
188 writel(status, port->membase + ALTERA_UART_STATUS_REG); 206 altera_uart_writel(port, status,
207 ALTERA_UART_STATUS_REG);
189 208
190 if (status & ALTERA_UART_STATUS_BRK_MSK) { 209 if (status & ALTERA_UART_STATUS_BRK_MSK) {
191 port->icount.brk++; 210 port->icount.brk++;
@@ -225,18 +244,18 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
225 244
226 if (port->x_char) { 245 if (port->x_char) {
227 /* Send special char - probably flow control */ 246 /* Send special char - probably flow control */
228 writel(port->x_char, port->membase + ALTERA_UART_TXDATA_REG); 247 altera_uart_writel(port, port->x_char, ALTERA_UART_TXDATA_REG);
229 port->x_char = 0; 248 port->x_char = 0;
230 port->icount.tx++; 249 port->icount.tx++;
231 return; 250 return;
232 } 251 }
233 252
234 while (readl(port->membase + ALTERA_UART_STATUS_REG) & 253 while (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
235 ALTERA_UART_STATUS_TRDY_MSK) { 254 ALTERA_UART_STATUS_TRDY_MSK) {
236 if (xmit->head == xmit->tail) 255 if (xmit->head == xmit->tail)
237 break; 256 break;
238 writel(xmit->buf[xmit->tail], 257 altera_uart_writel(port, xmit->buf[xmit->tail],
239 port->membase + ALTERA_UART_TXDATA_REG); 258 ALTERA_UART_TXDATA_REG);
240 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 259 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
241 port->icount.tx++; 260 port->icount.tx++;
242 } 261 }
@@ -246,7 +265,7 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
246 265
247 if (xmit->head == xmit->tail) { 266 if (xmit->head == xmit->tail) {
248 pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; 267 pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
249 writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); 268 altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
250 } 269 }
251} 270}
252 271
@@ -256,7 +275,7 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
256 struct altera_uart *pp = container_of(port, struct altera_uart, port); 275 struct altera_uart *pp = container_of(port, struct altera_uart, port);
257 unsigned int isr; 276 unsigned int isr;
258 277
259 isr = readl(port->membase + ALTERA_UART_STATUS_REG) & pp->imr; 278 isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
260 279
261 spin_lock(&port->lock); 280 spin_lock(&port->lock);
262 if (isr & ALTERA_UART_STATUS_RRDY_MSK) 281 if (isr & ALTERA_UART_STATUS_RRDY_MSK)
@@ -268,14 +287,23 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
268 return IRQ_RETVAL(isr); 287 return IRQ_RETVAL(isr);
269} 288}
270 289
290static void altera_uart_timer(unsigned long data)
291{
292 struct uart_port *port = (void *)data;
293 struct altera_uart *pp = container_of(port, struct altera_uart, port);
294
295 altera_uart_interrupt(0, port);
296 mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
297}
298
271static void altera_uart_config_port(struct uart_port *port, int flags) 299static void altera_uart_config_port(struct uart_port *port, int flags)
272{ 300{
273 port->type = PORT_ALTERA_UART; 301 port->type = PORT_ALTERA_UART;
274 302
275 /* Clear mask, so no surprise interrupts. */ 303 /* Clear mask, so no surprise interrupts. */
276 writel(0, port->membase + ALTERA_UART_CONTROL_REG); 304 altera_uart_writel(port, 0, ALTERA_UART_CONTROL_REG);
277 /* Clear status register */ 305 /* Clear status register */
278 writel(0, port->membase + ALTERA_UART_STATUS_REG); 306 altera_uart_writel(port, 0, ALTERA_UART_STATUS_REG);
279} 307}
280 308
281static int altera_uart_startup(struct uart_port *port) 309static int altera_uart_startup(struct uart_port *port)
@@ -284,6 +312,12 @@ static int altera_uart_startup(struct uart_port *port)
284 unsigned long flags; 312 unsigned long flags;
285 int ret; 313 int ret;
286 314
315 if (!port->irq) {
316 setup_timer(&pp->tmr, altera_uart_timer, (unsigned long)port);
317 mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
318 return 0;
319 }
320
287 ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED, 321 ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
288 DRV_NAME, port); 322 DRV_NAME, port);
289 if (ret) { 323 if (ret) {
@@ -316,7 +350,10 @@ static void altera_uart_shutdown(struct uart_port *port)
316 350
317 spin_unlock_irqrestore(&port->lock, flags); 351 spin_unlock_irqrestore(&port->lock, flags);
318 352
319 free_irq(port->irq, port); 353 if (port->irq)
354 free_irq(port->irq, port);
355 else
356 del_timer_sync(&pp->tmr);
320} 357}
321 358
322static const char *altera_uart_type(struct uart_port *port) 359static const char *altera_uart_type(struct uart_port *port)
@@ -384,8 +421,9 @@ int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
384 port->iotype = SERIAL_IO_MEM; 421 port->iotype = SERIAL_IO_MEM;
385 port->irq = platp[i].irq; 422 port->irq = platp[i].irq;
386 port->uartclk = platp[i].uartclk; 423 port->uartclk = platp[i].uartclk;
387 port->flags = ASYNC_BOOT_AUTOCONF; 424 port->flags = UPF_BOOT_AUTOCONF;
388 port->ops = &altera_uart_ops; 425 port->ops = &altera_uart_ops;
426 port->private_data = platp;
389 } 427 }
390 428
391 return 0; 429 return 0;
@@ -393,7 +431,7 @@ int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
393 431
394static void altera_uart_console_putc(struct uart_port *port, const char c) 432static void altera_uart_console_putc(struct uart_port *port, const char c)
395{ 433{
396 while (!(readl(port->membase + ALTERA_UART_STATUS_REG) & 434 while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
397 ALTERA_UART_STATUS_TRDY_MSK)) 435 ALTERA_UART_STATUS_TRDY_MSK))
398 cpu_relax(); 436 cpu_relax();
399 437
@@ -423,7 +461,7 @@ static int __init altera_uart_console_setup(struct console *co, char *options)
423 if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS) 461 if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
424 return -EINVAL; 462 return -EINVAL;
425 port = &altera_uart_ports[co->index].port; 463 port = &altera_uart_ports[co->index].port;
426 if (port->membase == 0) 464 if (!port->membase)
427 return -ENODEV; 465 return -ENODEV;
428 466
429 if (options) 467 if (options)
@@ -435,7 +473,7 @@ static int __init altera_uart_console_setup(struct console *co, char *options)
435static struct uart_driver altera_uart_driver; 473static struct uart_driver altera_uart_driver;
436 474
437static struct console altera_uart_console = { 475static struct console altera_uart_console = {
438 .name = "ttyS", 476 .name = "ttyAL",
439 .write = altera_uart_console_write, 477 .write = altera_uart_console_write,
440 .device = uart_console_device, 478 .device = uart_console_device,
441 .setup = altera_uart_console_setup, 479 .setup = altera_uart_console_setup,
@@ -466,9 +504,9 @@ console_initcall(altera_uart_console_init);
466static struct uart_driver altera_uart_driver = { 504static struct uart_driver altera_uart_driver = {
467 .owner = THIS_MODULE, 505 .owner = THIS_MODULE,
468 .driver_name = DRV_NAME, 506 .driver_name = DRV_NAME,
469 .dev_name = "ttyS", 507 .dev_name = "ttyAL",
470 .major = TTY_MAJOR, 508 .major = SERIAL_ALTERA_MAJOR,
471 .minor = 64, 509 .minor = SERIAL_ALTERA_MINOR,
472 .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS, 510 .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
473 .cons = ALTERA_UART_CONSOLE, 511 .cons = ALTERA_UART_CONSOLE,
474}; 512};
@@ -477,38 +515,55 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
477{ 515{
478 struct altera_uart_platform_uart *platp = pdev->dev.platform_data; 516 struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
479 struct uart_port *port; 517 struct uart_port *port;
480 int i; 518 struct resource *res_mem;
519 struct resource *res_irq;
520 int i = pdev->id;
481 521
482 for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) { 522 /* -1 emphasizes that the platform must have one port, no .N suffix */
483 port = &altera_uart_ports[i].port; 523 if (i == -1)
524 i = 0;
484 525
485 port->line = i; 526 if (i >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
486 port->type = PORT_ALTERA_UART; 527 return -EINVAL;
487 port->mapbase = platp[i].mapbase;
488 port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
489 port->iotype = SERIAL_IO_MEM;
490 port->irq = platp[i].irq;
491 port->uartclk = platp[i].uartclk;
492 port->ops = &altera_uart_ops;
493 port->flags = ASYNC_BOOT_AUTOCONF;
494 528
495 uart_add_one_port(&altera_uart_driver, port); 529 port = &altera_uart_ports[i].port;
496 } 530
531 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
532 if (res_mem)
533 port->mapbase = res_mem->start;
534 else if (platp->mapbase)
535 port->mapbase = platp->mapbase;
536 else
537 return -EINVAL;
538
539 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
540 if (res_irq)
541 port->irq = res_irq->start;
542 else if (platp->irq)
543 port->irq = platp->irq;
544
545 port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
546 if (!port->membase)
547 return -ENOMEM;
548
549 port->line = i;
550 port->type = PORT_ALTERA_UART;
551 port->iotype = SERIAL_IO_MEM;
552 port->uartclk = platp->uartclk;
553 port->ops = &altera_uart_ops;
554 port->flags = UPF_BOOT_AUTOCONF;
555 port->private_data = platp;
556
557 uart_add_one_port(&altera_uart_driver, port);
497 558
498 return 0; 559 return 0;
499} 560}
500 561
501static int __devexit altera_uart_remove(struct platform_device *pdev) 562static int __devexit altera_uart_remove(struct platform_device *pdev)
502{ 563{
503 struct uart_port *port; 564 struct uart_port *port = &altera_uart_ports[pdev->id].port;
504 int i;
505
506 for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++) {
507 port = &altera_uart_ports[i].port;
508 if (port)
509 uart_remove_one_port(&altera_uart_driver, port);
510 }
511 565
566 uart_remove_one_port(&altera_uart_driver, port);
512 return 0; 567 return 0;
513} 568}
514 569
@@ -550,3 +605,4 @@ MODULE_DESCRIPTION("Altera UART driver");
550MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); 605MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
551MODULE_LICENSE("GPL"); 606MODULE_LICENSE("GPL");
552MODULE_ALIAS("platform:" DRV_NAME); 607MODULE_ALIAS("platform:" DRV_NAME);
608MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR);
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 5318dd3774ae..6f1b51e231e4 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -131,7 +131,12 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
131 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); 131 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
132 132
133 tclkdiv = sclk / (2 * baud_rate) - 1; 133 tclkdiv = sclk / (2 * baud_rate) - 1;
134 rclkdiv = sclk / (2 * baud_rate * 2) - 1; 134 /* The actual uart baud rate of devices vary between +/-2%. The sport
135 * RX sample rate should be faster than the double of the worst case,
136 * otherwise, wrong data are received. So, set sport RX clock to be
137 * 3% faster.
138 */
139 rclkdiv = sclk / (2 * baud_rate * 2 * 97 / 100) - 1;
135 SPORT_PUT_TCLKDIV(up, tclkdiv); 140 SPORT_PUT_TCLKDIV(up, tclkdiv);
136 SPORT_PUT_RCLKDIV(up, rclkdiv); 141 SPORT_PUT_RCLKDIV(up, rclkdiv);
137 SSYNC(); 142 SSYNC();
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 66ecc7ab6dab..dfcf4b1878aa 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -327,14 +327,13 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
327{ 327{
328 struct circ_buf *xmit = &sport->port.state->xmit; 328 struct circ_buf *xmit = &sport->port.state->xmit;
329 329
330 while (!(readl(sport->port.membase + UTS) & UTS_TXFULL)) { 330 while (!uart_circ_empty(xmit) &&
331 !(readl(sport->port.membase + UTS) & UTS_TXFULL)) {
331 /* send xmit->buf[xmit->tail] 332 /* send xmit->buf[xmit->tail]
332 * out the port here */ 333 * out the port here */
333 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0); 334 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
334 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 335 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
335 sport->port.icount.tx++; 336 sport->port.icount.tx++;
336 if (uart_circ_empty(xmit))
337 break;
338 } 337 }
339 338
340 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 339 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index eaf545014119..18f548449c63 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -172,13 +172,15 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
172 jsm_uart_port_init here! */ 172 jsm_uart_port_init here! */
173 dev_err(&pdev->dev, "memory allocation for flipbuf failed\n"); 173 dev_err(&pdev->dev, "memory allocation for flipbuf failed\n");
174 rc = -ENOMEM; 174 rc = -ENOMEM;
175 goto out_free_irq; 175 goto out_free_uart;
176 } 176 }
177 177
178 pci_set_drvdata(pdev, brd); 178 pci_set_drvdata(pdev, brd);
179 pci_save_state(pdev); 179 pci_save_state(pdev);
180 180
181 return 0; 181 return 0;
182 out_free_uart:
183 jsm_remove_uart_port(brd);
182 out_free_irq: 184 out_free_irq:
183 jsm_remove_uart_port(brd); 185 jsm_remove_uart_port(brd);
184 free_irq(brd->irq, brd); 186 free_irq(brd->irq, brd);
diff --git a/drivers/serial/max3107.c b/drivers/serial/max3107.c
index 67283c1a57ff..910870edf708 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/serial/max3107.c
@@ -986,12 +986,14 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
986 s->rxbuf = kzalloc(sizeof(u16) * (MAX3107_RX_FIFO_SIZE+2), GFP_KERNEL); 986 s->rxbuf = kzalloc(sizeof(u16) * (MAX3107_RX_FIFO_SIZE+2), GFP_KERNEL);
987 if (!s->rxbuf) { 987 if (!s->rxbuf) {
988 pr_err("Allocating RX buffer failed\n"); 988 pr_err("Allocating RX buffer failed\n");
989 return -ENOMEM; 989 retval = -ENOMEM;
990 goto err_free4;
990 } 991 }
991 s->rxstr = kzalloc(sizeof(u8) * MAX3107_RX_FIFO_SIZE, GFP_KERNEL); 992 s->rxstr = kzalloc(sizeof(u8) * MAX3107_RX_FIFO_SIZE, GFP_KERNEL);
992 if (!s->rxstr) { 993 if (!s->rxstr) {
993 pr_err("Allocating RX buffer failed\n"); 994 pr_err("Allocating RX buffer failed\n");
994 return -ENOMEM; 995 retval = -ENOMEM;
996 goto err_free3;
995 } 997 }
996 /* SPI Tx buffer 998 /* SPI Tx buffer
997 * SPI transfer buffer 999 * SPI transfer buffer
@@ -1002,7 +1004,8 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1002 s->txbuf = kzalloc(sizeof(u16) * MAX3107_TX_FIFO_SIZE + 3, GFP_KERNEL); 1004 s->txbuf = kzalloc(sizeof(u16) * MAX3107_TX_FIFO_SIZE + 3, GFP_KERNEL);
1003 if (!s->txbuf) { 1005 if (!s->txbuf) {
1004 pr_err("Allocating TX buffer failed\n"); 1006 pr_err("Allocating TX buffer failed\n");
1005 return -ENOMEM; 1007 retval = -ENOMEM;
1008 goto err_free2;
1006 } 1009 }
1007 /* Initialize shared data lock */ 1010 /* Initialize shared data lock */
1008 spin_lock_init(&s->data_lock); 1011 spin_lock_init(&s->data_lock);
@@ -1021,13 +1024,15 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1021 buf[0] = MAX3107_REVID_REG; 1024 buf[0] = MAX3107_REVID_REG;
1022 if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) { 1025 if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
1023 dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n"); 1026 dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n");
1024 return -EIO; 1027 retval = -EIO;
1028 goto err_free1;
1025 } 1029 }
1026 if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 && 1030 if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 &&
1027 (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) { 1031 (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) {
1028 dev_err(&s->spi->dev, "REVID %x does not match\n", 1032 dev_err(&s->spi->dev, "REVID %x does not match\n",
1029 (buf[0] & MAX3107_SPI_RX_DATA_MASK)); 1033 (buf[0] & MAX3107_SPI_RX_DATA_MASK));
1030 return -ENODEV; 1034 retval = -ENODEV;
1035 goto err_free1;
1031 } 1036 }
1032 1037
1033 /* Disable all interrupts */ 1038 /* Disable all interrupts */
@@ -1047,7 +1052,8 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1047 /* Perform SPI transfer */ 1052 /* Perform SPI transfer */
1048 if (max3107_rw(s, (u8 *)buf, NULL, 4)) { 1053 if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
1049 dev_err(&s->spi->dev, "SPI transfer for init failed\n"); 1054 dev_err(&s->spi->dev, "SPI transfer for init failed\n");
1050 return -EIO; 1055 retval = -EIO;
1056 goto err_free1;
1051 } 1057 }
1052 1058
1053 /* Register UART driver */ 1059 /* Register UART driver */
@@ -1055,7 +1061,7 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1055 retval = uart_register_driver(&max3107_uart_driver); 1061 retval = uart_register_driver(&max3107_uart_driver);
1056 if (retval) { 1062 if (retval) {
1057 dev_err(&s->spi->dev, "Registering UART driver failed\n"); 1063 dev_err(&s->spi->dev, "Registering UART driver failed\n");
1058 return retval; 1064 goto err_free1;
1059 } 1065 }
1060 driver_registered = 1; 1066 driver_registered = 1;
1061 } 1067 }
@@ -1074,13 +1080,13 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1074 retval = uart_add_one_port(&max3107_uart_driver, &s->port); 1080 retval = uart_add_one_port(&max3107_uart_driver, &s->port);
1075 if (retval < 0) { 1081 if (retval < 0) {
1076 dev_err(&s->spi->dev, "Adding UART port failed\n"); 1082 dev_err(&s->spi->dev, "Adding UART port failed\n");
1077 return retval; 1083 goto err_free1;
1078 } 1084 }
1079 1085
1080 if (pdata->configure) { 1086 if (pdata->configure) {
1081 retval = pdata->configure(s); 1087 retval = pdata->configure(s);
1082 if (retval < 0) 1088 if (retval < 0)
1083 return retval; 1089 goto err_free1;
1084 } 1090 }
1085 1091
1086 /* Go to suspend mode */ 1092 /* Go to suspend mode */
@@ -1088,6 +1094,16 @@ int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
1088 pdata->hw_suspend(s, 1); 1094 pdata->hw_suspend(s, 1);
1089 1095
1090 return 0; 1096 return 0;
1097
1098err_free1:
1099 kfree(s->txbuf);
1100err_free2:
1101 kfree(s->rxstr);
1102err_free3:
1103 kfree(s->rxbuf);
1104err_free4:
1105 kfree(s);
1106 return retval;
1091} 1107}
1092EXPORT_SYMBOL_GPL(max3107_probe); 1108EXPORT_SYMBOL_GPL(max3107_probe);
1093 1109
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index dc0967fb9ea6..5fc699e929dc 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -172,6 +172,9 @@ static ssize_t port_show_regs(struct file *file, char __user *user_buf,
172 len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 172 len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
173 "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV)); 173 "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
174 174
175 if (len > HSU_REGS_BUFSIZE)
176 len = HSU_REGS_BUFSIZE;
177
175 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); 178 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
176 kfree(buf); 179 kfree(buf);
177 return ret; 180 return ret;
@@ -219,6 +222,9 @@ static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
219 len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 222 len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
220 "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR)); 223 "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
221 224
225 if (len > HSU_REGS_BUFSIZE)
226 len = HSU_REGS_BUFSIZE;
227
222 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); 228 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
223 kfree(buf); 229 kfree(buf);
224 return ret; 230 return ret;
@@ -925,39 +931,52 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
925 cval |= UART_LCR_EPAR; 931 cval |= UART_LCR_EPAR;
926 932
927 /* 933 /*
934 * The base clk is 50Mhz, and the baud rate come from:
935 * baud = 50M * MUL / (DIV * PS * DLAB)
936 *
928 * For those basic low baud rate we can get the direct 937 * For those basic low baud rate we can get the direct
929 * scalar from 2746800, like 115200 = 2746800/24, for those 938 * scalar from 2746800, like 115200 = 2746800/24. For those
930 * higher baud rate, we have to handle them case by case, 939 * higher baud rate, we handle them case by case, mainly by
931 * but DIV reg is never touched as its default value 0x3d09 940 * adjusting the MUL/PS registers, and DIV register is kept
941 * as default value 0x3d09 to make things simple
932 */ 942 */
933 baud = uart_get_baud_rate(port, termios, old, 0, 4000000); 943 baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
934 quot = uart_get_divisor(port, baud);
935 944
945 quot = 1;
936 switch (baud) { 946 switch (baud) {
937 case 3500000: 947 case 3500000:
938 mul = 0x3345; 948 mul = 0x3345;
939 ps = 0xC; 949 ps = 0xC;
940 quot = 1; 950 break;
951 case 3000000:
952 mul = 0x2EE0;
941 break; 953 break;
942 case 2500000: 954 case 2500000:
943 mul = 0x2710; 955 mul = 0x2710;
944 ps = 0x10;
945 quot = 1;
946 break; 956 break;
947 case 18432000: 957 case 2000000:
958 mul = 0x1F40;
959 break;
960 case 1843200:
948 mul = 0x2400; 961 mul = 0x2400;
949 ps = 0x10;
950 quot = 1;
951 break; 962 break;
952 case 1500000: 963 case 1500000:
953 mul = 0x1D4C; 964 mul = 0x1770;
954 ps = 0xc; 965 break;
955 quot = 1; 966 case 1000000:
967 mul = 0xFA0;
968 break;
969 case 500000:
970 mul = 0x7D0;
956 break; 971 break;
957 default: 972 default:
958 ; 973 /* Use uart_get_divisor to get quot for other baud rates */
974 quot = 0;
959 } 975 }
960 976
977 if (!quot)
978 quot = uart_get_divisor(port, baud);
979
961 if ((up->port.uartclk / quot) < (2400 * 16)) 980 if ((up->port.uartclk / quot) < (2400 * 16))
962 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B; 981 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
963 else if ((up->port.uartclk / quot) < (230400 * 16)) 982 else if ((up->port.uartclk / quot) < (230400 * 16))
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
index 51c15f58e01e..b62857bf2fdb 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/serial/mrst_max3110.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * max3110.c - spi uart protocol driver for Maxim 3110 on Moorestown 2 * mrst_max3110.c - spi uart protocol driver for Maxim 3110
3 * 3 *
4 * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com> 4 * Copyright (c) 2008-2010, Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -32,18 +32,13 @@
32#include <linux/irq.h> 32#include <linux/irq.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/console.h> 34#include <linux/console.h>
35#include <linux/sysrq.h>
36#include <linux/platform_device.h>
37#include <linux/tty.h> 35#include <linux/tty.h>
38#include <linux/tty_flip.h> 36#include <linux/tty_flip.h>
39#include <linux/serial_core.h> 37#include <linux/serial_core.h>
40#include <linux/serial_reg.h> 38#include <linux/serial_reg.h>
41 39
42#include <linux/kthread.h> 40#include <linux/kthread.h>
43#include <linux/delay.h>
44#include <asm/atomic.h>
45#include <linux/spi/spi.h> 41#include <linux/spi/spi.h>
46#include <linux/spi/dw_spi.h>
47 42
48#include "mrst_max3110.h" 43#include "mrst_max3110.h"
49 44
@@ -56,7 +51,7 @@
56struct uart_max3110 { 51struct uart_max3110 {
57 struct uart_port port; 52 struct uart_port port;
58 struct spi_device *spi; 53 struct spi_device *spi;
59 char *name; 54 char name[24];
60 55
61 wait_queue_head_t wq; 56 wait_queue_head_t wq;
62 struct task_struct *main_thread; 57 struct task_struct *main_thread;
@@ -67,35 +62,30 @@ struct uart_max3110 {
67 u16 cur_conf; 62 u16 cur_conf;
68 u8 clock; 63 u8 clock;
69 u8 parity, word_7bits; 64 u8 parity, word_7bits;
65 u16 irq;
70 66
71 unsigned long uart_flags; 67 unsigned long uart_flags;
72 68
73 /* console related */ 69 /* console related */
74 struct circ_buf con_xmit; 70 struct circ_buf con_xmit;
75
76 /* irq related */
77 u16 irq;
78}; 71};
79 72
80/* global data structure, may need be removed */ 73/* global data structure, may need be removed */
81struct uart_max3110 *pmax; 74static struct uart_max3110 *pmax;
82static inline void receive_char(struct uart_max3110 *max, u8 ch); 75
83static void receive_chars(struct uart_max3110 *max, 76static void receive_chars(struct uart_max3110 *max,
84 unsigned char *str, int len); 77 unsigned char *str, int len);
85static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf); 78static int max3110_read_multi(struct uart_max3110 *max, u8 *buf);
86static void max3110_console_receive(struct uart_max3110 *max); 79static void max3110_con_receive(struct uart_max3110 *max);
87 80
88int max3110_write_then_read(struct uart_max3110 *max, 81static int max3110_write_then_read(struct uart_max3110 *max,
89 const u8 *txbuf, u8 *rxbuf, unsigned len, int always_fast) 82 const void *txbuf, void *rxbuf, unsigned len, int always_fast)
90{ 83{
91 struct spi_device *spi = max->spi; 84 struct spi_device *spi = max->spi;
92 struct spi_message message; 85 struct spi_message message;
93 struct spi_transfer x; 86 struct spi_transfer x;
94 int ret; 87 int ret;
95 88
96 if (!txbuf || !rxbuf)
97 return -EINVAL;
98
99 spi_message_init(&message); 89 spi_message_init(&message);
100 memset(&x, 0, sizeof x); 90 memset(&x, 0, sizeof x);
101 x.len = len; 91 x.len = len;
@@ -104,7 +94,7 @@ int max3110_write_then_read(struct uart_max3110 *max,
104 spi_message_add_tail(&x, &message); 94 spi_message_add_tail(&x, &message);
105 95
106 if (always_fast) 96 if (always_fast)
107 x.speed_hz = 3125000; 97 x.speed_hz = spi->max_speed_hz;
108 else if (max->baud) 98 else if (max->baud)
109 x.speed_hz = max->baud; 99 x.speed_hz = max->baud;
110 100
@@ -113,58 +103,80 @@ int max3110_write_then_read(struct uart_max3110 *max,
113 return ret; 103 return ret;
114} 104}
115 105
116/* Write a u16 to the device, and return one u16 read back */ 106/* Write a 16b word to the device */
117int max3110_out(struct uart_max3110 *max, const u16 out) 107static int max3110_out(struct uart_max3110 *max, const u16 out)
118{ 108{
119 u16 tmp; 109 void *buf;
110 u16 *obuf, *ibuf;
111 u8 ch;
120 int ret; 112 int ret;
121 113
122 ret = max3110_write_then_read(max, (u8 *)&out, (u8 *)&tmp, 2, 1); 114 buf = kzalloc(8, GFP_KERNEL | GFP_DMA);
123 if (ret) 115 if (!buf)
124 return ret; 116 return -ENOMEM;
117
118 obuf = buf;
119 ibuf = buf + 4;
120 *obuf = out;
121 ret = max3110_write_then_read(max, obuf, ibuf, 2, 1);
122 if (ret) {
123 pr_warning(PR_FMT "%s(): get err msg %d when sending 0x%x\n",
124 __func__, ret, out);
125 goto exit;
126 }
125 127
126 /* If some valid data is read back */ 128 /* If some valid data is read back */
127 if (tmp & MAX3110_READ_DATA_AVAILABLE) 129 if (*ibuf & MAX3110_READ_DATA_AVAILABLE) {
128 receive_char(max, (tmp & 0xff)); 130 ch = *ibuf & 0xff;
131 receive_chars(max, &ch, 1);
132 }
129 133
134exit:
135 kfree(buf);
130 return ret; 136 return ret;
131} 137}
132 138
133#define MAX_READ_LEN 20
134/* 139/*
135 * This is usually used to read data from SPIC RX FIFO, which doesn't 140 * This is usually used to read data from SPIC RX FIFO, which doesn't
136 * need any delay like flushing character out. It returns how many 141 * need any delay like flushing character out.
137 * valide bytes are read back 142 *
143 * Return how many valide bytes are read back
138 */ 144 */
139static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf) 145static int max3110_read_multi(struct uart_max3110 *max, u8 *rxbuf)
140{ 146{
141 u16 out[MAX_READ_LEN], in[MAX_READ_LEN]; 147 void *buf;
142 u8 *pbuf, valid_str[MAX_READ_LEN]; 148 u16 *obuf, *ibuf;
143 int i, j, bytelen; 149 u8 *pbuf, valid_str[M3110_RX_FIFO_DEPTH];
150 int i, j, blen;
144 151
145 if (len > MAX_READ_LEN) { 152 blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
146 pr_err(PR_FMT "read len %d is too large\n", len); 153 buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
154 if (!buf) {
155 pr_warning(PR_FMT "%s(): fail to alloc dma buffer\n", __func__);
147 return 0; 156 return 0;
148 } 157 }
149 158
150 bytelen = len * 2; 159 /* tx/rx always have the same length */
151 memset(out, 0, bytelen); 160 obuf = buf;
152 memset(in, 0, bytelen); 161 ibuf = buf + blen;
153 162
154 if (max3110_write_then_read(max, (u8 *)out, (u8 *)in, bytelen, 1)) 163 if (max3110_write_then_read(max, obuf, ibuf, blen, 1)) {
164 kfree(buf);
155 return 0; 165 return 0;
166 }
156 167
157 /* If caller don't provide a buffer, then handle received char */ 168 /* If caller doesn't provide a buffer, then handle received char */
158 pbuf = buf ? buf : valid_str; 169 pbuf = rxbuf ? rxbuf : valid_str;
159 170
160 for (i = 0, j = 0; i < len; i++) { 171 for (i = 0, j = 0; i < M3110_RX_FIFO_DEPTH; i++) {
161 if (in[i] & MAX3110_READ_DATA_AVAILABLE) 172 if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
162 pbuf[j++] = (u8)(in[i] & 0xff); 173 pbuf[j++] = ibuf[i] & 0xff;
163 } 174 }
164 175
165 if (j && (pbuf == valid_str)) 176 if (j && (pbuf == valid_str))
166 receive_chars(max, valid_str, j); 177 receive_chars(max, valid_str, j);
167 178
179 kfree(buf);
168 return j; 180 return j;
169} 181}
170 182
@@ -178,10 +190,6 @@ static void serial_m3110_con_putchar(struct uart_port *port, int ch)
178 xmit->buf[xmit->head] = (char)ch; 190 xmit->buf[xmit->head] = (char)ch;
179 xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1); 191 xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
180 } 192 }
181
182
183 if (!test_and_set_bit(CON_TX_NEEDED, &max->uart_flags))
184 wake_up_process(max->main_thread);
185} 193}
186 194
187/* 195/*
@@ -197,6 +205,9 @@ static void serial_m3110_con_write(struct console *co,
197 return; 205 return;
198 206
199 uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar); 207 uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
208
209 if (!test_and_set_bit(CON_TX_NEEDED, &pmax->uart_flags))
210 wake_up_process(pmax->main_thread);
200} 211}
201 212
202static int __init 213static int __init
@@ -210,6 +221,9 @@ serial_m3110_con_setup(struct console *co, char *options)
210 221
211 pr_info(PR_FMT "setting up console\n"); 222 pr_info(PR_FMT "setting up console\n");
212 223
224 if (co->index == -1)
225 co->index = 0;
226
213 if (!max) { 227 if (!max) {
214 pr_err(PR_FMT "pmax is NULL, return"); 228 pr_err(PR_FMT "pmax is NULL, return");
215 return -ENODEV; 229 return -ENODEV;
@@ -240,8 +254,6 @@ static struct console serial_m3110_console = {
240 .data = &serial_m3110_reg, 254 .data = &serial_m3110_reg,
241}; 255};
242 256
243#define MRST_CONSOLE (&serial_m3110_console)
244
245static unsigned int serial_m3110_tx_empty(struct uart_port *port) 257static unsigned int serial_m3110_tx_empty(struct uart_port *port)
246{ 258{
247 return 1; 259 return 1;
@@ -259,32 +271,44 @@ static void serial_m3110_stop_rx(struct uart_port *port)
259} 271}
260 272
261#define WORDS_PER_XFER 128 273#define WORDS_PER_XFER 128
262static inline void send_circ_buf(struct uart_max3110 *max, 274static void send_circ_buf(struct uart_max3110 *max,
263 struct circ_buf *xmit) 275 struct circ_buf *xmit)
264{ 276{
265 int len, left = 0; 277 void *buf;
266 u16 obuf[WORDS_PER_XFER], ibuf[WORDS_PER_XFER]; 278 u16 *obuf, *ibuf;
267 u8 valid_str[WORDS_PER_XFER]; 279 u8 valid_str[WORDS_PER_XFER];
268 int i, j; 280 int i, j, len, blen, dma_size, left, ret = 0;
281
282
283 dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
284 buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
285 if (!buf)
286 return;
287 obuf = buf;
288 ibuf = buf + dma_size/2;
269 289
270 while (!uart_circ_empty(xmit)) { 290 while (!uart_circ_empty(xmit)) {
271 left = uart_circ_chars_pending(xmit); 291 left = uart_circ_chars_pending(xmit);
272 while (left) { 292 while (left) {
273 len = (left >= WORDS_PER_XFER) ? WORDS_PER_XFER : left; 293 len = min(left, WORDS_PER_XFER);
294 blen = len * sizeof(u16);
295 memset(ibuf, 0, blen);
274 296
275 memset(obuf, 0, len * 2);
276 memset(ibuf, 0, len * 2);
277 for (i = 0; i < len; i++) { 297 for (i = 0; i < len; i++) {
278 obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG; 298 obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
279 xmit->tail = (xmit->tail + 1) & 299 xmit->tail = (xmit->tail + 1) &
280 (UART_XMIT_SIZE - 1); 300 (UART_XMIT_SIZE - 1);
281 } 301 }
282 max3110_write_then_read(max, (u8 *)obuf, 302
283 (u8 *)ibuf, len * 2, 0); 303 /* Fail to send msg to console is not very critical */
304 ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
305 if (ret)
306 pr_warning(PR_FMT "%s(): get err msg %d\n",
307 __func__, ret);
284 308
285 for (i = 0, j = 0; i < len; i++) { 309 for (i = 0, j = 0; i < len; i++) {
286 if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE) 310 if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
287 valid_str[j++] = (u8)(ibuf[i] & 0xff); 311 valid_str[j++] = ibuf[i] & 0xff;
288 } 312 }
289 313
290 if (j) 314 if (j)
@@ -294,6 +318,8 @@ static inline void send_circ_buf(struct uart_max3110 *max,
294 left -= len; 318 left -= len;
295 } 319 }
296 } 320 }
321
322 kfree(buf);
297} 323}
298 324
299static void transmit_char(struct uart_max3110 *max) 325static void transmit_char(struct uart_max3110 *max)
@@ -313,8 +339,10 @@ static void transmit_char(struct uart_max3110 *max)
313 serial_m3110_stop_tx(port); 339 serial_m3110_stop_tx(port);
314} 340}
315 341
316/* This will be called by uart_write() and tty_write, can't 342/*
317 * go to sleep */ 343 * This will be called by uart_write() and tty_write, can't
344 * go to sleep
345 */
318static void serial_m3110_start_tx(struct uart_port *port) 346static void serial_m3110_start_tx(struct uart_port *port)
319{ 347{
320 struct uart_max3110 *max = 348 struct uart_max3110 *max =
@@ -336,7 +364,7 @@ static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
336 364
337 tty = port->state->port.tty; 365 tty = port->state->port.tty;
338 if (!tty) 366 if (!tty)
339 return; /* receive some char before the tty is opened */ 367 return;
340 368
341 while (len) { 369 while (len) {
342 usable = tty_buffer_request_room(tty, len); 370 usable = tty_buffer_request_room(tty, len);
@@ -344,32 +372,37 @@ static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
344 tty_insert_flip_string(tty, str, usable); 372 tty_insert_flip_string(tty, str, usable);
345 str += usable; 373 str += usable;
346 port->icount.rx += usable; 374 port->icount.rx += usable;
347 tty_flip_buffer_push(tty);
348 } 375 }
349 len -= usable; 376 len -= usable;
350 } 377 }
378 tty_flip_buffer_push(tty);
351} 379}
352 380
353static inline void receive_char(struct uart_max3110 *max, u8 ch) 381/*
354{ 382 * This routine will be used in read_thread or RX IRQ handling,
355 receive_chars(max, &ch, 1); 383 * it will first do one round buffer read(8 words), if there is some
356} 384 * valid RX data, will try to read 5 more rounds till all data
357 385 * is read out.
358static void max3110_console_receive(struct uart_max3110 *max) 386 *
387 * Use stack space as data buffer to save some system load, and chose
388 * 504 Btyes as a threadhold to do a bulk push to upper tty layer when
389 * receiving bulk data, a much bigger buffer may cause stack overflow
390 */
391static void max3110_con_receive(struct uart_max3110 *max)
359{ 392{
360 int loop = 1, num, total = 0; 393 int loop = 1, num, total = 0;
361 u8 recv_buf[512], *pbuf; 394 u8 recv_buf[512], *pbuf;
362 395
363 pbuf = recv_buf; 396 pbuf = recv_buf;
364 do { 397 do {
365 num = max3110_read_multi(max, 8, pbuf); 398 num = max3110_read_multi(max, pbuf);
366 399
367 if (num) { 400 if (num) {
368 loop = 10; 401 loop = 5;
369 pbuf += num; 402 pbuf += num;
370 total += num; 403 total += num;
371 404
372 if (total >= 500) { 405 if (total >= 504) {
373 receive_chars(max, recv_buf, total); 406 receive_chars(max, recv_buf, total);
374 pbuf = recv_buf; 407 pbuf = recv_buf;
375 total = 0; 408 total = 0;
@@ -397,7 +430,7 @@ static int max3110_main_thread(void *_max)
397 mutex_lock(&max->thread_mutex); 430 mutex_lock(&max->thread_mutex);
398 431
399 if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags)) 432 if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
400 max3110_console_receive(max); 433 max3110_con_receive(max);
401 434
402 /* first handle console output */ 435 /* first handle console output */
403 if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags)) 436 if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags))
@@ -414,7 +447,6 @@ static int max3110_main_thread(void *_max)
414 return ret; 447 return ret;
415} 448}
416 449
417#ifdef CONFIG_MRST_MAX3110_IRQ
418static irqreturn_t serial_m3110_irq(int irq, void *dev_id) 450static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
419{ 451{
420 struct uart_max3110 *max = dev_id; 452 struct uart_max3110 *max = dev_id;
@@ -426,7 +458,7 @@ static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
426 458
427 return IRQ_HANDLED; 459 return IRQ_HANDLED;
428} 460}
429#else 461
430/* if don't use RX IRQ, then need a thread to polling read */ 462/* if don't use RX IRQ, then need a thread to polling read */
431static int max3110_read_thread(void *_max) 463static int max3110_read_thread(void *_max)
432{ 464{
@@ -434,9 +466,14 @@ static int max3110_read_thread(void *_max)
434 466
435 pr_info(PR_FMT "start read thread\n"); 467 pr_info(PR_FMT "start read thread\n");
436 do { 468 do {
437 mutex_lock(&max->thread_mutex); 469 /*
438 max3110_console_receive(max); 470 * If can't acquire the mutex, it means the main thread
439 mutex_unlock(&max->thread_mutex); 471 * is running which will also perform the rx job
472 */
473 if (mutex_trylock(&max->thread_mutex)) {
474 max3110_con_receive(max);
475 mutex_unlock(&max->thread_mutex);
476 }
440 477
441 set_current_state(TASK_INTERRUPTIBLE); 478 set_current_state(TASK_INTERRUPTIBLE);
442 schedule_timeout(HZ / 20); 479 schedule_timeout(HZ / 20);
@@ -444,7 +481,6 @@ static int max3110_read_thread(void *_max)
444 481
445 return 0; 482 return 0;
446} 483}
447#endif
448 484
449static int serial_m3110_startup(struct uart_port *port) 485static int serial_m3110_startup(struct uart_port *port)
450{ 486{
@@ -453,33 +489,54 @@ static int serial_m3110_startup(struct uart_port *port)
453 u16 config = 0; 489 u16 config = 0;
454 int ret = 0; 490 int ret = 0;
455 491
456 if (port->line != 0) 492 if (port->line != 0) {
457 pr_err(PR_FMT "uart port startup failed\n"); 493 pr_err(PR_FMT "uart port startup failed\n");
494 return -1;
495 }
458 496
459 /* firstly disable all IRQ and config it to 115200, 8n1 */ 497 /* Disable all IRQ and config it to 115200, 8n1 */
460 config = WC_TAG | WC_FIFO_ENABLE 498 config = WC_TAG | WC_FIFO_ENABLE
461 | WC_1_STOPBITS 499 | WC_1_STOPBITS
462 | WC_8BIT_WORD 500 | WC_8BIT_WORD
463 | WC_BAUD_DR2; 501 | WC_BAUD_DR2;
464 ret = max3110_out(max, config);
465 502
466 /* as we use thread to handle tx/rx, need set low latency */ 503 /* as we use thread to handle tx/rx, need set low latency */
467 port->state->port.tty->low_latency = 1; 504 port->state->port.tty->low_latency = 1;
468 505
469#ifdef CONFIG_MRST_MAX3110_IRQ 506 if (max->irq) {
470 ret = request_irq(max->irq, serial_m3110_irq, 507 max->read_thread = NULL;
508 ret = request_irq(max->irq, serial_m3110_irq,
471 IRQ_TYPE_EDGE_FALLING, "max3110", max); 509 IRQ_TYPE_EDGE_FALLING, "max3110", max);
472 if (ret) 510 if (ret) {
473 return ret; 511 max->irq = 0;
512 pr_err(PR_FMT "unable to allocate IRQ, polling\n");
513 } else {
514 /* Enable RX IRQ only */
515 config |= WC_RXA_IRQ_ENABLE;
516 }
517 }
474 518
475 /* enable RX IRQ only */ 519 if (max->irq == 0) {
476 config |= WC_RXA_IRQ_ENABLE; 520 /* If IRQ is disabled, start a read thread for input data */
477 max3110_out(max, config); 521 max->read_thread =
478#else 522 kthread_run(max3110_read_thread, max, "max3110_read");
479 /* if IRQ is disabled, start a read thread for input data */ 523 if (IS_ERR(max->read_thread)) {
480 max->read_thread = 524 ret = PTR_ERR(max->read_thread);
481 kthread_run(max3110_read_thread, max, "max3110_read"); 525 max->read_thread = NULL;
482#endif 526 pr_err(PR_FMT "Can't create read thread!\n");
527 return ret;
528 }
529 }
530
531 ret = max3110_out(max, config);
532 if (ret) {
533 if (max->irq)
534 free_irq(max->irq, max);
535 if (max->read_thread)
536 kthread_stop(max->read_thread);
537 max->read_thread = NULL;
538 return ret;
539 }
483 540
484 max->cur_conf = config; 541 max->cur_conf = config;
485 return 0; 542 return 0;
@@ -496,9 +553,8 @@ static void serial_m3110_shutdown(struct uart_port *port)
496 max->read_thread = NULL; 553 max->read_thread = NULL;
497 } 554 }
498 555
499#ifdef CONFIG_MRST_MAX3110_IRQ 556 if (max->irq)
500 free_irq(max->irq, max); 557 free_irq(max->irq, max);
501#endif
502 558
503 /* Disable interrupts from this port */ 559 /* Disable interrupts from this port */
504 config = WC_TAG | WC_SW_SHDI; 560 config = WC_TAG | WC_SW_SHDI;
@@ -516,8 +572,7 @@ static int serial_m3110_request_port(struct uart_port *port)
516 572
517static void serial_m3110_config_port(struct uart_port *port, int flags) 573static void serial_m3110_config_port(struct uart_port *port, int flags)
518{ 574{
519 /* give it fake type */ 575 port->type = PORT_MAX3100;
520 port->type = PORT_PXA;
521} 576}
522 577
523static int 578static int
@@ -552,6 +607,9 @@ serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
552 new_conf |= WC_7BIT_WORD; 607 new_conf |= WC_7BIT_WORD;
553 break; 608 break;
554 default: 609 default:
610 /* We only support CS7 & CS8 */
611 termios->c_cflag &= ~CSIZE;
612 termios->c_cflag |= CS8;
555 case CS8: 613 case CS8:
556 cval = UART_LCR_WLEN8; 614 cval = UART_LCR_WLEN8;
557 new_conf |= WC_8BIT_WORD; 615 new_conf |= WC_8BIT_WORD;
@@ -560,7 +618,7 @@ serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
560 618
561 baud = uart_get_baud_rate(port, termios, old, 0, 230400); 619 baud = uart_get_baud_rate(port, termios, old, 0, 230400);
562 620
563 /* first calc the div for 1.8MHZ clock case */ 621 /* First calc the div for 1.8MHZ clock case */
564 switch (baud) { 622 switch (baud) {
565 case 300: 623 case 300:
566 clk_div = WC_BAUD_DR384; 624 clk_div = WC_BAUD_DR384;
@@ -596,7 +654,7 @@ serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
596 if (max->clock & MAX3110_HIGH_CLK) 654 if (max->clock & MAX3110_HIGH_CLK)
597 break; 655 break;
598 default: 656 default:
599 /* pick the previous baud rate */ 657 /* Pick the previous baud rate */
600 baud = max->baud; 658 baud = max->baud;
601 clk_div = max->cur_conf & WC_BAUD_DIV_MASK; 659 clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
602 tty_termios_encode_baud_rate(termios, baud, baud); 660 tty_termios_encode_baud_rate(termios, baud, baud);
@@ -604,15 +662,21 @@ serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
604 662
605 if (max->clock & MAX3110_HIGH_CLK) { 663 if (max->clock & MAX3110_HIGH_CLK) {
606 clk_div += 1; 664 clk_div += 1;
607 /* high clk version max3110 doesn't support B300 */ 665 /* High clk version max3110 doesn't support B300 */
608 if (baud == 300) 666 if (baud == 300) {
609 baud = 600; 667 baud = 600;
668 clk_div = WC_BAUD_DR384;
669 }
610 if (baud == 230400) 670 if (baud == 230400)
611 clk_div = WC_BAUD_DR1; 671 clk_div = WC_BAUD_DR1;
612 tty_termios_encode_baud_rate(termios, baud, baud); 672 tty_termios_encode_baud_rate(termios, baud, baud);
613 } 673 }
614 674
615 new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div; 675 new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
676
677 if (unlikely(termios->c_cflag & CMSPAR))
678 termios->c_cflag &= ~CMSPAR;
679
616 if (termios->c_cflag & CSTOPB) 680 if (termios->c_cflag & CSTOPB)
617 new_conf |= WC_2_STOPBITS; 681 new_conf |= WC_2_STOPBITS;
618 else 682 else
@@ -632,13 +696,14 @@ serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
632 696
633 new_conf |= WC_TAG; 697 new_conf |= WC_TAG;
634 if (new_conf != max->cur_conf) { 698 if (new_conf != max->cur_conf) {
635 max3110_out(max, new_conf); 699 if (!max3110_out(max, new_conf)) {
636 max->cur_conf = new_conf; 700 max->cur_conf = new_conf;
637 max->baud = baud; 701 max->baud = baud;
702 }
638 } 703 }
639} 704}
640 705
641/* don't handle hw handshaking */ 706/* Don't handle hw handshaking */
642static unsigned int serial_m3110_get_mctrl(struct uart_port *port) 707static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
643{ 708{
644 return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR; 709 return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR;
@@ -672,7 +737,7 @@ struct uart_ops serial_m3110_ops = {
672 .break_ctl = serial_m3110_break_ctl, 737 .break_ctl = serial_m3110_break_ctl,
673 .startup = serial_m3110_startup, 738 .startup = serial_m3110_startup,
674 .shutdown = serial_m3110_shutdown, 739 .shutdown = serial_m3110_shutdown,
675 .set_termios = serial_m3110_set_termios, /* must have */ 740 .set_termios = serial_m3110_set_termios,
676 .pm = serial_m3110_pm, 741 .pm = serial_m3110_pm,
677 .type = serial_m3110_type, 742 .type = serial_m3110_type,
678 .release_port = serial_m3110_release_port, 743 .release_port = serial_m3110_release_port,
@@ -688,52 +753,60 @@ static struct uart_driver serial_m3110_reg = {
688 .major = TTY_MAJOR, 753 .major = TTY_MAJOR,
689 .minor = 64, 754 .minor = 64,
690 .nr = 1, 755 .nr = 1,
691 .cons = MRST_CONSOLE, 756 .cons = &serial_m3110_console,
692}; 757};
693 758
759#ifdef CONFIG_PM
694static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state) 760static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
695{ 761{
762 struct uart_max3110 *max = spi_get_drvdata(spi);
763
764 disable_irq(max->irq);
765 uart_suspend_port(&serial_m3110_reg, &max->port);
766 max3110_out(max, max->cur_conf | WC_SW_SHDI);
696 return 0; 767 return 0;
697} 768}
698 769
699static int serial_m3110_resume(struct spi_device *spi) 770static int serial_m3110_resume(struct spi_device *spi)
700{ 771{
772 struct uart_max3110 *max = spi_get_drvdata(spi);
773
774 max3110_out(max, max->cur_conf);
775 uart_resume_port(&serial_m3110_reg, &max->port);
776 enable_irq(max->irq);
701 return 0; 777 return 0;
702} 778}
779#else
780#define serial_m3110_suspend NULL
781#define serial_m3110_resume NULL
782#endif
703 783
704static struct dw_spi_chip spi0_uart = { 784static int __devinit serial_m3110_probe(struct spi_device *spi)
705 .poll_mode = 1,
706 .enable_dma = 0,
707 .type = SPI_FRF_SPI,
708};
709
710static int serial_m3110_probe(struct spi_device *spi)
711{ 785{
712 struct uart_max3110 *max; 786 struct uart_max3110 *max;
713 int ret; 787 void *buffer;
714 unsigned char *buffer;
715 u16 res; 788 u16 res;
789 int ret = 0;
790
716 max = kzalloc(sizeof(*max), GFP_KERNEL); 791 max = kzalloc(sizeof(*max), GFP_KERNEL);
717 if (!max) 792 if (!max)
718 return -ENOMEM; 793 return -ENOMEM;
719 794
720 /* set spi info */ 795 /* Set spi info */
721 spi->mode = SPI_MODE_0;
722 spi->bits_per_word = 16; 796 spi->bits_per_word = 16;
723 max->clock = MAX3110_HIGH_CLK; 797 max->clock = MAX3110_HIGH_CLK;
724 spi->controller_data = &spi0_uart;
725 798
726 spi_setup(spi); 799 spi_setup(spi);
727 800
728 max->port.type = PORT_PXA; /* need apply for a max3110 type */ 801 max->port.type = PORT_MAX3100;
729 max->port.fifosize = 2; /* only have 16b buffer */ 802 max->port.fifosize = 2; /* Only have 16b buffer */
730 max->port.ops = &serial_m3110_ops; 803 max->port.ops = &serial_m3110_ops;
731 max->port.line = 0; 804 max->port.line = 0;
732 max->port.dev = &spi->dev; 805 max->port.dev = &spi->dev;
733 max->port.uartclk = 115200; 806 max->port.uartclk = 115200;
734 807
735 max->spi = spi; 808 max->spi = spi;
736 max->name = spi->modalias; /* use spi name as the name */ 809 strcpy(max->name, spi->modalias);
737 max->irq = (u16)spi->irq; 810 max->irq = (u16)spi->irq;
738 811
739 mutex_init(&max->thread_mutex); 812 mutex_init(&max->thread_mutex);
@@ -755,13 +828,15 @@ static int serial_m3110_probe(struct spi_device *spi)
755 ret = -ENODEV; 828 ret = -ENODEV;
756 goto err_get_page; 829 goto err_get_page;
757 } 830 }
758 buffer = (unsigned char *)__get_free_page(GFP_KERNEL); 831
832 buffer = (void *)__get_free_page(GFP_KERNEL);
759 if (!buffer) { 833 if (!buffer) {
760 ret = -ENOMEM; 834 ret = -ENOMEM;
761 goto err_get_page; 835 goto err_get_page;
762 } 836 }
763 max->con_xmit.buf = (unsigned char *)buffer; 837 max->con_xmit.buf = buffer;
764 max->con_xmit.head = max->con_xmit.tail = 0; 838 max->con_xmit.head = 0;
839 max->con_xmit.tail = 0;
765 840
766 max->main_thread = kthread_run(max3110_main_thread, 841 max->main_thread = kthread_run(max3110_main_thread,
767 max, "max3110_main"); 842 max, "max3110_main");
@@ -770,8 +845,10 @@ static int serial_m3110_probe(struct spi_device *spi)
770 goto err_kthread; 845 goto err_kthread;
771 } 846 }
772 847
848 spi_set_drvdata(spi, max);
773 pmax = max; 849 pmax = max;
774 /* give membase a psudo value to pass serial_core's check */ 850
851 /* Give membase a psudo value to pass serial_core's check */
775 max->port.membase = (void *)0xff110000; 852 max->port.membase = (void *)0xff110000;
776 uart_add_one_port(&serial_m3110_reg, &max->port); 853 uart_add_one_port(&serial_m3110_reg, &max->port);
777 854
@@ -780,19 +857,17 @@ static int serial_m3110_probe(struct spi_device *spi)
780err_kthread: 857err_kthread:
781 free_page((unsigned long)buffer); 858 free_page((unsigned long)buffer);
782err_get_page: 859err_get_page:
783 pmax = NULL;
784 kfree(max); 860 kfree(max);
785 return ret; 861 return ret;
786} 862}
787 863
788static int max3110_remove(struct spi_device *dev) 864static int __devexit serial_m3110_remove(struct spi_device *dev)
789{ 865{
790 struct uart_max3110 *max = pmax; 866 struct uart_max3110 *max = spi_get_drvdata(dev);
791 867
792 if (!pmax) 868 if (!max)
793 return 0; 869 return 0;
794 870
795 pmax = NULL;
796 uart_remove_one_port(&serial_m3110_reg, &max->port); 871 uart_remove_one_port(&serial_m3110_reg, &max->port);
797 872
798 free_page((unsigned long)max->con_xmit.buf); 873 free_page((unsigned long)max->con_xmit.buf);
@@ -811,13 +886,12 @@ static struct spi_driver uart_max3110_driver = {
811 .owner = THIS_MODULE, 886 .owner = THIS_MODULE,
812 }, 887 },
813 .probe = serial_m3110_probe, 888 .probe = serial_m3110_probe,
814 .remove = __devexit_p(max3110_remove), 889 .remove = __devexit_p(serial_m3110_remove),
815 .suspend = serial_m3110_suspend, 890 .suspend = serial_m3110_suspend,
816 .resume = serial_m3110_resume, 891 .resume = serial_m3110_resume,
817}; 892};
818 893
819 894static int __init serial_m3110_init(void)
820int __init serial_m3110_init(void)
821{ 895{
822 int ret = 0; 896 int ret = 0;
823 897
@@ -832,7 +906,7 @@ int __init serial_m3110_init(void)
832 return ret; 906 return ret;
833} 907}
834 908
835void __exit serial_m3110_exit(void) 909static void __exit serial_m3110_exit(void)
836{ 910{
837 spi_unregister_driver(&uart_max3110_driver); 911 spi_unregister_driver(&uart_max3110_driver);
838 uart_unregister_driver(&serial_m3110_reg); 912 uart_unregister_driver(&serial_m3110_reg);
@@ -841,5 +915,5 @@ void __exit serial_m3110_exit(void)
841module_init(serial_m3110_init); 915module_init(serial_m3110_init);
842module_exit(serial_m3110_exit); 916module_exit(serial_m3110_exit);
843 917
844MODULE_LICENSE("GPL"); 918MODULE_LICENSE("GPL v2");
845MODULE_ALIAS("max3110-uart"); 919MODULE_ALIAS("max3110-uart");
diff --git a/drivers/serial/mrst_max3110.h b/drivers/serial/mrst_max3110.h
index 363478acb2c3..d1ef43af397c 100644
--- a/drivers/serial/mrst_max3110.h
+++ b/drivers/serial/mrst_max3110.h
@@ -56,4 +56,5 @@
56#define WC_BAUD_DR192 (0xE) 56#define WC_BAUD_DR192 (0xE)
57#define WC_BAUD_DR384 (0xF) 57#define WC_BAUD_DR384 (0xF)
58 58
59#define M3110_RX_FIFO_DEPTH 8
59#endif 60#endif
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index cd8511298bcb..c4ea14670d44 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1074,10 +1074,10 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg)
1074 * NB: both 1->0 and 0->1 transitions are counted except for 1074 * NB: both 1->0 and 0->1 transitions are counted except for
1075 * RI where only 0->1 is counted. 1075 * RI where only 0->1 is counted.
1076 */ 1076 */
1077static int uart_get_count(struct uart_state *state, 1077static int uart_get_icount(struct tty_struct *tty,
1078 struct serial_icounter_struct __user *icnt) 1078 struct serial_icounter_struct *icount)
1079{ 1079{
1080 struct serial_icounter_struct icount; 1080 struct uart_state *state = tty->driver_data;
1081 struct uart_icount cnow; 1081 struct uart_icount cnow;
1082 struct uart_port *uport = state->uart_port; 1082 struct uart_port *uport = state->uart_port;
1083 1083
@@ -1085,19 +1085,19 @@ static int uart_get_count(struct uart_state *state,
1085 memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); 1085 memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
1086 spin_unlock_irq(&uport->lock); 1086 spin_unlock_irq(&uport->lock);
1087 1087
1088 icount.cts = cnow.cts; 1088 icount->cts = cnow.cts;
1089 icount.dsr = cnow.dsr; 1089 icount->dsr = cnow.dsr;
1090 icount.rng = cnow.rng; 1090 icount->rng = cnow.rng;
1091 icount.dcd = cnow.dcd; 1091 icount->dcd = cnow.dcd;
1092 icount.rx = cnow.rx; 1092 icount->rx = cnow.rx;
1093 icount.tx = cnow.tx; 1093 icount->tx = cnow.tx;
1094 icount.frame = cnow.frame; 1094 icount->frame = cnow.frame;
1095 icount.overrun = cnow.overrun; 1095 icount->overrun = cnow.overrun;
1096 icount.parity = cnow.parity; 1096 icount->parity = cnow.parity;
1097 icount.brk = cnow.brk; 1097 icount->brk = cnow.brk;
1098 icount.buf_overrun = cnow.buf_overrun; 1098 icount->buf_overrun = cnow.buf_overrun;
1099 1099
1100 return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; 1100 return 0;
1101} 1101}
1102 1102
1103/* 1103/*
@@ -1150,10 +1150,6 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
1150 case TIOCMIWAIT: 1150 case TIOCMIWAIT:
1151 ret = uart_wait_modem_status(state, arg); 1151 ret = uart_wait_modem_status(state, arg);
1152 break; 1152 break;
1153
1154 case TIOCGICOUNT:
1155 ret = uart_get_count(state, uarg);
1156 break;
1157 } 1153 }
1158 1154
1159 if (ret != -ENOIOCTLCMD) 1155 if (ret != -ENOIOCTLCMD)
@@ -2065,7 +2061,19 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
2065 /* 2061 /*
2066 * Re-enable the console device after suspending. 2062 * Re-enable the console device after suspending.
2067 */ 2063 */
2068 if (uart_console(uport)) { 2064 if (console_suspend_enabled && uart_console(uport)) {
2065 /*
2066 * First try to use the console cflag setting.
2067 */
2068 memset(&termios, 0, sizeof(struct ktermios));
2069 termios.c_cflag = uport->cons->cflag;
2070
2071 /*
2072 * If that's unset, use the tty termios setting.
2073 */
2074 if (port->tty && port->tty->termios && termios.c_cflag == 0)
2075 termios = *(port->tty->termios);
2076
2069 uart_change_pm(state, 0); 2077 uart_change_pm(state, 0);
2070 uport->ops->set_termios(uport, &termios, NULL); 2078 uport->ops->set_termios(uport, &termios, NULL);
2071 console_start(uport->cons); 2079 console_start(uport->cons);
@@ -2283,6 +2291,7 @@ static const struct tty_operations uart_ops = {
2283#endif 2291#endif
2284 .tiocmget = uart_tiocmget, 2292 .tiocmget = uart_tiocmget,
2285 .tiocmset = uart_tiocmset, 2293 .tiocmset = uart_tiocmset,
2294 .get_icount = uart_get_icount,
2286#ifdef CONFIG_CONSOLE_POLL 2295#ifdef CONFIG_CONSOLE_POLL
2287 .poll_init = uart_poll_init, 2296 .poll_init = uart_poll_init,
2288 .poll_get_char = uart_poll_get_char, 2297 .poll_get_char = uart_poll_get_char,
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 9b03d7b3e456..c4bf54bb3fc7 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -322,6 +322,26 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
322 return -EINVAL; 322 return -EINVAL;
323} 323}
324 324
325#ifdef CONFIG_CONSOLE_POLL
326static int ulite_get_poll_char(struct uart_port *port)
327{
328 if (!(ioread32be(port->membase + ULITE_STATUS)
329 & ULITE_STATUS_RXVALID))
330 return NO_POLL_CHAR;
331
332 return ioread32be(port->membase + ULITE_RX);
333}
334
335static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
336{
337 while (ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)
338 cpu_relax();
339
340 /* write char to device */
341 iowrite32be(ch, port->membase + ULITE_TX);
342}
343#endif
344
325static struct uart_ops ulite_ops = { 345static struct uart_ops ulite_ops = {
326 .tx_empty = ulite_tx_empty, 346 .tx_empty = ulite_tx_empty,
327 .set_mctrl = ulite_set_mctrl, 347 .set_mctrl = ulite_set_mctrl,
@@ -338,7 +358,11 @@ static struct uart_ops ulite_ops = {
338 .release_port = ulite_release_port, 358 .release_port = ulite_release_port,
339 .request_port = ulite_request_port, 359 .request_port = ulite_request_port,
340 .config_port = ulite_config_port, 360 .config_port = ulite_config_port,
341 .verify_port = ulite_verify_port 361 .verify_port = ulite_verify_port,
362#ifdef CONFIG_CONSOLE_POLL
363 .poll_get_char = ulite_get_poll_char,
364 .poll_put_char = ulite_put_poll_char,
365#endif
342}; 366};
343 367
344/* --------------------------------------------------------------------- 368/* ---------------------------------------------------------------------
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 4d3a6fd1a152..a858d2b87b94 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -23,9 +23,10 @@
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/kobject.h> 25#include <linux/kobject.h>
26#include <linux/cdev.h>
26#include <linux/uio_driver.h> 27#include <linux/uio_driver.h>
27 28
28#define UIO_MAX_DEVICES 255 29#define UIO_MAX_DEVICES (1U << MINORBITS)
29 30
30struct uio_device { 31struct uio_device {
31 struct module *owner; 32 struct module *owner;
@@ -41,15 +42,10 @@ struct uio_device {
41}; 42};
42 43
43static int uio_major; 44static int uio_major;
45static struct cdev *uio_cdev;
44static DEFINE_IDR(uio_idr); 46static DEFINE_IDR(uio_idr);
45static const struct file_operations uio_fops; 47static const struct file_operations uio_fops;
46 48
47/* UIO class infrastructure */
48static struct uio_class {
49 struct kref kref;
50 struct class *class;
51} *uio_class;
52
53/* Protect idr accesses */ 49/* Protect idr accesses */
54static DEFINE_MUTEX(minor_lock); 50static DEFINE_MUTEX(minor_lock);
55 51
@@ -232,45 +228,34 @@ static ssize_t show_name(struct device *dev,
232 struct device_attribute *attr, char *buf) 228 struct device_attribute *attr, char *buf)
233{ 229{
234 struct uio_device *idev = dev_get_drvdata(dev); 230 struct uio_device *idev = dev_get_drvdata(dev);
235 if (idev) 231 return sprintf(buf, "%s\n", idev->info->name);
236 return sprintf(buf, "%s\n", idev->info->name);
237 else
238 return -ENODEV;
239} 232}
240static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
241 233
242static ssize_t show_version(struct device *dev, 234static ssize_t show_version(struct device *dev,
243 struct device_attribute *attr, char *buf) 235 struct device_attribute *attr, char *buf)
244{ 236{
245 struct uio_device *idev = dev_get_drvdata(dev); 237 struct uio_device *idev = dev_get_drvdata(dev);
246 if (idev) 238 return sprintf(buf, "%s\n", idev->info->version);
247 return sprintf(buf, "%s\n", idev->info->version);
248 else
249 return -ENODEV;
250} 239}
251static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
252 240
253static ssize_t show_event(struct device *dev, 241static ssize_t show_event(struct device *dev,
254 struct device_attribute *attr, char *buf) 242 struct device_attribute *attr, char *buf)
255{ 243{
256 struct uio_device *idev = dev_get_drvdata(dev); 244 struct uio_device *idev = dev_get_drvdata(dev);
257 if (idev) 245 return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
258 return sprintf(buf, "%u\n",
259 (unsigned int)atomic_read(&idev->event));
260 else
261 return -ENODEV;
262} 246}
263static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
264 247
265static struct attribute *uio_attrs[] = { 248static struct device_attribute uio_class_attributes[] = {
266 &dev_attr_name.attr, 249 __ATTR(name, S_IRUGO, show_name, NULL),
267 &dev_attr_version.attr, 250 __ATTR(version, S_IRUGO, show_version, NULL),
268 &dev_attr_event.attr, 251 __ATTR(event, S_IRUGO, show_event, NULL),
269 NULL, 252 {}
270}; 253};
271 254
272static struct attribute_group uio_attr_grp = { 255/* UIO class infrastructure */
273 .attrs = uio_attrs, 256static struct class uio_class = {
257 .name = "uio",
258 .dev_attrs = uio_class_attributes,
274}; 259};
275 260
276/* 261/*
@@ -287,10 +272,6 @@ static int uio_dev_add_attributes(struct uio_device *idev)
287 struct uio_port *port; 272 struct uio_port *port;
288 struct uio_portio *portio; 273 struct uio_portio *portio;
289 274
290 ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
291 if (ret)
292 goto err_group;
293
294 for (mi = 0; mi < MAX_UIO_MAPS; mi++) { 275 for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
295 mem = &idev->info->mem[mi]; 276 mem = &idev->info->mem[mi];
296 if (mem->size == 0) 277 if (mem->size == 0)
@@ -358,8 +339,6 @@ err_map:
358 kobject_put(&map->kobj); 339 kobject_put(&map->kobj);
359 } 340 }
360 kobject_put(idev->map_dir); 341 kobject_put(idev->map_dir);
361 sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
362err_group:
363 dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); 342 dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
364 return ret; 343 return ret;
365} 344}
@@ -385,8 +364,6 @@ static void uio_dev_del_attributes(struct uio_device *idev)
385 kobject_put(&port->portio->kobj); 364 kobject_put(&port->portio->kobj);
386 } 365 }
387 kobject_put(idev->portio_dir); 366 kobject_put(idev->portio_dir);
388
389 sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
390} 367}
391 368
392static int uio_get_minor(struct uio_device *idev) 369static int uio_get_minor(struct uio_device *idev)
@@ -525,7 +502,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
525 struct uio_listener *listener = filep->private_data; 502 struct uio_listener *listener = filep->private_data;
526 struct uio_device *idev = listener->dev; 503 struct uio_device *idev = listener->dev;
527 504
528 if (idev->info->irq == UIO_IRQ_NONE) 505 if (!idev->info->irq)
529 return -EIO; 506 return -EIO;
530 507
531 poll_wait(filep, &idev->wait, wait); 508 poll_wait(filep, &idev->wait, wait);
@@ -543,7 +520,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
543 ssize_t retval; 520 ssize_t retval;
544 s32 event_count; 521 s32 event_count;
545 522
546 if (idev->info->irq == UIO_IRQ_NONE) 523 if (!idev->info->irq)
547 return -EIO; 524 return -EIO;
548 525
549 if (count != sizeof(s32)) 526 if (count != sizeof(s32))
@@ -591,7 +568,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
591 ssize_t retval; 568 ssize_t retval;
592 s32 irq_on; 569 s32 irq_on;
593 570
594 if (idev->info->irq == UIO_IRQ_NONE) 571 if (!idev->info->irq)
595 return -EIO; 572 return -EIO;
596 573
597 if (count != sizeof(s32)) 574 if (count != sizeof(s32))
@@ -745,68 +722,72 @@ static const struct file_operations uio_fops = {
745 722
746static int uio_major_init(void) 723static int uio_major_init(void)
747{ 724{
748 uio_major = register_chrdev(0, "uio", &uio_fops); 725 static const char name[] = "uio";
749 if (uio_major < 0) 726 struct cdev *cdev = NULL;
750 return uio_major; 727 dev_t uio_dev = 0;
751 return 0; 728 int result;
729
730 result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
731 if (result)
732 goto out;
733
734 result = -ENOMEM;
735 cdev = cdev_alloc();
736 if (!cdev)
737 goto out_unregister;
738
739 cdev->owner = THIS_MODULE;
740 cdev->ops = &uio_fops;
741 kobject_set_name(&cdev->kobj, "%s", name);
742
743 result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
744 if (result)
745 goto out_put;
746
747 uio_major = MAJOR(uio_dev);
748 uio_cdev = cdev;
749 result = 0;
750out:
751 return result;
752out_put:
753 kobject_put(&cdev->kobj);
754out_unregister:
755 unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
756 goto out;
752} 757}
753 758
754static void uio_major_cleanup(void) 759static void uio_major_cleanup(void)
755{ 760{
756 unregister_chrdev(uio_major, "uio"); 761 unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
762 cdev_del(uio_cdev);
757} 763}
758 764
759static int init_uio_class(void) 765static int init_uio_class(void)
760{ 766{
761 int ret = 0; 767 int ret;
762
763 if (uio_class != NULL) {
764 kref_get(&uio_class->kref);
765 goto exit;
766 }
767 768
768 /* This is the first time in here, set everything up properly */ 769 /* This is the first time in here, set everything up properly */
769 ret = uio_major_init(); 770 ret = uio_major_init();
770 if (ret) 771 if (ret)
771 goto exit; 772 goto exit;
772 773
773 uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL); 774 ret = class_register(&uio_class);
774 if (!uio_class) { 775 if (ret) {
775 ret = -ENOMEM; 776 printk(KERN_ERR "class_register failed for uio\n");
776 goto err_kzalloc; 777 goto err_class_register;
777 }
778
779 kref_init(&uio_class->kref);
780 uio_class->class = class_create(THIS_MODULE, "uio");
781 if (IS_ERR(uio_class->class)) {
782 ret = IS_ERR(uio_class->class);
783 printk(KERN_ERR "class_create failed for uio\n");
784 goto err_class_create;
785 } 778 }
786 return 0; 779 return 0;
787 780
788err_class_create: 781err_class_register:
789 kfree(uio_class);
790 uio_class = NULL;
791err_kzalloc:
792 uio_major_cleanup(); 782 uio_major_cleanup();
793exit: 783exit:
794 return ret; 784 return ret;
795} 785}
796 786
797static void release_uio_class(struct kref *kref) 787static void release_uio_class(void)
798{ 788{
799 /* Ok, we cheat as we know we only have one uio_class */ 789 class_unregister(&uio_class);
800 class_destroy(uio_class->class);
801 kfree(uio_class);
802 uio_major_cleanup(); 790 uio_major_cleanup();
803 uio_class = NULL;
804}
805
806static void uio_class_destroy(void)
807{
808 if (uio_class)
809 kref_put(&uio_class->kref, release_uio_class);
810} 791}
811 792
812/** 793/**
@@ -829,10 +810,6 @@ int __uio_register_device(struct module *owner,
829 810
830 info->uio_dev = NULL; 811 info->uio_dev = NULL;
831 812
832 ret = init_uio_class();
833 if (ret)
834 return ret;
835
836 idev = kzalloc(sizeof(*idev), GFP_KERNEL); 813 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
837 if (!idev) { 814 if (!idev) {
838 ret = -ENOMEM; 815 ret = -ENOMEM;
@@ -848,7 +825,7 @@ int __uio_register_device(struct module *owner,
848 if (ret) 825 if (ret)
849 goto err_get_minor; 826 goto err_get_minor;
850 827
851 idev->dev = device_create(uio_class->class, parent, 828 idev->dev = device_create(&uio_class, parent,
852 MKDEV(uio_major, idev->minor), idev, 829 MKDEV(uio_major, idev->minor), idev,
853 "uio%d", idev->minor); 830 "uio%d", idev->minor);
854 if (IS_ERR(idev->dev)) { 831 if (IS_ERR(idev->dev)) {
@@ -863,9 +840,9 @@ int __uio_register_device(struct module *owner,
863 840
864 info->uio_dev = idev; 841 info->uio_dev = idev;
865 842
866 if (idev->info->irq >= 0) { 843 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
867 ret = request_irq(idev->info->irq, uio_interrupt, 844 ret = request_irq(info->irq, uio_interrupt,
868 idev->info->irq_flags, idev->info->name, idev); 845 info->irq_flags, info->name, idev);
869 if (ret) 846 if (ret)
870 goto err_request_irq; 847 goto err_request_irq;
871 } 848 }
@@ -875,13 +852,12 @@ int __uio_register_device(struct module *owner,
875err_request_irq: 852err_request_irq:
876 uio_dev_del_attributes(idev); 853 uio_dev_del_attributes(idev);
877err_uio_dev_add_attributes: 854err_uio_dev_add_attributes:
878 device_destroy(uio_class->class, MKDEV(uio_major, idev->minor)); 855 device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
879err_device_create: 856err_device_create:
880 uio_free_minor(idev); 857 uio_free_minor(idev);
881err_get_minor: 858err_get_minor:
882 kfree(idev); 859 kfree(idev);
883err_kzalloc: 860err_kzalloc:
884 uio_class_destroy();
885 return ret; 861 return ret;
886} 862}
887EXPORT_SYMBOL_GPL(__uio_register_device); 863EXPORT_SYMBOL_GPL(__uio_register_device);
@@ -902,15 +878,13 @@ void uio_unregister_device(struct uio_info *info)
902 878
903 uio_free_minor(idev); 879 uio_free_minor(idev);
904 880
905 if (info->irq >= 0) 881 if (info->irq && (info->irq != UIO_IRQ_CUSTOM))
906 free_irq(info->irq, idev); 882 free_irq(info->irq, idev);
907 883
908 uio_dev_del_attributes(idev); 884 uio_dev_del_attributes(idev);
909 885
910 dev_set_drvdata(idev->dev, NULL); 886 device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
911 device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
912 kfree(idev); 887 kfree(idev);
913 uio_class_destroy();
914 888
915 return; 889 return;
916} 890}
@@ -918,11 +892,12 @@ EXPORT_SYMBOL_GPL(uio_unregister_device);
918 892
919static int __init uio_init(void) 893static int __init uio_init(void)
920{ 894{
921 return 0; 895 return init_uio_class();
922} 896}
923 897
924static void __exit uio_exit(void) 898static void __exit uio_exit(void)
925{ 899{
900 release_uio_class();
926} 901}
927 902
928module_init(uio_init) 903module_init(uio_init)
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 85c9884a67fd..fc22e1e6f215 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -128,12 +128,6 @@ static int __devinit probe(struct pci_dev *pdev,
128 struct uio_pci_generic_dev *gdev; 128 struct uio_pci_generic_dev *gdev;
129 int err; 129 int err;
130 130
131 if (!pdev->irq) {
132 dev_warn(&pdev->dev, "No IRQ assigned to device: "
133 "no support for interrupts?\n");
134 return -ENODEV;
135 }
136
137 err = pci_enable_device(pdev); 131 err = pci_enable_device(pdev);
138 if (err) { 132 if (err) {
139 dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n", 133 dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n",
@@ -141,6 +135,13 @@ static int __devinit probe(struct pci_dev *pdev,
141 return err; 135 return err;
142 } 136 }
143 137
138 if (!pdev->irq) {
139 dev_warn(&pdev->dev, "No IRQ assigned to device: "
140 "no support for interrupts?\n");
141 pci_disable_device(pdev);
142 return -ENODEV;
143 }
144
144 err = verify_pci_2_3(pdev); 145 err = verify_pci_2_3(pdev);
145 if (err) 146 if (err)
146 goto err_verify; 147 goto err_verify;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4aa00e6e57ad..67eb3770868f 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -59,6 +59,7 @@ config USB_ARCH_HAS_OHCI
59config USB_ARCH_HAS_EHCI 59config USB_ARCH_HAS_EHCI
60 boolean 60 boolean
61 default y if PPC_83xx 61 default y if PPC_83xx
62 default y if PPC_MPC512x
62 default y if SOC_AU1200 63 default y if SOC_AU1200
63 default y if ARCH_IXP4XX 64 default y if ARCH_IXP4XX
64 default y if ARCH_W90X900 65 default y if ARCH_W90X900
diff --git a/drivers/usb/atm/Makefile b/drivers/usb/atm/Makefile
index 4c4a776ab1cd..a5d792ec3ad5 100644
--- a/drivers/usb/atm/Makefile
+++ b/drivers/usb/atm/Makefile
@@ -2,12 +2,10 @@
2# Makefile for USB ATM/xDSL drivers 2# Makefile for USB ATM/xDSL drivers
3# 3#
4 4
5ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
6
5obj-$(CONFIG_USB_CXACRU) += cxacru.o 7obj-$(CONFIG_USB_CXACRU) += cxacru.o
6obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o 8obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o
7obj-$(CONFIG_USB_UEAGLEATM) += ueagle-atm.o 9obj-$(CONFIG_USB_UEAGLEATM) += ueagle-atm.o
8obj-$(CONFIG_USB_ATM) += usbatm.o 10obj-$(CONFIG_USB_ATM) += usbatm.o
9obj-$(CONFIG_USB_XUSBATM) += xusbatm.o 11obj-$(CONFIG_USB_XUSBATM) += xusbatm.o
10
11ifeq ($(CONFIG_USB_DEBUG),y)
12EXTRA_CFLAGS += -DDEBUG
13endif
diff --git a/drivers/usb/c67x00/Makefile b/drivers/usb/c67x00/Makefile
index 868bc41b5980..b1218683c8ec 100644
--- a/drivers/usb/c67x00/Makefile
+++ b/drivers/usb/c67x00/Makefile
@@ -2,8 +2,8 @@
2# Makefile for Cypress C67X00 USB Controller 2# Makefile for Cypress C67X00 USB Controller
3# 3#
4 4
5ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG 5ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
6 6
7obj-$(CONFIG_USB_C67X00_HCD) += c67x00.o 7obj-$(CONFIG_USB_C67X00_HCD) += c67x00.o
8 8
9c67x00-objs := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o 9c67x00-y := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index bc62fae0680f..d6ede989ff22 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1614,7 +1614,7 @@ static const struct usb_device_id acm_ids[] = {
1614 /* Support Lego NXT using pbLua firmware */ 1614 /* Support Lego NXT using pbLua firmware */
1615 { USB_DEVICE(0x0694, 0xff00), 1615 { USB_DEVICE(0x0694, 0xff00),
1616 .driver_info = NOT_A_MODEM, 1616 .driver_info = NOT_A_MODEM,
1617 }, 1617 },
1618 1618
1619 /* control interfaces without any protocol set */ 1619 /* control interfaces without any protocol set */
1620 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1620 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index ec16e6029905..507a4e1b6360 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -2,20 +2,13 @@
2# Makefile for USB Core files and filesystem 2# Makefile for USB Core files and filesystem
3# 3#
4 4
5usbcore-objs := usb.o hub.o hcd.o urb.o message.o driver.o \ 5ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
6 config.o file.o buffer.o sysfs.o endpoint.o \
7 devio.o notify.o generic.o quirks.o devices.o
8 6
9ifeq ($(CONFIG_PCI),y) 7usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
10 usbcore-objs += hcd-pci.o 8usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
11endif 9usbcore-y += devio.o notify.o generic.o quirks.o devices.o
12 10
13ifeq ($(CONFIG_USB_DEVICEFS),y) 11usbcore-$(CONFIG_PCI) += hcd-pci.o
14 usbcore-objs += inode.o 12usbcore-$(CONFIG_USB_DEVICEFS) += inode.o
15endif
16 13
17obj-$(CONFIG_USB) += usbcore.o 14obj-$(CONFIG_USB) += usbcore.o
18
19ifeq ($(CONFIG_USB_DEBUG),y)
20EXTRA_CFLAGS += -DDEBUG
21endif
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 3449742c00e1..ddb4dc980923 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -66,8 +66,8 @@
66#define ALLOW_SERIAL_NUMBER 66#define ALLOW_SERIAL_NUMBER
67 67
68static const char *format_topo = 68static const char *format_topo =
69/* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd */ 69/* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=dddd MxCh=dd */
70"\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%3s MxCh=%2d\n"; 70"\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%-4s MxCh=%2d\n";
71 71
72static const char *format_string_manufacturer = 72static const char *format_string_manufacturer =
73/* S: Manufacturer=xxxx */ 73/* S: Manufacturer=xxxx */
@@ -520,11 +520,14 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
520 speed = "1.5"; break; 520 speed = "1.5"; break;
521 case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */ 521 case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */
522 case USB_SPEED_FULL: 522 case USB_SPEED_FULL:
523 speed = "12 "; break; 523 speed = "12"; break;
524 case USB_SPEED_WIRELESS: /* Wireless has no real fixed speed */
524 case USB_SPEED_HIGH: 525 case USB_SPEED_HIGH:
525 speed = "480"; break; 526 speed = "480"; break;
527 case USB_SPEED_SUPER:
528 speed = "5000"; break;
526 default: 529 default:
527 speed = "?? "; 530 speed = "??";
528 } 531 }
529 data_end = pages_start + sprintf(pages_start, format_topo, 532 data_end = pages_start + sprintf(pages_start, format_topo,
530 bus->busnum, level, parent_devnum, 533 bus->busnum, level, parent_devnum,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d7a4401ef019..c0e60fbcb048 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1337,7 +1337,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
1337 /* Avoid PM error messages for devices disconnected while suspended 1337 /* Avoid PM error messages for devices disconnected while suspended
1338 * as we'll display regular disconnect messages just a bit later. 1338 * as we'll display regular disconnect messages just a bit later.
1339 */ 1339 */
1340 if (status == -ENODEV) 1340 if (status == -ENODEV || status == -ESHUTDOWN)
1341 status = 0; 1341 status = 0;
1342 return status; 1342 return status;
1343} 1343}
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 3788e738e265..9da250563027 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -202,7 +202,7 @@ int usb_create_ep_devs(struct device *parent,
202 return retval; 202 return retval;
203 203
204error_register: 204error_register:
205 kfree(ep_dev); 205 put_device(&ep_dev->dev);
206exit: 206exit:
207 return retval; 207 return retval;
208} 208}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index c3f98543caaf..3799573bd385 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -329,8 +329,10 @@ void usb_hcd_pci_shutdown(struct pci_dev *dev)
329 return; 329 return;
330 330
331 if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && 331 if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) &&
332 hcd->driver->shutdown) 332 hcd->driver->shutdown) {
333 hcd->driver->shutdown(hcd); 333 hcd->driver->shutdown(hcd);
334 pci_disable_device(dev);
335 }
334} 336}
335EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown); 337EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
336 338
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 5cca00a6d09d..61800f77dac8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1263,10 +1263,8 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
1263 *dma_handle = 0; 1263 *dma_handle = 0;
1264} 1264}
1265 1265
1266static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 1266void unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
1267{ 1267{
1268 enum dma_data_direction dir;
1269
1270 if (urb->transfer_flags & URB_SETUP_MAP_SINGLE) 1268 if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
1271 dma_unmap_single(hcd->self.controller, 1269 dma_unmap_single(hcd->self.controller,
1272 urb->setup_dma, 1270 urb->setup_dma,
@@ -1279,6 +1277,17 @@ static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1279 sizeof(struct usb_ctrlrequest), 1277 sizeof(struct usb_ctrlrequest),
1280 DMA_TO_DEVICE); 1278 DMA_TO_DEVICE);
1281 1279
1280 /* Make it safe to call this routine more than once */
1281 urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL);
1282}
1283EXPORT_SYMBOL_GPL(unmap_urb_setup_for_dma);
1284
1285void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1286{
1287 enum dma_data_direction dir;
1288
1289 unmap_urb_setup_for_dma(hcd, urb);
1290
1282 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1291 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1283 if (urb->transfer_flags & URB_DMA_MAP_SG) 1292 if (urb->transfer_flags & URB_DMA_MAP_SG)
1284 dma_unmap_sg(hcd->self.controller, 1293 dma_unmap_sg(hcd->self.controller,
@@ -1303,10 +1312,10 @@ static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1303 dir); 1312 dir);
1304 1313
1305 /* Make it safe to call this routine more than once */ 1314 /* Make it safe to call this routine more than once */
1306 urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | 1315 urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
1307 URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
1308 URB_DMA_MAP_SINGLE | URB_MAP_LOCAL); 1316 URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
1309} 1317}
1318EXPORT_SYMBOL_GPL(unmap_urb_for_dma);
1310 1319
1311static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 1320static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1312 gfp_t mem_flags) 1321 gfp_t mem_flags)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 84c1897188d2..27115b45edc5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -758,6 +758,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
758 clear_port_feature(hdev, port1, 758 clear_port_feature(hdev, port1,
759 USB_PORT_FEAT_ENABLE); 759 USB_PORT_FEAT_ENABLE);
760 portstatus &= ~USB_PORT_STAT_ENABLE; 760 portstatus &= ~USB_PORT_STAT_ENABLE;
761 } else {
762 /* Pretend that power was lost for USB3 devs */
763 portstatus &= ~USB_PORT_STAT_ENABLE;
761 } 764 }
762 } 765 }
763 766
@@ -2594,16 +2597,14 @@ static int hub_set_address(struct usb_device *udev, int devnum)
2594 return 0; 2597 return 0;
2595 if (udev->state != USB_STATE_DEFAULT) 2598 if (udev->state != USB_STATE_DEFAULT)
2596 return -EINVAL; 2599 return -EINVAL;
2597 if (hcd->driver->address_device) { 2600 if (hcd->driver->address_device)
2598 retval = hcd->driver->address_device(hcd, udev); 2601 retval = hcd->driver->address_device(hcd, udev);
2599 } else { 2602 else
2600 retval = usb_control_msg(udev, usb_sndaddr0pipe(), 2603 retval = usb_control_msg(udev, usb_sndaddr0pipe(),
2601 USB_REQ_SET_ADDRESS, 0, devnum, 0, 2604 USB_REQ_SET_ADDRESS, 0, devnum, 0,
2602 NULL, 0, USB_CTRL_SET_TIMEOUT); 2605 NULL, 0, USB_CTRL_SET_TIMEOUT);
2603 if (retval == 0)
2604 update_address(udev, devnum);
2605 }
2606 if (retval == 0) { 2606 if (retval == 0) {
2607 update_address(udev, devnum);
2607 /* Device now using proper address. */ 2608 /* Device now using proper address. */
2608 usb_set_device_state(udev, USB_STATE_ADDRESS); 2609 usb_set_device_state(udev, USB_STATE_ADDRESS);
2609 usb_ep0_reinit(udev); 2610 usb_ep0_reinit(udev);
@@ -2860,13 +2861,16 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2860 else 2861 else
2861 i = udev->descriptor.bMaxPacketSize0; 2862 i = udev->descriptor.bMaxPacketSize0;
2862 if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) { 2863 if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
2863 if (udev->speed != USB_SPEED_FULL || 2864 if (udev->speed == USB_SPEED_LOW ||
2864 !(i == 8 || i == 16 || i == 32 || i == 64)) { 2865 !(i == 8 || i == 16 || i == 32 || i == 64)) {
2865 dev_err(&udev->dev, "ep0 maxpacket = %d\n", i); 2866 dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
2866 retval = -EMSGSIZE; 2867 retval = -EMSGSIZE;
2867 goto fail; 2868 goto fail;
2868 } 2869 }
2869 dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); 2870 if (udev->speed == USB_SPEED_FULL)
2871 dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
2872 else
2873 dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
2870 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); 2874 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
2871 usb_ep0_reinit(udev); 2875 usb_ep0_reinit(udev);
2872 } 2876 }
@@ -3097,16 +3101,17 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
3097 udev->speed = USB_SPEED_UNKNOWN; 3101 udev->speed = USB_SPEED_UNKNOWN;
3098 3102
3099 /* 3103 /*
3100 * xHCI needs to issue an address device command later 3104 * Set the address.
3101 * in the hub_port_init sequence for SS/HS/FS/LS devices. 3105 * Note xHCI needs to issue an address device command later
3106 * in the hub_port_init sequence for SS/HS/FS/LS devices,
3107 * and xHC will assign an address to the device. But use
3108 * kernel assigned address here, to avoid any address conflict
3109 * issue.
3102 */ 3110 */
3103 if (!(hcd->driver->flags & HCD_USB3)) { 3111 choose_address(udev);
3104 /* set the address */ 3112 if (udev->devnum <= 0) {
3105 choose_address(udev); 3113 status = -ENOTCONN; /* Don't retry */
3106 if (udev->devnum <= 0) { 3114 goto loop;
3107 status = -ENOTCONN; /* Don't retry */
3108 goto loop;
3109 }
3110 } 3115 }
3111 3116
3112 /* reset (non-USB 3.0 devices) and get descriptor */ 3117 /* reset (non-USB 3.0 devices) and get descriptor */
@@ -3629,7 +3634,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3629 } 3634 }
3630 3635
3631 if (!parent_hdev) { 3636 if (!parent_hdev) {
3632 /* this requires hcd-specific logic; see OHCI hc_restart() */ 3637 /* this requires hcd-specific logic; see ohci_restart() */
3633 dev_dbg(&udev->dev, "%s for root hub!\n", __func__); 3638 dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
3634 return -EISDIR; 3639 return -EISDIR;
3635 } 3640 }
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 9f0ce7de0e36..d6e3e410477e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1140,13 +1140,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1140{ 1140{
1141 int i; 1141 int i;
1142 1142
1143 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
1144 skip_ep0 ? "non-ep0" : "all");
1145 for (i = skip_ep0; i < 16; ++i) {
1146 usb_disable_endpoint(dev, i, true);
1147 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
1148 }
1149
1150 /* getting rid of interfaces will disconnect 1143 /* getting rid of interfaces will disconnect
1151 * any drivers bound to them (a key side effect) 1144 * any drivers bound to them (a key side effect)
1152 */ 1145 */
@@ -1176,6 +1169,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1176 if (dev->state == USB_STATE_CONFIGURED) 1169 if (dev->state == USB_STATE_CONFIGURED)
1177 usb_set_device_state(dev, USB_STATE_ADDRESS); 1170 usb_set_device_state(dev, USB_STATE_ADDRESS);
1178 } 1171 }
1172
1173 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
1174 skip_ep0 ? "non-ep0" : "all");
1175 for (i = skip_ep0; i < 16; ++i) {
1176 usb_disable_endpoint(dev, i, true);
1177 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
1178 }
1179} 1179}
1180 1180
1181/** 1181/**
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 419e6b34e2fe..c14fc082864f 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -401,8 +401,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
401 }; 401 };
402 402
403 /* Check that the pipe's type matches the endpoint's type */ 403 /* Check that the pipe's type matches the endpoint's type */
404 if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) 404 if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) {
405 dev_err(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
406 usb_pipetype(urb->pipe), pipetypes[xfertype]);
405 return -EPIPE; /* The most suitable error code :-) */ 407 return -EPIPE; /* The most suitable error code :-) */
408 }
406 409
407 /* enforce simple/standard policy */ 410 /* enforce simple/standard policy */
408 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | 411 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
index dfedee8c45b6..24bbe519c737 100644
--- a/drivers/usb/early/Makefile
+++ b/drivers/usb/early/Makefile
@@ -2,4 +2,4 @@
2# Makefile for early USB devices 2# Makefile for early USB devices
3# 3#
4 4
5obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o 5obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index cd27f9bde2c8..b739ca814651 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,6 +158,7 @@ config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC || ARCH_MXC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 select USB_FSL_MPH_DR_OF
161 help 162 help
162 Some of Freescale PowerPC processors have a High Speed 163 Some of Freescale PowerPC processors have a High Speed
163 Dual-Role(DR) USB controller, which supports device mode. 164 Dual-Role(DR) USB controller, which supports device mode.
@@ -209,17 +210,6 @@ config USB_OMAP
209 default USB_GADGET 210 default USB_GADGET
210 select USB_GADGET_SELECTED 211 select USB_GADGET_SELECTED
211 212
212config USB_OTG
213 boolean "OTG Support"
214 depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
215 help
216 The most notable feature of USB OTG is support for a
217 "Dual-Role" device, which can act as either a device
218 or a host. The initial role choice can be changed
219 later, when two dual-role devices talk to each other.
220
221 Select this only if your OMAP board has a Mini-AB connector.
222
223config USB_GADGET_PXA25X 213config USB_GADGET_PXA25X
224 boolean "PXA 25x or IXP 4xx" 214 boolean "PXA 25x or IXP 4xx"
225 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX 215 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
@@ -716,8 +706,8 @@ config USB_FUNCTIONFS
716 depends on EXPERIMENTAL 706 depends on EXPERIMENTAL
717 select USB_FUNCTIONFS_GENERIC if !(USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS) 707 select USB_FUNCTIONFS_GENERIC if !(USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
718 help 708 help
719 The Function Filesystem (FunctioFS) lets one create USB 709 The Function Filesystem (FunctionFS) lets one create USB
720 composite functions in user space in the same way as GadgetFS 710 composite functions in user space in the same way GadgetFS
721 lets one create USB gadgets in user space. This allows creation 711 lets one create USB gadgets in user space. This allows creation
722 of composite gadgets such that some of the functions are 712 of composite gadgets such that some of the functions are
723 implemented in kernel space (for instance Ethernet, serial or 713 implemented in kernel space (for instance Ethernet, serial or
@@ -733,14 +723,14 @@ config USB_FUNCTIONFS_ETH
733 bool "Include configuration with CDC ECM (Ethernet)" 723 bool "Include configuration with CDC ECM (Ethernet)"
734 depends on USB_FUNCTIONFS && NET 724 depends on USB_FUNCTIONFS && NET
735 help 725 help
736 Include a configuration with CDC ECM funcion (Ethernet) and the 726 Include a configuration with CDC ECM function (Ethernet) and the
737 Funcion Filesystem. 727 Function Filesystem.
738 728
739config USB_FUNCTIONFS_RNDIS 729config USB_FUNCTIONFS_RNDIS
740 bool "Include configuration with RNDIS (Ethernet)" 730 bool "Include configuration with RNDIS (Ethernet)"
741 depends on USB_FUNCTIONFS && NET 731 depends on USB_FUNCTIONFS && NET
742 help 732 help
743 Include a configuration with RNDIS funcion (Ethernet) and the Filesystem. 733 Include a configuration with RNDIS function (Ethernet) and the Filesystem.
744 734
745config USB_FUNCTIONFS_GENERIC 735config USB_FUNCTIONFS_GENERIC
746 bool "Include 'pure' configuration" 736 bool "Include 'pure' configuration"
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 27283df37d09..5780db42417b 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -1,9 +1,7 @@
1# 1#
2# USB peripheral controller drivers 2# USB peripheral controller drivers
3# 3#
4ifeq ($(CONFIG_USB_GADGET_DEBUG),y) 4ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
5 EXTRA_CFLAGS += -DDEBUG
6endif
7 5
8obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o 6obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
9obj-$(CONFIG_USB_NET2280) += net2280.o 7obj-$(CONFIG_USB_NET2280) += net2280.o
@@ -18,10 +16,8 @@ obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
18obj-$(CONFIG_USB_AT91) += at91_udc.o 16obj-$(CONFIG_USB_AT91) += at91_udc.o
19obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o 17obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
20obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o 18obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
21fsl_usb2_udc-objs := fsl_udc_core.o 19fsl_usb2_udc-y := fsl_udc_core.o
22ifeq ($(CONFIG_ARCH_MXC),y) 20fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
23fsl_usb2_udc-objs += fsl_mxc_udc.o
24endif
25obj-$(CONFIG_USB_M66592) += m66592-udc.o 21obj-$(CONFIG_USB_M66592) += m66592-udc.o
26obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o 22obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
27obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o 23obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
@@ -32,21 +28,21 @@ obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
32# 28#
33# USB gadget drivers 29# USB gadget drivers
34# 30#
35g_zero-objs := zero.o 31g_zero-y := zero.o
36g_audio-objs := audio.o 32g_audio-y := audio.o
37g_ether-objs := ether.o 33g_ether-y := ether.o
38g_serial-objs := serial.o 34g_serial-y := serial.o
39g_midi-objs := gmidi.o 35g_midi-y := gmidi.o
40gadgetfs-objs := inode.o 36gadgetfs-y := inode.o
41g_file_storage-objs := file_storage.o 37g_file_storage-y := file_storage.o
42g_mass_storage-objs := mass_storage.o 38g_mass_storage-y := mass_storage.o
43g_printer-objs := printer.o 39g_printer-y := printer.o
44g_cdc-objs := cdc2.o 40g_cdc-y := cdc2.o
45g_multi-objs := multi.o 41g_multi-y := multi.o
46g_hid-objs := hid.o 42g_hid-y := hid.o
47g_dbgp-objs := dbgp.o 43g_dbgp-y := dbgp.o
48g_nokia-objs := nokia.o 44g_nokia-y := nokia.o
49g_webcam-objs := webcam.o 45g_webcam-y := webcam.o
50 46
51obj-$(CONFIG_USB_ZERO) += g_zero.o 47obj-$(CONFIG_USB_ZERO) += g_zero.o
52obj-$(CONFIG_USB_AUDIO) += g_audio.o 48obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -64,4 +60,3 @@ obj-$(CONFIG_USB_G_DBGP) += g_dbgp.o
64obj-$(CONFIG_USB_G_MULTI) += g_multi.o 60obj-$(CONFIG_USB_G_MULTI) += g_multi.o
65obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o 61obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
66obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o 62obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
67
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 731150d4b1d9..9034e0344723 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -203,7 +203,7 @@ static void print_regs(struct udc *dev)
203 DBG(dev, "DMA mode = PPBNDU (packet per buffer " 203 DBG(dev, "DMA mode = PPBNDU (packet per buffer "
204 "WITHOUT desc. update)\n"); 204 "WITHOUT desc. update)\n");
205 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU"); 205 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
206 } else if (use_dma && use_dma_ppb_du && use_dma_ppb_du) { 206 } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
207 DBG(dev, "DMA mode = PPBDU (packet per buffer " 207 DBG(dev, "DMA mode = PPBDU (packet per buffer "
208 "WITH desc. update)\n"); 208 "WITH desc. update)\n");
209 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU"); 209 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
@@ -1954,13 +1954,14 @@ static int setup_ep0(struct udc *dev)
1954} 1954}
1955 1955
1956/* Called by gadget driver to register itself */ 1956/* Called by gadget driver to register itself */
1957int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1957int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1958 int (*bind)(struct usb_gadget *))
1958{ 1959{
1959 struct udc *dev = udc; 1960 struct udc *dev = udc;
1960 int retval; 1961 int retval;
1961 u32 tmp; 1962 u32 tmp;
1962 1963
1963 if (!driver || !driver->bind || !driver->setup 1964 if (!driver || !bind || !driver->setup
1964 || driver->speed != USB_SPEED_HIGH) 1965 || driver->speed != USB_SPEED_HIGH)
1965 return -EINVAL; 1966 return -EINVAL;
1966 if (!dev) 1967 if (!dev)
@@ -1972,7 +1973,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1972 dev->driver = driver; 1973 dev->driver = driver;
1973 dev->gadget.dev.driver = &driver->driver; 1974 dev->gadget.dev.driver = &driver->driver;
1974 1975
1975 retval = driver->bind(&dev->gadget); 1976 retval = bind(&dev->gadget);
1976 1977
1977 /* Some gadget drivers use both ep0 directions. 1978 /* Some gadget drivers use both ep0 directions.
1978 * NOTE: to gadget driver, ep0 is just one endpoint... 1979 * NOTE: to gadget driver, ep0 is just one endpoint...
@@ -2000,7 +2001,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2000 2001
2001 return 0; 2002 return 0;
2002} 2003}
2003EXPORT_SYMBOL(usb_gadget_register_driver); 2004EXPORT_SYMBOL(usb_gadget_probe_driver);
2004 2005
2005/* shutdown requests and disconnect from gadget */ 2006/* shutdown requests and disconnect from gadget */
2006static void 2007static void
@@ -3382,8 +3383,10 @@ static int udc_probe(struct udc *dev)
3382 udc = dev; 3383 udc = dev;
3383 3384
3384 retval = device_register(&dev->gadget.dev); 3385 retval = device_register(&dev->gadget.dev);
3385 if (retval) 3386 if (retval) {
3387 put_device(&dev->gadget.dev);
3386 goto finished; 3388 goto finished;
3389 }
3387 3390
3388 /* timer init */ 3391 /* timer init */
3389 init_timer(&udc_timer); 3392 init_timer(&udc_timer);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 93ead19507b6..387e503b9d14 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1628,7 +1628,8 @@ static void at91_vbus_timer(unsigned long data)
1628 schedule_work(&udc->vbus_timer_work); 1628 schedule_work(&udc->vbus_timer_work);
1629} 1629}
1630 1630
1631int usb_gadget_register_driver (struct usb_gadget_driver *driver) 1631int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1632 int (*bind)(struct usb_gadget *))
1632{ 1633{
1633 struct at91_udc *udc = &controller; 1634 struct at91_udc *udc = &controller;
1634 int retval; 1635 int retval;
@@ -1636,7 +1637,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1636 1637
1637 if (!driver 1638 if (!driver
1638 || driver->speed < USB_SPEED_FULL 1639 || driver->speed < USB_SPEED_FULL
1639 || !driver->bind 1640 || !bind
1640 || !driver->setup) { 1641 || !driver->setup) {
1641 DBG("bad parameter.\n"); 1642 DBG("bad parameter.\n");
1642 return -EINVAL; 1643 return -EINVAL;
@@ -1653,9 +1654,9 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1653 udc->enabled = 1; 1654 udc->enabled = 1;
1654 udc->selfpowered = 1; 1655 udc->selfpowered = 1;
1655 1656
1656 retval = driver->bind(&udc->gadget); 1657 retval = bind(&udc->gadget);
1657 if (retval) { 1658 if (retval) {
1658 DBG("driver->bind() returned %d\n", retval); 1659 DBG("bind() returned %d\n", retval);
1659 udc->driver = NULL; 1660 udc->driver = NULL;
1660 udc->gadget.dev.driver = NULL; 1661 udc->gadget.dev.driver = NULL;
1661 dev_set_drvdata(&udc->gadget.dev, NULL); 1662 dev_set_drvdata(&udc->gadget.dev, NULL);
@@ -1671,7 +1672,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1671 DBG("bound to %s\n", driver->driver.name); 1672 DBG("bound to %s\n", driver->driver.name);
1672 return 0; 1673 return 0;
1673} 1674}
1674EXPORT_SYMBOL (usb_gadget_register_driver); 1675EXPORT_SYMBOL(usb_gadget_probe_driver);
1675 1676
1676int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) 1677int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
1677{ 1678{
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index d623c7bda1f6..b5e20e873cba 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1789,7 +1789,8 @@ out:
1789 return IRQ_HANDLED; 1789 return IRQ_HANDLED;
1790} 1790}
1791 1791
1792int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1792int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1793 int (*bind)(struct usb_gadget *))
1793{ 1794{
1794 struct usba_udc *udc = &the_udc; 1795 struct usba_udc *udc = &the_udc;
1795 unsigned long flags; 1796 unsigned long flags;
@@ -1812,7 +1813,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1812 clk_enable(udc->pclk); 1813 clk_enable(udc->pclk);
1813 clk_enable(udc->hclk); 1814 clk_enable(udc->hclk);
1814 1815
1815 ret = driver->bind(&udc->gadget); 1816 ret = bind(&udc->gadget);
1816 if (ret) { 1817 if (ret) {
1817 DBG(DBG_ERR, "Could not bind to driver %s: error %d\n", 1818 DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
1818 driver->driver.name, ret); 1819 driver->driver.name, ret);
@@ -1841,7 +1842,7 @@ err_driver_bind:
1841 udc->gadget.dev.driver = NULL; 1842 udc->gadget.dev.driver = NULL;
1842 return ret; 1843 return ret;
1843} 1844}
1844EXPORT_SYMBOL(usb_gadget_register_driver); 1845EXPORT_SYMBOL(usb_gadget_probe_driver);
1845 1846
1846int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1847int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1847{ 1848{
@@ -2014,6 +2015,9 @@ static int __init usba_udc_probe(struct platform_device *pdev)
2014 } else { 2015 } else {
2015 disable_irq(gpio_to_irq(udc->vbus_pin)); 2016 disable_irq(gpio_to_irq(udc->vbus_pin));
2016 } 2017 }
2018 } else {
2019 /* gpio_request fail so use -EINVAL for gpio_is_valid */
2020 ubc->vbus_pin = -EINVAL;
2017 } 2021 }
2018 } 2022 }
2019 2023
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index b744ccd0f34d..93b999e49ef3 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -89,7 +89,7 @@ static const struct usb_descriptor_header *otg_desc[] = {
89 89
90/*-------------------------------------------------------------------------*/ 90/*-------------------------------------------------------------------------*/
91 91
92static int __ref audio_do_config(struct usb_configuration *c) 92static int __init audio_do_config(struct usb_configuration *c)
93{ 93{
94 /* FIXME alloc iConfiguration string, set it in c->strings */ 94 /* FIXME alloc iConfiguration string, set it in c->strings */
95 95
@@ -105,7 +105,6 @@ static int __ref audio_do_config(struct usb_configuration *c)
105 105
106static struct usb_configuration audio_config_driver = { 106static struct usb_configuration audio_config_driver = {
107 .label = DRIVER_DESC, 107 .label = DRIVER_DESC,
108 .bind = audio_do_config,
109 .bConfigurationValue = 1, 108 .bConfigurationValue = 1,
110 /* .iConfiguration = DYNAMIC */ 109 /* .iConfiguration = DYNAMIC */
111 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 110 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -113,7 +112,7 @@ static struct usb_configuration audio_config_driver = {
113 112
114/*-------------------------------------------------------------------------*/ 113/*-------------------------------------------------------------------------*/
115 114
116static int __ref audio_bind(struct usb_composite_dev *cdev) 115static int __init audio_bind(struct usb_composite_dev *cdev)
117{ 116{
118 int gcnum; 117 int gcnum;
119 int status; 118 int status;
@@ -145,7 +144,7 @@ static int __ref audio_bind(struct usb_composite_dev *cdev)
145 strings_dev[STRING_PRODUCT_IDX].id = status; 144 strings_dev[STRING_PRODUCT_IDX].id = status;
146 device_desc.iProduct = status; 145 device_desc.iProduct = status;
147 146
148 status = usb_add_config(cdev, &audio_config_driver); 147 status = usb_add_config(cdev, &audio_config_driver, audio_do_config);
149 if (status < 0) 148 if (status < 0)
150 goto fail; 149 goto fail;
151 150
@@ -166,13 +165,12 @@ static struct usb_composite_driver audio_driver = {
166 .name = "g_audio", 165 .name = "g_audio",
167 .dev = &device_desc, 166 .dev = &device_desc,
168 .strings = audio_strings, 167 .strings = audio_strings,
169 .bind = audio_bind,
170 .unbind = __exit_p(audio_unbind), 168 .unbind = __exit_p(audio_unbind),
171}; 169};
172 170
173static int __init init(void) 171static int __init init(void)
174{ 172{
175 return usb_composite_register(&audio_driver); 173 return usb_composite_probe(&audio_driver, audio_bind);
176} 174}
177module_init(init); 175module_init(init);
178 176
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 1f5ba2fd4c1f..2720ab07ef1a 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -129,7 +129,7 @@ static u8 hostaddr[ETH_ALEN];
129/* 129/*
130 * We _always_ have both CDC ECM and CDC ACM functions. 130 * We _always_ have both CDC ECM and CDC ACM functions.
131 */ 131 */
132static int __ref cdc_do_config(struct usb_configuration *c) 132static int __init cdc_do_config(struct usb_configuration *c)
133{ 133{
134 int status; 134 int status;
135 135
@@ -151,7 +151,6 @@ static int __ref cdc_do_config(struct usb_configuration *c)
151 151
152static struct usb_configuration cdc_config_driver = { 152static struct usb_configuration cdc_config_driver = {
153 .label = "CDC Composite (ECM + ACM)", 153 .label = "CDC Composite (ECM + ACM)",
154 .bind = cdc_do_config,
155 .bConfigurationValue = 1, 154 .bConfigurationValue = 1,
156 /* .iConfiguration = DYNAMIC */ 155 /* .iConfiguration = DYNAMIC */
157 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 156 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -159,7 +158,7 @@ static struct usb_configuration cdc_config_driver = {
159 158
160/*-------------------------------------------------------------------------*/ 159/*-------------------------------------------------------------------------*/
161 160
162static int __ref cdc_bind(struct usb_composite_dev *cdev) 161static int __init cdc_bind(struct usb_composite_dev *cdev)
163{ 162{
164 int gcnum; 163 int gcnum;
165 struct usb_gadget *gadget = cdev->gadget; 164 struct usb_gadget *gadget = cdev->gadget;
@@ -218,7 +217,7 @@ static int __ref cdc_bind(struct usb_composite_dev *cdev)
218 device_desc.iProduct = status; 217 device_desc.iProduct = status;
219 218
220 /* register our configuration */ 219 /* register our configuration */
221 status = usb_add_config(cdev, &cdc_config_driver); 220 status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
222 if (status < 0) 221 if (status < 0)
223 goto fail1; 222 goto fail1;
224 223
@@ -245,7 +244,6 @@ static struct usb_composite_driver cdc_driver = {
245 .name = "g_cdc", 244 .name = "g_cdc",
246 .dev = &device_desc, 245 .dev = &device_desc,
247 .strings = dev_strings, 246 .strings = dev_strings,
248 .bind = cdc_bind,
249 .unbind = __exit_p(cdc_unbind), 247 .unbind = __exit_p(cdc_unbind),
250}; 248};
251 249
@@ -255,7 +253,7 @@ MODULE_LICENSE("GPL");
255 253
256static int __init init(void) 254static int __init init(void)
257{ 255{
258 return usb_composite_register(&cdc_driver); 256 return usb_composite_probe(&cdc_driver, cdc_bind);
259} 257}
260module_init(init); 258module_init(init);
261 259
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 699695128e33..98b36fc88c77 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2340,12 +2340,15 @@ static const struct usb_ep_ops usb_ep_ops = {
2340static const struct usb_gadget_ops usb_gadget_ops; 2340static const struct usb_gadget_ops usb_gadget_ops;
2341 2341
2342/** 2342/**
2343 * usb_gadget_register_driver: register a gadget driver 2343 * usb_gadget_probe_driver: register a gadget driver
2344 * @driver: the driver being registered
2345 * @bind: the driver's bind callback
2344 * 2346 *
2345 * Check usb_gadget_register_driver() at "usb_gadget.h" for details 2347 * Check usb_gadget_probe_driver() at <linux/usb/gadget.h> for details.
2346 * Interrupts are enabled here 2348 * Interrupts are enabled here.
2347 */ 2349 */
2348int usb_gadget_register_driver(struct usb_gadget_driver *driver) 2350int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2351 int (*bind)(struct usb_gadget *))
2349{ 2352{
2350 struct ci13xxx *udc = _udc; 2353 struct ci13xxx *udc = _udc;
2351 unsigned long i, k, flags; 2354 unsigned long i, k, flags;
@@ -2354,7 +2357,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2354 trace("%p", driver); 2357 trace("%p", driver);
2355 2358
2356 if (driver == NULL || 2359 if (driver == NULL ||
2357 driver->bind == NULL || 2360 bind == NULL ||
2358 driver->unbind == NULL || 2361 driver->unbind == NULL ||
2359 driver->setup == NULL || 2362 driver->setup == NULL ||
2360 driver->disconnect == NULL || 2363 driver->disconnect == NULL ||
@@ -2430,7 +2433,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2430 udc->gadget.dev.driver = &driver->driver; 2433 udc->gadget.dev.driver = &driver->driver;
2431 2434
2432 spin_unlock_irqrestore(udc->lock, flags); 2435 spin_unlock_irqrestore(udc->lock, flags);
2433 retval = driver->bind(&udc->gadget); /* MAY SLEEP */ 2436 retval = bind(&udc->gadget); /* MAY SLEEP */
2434 spin_lock_irqsave(udc->lock, flags); 2437 spin_lock_irqsave(udc->lock, flags);
2435 2438
2436 if (retval) { 2439 if (retval) {
@@ -2447,7 +2450,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2447 usb_gadget_unregister_driver(driver); 2450 usb_gadget_unregister_driver(driver);
2448 return retval; 2451 return retval;
2449} 2452}
2450EXPORT_SYMBOL(usb_gadget_register_driver); 2453EXPORT_SYMBOL(usb_gadget_probe_driver);
2451 2454
2452/** 2455/**
2453 * usb_gadget_unregister_driver: unregister a gadget driver 2456 * usb_gadget_unregister_driver: unregister a gadget driver
@@ -2462,7 +2465,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2462 trace("%p", driver); 2465 trace("%p", driver);
2463 2466
2464 if (driver == NULL || 2467 if (driver == NULL ||
2465 driver->bind == NULL ||
2466 driver->unbind == NULL || 2468 driver->unbind == NULL ||
2467 driver->setup == NULL || 2469 driver->setup == NULL ||
2468 driver->disconnect == NULL || 2470 driver->disconnect == NULL ||
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 1160c55de7f2..7b5cc16e4a0b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/utsname.h>
27 28
28#include <linux/usb/composite.h> 29#include <linux/usb/composite.h>
29 30
@@ -39,6 +40,7 @@
39#define USB_BUFSIZ 1024 40#define USB_BUFSIZ 1024
40 41
41static struct usb_composite_driver *composite; 42static struct usb_composite_driver *composite;
43static int (*composite_gadget_bind)(struct usb_composite_dev *cdev);
42 44
43/* Some systems will need runtime overrides for the product identifers 45/* Some systems will need runtime overrides for the product identifers
44 * published in the device descriptor, either numbers or strings or both. 46 * published in the device descriptor, either numbers or strings or both.
@@ -69,6 +71,8 @@ static char *iSerialNumber;
69module_param(iSerialNumber, charp, 0); 71module_param(iSerialNumber, charp, 0);
70MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); 72MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
71 73
74static char composite_manufacturer[50];
75
72/*-------------------------------------------------------------------------*/ 76/*-------------------------------------------------------------------------*/
73 77
74/** 78/**
@@ -470,18 +474,20 @@ done:
470 * usb_add_config() - add a configuration to a device. 474 * usb_add_config() - add a configuration to a device.
471 * @cdev: wraps the USB gadget 475 * @cdev: wraps the USB gadget
472 * @config: the configuration, with bConfigurationValue assigned 476 * @config: the configuration, with bConfigurationValue assigned
477 * @bind: the configuration's bind function
473 * Context: single threaded during gadget setup 478 * Context: single threaded during gadget setup
474 * 479 *
475 * One of the main tasks of a composite driver's bind() routine is to 480 * One of the main tasks of a composite @bind() routine is to
476 * add each of the configurations it supports, using this routine. 481 * add each of the configurations it supports, using this routine.
477 * 482 *
478 * This function returns the value of the configuration's bind(), which 483 * This function returns the value of the configuration's @bind(), which
479 * is zero for success else a negative errno value. Binding configurations 484 * is zero for success else a negative errno value. Binding configurations
480 * assigns global resources including string IDs, and per-configuration 485 * assigns global resources including string IDs, and per-configuration
481 * resources such as interface IDs and endpoints. 486 * resources such as interface IDs and endpoints.
482 */ 487 */
483int usb_add_config(struct usb_composite_dev *cdev, 488int usb_add_config(struct usb_composite_dev *cdev,
484 struct usb_configuration *config) 489 struct usb_configuration *config,
490 int (*bind)(struct usb_configuration *))
485{ 491{
486 int status = -EINVAL; 492 int status = -EINVAL;
487 struct usb_configuration *c; 493 struct usb_configuration *c;
@@ -490,7 +496,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
490 config->bConfigurationValue, 496 config->bConfigurationValue,
491 config->label, config); 497 config->label, config);
492 498
493 if (!config->bConfigurationValue || !config->bind) 499 if (!config->bConfigurationValue || !bind)
494 goto done; 500 goto done;
495 501
496 /* Prevent duplicate configuration identifiers */ 502 /* Prevent duplicate configuration identifiers */
@@ -507,7 +513,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
507 INIT_LIST_HEAD(&config->functions); 513 INIT_LIST_HEAD(&config->functions);
508 config->next_interface_id = 0; 514 config->next_interface_id = 0;
509 515
510 status = config->bind(config); 516 status = bind(config);
511 if (status < 0) { 517 if (status < 0) {
512 list_del(&config->list); 518 list_del(&config->list);
513 config->cdev = NULL; 519 config->cdev = NULL;
@@ -533,7 +539,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
533 } 539 }
534 } 540 }
535 541
536 /* set_alt(), or next config->bind(), sets up 542 /* set_alt(), or next bind(), sets up
537 * ep->driver_data as needed. 543 * ep->driver_data as needed.
538 */ 544 */
539 usb_ep_autoconfig_reset(cdev->gadget); 545 usb_ep_autoconfig_reset(cdev->gadget);
@@ -599,6 +605,7 @@ static int get_string(struct usb_composite_dev *cdev,
599 struct usb_configuration *c; 605 struct usb_configuration *c;
600 struct usb_function *f; 606 struct usb_function *f;
601 int len; 607 int len;
608 const char *str;
602 609
603 /* Yes, not only is USB's I18N support probably more than most 610 /* Yes, not only is USB's I18N support probably more than most
604 * folk will ever care about ... also, it's all supported here. 611 * folk will ever care about ... also, it's all supported here.
@@ -638,9 +645,29 @@ static int get_string(struct usb_composite_dev *cdev,
638 return s->bLength; 645 return s->bLength;
639 } 646 }
640 647
641 /* Otherwise, look up and return a specified string. String IDs 648 /* Otherwise, look up and return a specified string. First
642 * are device-scoped, so we look up each string table we're told 649 * check if the string has not been overridden.
643 * about. These lookups are infrequent; simpler-is-better here. 650 */
651 if (cdev->manufacturer_override == id)
652 str = iManufacturer ?: composite->iManufacturer ?:
653 composite_manufacturer;
654 else if (cdev->product_override == id)
655 str = iProduct ?: composite->iProduct;
656 else if (cdev->serial_override == id)
657 str = iSerialNumber;
658 else
659 str = NULL;
660 if (str) {
661 struct usb_gadget_strings strings = {
662 .language = language,
663 .strings = &(struct usb_string) { 0xff, str }
664 };
665 return usb_gadget_get_string(&strings, 0xff, buf);
666 }
667
668 /* String IDs are device-scoped, so we look up each string
669 * table we're told about. These lookups are infrequent;
670 * simpler-is-better here.
644 */ 671 */
645 if (composite->strings) { 672 if (composite->strings) {
646 len = lookup_string(composite->strings, buf, language, id); 673 len = lookup_string(composite->strings, buf, language, id);
@@ -901,7 +928,8 @@ unknown:
901 */ 928 */
902 switch (ctrl->bRequestType & USB_RECIP_MASK) { 929 switch (ctrl->bRequestType & USB_RECIP_MASK) {
903 case USB_RECIP_INTERFACE: 930 case USB_RECIP_INTERFACE:
904 f = cdev->config->interface[intf]; 931 if (cdev->config)
932 f = cdev->config->interface[intf];
905 break; 933 break;
906 934
907 case USB_RECIP_ENDPOINT: 935 case USB_RECIP_ENDPOINT:
@@ -1025,26 +1053,17 @@ composite_unbind(struct usb_gadget *gadget)
1025 composite = NULL; 1053 composite = NULL;
1026} 1054}
1027 1055
1028static void 1056static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
1029string_override_one(struct usb_gadget_strings *tab, u8 id, const char *s)
1030{ 1057{
1031 struct usb_string *str = tab->strings; 1058 if (!*desc) {
1032 1059 int ret = usb_string_id(cdev);
1033 for (str = tab->strings; str->s; str++) { 1060 if (unlikely(ret < 0))
1034 if (str->id == id) { 1061 WARNING(cdev, "failed to override string ID\n");
1035 str->s = s; 1062 else
1036 return; 1063 *desc = ret;
1037 }
1038 } 1064 }
1039}
1040 1065
1041static void 1066 return *desc;
1042string_override(struct usb_gadget_strings **tab, u8 id, const char *s)
1043{
1044 while (*tab) {
1045 string_override_one(*tab, id, s);
1046 tab++;
1047 }
1048} 1067}
1049 1068
1050static int composite_bind(struct usb_gadget *gadget) 1069static int composite_bind(struct usb_gadget *gadget)
@@ -1074,7 +1093,13 @@ static int composite_bind(struct usb_gadget *gadget)
1074 cdev->bufsiz = USB_BUFSIZ; 1093 cdev->bufsiz = USB_BUFSIZ;
1075 cdev->driver = composite; 1094 cdev->driver = composite;
1076 1095
1077 usb_gadget_set_selfpowered(gadget); 1096 /*
1097 * As per USB compliance update, a device that is actively drawing
1098 * more than 100mA from USB must report itself as bus-powered in
1099 * the GetStatus(DEVICE) call.
1100 */
1101 if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
1102 usb_gadget_set_selfpowered(gadget);
1078 1103
1079 /* interface and string IDs start at zero via kzalloc. 1104 /* interface and string IDs start at zero via kzalloc.
1080 * we force endpoints to start unassigned; few controller 1105 * we force endpoints to start unassigned; few controller
@@ -1094,26 +1119,41 @@ static int composite_bind(struct usb_gadget *gadget)
1094 * serial number), register function drivers, potentially update 1119 * serial number), register function drivers, potentially update
1095 * power state and consumption, etc 1120 * power state and consumption, etc
1096 */ 1121 */
1097 status = composite->bind(cdev); 1122 status = composite_gadget_bind(cdev);
1098 if (status < 0) 1123 if (status < 0)
1099 goto fail; 1124 goto fail;
1100 1125
1101 cdev->desc = *composite->dev; 1126 cdev->desc = *composite->dev;
1102 cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; 1127 cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
1103 1128
1104 /* strings can't be assigned before bind() allocates the 1129 /* stirng overrides */
1105 * releavnt identifiers 1130 if (iManufacturer || !cdev->desc.iManufacturer) {
1106 */ 1131 if (!iManufacturer && !composite->iManufacturer &&
1107 if (cdev->desc.iManufacturer && iManufacturer) 1132 !*composite_manufacturer)
1108 string_override(composite->strings, 1133 snprintf(composite_manufacturer,
1109 cdev->desc.iManufacturer, iManufacturer); 1134 sizeof composite_manufacturer,
1110 if (cdev->desc.iProduct && iProduct) 1135 "%s %s with %s",
1111 string_override(composite->strings, 1136 init_utsname()->sysname,
1112 cdev->desc.iProduct, iProduct); 1137 init_utsname()->release,
1113 if (cdev->desc.iSerialNumber && iSerialNumber) 1138 gadget->name);
1114 string_override(composite->strings, 1139
1115 cdev->desc.iSerialNumber, iSerialNumber); 1140 cdev->manufacturer_override =
1141 override_id(cdev, &cdev->desc.iManufacturer);
1142 }
1143
1144 if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
1145 cdev->product_override =
1146 override_id(cdev, &cdev->desc.iProduct);
1147
1148 if (iSerialNumber)
1149 cdev->serial_override =
1150 override_id(cdev, &cdev->desc.iSerialNumber);
1151
1152 /* has userspace failed to provide a serial number? */
1153 if (composite->needs_serial && !cdev->desc.iSerialNumber)
1154 WARNING(cdev, "userspace failed to provide iSerialNumber\n");
1116 1155
1156 /* finish up */
1117 status = device_create_file(&gadget->dev, &dev_attr_suspended); 1157 status = device_create_file(&gadget->dev, &dev_attr_suspended);
1118 if (status) 1158 if (status)
1119 goto fail; 1159 goto fail;
@@ -1177,7 +1217,6 @@ composite_resume(struct usb_gadget *gadget)
1177static struct usb_gadget_driver composite_driver = { 1217static struct usb_gadget_driver composite_driver = {
1178 .speed = USB_SPEED_HIGH, 1218 .speed = USB_SPEED_HIGH,
1179 1219
1180 .bind = composite_bind,
1181 .unbind = composite_unbind, 1220 .unbind = composite_unbind,
1182 1221
1183 .setup = composite_setup, 1222 .setup = composite_setup,
@@ -1192,8 +1231,12 @@ static struct usb_gadget_driver composite_driver = {
1192}; 1231};
1193 1232
1194/** 1233/**
1195 * usb_composite_register() - register a composite driver 1234 * usb_composite_probe() - register a composite driver
1196 * @driver: the driver to register 1235 * @driver: the driver to register
1236 * @bind: the callback used to allocate resources that are shared across the
1237 * whole device, such as string IDs, and add its configurations using
1238 * @usb_add_config(). This may fail by returning a negative errno
1239 * value; it should return zero on successful initialization.
1197 * Context: single threaded during gadget setup 1240 * Context: single threaded during gadget setup
1198 * 1241 *
1199 * This function is used to register drivers using the composite driver 1242 * This function is used to register drivers using the composite driver
@@ -1206,18 +1249,22 @@ static struct usb_gadget_driver composite_driver = {
1206 * while it was binding. That would usually be done in order to wait for 1249 * while it was binding. That would usually be done in order to wait for
1207 * some userspace participation. 1250 * some userspace participation.
1208 */ 1251 */
1209int usb_composite_register(struct usb_composite_driver *driver) 1252extern int usb_composite_probe(struct usb_composite_driver *driver,
1253 int (*bind)(struct usb_composite_dev *cdev))
1210{ 1254{
1211 if (!driver || !driver->dev || !driver->bind || composite) 1255 if (!driver || !driver->dev || !bind || composite)
1212 return -EINVAL; 1256 return -EINVAL;
1213 1257
1258 if (!driver->iProduct)
1259 driver->iProduct = driver->name;
1214 if (!driver->name) 1260 if (!driver->name)
1215 driver->name = "composite"; 1261 driver->name = "composite";
1216 composite_driver.function = (char *) driver->name; 1262 composite_driver.function = (char *) driver->name;
1217 composite_driver.driver.name = driver->name; 1263 composite_driver.driver.name = driver->name;
1218 composite = driver; 1264 composite = driver;
1265 composite_gadget_bind = bind;
1219 1266
1220 return usb_gadget_register_driver(&composite_driver); 1267 return usb_gadget_probe_driver(&composite_driver, composite_bind);
1221} 1268}
1222 1269
1223/** 1270/**
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 0ed50a2c0a36..e5ac8a316fec 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -386,15 +386,13 @@ static int dbgp_setup(struct usb_gadget *gadget,
386 } else 386 } else
387 goto fail; 387 goto fail;
388 388
389 if (len >= 0) { 389 req->length = min(length, len);
390 req->length = min(length, len); 390 req->zero = len < req->length;
391 req->zero = len < req->length; 391 if (data && req->length)
392 if (data && req->length) 392 memcpy(req->buf, data, req->length);
393 memcpy(req->buf, data, req->length); 393
394 394 req->complete = dbgp_setup_complete;
395 req->complete = dbgp_setup_complete; 395 return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
396 return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
397 }
398 396
399fail: 397fail:
400 dev_dbg(&dbgp.gadget->dev, 398 dev_dbg(&dbgp.gadget->dev,
@@ -405,7 +403,6 @@ fail:
405static struct usb_gadget_driver dbgp_driver = { 403static struct usb_gadget_driver dbgp_driver = {
406 .function = "dbgp", 404 .function = "dbgp",
407 .speed = USB_SPEED_HIGH, 405 .speed = USB_SPEED_HIGH,
408 .bind = dbgp_bind,
409 .unbind = dbgp_unbind, 406 .unbind = dbgp_unbind,
410 .setup = dbgp_setup, 407 .setup = dbgp_setup,
411 .disconnect = dbgp_disconnect, 408 .disconnect = dbgp_disconnect,
@@ -417,7 +414,7 @@ static struct usb_gadget_driver dbgp_driver = {
417 414
418static int __init dbgp_init(void) 415static int __init dbgp_init(void)
419{ 416{
420 return usb_gadget_register_driver(&dbgp_driver); 417 return usb_gadget_probe_driver(&dbgp_driver, dbgp_bind);
421} 418}
422 419
423static void __exit dbgp_exit(void) 420static void __exit dbgp_exit(void)
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index dc6546248ed9..1d2a2abbfa80 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -748,7 +748,8 @@ static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
748 */ 748 */
749 749
750int 750int
751usb_gadget_register_driver (struct usb_gadget_driver *driver) 751usb_gadget_probe_driver(struct usb_gadget_driver *driver,
752 int (*bind)(struct usb_gadget *))
752{ 753{
753 struct dummy *dum = the_controller; 754 struct dummy *dum = the_controller;
754 int retval, i; 755 int retval, i;
@@ -757,8 +758,7 @@ usb_gadget_register_driver (struct usb_gadget_driver *driver)
757 return -EINVAL; 758 return -EINVAL;
758 if (dum->driver) 759 if (dum->driver)
759 return -EBUSY; 760 return -EBUSY;
760 if (!driver->bind || !driver->setup 761 if (!bind || !driver->setup || driver->speed == USB_SPEED_UNKNOWN)
761 || driver->speed == USB_SPEED_UNKNOWN)
762 return -EINVAL; 762 return -EINVAL;
763 763
764 /* 764 /*
@@ -796,7 +796,7 @@ usb_gadget_register_driver (struct usb_gadget_driver *driver)
796 dum->gadget.dev.driver = &driver->driver; 796 dum->gadget.dev.driver = &driver->driver;
797 dev_dbg (udc_dev(dum), "binding gadget driver '%s'\n", 797 dev_dbg (udc_dev(dum), "binding gadget driver '%s'\n",
798 driver->driver.name); 798 driver->driver.name);
799 retval = driver->bind(&dum->gadget); 799 retval = bind(&dum->gadget);
800 if (retval) { 800 if (retval) {
801 dum->driver = NULL; 801 dum->driver = NULL;
802 dum->gadget.dev.driver = NULL; 802 dum->gadget.dev.driver = NULL;
@@ -812,7 +812,7 @@ usb_gadget_register_driver (struct usb_gadget_driver *driver)
812 usb_hcd_poll_rh_status (dummy_to_hcd (dum)); 812 usb_hcd_poll_rh_status (dummy_to_hcd (dum));
813 return 0; 813 return 0;
814} 814}
815EXPORT_SYMBOL (usb_gadget_register_driver); 815EXPORT_SYMBOL(usb_gadget_probe_driver);
816 816
817int 817int
818usb_gadget_unregister_driver (struct usb_gadget_driver *driver) 818usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
@@ -874,6 +874,8 @@ static int dummy_udc_probe (struct platform_device *pdev)
874 struct dummy *dum = the_controller; 874 struct dummy *dum = the_controller;
875 int rc; 875 int rc;
876 876
877 usb_get_hcd(dummy_to_hcd(dum));
878
877 dum->gadget.name = gadget_name; 879 dum->gadget.name = gadget_name;
878 dum->gadget.ops = &dummy_ops; 880 dum->gadget.ops = &dummy_ops;
879 dum->gadget.is_dualspeed = 1; 881 dum->gadget.is_dualspeed = 1;
@@ -885,10 +887,10 @@ static int dummy_udc_probe (struct platform_device *pdev)
885 dum->gadget.dev.parent = &pdev->dev; 887 dum->gadget.dev.parent = &pdev->dev;
886 dum->gadget.dev.release = dummy_gadget_release; 888 dum->gadget.dev.release = dummy_gadget_release;
887 rc = device_register (&dum->gadget.dev); 889 rc = device_register (&dum->gadget.dev);
888 if (rc < 0) 890 if (rc < 0) {
891 put_device(&dum->gadget.dev);
889 return rc; 892 return rc;
890 893 }
891 usb_get_hcd (dummy_to_hcd (dum));
892 894
893 platform_set_drvdata (pdev, dum); 895 platform_set_drvdata (pdev, dum);
894 rc = device_create_file (&dum->gadget.dev, &dev_attr_function); 896 rc = device_create_file (&dum->gadget.dev, &dev_attr_function);
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 114fa024c22c..1690c9d68256 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -237,7 +237,7 @@ static u8 hostaddr[ETH_ALEN];
237 * the first one present. That's to make Microsoft's drivers happy, 237 * the first one present. That's to make Microsoft's drivers happy,
238 * and to follow DOCSIS 1.0 (cable modem standard). 238 * and to follow DOCSIS 1.0 (cable modem standard).
239 */ 239 */
240static int __ref rndis_do_config(struct usb_configuration *c) 240static int __init rndis_do_config(struct usb_configuration *c)
241{ 241{
242 /* FIXME alloc iConfiguration string, set it in c->strings */ 242 /* FIXME alloc iConfiguration string, set it in c->strings */
243 243
@@ -251,7 +251,6 @@ static int __ref rndis_do_config(struct usb_configuration *c)
251 251
252static struct usb_configuration rndis_config_driver = { 252static struct usb_configuration rndis_config_driver = {
253 .label = "RNDIS", 253 .label = "RNDIS",
254 .bind = rndis_do_config,
255 .bConfigurationValue = 2, 254 .bConfigurationValue = 2,
256 /* .iConfiguration = DYNAMIC */ 255 /* .iConfiguration = DYNAMIC */
257 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 256 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -270,7 +269,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
270/* 269/*
271 * We _always_ have an ECM, CDC Subset, or EEM configuration. 270 * We _always_ have an ECM, CDC Subset, or EEM configuration.
272 */ 271 */
273static int __ref eth_do_config(struct usb_configuration *c) 272static int __init eth_do_config(struct usb_configuration *c)
274{ 273{
275 /* FIXME alloc iConfiguration string, set it in c->strings */ 274 /* FIXME alloc iConfiguration string, set it in c->strings */
276 275
@@ -289,7 +288,6 @@ static int __ref eth_do_config(struct usb_configuration *c)
289 288
290static struct usb_configuration eth_config_driver = { 289static struct usb_configuration eth_config_driver = {
291 /* .label = f(hardware) */ 290 /* .label = f(hardware) */
292 .bind = eth_do_config,
293 .bConfigurationValue = 1, 291 .bConfigurationValue = 1,
294 /* .iConfiguration = DYNAMIC */ 292 /* .iConfiguration = DYNAMIC */
295 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 293 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -297,7 +295,7 @@ static struct usb_configuration eth_config_driver = {
297 295
298/*-------------------------------------------------------------------------*/ 296/*-------------------------------------------------------------------------*/
299 297
300static int __ref eth_bind(struct usb_composite_dev *cdev) 298static int __init eth_bind(struct usb_composite_dev *cdev)
301{ 299{
302 int gcnum; 300 int gcnum;
303 struct usb_gadget *gadget = cdev->gadget; 301 struct usb_gadget *gadget = cdev->gadget;
@@ -373,12 +371,13 @@ static int __ref eth_bind(struct usb_composite_dev *cdev)
373 371
374 /* register our configuration(s); RNDIS first, if it's used */ 372 /* register our configuration(s); RNDIS first, if it's used */
375 if (has_rndis()) { 373 if (has_rndis()) {
376 status = usb_add_config(cdev, &rndis_config_driver); 374 status = usb_add_config(cdev, &rndis_config_driver,
375 rndis_do_config);
377 if (status < 0) 376 if (status < 0)
378 goto fail; 377 goto fail;
379 } 378 }
380 379
381 status = usb_add_config(cdev, &eth_config_driver); 380 status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
382 if (status < 0) 381 if (status < 0)
383 goto fail; 382 goto fail;
384 383
@@ -402,7 +401,6 @@ static struct usb_composite_driver eth_driver = {
402 .name = "g_ether", 401 .name = "g_ether",
403 .dev = &device_desc, 402 .dev = &device_desc,
404 .strings = dev_strings, 403 .strings = dev_strings,
405 .bind = eth_bind,
406 .unbind = __exit_p(eth_unbind), 404 .unbind = __exit_p(eth_unbind),
407}; 405};
408 406
@@ -412,7 +410,7 @@ MODULE_LICENSE("GPL");
412 410
413static int __init init(void) 411static int __init init(void)
414{ 412{
415 return usb_composite_register(&eth_driver); 413 return usb_composite_probe(&eth_driver, eth_bind);
416} 414}
417module_init(init); 415module_init(init);
418 416
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d47a123f15ab..bd6226cbae86 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -111,7 +111,7 @@ acm_iad_descriptor = {
111 .bInterfaceCount = 2, // control + data 111 .bInterfaceCount = 2, // control + data
112 .bFunctionClass = USB_CLASS_COMM, 112 .bFunctionClass = USB_CLASS_COMM,
113 .bFunctionSubClass = USB_CDC_SUBCLASS_ACM, 113 .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
114 .bFunctionProtocol = USB_CDC_PROTO_NONE, 114 .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
115 /* .iFunction = DYNAMIC */ 115 /* .iFunction = DYNAMIC */
116}; 116};
117 117
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index 43225879c3cd..b37960f9e753 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -324,7 +324,7 @@ static void loopback_disable(struct usb_function *f)
324 324
325/*-------------------------------------------------------------------------*/ 325/*-------------------------------------------------------------------------*/
326 326
327static int __ref loopback_bind_config(struct usb_configuration *c) 327static int __init loopback_bind_config(struct usb_configuration *c)
328{ 328{
329 struct f_loopback *loop; 329 struct f_loopback *loop;
330 int status; 330 int status;
@@ -346,10 +346,9 @@ static int __ref loopback_bind_config(struct usb_configuration *c)
346 return status; 346 return status;
347} 347}
348 348
349static struct usb_configuration loopback_driver = { 349static struct usb_configuration loopback_driver = {
350 .label = "loopback", 350 .label = "loopback",
351 .strings = loopback_strings, 351 .strings = loopback_strings,
352 .bind = loopback_bind_config,
353 .bConfigurationValue = 2, 352 .bConfigurationValue = 2,
354 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 353 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
355 /* .iConfiguration = DYNAMIC */ 354 /* .iConfiguration = DYNAMIC */
@@ -382,5 +381,5 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
382 loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 381 loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
383 } 382 }
384 383
385 return usb_add_config(cdev, &loopback_driver); 384 return usb_add_config(cdev, &loopback_driver, loopback_bind_config);
386} 385}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 32cce029f65c..838286b1cd14 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -73,6 +73,8 @@
73 * being removable. 73 * being removable.
74 * ->cdrom Flag specifying that LUN shall be reported as 74 * ->cdrom Flag specifying that LUN shall be reported as
75 * being a CD-ROM. 75 * being a CD-ROM.
76 * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
77 * commands for this LUN shall be ignored.
76 * 78 *
77 * lun_name_format A printf-like format for names of the LUN 79 * lun_name_format A printf-like format for names of the LUN
78 * devices. This determines how the 80 * devices. This determines how the
@@ -127,6 +129,8 @@
127 * Default true, boolean for removable media. 129 * Default true, boolean for removable media.
128 * cdrom=b[,b...] Default false, boolean for whether to emulate 130 * cdrom=b[,b...] Default false, boolean for whether to emulate
129 * a CD-ROM drive. 131 * a CD-ROM drive.
132 * nofua=b[,b...] Default false, booleans for ignore FUA flag
133 * in SCSI WRITE(10,12) commands
130 * luns=N Default N = number of filenames, number of 134 * luns=N Default N = number of filenames, number of
131 * LUNs to support. 135 * LUNs to support.
132 * stall Default determined according to the type of 136 * stall Default determined according to the type of
@@ -409,6 +413,7 @@ struct fsg_config {
409 char ro; 413 char ro;
410 char removable; 414 char removable;
411 char cdrom; 415 char cdrom;
416 char nofua;
412 } luns[FSG_MAX_LUNS]; 417 } luns[FSG_MAX_LUNS];
413 418
414 const char *lun_name_format; 419 const char *lun_name_format;
@@ -736,7 +741,7 @@ static int do_read(struct fsg_common *common)
736 741
737 /* Get the starting Logical Block Address and check that it's 742 /* Get the starting Logical Block Address and check that it's
738 * not too big */ 743 * not too big */
739 if (common->cmnd[0] == SC_READ_6) 744 if (common->cmnd[0] == READ_6)
740 lba = get_unaligned_be24(&common->cmnd[1]); 745 lba = get_unaligned_be24(&common->cmnd[1]);
741 else { 746 else {
742 lba = get_unaligned_be32(&common->cmnd[2]); 747 lba = get_unaligned_be32(&common->cmnd[2]);
@@ -874,7 +879,7 @@ static int do_write(struct fsg_common *common)
874 879
875 /* Get the starting Logical Block Address and check that it's 880 /* Get the starting Logical Block Address and check that it's
876 * not too big */ 881 * not too big */
877 if (common->cmnd[0] == SC_WRITE_6) 882 if (common->cmnd[0] == WRITE_6)
878 lba = get_unaligned_be24(&common->cmnd[1]); 883 lba = get_unaligned_be24(&common->cmnd[1]);
879 else { 884 else {
880 lba = get_unaligned_be32(&common->cmnd[2]); 885 lba = get_unaligned_be32(&common->cmnd[2]);
@@ -887,7 +892,7 @@ static int do_write(struct fsg_common *common)
887 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 892 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
888 return -EINVAL; 893 return -EINVAL;
889 } 894 }
890 if (common->cmnd[1] & 0x08) { /* FUA */ 895 if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
891 spin_lock(&curlun->filp->f_lock); 896 spin_lock(&curlun->filp->f_lock);
892 curlun->filp->f_flags |= O_SYNC; 897 curlun->filp->f_flags |= O_SYNC;
893 spin_unlock(&curlun->filp->f_lock); 898 spin_unlock(&curlun->filp->f_lock);
@@ -1181,7 +1186,7 @@ static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1181 return 36; 1186 return 36;
1182 } 1187 }
1183 1188
1184 buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK; 1189 buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
1185 buf[1] = curlun->removable ? 0x80 : 0; 1190 buf[1] = curlun->removable ? 0x80 : 0;
1186 buf[2] = 2; /* ANSI SCSI level 2 */ 1191 buf[2] = 2; /* ANSI SCSI level 2 */
1187 buf[3] = 2; /* SCSI-2 INQUIRY data format */ 1192 buf[3] = 2; /* SCSI-2 INQUIRY data format */
@@ -1348,11 +1353,11 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1348 * The only variable value is the WriteProtect bit. We will fill in 1353 * The only variable value is the WriteProtect bit. We will fill in
1349 * the mode data length later. */ 1354 * the mode data length later. */
1350 memset(buf, 0, 8); 1355 memset(buf, 0, 8);
1351 if (mscmnd == SC_MODE_SENSE_6) { 1356 if (mscmnd == MODE_SENSE) {
1352 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1357 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1353 buf += 4; 1358 buf += 4;
1354 limit = 255; 1359 limit = 255;
1355 } else { /* SC_MODE_SENSE_10 */ 1360 } else { /* MODE_SENSE_10 */
1356 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1361 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1357 buf += 8; 1362 buf += 8;
1358 limit = 65535; /* Should really be FSG_BUFLEN */ 1363 limit = 65535; /* Should really be FSG_BUFLEN */
@@ -1392,7 +1397,7 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1392 } 1397 }
1393 1398
1394 /* Store the mode data length */ 1399 /* Store the mode data length */
1395 if (mscmnd == SC_MODE_SENSE_6) 1400 if (mscmnd == MODE_SENSE)
1396 buf0[0] = len - 1; 1401 buf0[0] = len - 1;
1397 else 1402 else
1398 put_unaligned_be16(len - 2, buf0); 1403 put_unaligned_be16(len - 2, buf0);
@@ -1881,7 +1886,7 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1881 if (common->lun >= 0 && common->lun < common->nluns) { 1886 if (common->lun >= 0 && common->lun < common->nluns) {
1882 curlun = &common->luns[common->lun]; 1887 curlun = &common->luns[common->lun];
1883 common->curlun = curlun; 1888 common->curlun = curlun;
1884 if (common->cmnd[0] != SC_REQUEST_SENSE) { 1889 if (common->cmnd[0] != REQUEST_SENSE) {
1885 curlun->sense_data = SS_NO_SENSE; 1890 curlun->sense_data = SS_NO_SENSE;
1886 curlun->sense_data_info = 0; 1891 curlun->sense_data_info = 0;
1887 curlun->info_valid = 0; 1892 curlun->info_valid = 0;
@@ -1893,8 +1898,8 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1893 1898
1894 /* INQUIRY and REQUEST SENSE commands are explicitly allowed 1899 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1895 * to use unsupported LUNs; all others may not. */ 1900 * to use unsupported LUNs; all others may not. */
1896 if (common->cmnd[0] != SC_INQUIRY && 1901 if (common->cmnd[0] != INQUIRY &&
1897 common->cmnd[0] != SC_REQUEST_SENSE) { 1902 common->cmnd[0] != REQUEST_SENSE) {
1898 DBG(common, "unsupported LUN %d\n", common->lun); 1903 DBG(common, "unsupported LUN %d\n", common->lun);
1899 return -EINVAL; 1904 return -EINVAL;
1900 } 1905 }
@@ -1903,8 +1908,8 @@ static int check_command(struct fsg_common *common, int cmnd_size,
1903 /* If a unit attention condition exists, only INQUIRY and 1908 /* If a unit attention condition exists, only INQUIRY and
1904 * REQUEST SENSE commands are allowed; anything else must fail. */ 1909 * REQUEST SENSE commands are allowed; anything else must fail. */
1905 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 1910 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1906 common->cmnd[0] != SC_INQUIRY && 1911 common->cmnd[0] != INQUIRY &&
1907 common->cmnd[0] != SC_REQUEST_SENSE) { 1912 common->cmnd[0] != REQUEST_SENSE) {
1908 curlun->sense_data = curlun->unit_attention_data; 1913 curlun->sense_data = curlun->unit_attention_data;
1909 curlun->unit_attention_data = SS_NO_SENSE; 1914 curlun->unit_attention_data = SS_NO_SENSE;
1910 return -EINVAL; 1915 return -EINVAL;
@@ -1955,7 +1960,7 @@ static int do_scsi_command(struct fsg_common *common)
1955 down_read(&common->filesem); /* We're using the backing file */ 1960 down_read(&common->filesem); /* We're using the backing file */
1956 switch (common->cmnd[0]) { 1961 switch (common->cmnd[0]) {
1957 1962
1958 case SC_INQUIRY: 1963 case INQUIRY:
1959 common->data_size_from_cmnd = common->cmnd[4]; 1964 common->data_size_from_cmnd = common->cmnd[4];
1960 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1965 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1961 (1<<4), 0, 1966 (1<<4), 0,
@@ -1964,7 +1969,7 @@ static int do_scsi_command(struct fsg_common *common)
1964 reply = do_inquiry(common, bh); 1969 reply = do_inquiry(common, bh);
1965 break; 1970 break;
1966 1971
1967 case SC_MODE_SELECT_6: 1972 case MODE_SELECT:
1968 common->data_size_from_cmnd = common->cmnd[4]; 1973 common->data_size_from_cmnd = common->cmnd[4];
1969 reply = check_command(common, 6, DATA_DIR_FROM_HOST, 1974 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1970 (1<<1) | (1<<4), 0, 1975 (1<<1) | (1<<4), 0,
@@ -1973,7 +1978,7 @@ static int do_scsi_command(struct fsg_common *common)
1973 reply = do_mode_select(common, bh); 1978 reply = do_mode_select(common, bh);
1974 break; 1979 break;
1975 1980
1976 case SC_MODE_SELECT_10: 1981 case MODE_SELECT_10:
1977 common->data_size_from_cmnd = 1982 common->data_size_from_cmnd =
1978 get_unaligned_be16(&common->cmnd[7]); 1983 get_unaligned_be16(&common->cmnd[7]);
1979 reply = check_command(common, 10, DATA_DIR_FROM_HOST, 1984 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
@@ -1983,7 +1988,7 @@ static int do_scsi_command(struct fsg_common *common)
1983 reply = do_mode_select(common, bh); 1988 reply = do_mode_select(common, bh);
1984 break; 1989 break;
1985 1990
1986 case SC_MODE_SENSE_6: 1991 case MODE_SENSE:
1987 common->data_size_from_cmnd = common->cmnd[4]; 1992 common->data_size_from_cmnd = common->cmnd[4];
1988 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1993 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1989 (1<<1) | (1<<2) | (1<<4), 0, 1994 (1<<1) | (1<<2) | (1<<4), 0,
@@ -1992,7 +1997,7 @@ static int do_scsi_command(struct fsg_common *common)
1992 reply = do_mode_sense(common, bh); 1997 reply = do_mode_sense(common, bh);
1993 break; 1998 break;
1994 1999
1995 case SC_MODE_SENSE_10: 2000 case MODE_SENSE_10:
1996 common->data_size_from_cmnd = 2001 common->data_size_from_cmnd =
1997 get_unaligned_be16(&common->cmnd[7]); 2002 get_unaligned_be16(&common->cmnd[7]);
1998 reply = check_command(common, 10, DATA_DIR_TO_HOST, 2003 reply = check_command(common, 10, DATA_DIR_TO_HOST,
@@ -2002,7 +2007,7 @@ static int do_scsi_command(struct fsg_common *common)
2002 reply = do_mode_sense(common, bh); 2007 reply = do_mode_sense(common, bh);
2003 break; 2008 break;
2004 2009
2005 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL: 2010 case ALLOW_MEDIUM_REMOVAL:
2006 common->data_size_from_cmnd = 0; 2011 common->data_size_from_cmnd = 0;
2007 reply = check_command(common, 6, DATA_DIR_NONE, 2012 reply = check_command(common, 6, DATA_DIR_NONE,
2008 (1<<4), 0, 2013 (1<<4), 0,
@@ -2011,7 +2016,7 @@ static int do_scsi_command(struct fsg_common *common)
2011 reply = do_prevent_allow(common); 2016 reply = do_prevent_allow(common);
2012 break; 2017 break;
2013 2018
2014 case SC_READ_6: 2019 case READ_6:
2015 i = common->cmnd[4]; 2020 i = common->cmnd[4];
2016 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2021 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2017 reply = check_command(common, 6, DATA_DIR_TO_HOST, 2022 reply = check_command(common, 6, DATA_DIR_TO_HOST,
@@ -2021,7 +2026,7 @@ static int do_scsi_command(struct fsg_common *common)
2021 reply = do_read(common); 2026 reply = do_read(common);
2022 break; 2027 break;
2023 2028
2024 case SC_READ_10: 2029 case READ_10:
2025 common->data_size_from_cmnd = 2030 common->data_size_from_cmnd =
2026 get_unaligned_be16(&common->cmnd[7]) << 9; 2031 get_unaligned_be16(&common->cmnd[7]) << 9;
2027 reply = check_command(common, 10, DATA_DIR_TO_HOST, 2032 reply = check_command(common, 10, DATA_DIR_TO_HOST,
@@ -2031,7 +2036,7 @@ static int do_scsi_command(struct fsg_common *common)
2031 reply = do_read(common); 2036 reply = do_read(common);
2032 break; 2037 break;
2033 2038
2034 case SC_READ_12: 2039 case READ_12:
2035 common->data_size_from_cmnd = 2040 common->data_size_from_cmnd =
2036 get_unaligned_be32(&common->cmnd[6]) << 9; 2041 get_unaligned_be32(&common->cmnd[6]) << 9;
2037 reply = check_command(common, 12, DATA_DIR_TO_HOST, 2042 reply = check_command(common, 12, DATA_DIR_TO_HOST,
@@ -2041,7 +2046,7 @@ static int do_scsi_command(struct fsg_common *common)
2041 reply = do_read(common); 2046 reply = do_read(common);
2042 break; 2047 break;
2043 2048
2044 case SC_READ_CAPACITY: 2049 case READ_CAPACITY:
2045 common->data_size_from_cmnd = 8; 2050 common->data_size_from_cmnd = 8;
2046 reply = check_command(common, 10, DATA_DIR_TO_HOST, 2051 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2047 (0xf<<2) | (1<<8), 1, 2052 (0xf<<2) | (1<<8), 1,
@@ -2050,7 +2055,7 @@ static int do_scsi_command(struct fsg_common *common)
2050 reply = do_read_capacity(common, bh); 2055 reply = do_read_capacity(common, bh);
2051 break; 2056 break;
2052 2057
2053 case SC_READ_HEADER: 2058 case READ_HEADER:
2054 if (!common->curlun || !common->curlun->cdrom) 2059 if (!common->curlun || !common->curlun->cdrom)
2055 goto unknown_cmnd; 2060 goto unknown_cmnd;
2056 common->data_size_from_cmnd = 2061 common->data_size_from_cmnd =
@@ -2062,7 +2067,7 @@ static int do_scsi_command(struct fsg_common *common)
2062 reply = do_read_header(common, bh); 2067 reply = do_read_header(common, bh);
2063 break; 2068 break;
2064 2069
2065 case SC_READ_TOC: 2070 case READ_TOC:
2066 if (!common->curlun || !common->curlun->cdrom) 2071 if (!common->curlun || !common->curlun->cdrom)
2067 goto unknown_cmnd; 2072 goto unknown_cmnd;
2068 common->data_size_from_cmnd = 2073 common->data_size_from_cmnd =
@@ -2074,7 +2079,7 @@ static int do_scsi_command(struct fsg_common *common)
2074 reply = do_read_toc(common, bh); 2079 reply = do_read_toc(common, bh);
2075 break; 2080 break;
2076 2081
2077 case SC_READ_FORMAT_CAPACITIES: 2082 case READ_FORMAT_CAPACITIES:
2078 common->data_size_from_cmnd = 2083 common->data_size_from_cmnd =
2079 get_unaligned_be16(&common->cmnd[7]); 2084 get_unaligned_be16(&common->cmnd[7]);
2080 reply = check_command(common, 10, DATA_DIR_TO_HOST, 2085 reply = check_command(common, 10, DATA_DIR_TO_HOST,
@@ -2084,7 +2089,7 @@ static int do_scsi_command(struct fsg_common *common)
2084 reply = do_read_format_capacities(common, bh); 2089 reply = do_read_format_capacities(common, bh);
2085 break; 2090 break;
2086 2091
2087 case SC_REQUEST_SENSE: 2092 case REQUEST_SENSE:
2088 common->data_size_from_cmnd = common->cmnd[4]; 2093 common->data_size_from_cmnd = common->cmnd[4];
2089 reply = check_command(common, 6, DATA_DIR_TO_HOST, 2094 reply = check_command(common, 6, DATA_DIR_TO_HOST,
2090 (1<<4), 0, 2095 (1<<4), 0,
@@ -2093,7 +2098,7 @@ static int do_scsi_command(struct fsg_common *common)
2093 reply = do_request_sense(common, bh); 2098 reply = do_request_sense(common, bh);
2094 break; 2099 break;
2095 2100
2096 case SC_START_STOP_UNIT: 2101 case START_STOP:
2097 common->data_size_from_cmnd = 0; 2102 common->data_size_from_cmnd = 0;
2098 reply = check_command(common, 6, DATA_DIR_NONE, 2103 reply = check_command(common, 6, DATA_DIR_NONE,
2099 (1<<1) | (1<<4), 0, 2104 (1<<1) | (1<<4), 0,
@@ -2102,7 +2107,7 @@ static int do_scsi_command(struct fsg_common *common)
2102 reply = do_start_stop(common); 2107 reply = do_start_stop(common);
2103 break; 2108 break;
2104 2109
2105 case SC_SYNCHRONIZE_CACHE: 2110 case SYNCHRONIZE_CACHE:
2106 common->data_size_from_cmnd = 0; 2111 common->data_size_from_cmnd = 0;
2107 reply = check_command(common, 10, DATA_DIR_NONE, 2112 reply = check_command(common, 10, DATA_DIR_NONE,
2108 (0xf<<2) | (3<<7), 1, 2113 (0xf<<2) | (3<<7), 1,
@@ -2111,7 +2116,7 @@ static int do_scsi_command(struct fsg_common *common)
2111 reply = do_synchronize_cache(common); 2116 reply = do_synchronize_cache(common);
2112 break; 2117 break;
2113 2118
2114 case SC_TEST_UNIT_READY: 2119 case TEST_UNIT_READY:
2115 common->data_size_from_cmnd = 0; 2120 common->data_size_from_cmnd = 0;
2116 reply = check_command(common, 6, DATA_DIR_NONE, 2121 reply = check_command(common, 6, DATA_DIR_NONE,
2117 0, 1, 2122 0, 1,
@@ -2120,7 +2125,7 @@ static int do_scsi_command(struct fsg_common *common)
2120 2125
2121 /* Although optional, this command is used by MS-Windows. We 2126 /* Although optional, this command is used by MS-Windows. We
2122 * support a minimal version: BytChk must be 0. */ 2127 * support a minimal version: BytChk must be 0. */
2123 case SC_VERIFY: 2128 case VERIFY:
2124 common->data_size_from_cmnd = 0; 2129 common->data_size_from_cmnd = 0;
2125 reply = check_command(common, 10, DATA_DIR_NONE, 2130 reply = check_command(common, 10, DATA_DIR_NONE,
2126 (1<<1) | (0xf<<2) | (3<<7), 1, 2131 (1<<1) | (0xf<<2) | (3<<7), 1,
@@ -2129,7 +2134,7 @@ static int do_scsi_command(struct fsg_common *common)
2129 reply = do_verify(common); 2134 reply = do_verify(common);
2130 break; 2135 break;
2131 2136
2132 case SC_WRITE_6: 2137 case WRITE_6:
2133 i = common->cmnd[4]; 2138 i = common->cmnd[4];
2134 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2139 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2135 reply = check_command(common, 6, DATA_DIR_FROM_HOST, 2140 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
@@ -2139,7 +2144,7 @@ static int do_scsi_command(struct fsg_common *common)
2139 reply = do_write(common); 2144 reply = do_write(common);
2140 break; 2145 break;
2141 2146
2142 case SC_WRITE_10: 2147 case WRITE_10:
2143 common->data_size_from_cmnd = 2148 common->data_size_from_cmnd =
2144 get_unaligned_be16(&common->cmnd[7]) << 9; 2149 get_unaligned_be16(&common->cmnd[7]) << 9;
2145 reply = check_command(common, 10, DATA_DIR_FROM_HOST, 2150 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
@@ -2149,7 +2154,7 @@ static int do_scsi_command(struct fsg_common *common)
2149 reply = do_write(common); 2154 reply = do_write(common);
2150 break; 2155 break;
2151 2156
2152 case SC_WRITE_12: 2157 case WRITE_12:
2153 common->data_size_from_cmnd = 2158 common->data_size_from_cmnd =
2154 get_unaligned_be32(&common->cmnd[6]) << 9; 2159 get_unaligned_be32(&common->cmnd[6]) << 9;
2155 reply = check_command(common, 12, DATA_DIR_FROM_HOST, 2160 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
@@ -2163,10 +2168,10 @@ static int do_scsi_command(struct fsg_common *common)
2163 * They don't mean much in this setting. It's left as an exercise 2168 * They don't mean much in this setting. It's left as an exercise
2164 * for anyone interested to implement RESERVE and RELEASE in terms 2169 * for anyone interested to implement RESERVE and RELEASE in terms
2165 * of Posix locks. */ 2170 * of Posix locks. */
2166 case SC_FORMAT_UNIT: 2171 case FORMAT_UNIT:
2167 case SC_RELEASE: 2172 case RELEASE:
2168 case SC_RESERVE: 2173 case RESERVE:
2169 case SC_SEND_DIAGNOSTIC: 2174 case SEND_DIAGNOSTIC:
2170 /* Fall through */ 2175 /* Fall through */
2171 2176
2172 default: 2177 default:
@@ -2662,6 +2667,7 @@ static int fsg_main_thread(void *common_)
2662 2667
2663/* Write permission is checked per LUN in store_*() functions. */ 2668/* Write permission is checked per LUN in store_*() functions. */
2664static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro); 2669static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2670static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
2665static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file); 2671static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
2666 2672
2667 2673
@@ -2768,6 +2774,9 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2768 rc = device_create_file(&curlun->dev, &dev_attr_file); 2774 rc = device_create_file(&curlun->dev, &dev_attr_file);
2769 if (rc) 2775 if (rc)
2770 goto error_luns; 2776 goto error_luns;
2777 rc = device_create_file(&curlun->dev, &dev_attr_nofua);
2778 if (rc)
2779 goto error_luns;
2771 2780
2772 if (lcfg->filename) { 2781 if (lcfg->filename) {
2773 rc = fsg_lun_open(curlun, lcfg->filename); 2782 rc = fsg_lun_open(curlun, lcfg->filename);
@@ -2911,6 +2920,7 @@ static void fsg_common_release(struct kref *ref)
2911 2920
2912 /* In error recovery common->nluns may be zero. */ 2921 /* In error recovery common->nluns may be zero. */
2913 for (; i; --i, ++lun) { 2922 for (; i; --i, ++lun) {
2923 device_remove_file(&lun->dev, &dev_attr_nofua);
2914 device_remove_file(&lun->dev, &dev_attr_ro); 2924 device_remove_file(&lun->dev, &dev_attr_ro);
2915 device_remove_file(&lun->dev, &dev_attr_file); 2925 device_remove_file(&lun->dev, &dev_attr_file);
2916 fsg_lun_close(lun); 2926 fsg_lun_close(lun);
@@ -3069,8 +3079,10 @@ struct fsg_module_parameters {
3069 int ro[FSG_MAX_LUNS]; 3079 int ro[FSG_MAX_LUNS];
3070 int removable[FSG_MAX_LUNS]; 3080 int removable[FSG_MAX_LUNS];
3071 int cdrom[FSG_MAX_LUNS]; 3081 int cdrom[FSG_MAX_LUNS];
3082 int nofua[FSG_MAX_LUNS];
3072 3083
3073 unsigned int file_count, ro_count, removable_count, cdrom_count; 3084 unsigned int file_count, ro_count, removable_count, cdrom_count;
3085 unsigned int nofua_count;
3074 unsigned int luns; /* nluns */ 3086 unsigned int luns; /* nluns */
3075 int stall; /* can_stall */ 3087 int stall; /* can_stall */
3076}; 3088};
@@ -3096,6 +3108,8 @@ struct fsg_module_parameters {
3096 "true to simulate removable media"); \ 3108 "true to simulate removable media"); \
3097 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \ 3109 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3098 "true to simulate CD-ROM instead of disk"); \ 3110 "true to simulate CD-ROM instead of disk"); \
3111 _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \
3112 "true to ignore SCSI WRITE(10,12) FUA bit"); \
3099 _FSG_MODULE_PARAM(prefix, params, luns, uint, \ 3113 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3100 "number of LUNs"); \ 3114 "number of LUNs"); \
3101 _FSG_MODULE_PARAM(prefix, params, stall, bool, \ 3115 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index 685d768f336e..e403a534dd55 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -404,7 +404,7 @@ static void sourcesink_disable(struct usb_function *f)
404 404
405/*-------------------------------------------------------------------------*/ 405/*-------------------------------------------------------------------------*/
406 406
407static int __ref sourcesink_bind_config(struct usb_configuration *c) 407static int __init sourcesink_bind_config(struct usb_configuration *c)
408{ 408{
409 struct f_sourcesink *ss; 409 struct f_sourcesink *ss;
410 int status; 410 int status;
@@ -498,7 +498,6 @@ unknown:
498static struct usb_configuration sourcesink_driver = { 498static struct usb_configuration sourcesink_driver = {
499 .label = "source/sink", 499 .label = "source/sink",
500 .strings = sourcesink_strings, 500 .strings = sourcesink_strings,
501 .bind = sourcesink_bind_config,
502 .setup = sourcesink_setup, 501 .setup = sourcesink_setup,
503 .bConfigurationValue = 3, 502 .bConfigurationValue = 3,
504 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 503 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -532,5 +531,5 @@ int __init sourcesink_add(struct usb_composite_dev *cdev, bool autoresume)
532 sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 531 sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
533 } 532 }
534 533
535 return usb_add_config(cdev, &sourcesink_driver); 534 return usb_add_config(cdev, &sourcesink_driver, sourcesink_bind_config);
536} 535}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index a857b7ac238c..d4fdf65fb925 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -89,6 +89,7 @@
89 * Required if "removable" is not set, names of 89 * Required if "removable" is not set, names of
90 * the files or block devices used for 90 * the files or block devices used for
91 * backing storage 91 * backing storage
92 * serial=HHHH... Required serial number (string of hex chars)
92 * ro=b[,b...] Default false, booleans for read-only access 93 * ro=b[,b...] Default false, booleans for read-only access
93 * removable Default false, boolean for removable media 94 * removable Default false, boolean for removable media
94 * luns=N Default N = number of filenames, number of 95 * luns=N Default N = number of filenames, number of
@@ -108,12 +109,11 @@
108 * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID 109 * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
109 * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID 110 * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
110 * release=0xRRRR Override the USB release number (bcdDevice) 111 * release=0xRRRR Override the USB release number (bcdDevice)
111 * serial=HHHH... Override serial number (string of hex chars)
112 * buflen=N Default N=16384, buffer size used (will be 112 * buflen=N Default N=16384, buffer size used (will be
113 * rounded down to a multiple of 113 * rounded down to a multiple of
114 * PAGE_CACHE_SIZE) 114 * PAGE_CACHE_SIZE)
115 * 115 *
116 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro", 116 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
117 * "removable", "luns", "nofua", "stall", and "cdrom" options are available; 117 * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
118 * default values are used for everything else. 118 * default values are used for everything else.
119 * 119 *
@@ -273,13 +273,10 @@
273 273
274#define DRIVER_DESC "File-backed Storage Gadget" 274#define DRIVER_DESC "File-backed Storage Gadget"
275#define DRIVER_NAME "g_file_storage" 275#define DRIVER_NAME "g_file_storage"
276/* DRIVER_VERSION must be at least 6 characters long, as it is used 276#define DRIVER_VERSION "1 September 2010"
277 * to generate a fallback serial number. */
278#define DRIVER_VERSION "20 November 2008"
279 277
280static char fsg_string_manufacturer[64]; 278static char fsg_string_manufacturer[64];
281static const char fsg_string_product[] = DRIVER_DESC; 279static const char fsg_string_product[] = DRIVER_DESC;
282static char fsg_string_serial[13];
283static const char fsg_string_config[] = "Self-powered"; 280static const char fsg_string_config[] = "Self-powered";
284static const char fsg_string_interface[] = "Mass Storage"; 281static const char fsg_string_interface[] = "Mass Storage";
285 282
@@ -305,6 +302,7 @@ MODULE_LICENSE("Dual BSD/GPL");
305 302
306static struct { 303static struct {
307 char *file[FSG_MAX_LUNS]; 304 char *file[FSG_MAX_LUNS];
305 char *serial;
308 int ro[FSG_MAX_LUNS]; 306 int ro[FSG_MAX_LUNS];
309 int nofua[FSG_MAX_LUNS]; 307 int nofua[FSG_MAX_LUNS];
310 unsigned int num_filenames; 308 unsigned int num_filenames;
@@ -321,7 +319,6 @@ static struct {
321 unsigned short vendor; 319 unsigned short vendor;
322 unsigned short product; 320 unsigned short product;
323 unsigned short release; 321 unsigned short release;
324 char *serial;
325 unsigned int buflen; 322 unsigned int buflen;
326 323
327 int transport_type; 324 int transport_type;
@@ -346,6 +343,9 @@ module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
346 S_IRUGO); 343 S_IRUGO);
347MODULE_PARM_DESC(file, "names of backing files or devices"); 344MODULE_PARM_DESC(file, "names of backing files or devices");
348 345
346module_param_named(serial, mod_data.serial, charp, S_IRUGO);
347MODULE_PARM_DESC(serial, "USB serial number");
348
349module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO); 349module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
350MODULE_PARM_DESC(ro, "true to force read-only"); 350MODULE_PARM_DESC(ro, "true to force read-only");
351 351
@@ -365,9 +365,6 @@ MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
365module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO); 365module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
366MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk"); 366MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
367 367
368module_param_named(serial, mod_data.serial, charp, S_IRUGO);
369MODULE_PARM_DESC(serial, "USB serial number");
370
371/* In the non-TEST version, only the module parameters listed above 368/* In the non-TEST version, only the module parameters listed above
372 * are available. */ 369 * are available. */
373#ifdef CONFIG_USB_FILE_STORAGE_TEST 370#ifdef CONFIG_USB_FILE_STORAGE_TEST
@@ -786,7 +783,7 @@ static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
786{ 783{
787 struct usb_request *req = fsg->ep0req; 784 struct usb_request *req = fsg->ep0req;
788 static u8 cbi_reset_cmnd[6] = { 785 static u8 cbi_reset_cmnd[6] = {
789 SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff}; 786 SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
790 787
791 /* Error in command transfer? */ 788 /* Error in command transfer? */
792 if (req->status || req->length != req->actual || 789 if (req->status || req->length != req->actual ||
@@ -1138,7 +1135,7 @@ static int do_read(struct fsg_dev *fsg)
1138 1135
1139 /* Get the starting Logical Block Address and check that it's 1136 /* Get the starting Logical Block Address and check that it's
1140 * not too big */ 1137 * not too big */
1141 if (fsg->cmnd[0] == SC_READ_6) 1138 if (fsg->cmnd[0] == READ_6)
1142 lba = get_unaligned_be24(&fsg->cmnd[1]); 1139 lba = get_unaligned_be24(&fsg->cmnd[1]);
1143 else { 1140 else {
1144 lba = get_unaligned_be32(&fsg->cmnd[2]); 1141 lba = get_unaligned_be32(&fsg->cmnd[2]);
@@ -1273,7 +1270,7 @@ static int do_write(struct fsg_dev *fsg)
1273 1270
1274 /* Get the starting Logical Block Address and check that it's 1271 /* Get the starting Logical Block Address and check that it's
1275 * not too big */ 1272 * not too big */
1276 if (fsg->cmnd[0] == SC_WRITE_6) 1273 if (fsg->cmnd[0] == WRITE_6)
1277 lba = get_unaligned_be24(&fsg->cmnd[1]); 1274 lba = get_unaligned_be24(&fsg->cmnd[1]);
1278 else { 1275 else {
1279 lba = get_unaligned_be32(&fsg->cmnd[2]); 1276 lba = get_unaligned_be32(&fsg->cmnd[2]);
@@ -1581,7 +1578,7 @@ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1581 } 1578 }
1582 1579
1583 memset(buf, 0, 8); 1580 memset(buf, 0, 8);
1584 buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK); 1581 buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK);
1585 if (mod_data.removable) 1582 if (mod_data.removable)
1586 buf[1] = 0x80; 1583 buf[1] = 0x80;
1587 buf[2] = 2; // ANSI SCSI level 2 1584 buf[2] = 2; // ANSI SCSI level 2
@@ -1750,11 +1747,11 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1750 * The only variable value is the WriteProtect bit. We will fill in 1747 * The only variable value is the WriteProtect bit. We will fill in
1751 * the mode data length later. */ 1748 * the mode data length later. */
1752 memset(buf, 0, 8); 1749 memset(buf, 0, 8);
1753 if (mscmnd == SC_MODE_SENSE_6) { 1750 if (mscmnd == MODE_SENSE) {
1754 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 1751 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1755 buf += 4; 1752 buf += 4;
1756 limit = 255; 1753 limit = 255;
1757 } else { // SC_MODE_SENSE_10 1754 } else { // MODE_SENSE_10
1758 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 1755 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1759 buf += 8; 1756 buf += 8;
1760 limit = 65535; // Should really be mod_data.buflen 1757 limit = 65535; // Should really be mod_data.buflen
@@ -1794,7 +1791,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1794 } 1791 }
1795 1792
1796 /* Store the mode data length */ 1793 /* Store the mode data length */
1797 if (mscmnd == SC_MODE_SENSE_6) 1794 if (mscmnd == MODE_SENSE)
1798 buf0[0] = len - 1; 1795 buf0[0] = len - 1;
1799 else 1796 else
1800 put_unaligned_be16(len - 2, buf0); 1797 put_unaligned_be16(len - 2, buf0);
@@ -2319,7 +2316,7 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
2319 /* Check the LUN */ 2316 /* Check the LUN */
2320 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) { 2317 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
2321 fsg->curlun = curlun = &fsg->luns[fsg->lun]; 2318 fsg->curlun = curlun = &fsg->luns[fsg->lun];
2322 if (fsg->cmnd[0] != SC_REQUEST_SENSE) { 2319 if (fsg->cmnd[0] != REQUEST_SENSE) {
2323 curlun->sense_data = SS_NO_SENSE; 2320 curlun->sense_data = SS_NO_SENSE;
2324 curlun->sense_data_info = 0; 2321 curlun->sense_data_info = 0;
2325 curlun->info_valid = 0; 2322 curlun->info_valid = 0;
@@ -2330,8 +2327,8 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
2330 2327
2331 /* INQUIRY and REQUEST SENSE commands are explicitly allowed 2328 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2332 * to use unsupported LUNs; all others may not. */ 2329 * to use unsupported LUNs; all others may not. */
2333 if (fsg->cmnd[0] != SC_INQUIRY && 2330 if (fsg->cmnd[0] != INQUIRY &&
2334 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2331 fsg->cmnd[0] != REQUEST_SENSE) {
2335 DBG(fsg, "unsupported LUN %d\n", fsg->lun); 2332 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2336 return -EINVAL; 2333 return -EINVAL;
2337 } 2334 }
@@ -2340,8 +2337,8 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
2340 /* If a unit attention condition exists, only INQUIRY and 2337 /* If a unit attention condition exists, only INQUIRY and
2341 * REQUEST SENSE commands are allowed; anything else must fail. */ 2338 * REQUEST SENSE commands are allowed; anything else must fail. */
2342 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 2339 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2343 fsg->cmnd[0] != SC_INQUIRY && 2340 fsg->cmnd[0] != INQUIRY &&
2344 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2341 fsg->cmnd[0] != REQUEST_SENSE) {
2345 curlun->sense_data = curlun->unit_attention_data; 2342 curlun->sense_data = curlun->unit_attention_data;
2346 curlun->unit_attention_data = SS_NO_SENSE; 2343 curlun->unit_attention_data = SS_NO_SENSE;
2347 return -EINVAL; 2344 return -EINVAL;
@@ -2391,7 +2388,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2391 down_read(&fsg->filesem); // We're using the backing file 2388 down_read(&fsg->filesem); // We're using the backing file
2392 switch (fsg->cmnd[0]) { 2389 switch (fsg->cmnd[0]) {
2393 2390
2394 case SC_INQUIRY: 2391 case INQUIRY:
2395 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2392 fsg->data_size_from_cmnd = fsg->cmnd[4];
2396 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2393 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2397 (1<<4), 0, 2394 (1<<4), 0,
@@ -2399,7 +2396,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2399 reply = do_inquiry(fsg, bh); 2396 reply = do_inquiry(fsg, bh);
2400 break; 2397 break;
2401 2398
2402 case SC_MODE_SELECT_6: 2399 case MODE_SELECT:
2403 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2400 fsg->data_size_from_cmnd = fsg->cmnd[4];
2404 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2401 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2405 (1<<1) | (1<<4), 0, 2402 (1<<1) | (1<<4), 0,
@@ -2407,7 +2404,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2407 reply = do_mode_select(fsg, bh); 2404 reply = do_mode_select(fsg, bh);
2408 break; 2405 break;
2409 2406
2410 case SC_MODE_SELECT_10: 2407 case MODE_SELECT_10:
2411 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2408 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2412 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2409 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2413 (1<<1) | (3<<7), 0, 2410 (1<<1) | (3<<7), 0,
@@ -2415,7 +2412,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2415 reply = do_mode_select(fsg, bh); 2412 reply = do_mode_select(fsg, bh);
2416 break; 2413 break;
2417 2414
2418 case SC_MODE_SENSE_6: 2415 case MODE_SENSE:
2419 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2416 fsg->data_size_from_cmnd = fsg->cmnd[4];
2420 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2417 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2421 (1<<1) | (1<<2) | (1<<4), 0, 2418 (1<<1) | (1<<2) | (1<<4), 0,
@@ -2423,7 +2420,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2423 reply = do_mode_sense(fsg, bh); 2420 reply = do_mode_sense(fsg, bh);
2424 break; 2421 break;
2425 2422
2426 case SC_MODE_SENSE_10: 2423 case MODE_SENSE_10:
2427 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2424 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2428 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2425 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2429 (1<<1) | (1<<2) | (3<<7), 0, 2426 (1<<1) | (1<<2) | (3<<7), 0,
@@ -2431,7 +2428,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2431 reply = do_mode_sense(fsg, bh); 2428 reply = do_mode_sense(fsg, bh);
2432 break; 2429 break;
2433 2430
2434 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL: 2431 case ALLOW_MEDIUM_REMOVAL:
2435 fsg->data_size_from_cmnd = 0; 2432 fsg->data_size_from_cmnd = 0;
2436 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2433 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2437 (1<<4), 0, 2434 (1<<4), 0,
@@ -2439,7 +2436,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2439 reply = do_prevent_allow(fsg); 2436 reply = do_prevent_allow(fsg);
2440 break; 2437 break;
2441 2438
2442 case SC_READ_6: 2439 case READ_6:
2443 i = fsg->cmnd[4]; 2440 i = fsg->cmnd[4];
2444 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2441 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2445 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2442 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
@@ -2448,7 +2445,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2448 reply = do_read(fsg); 2445 reply = do_read(fsg);
2449 break; 2446 break;
2450 2447
2451 case SC_READ_10: 2448 case READ_10:
2452 fsg->data_size_from_cmnd = 2449 fsg->data_size_from_cmnd =
2453 get_unaligned_be16(&fsg->cmnd[7]) << 9; 2450 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2454 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2451 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
@@ -2457,7 +2454,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2457 reply = do_read(fsg); 2454 reply = do_read(fsg);
2458 break; 2455 break;
2459 2456
2460 case SC_READ_12: 2457 case READ_12:
2461 fsg->data_size_from_cmnd = 2458 fsg->data_size_from_cmnd =
2462 get_unaligned_be32(&fsg->cmnd[6]) << 9; 2459 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2463 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST, 2460 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
@@ -2466,7 +2463,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2466 reply = do_read(fsg); 2463 reply = do_read(fsg);
2467 break; 2464 break;
2468 2465
2469 case SC_READ_CAPACITY: 2466 case READ_CAPACITY:
2470 fsg->data_size_from_cmnd = 8; 2467 fsg->data_size_from_cmnd = 8;
2471 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2468 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2472 (0xf<<2) | (1<<8), 1, 2469 (0xf<<2) | (1<<8), 1,
@@ -2474,7 +2471,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2474 reply = do_read_capacity(fsg, bh); 2471 reply = do_read_capacity(fsg, bh);
2475 break; 2472 break;
2476 2473
2477 case SC_READ_HEADER: 2474 case READ_HEADER:
2478 if (!mod_data.cdrom) 2475 if (!mod_data.cdrom)
2479 goto unknown_cmnd; 2476 goto unknown_cmnd;
2480 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2477 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
@@ -2484,7 +2481,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2484 reply = do_read_header(fsg, bh); 2481 reply = do_read_header(fsg, bh);
2485 break; 2482 break;
2486 2483
2487 case SC_READ_TOC: 2484 case READ_TOC:
2488 if (!mod_data.cdrom) 2485 if (!mod_data.cdrom)
2489 goto unknown_cmnd; 2486 goto unknown_cmnd;
2490 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2487 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
@@ -2494,7 +2491,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2494 reply = do_read_toc(fsg, bh); 2491 reply = do_read_toc(fsg, bh);
2495 break; 2492 break;
2496 2493
2497 case SC_READ_FORMAT_CAPACITIES: 2494 case READ_FORMAT_CAPACITIES:
2498 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2495 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2499 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2496 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2500 (3<<7), 1, 2497 (3<<7), 1,
@@ -2502,7 +2499,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2502 reply = do_read_format_capacities(fsg, bh); 2499 reply = do_read_format_capacities(fsg, bh);
2503 break; 2500 break;
2504 2501
2505 case SC_REQUEST_SENSE: 2502 case REQUEST_SENSE:
2506 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2503 fsg->data_size_from_cmnd = fsg->cmnd[4];
2507 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2504 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2508 (1<<4), 0, 2505 (1<<4), 0,
@@ -2510,7 +2507,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2510 reply = do_request_sense(fsg, bh); 2507 reply = do_request_sense(fsg, bh);
2511 break; 2508 break;
2512 2509
2513 case SC_START_STOP_UNIT: 2510 case START_STOP:
2514 fsg->data_size_from_cmnd = 0; 2511 fsg->data_size_from_cmnd = 0;
2515 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2512 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2516 (1<<1) | (1<<4), 0, 2513 (1<<1) | (1<<4), 0,
@@ -2518,7 +2515,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2518 reply = do_start_stop(fsg); 2515 reply = do_start_stop(fsg);
2519 break; 2516 break;
2520 2517
2521 case SC_SYNCHRONIZE_CACHE: 2518 case SYNCHRONIZE_CACHE:
2522 fsg->data_size_from_cmnd = 0; 2519 fsg->data_size_from_cmnd = 0;
2523 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2520 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2524 (0xf<<2) | (3<<7), 1, 2521 (0xf<<2) | (3<<7), 1,
@@ -2526,7 +2523,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2526 reply = do_synchronize_cache(fsg); 2523 reply = do_synchronize_cache(fsg);
2527 break; 2524 break;
2528 2525
2529 case SC_TEST_UNIT_READY: 2526 case TEST_UNIT_READY:
2530 fsg->data_size_from_cmnd = 0; 2527 fsg->data_size_from_cmnd = 0;
2531 reply = check_command(fsg, 6, DATA_DIR_NONE, 2528 reply = check_command(fsg, 6, DATA_DIR_NONE,
2532 0, 1, 2529 0, 1,
@@ -2535,7 +2532,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2535 2532
2536 /* Although optional, this command is used by MS-Windows. We 2533 /* Although optional, this command is used by MS-Windows. We
2537 * support a minimal version: BytChk must be 0. */ 2534 * support a minimal version: BytChk must be 0. */
2538 case SC_VERIFY: 2535 case VERIFY:
2539 fsg->data_size_from_cmnd = 0; 2536 fsg->data_size_from_cmnd = 0;
2540 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2537 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2541 (1<<1) | (0xf<<2) | (3<<7), 1, 2538 (1<<1) | (0xf<<2) | (3<<7), 1,
@@ -2543,7 +2540,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2543 reply = do_verify(fsg); 2540 reply = do_verify(fsg);
2544 break; 2541 break;
2545 2542
2546 case SC_WRITE_6: 2543 case WRITE_6:
2547 i = fsg->cmnd[4]; 2544 i = fsg->cmnd[4];
2548 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2545 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2549 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2546 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
@@ -2552,7 +2549,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2552 reply = do_write(fsg); 2549 reply = do_write(fsg);
2553 break; 2550 break;
2554 2551
2555 case SC_WRITE_10: 2552 case WRITE_10:
2556 fsg->data_size_from_cmnd = 2553 fsg->data_size_from_cmnd =
2557 get_unaligned_be16(&fsg->cmnd[7]) << 9; 2554 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2558 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2555 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
@@ -2561,7 +2558,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2561 reply = do_write(fsg); 2558 reply = do_write(fsg);
2562 break; 2559 break;
2563 2560
2564 case SC_WRITE_12: 2561 case WRITE_12:
2565 fsg->data_size_from_cmnd = 2562 fsg->data_size_from_cmnd =
2566 get_unaligned_be32(&fsg->cmnd[6]) << 9; 2563 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2567 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST, 2564 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
@@ -2574,10 +2571,10 @@ static int do_scsi_command(struct fsg_dev *fsg)
2574 * They don't mean much in this setting. It's left as an exercise 2571 * They don't mean much in this setting. It's left as an exercise
2575 * for anyone interested to implement RESERVE and RELEASE in terms 2572 * for anyone interested to implement RESERVE and RELEASE in terms
2576 * of Posix locks. */ 2573 * of Posix locks. */
2577 case SC_FORMAT_UNIT: 2574 case FORMAT_UNIT:
2578 case SC_RELEASE: 2575 case RELEASE:
2579 case SC_RESERVE: 2576 case RESERVE:
2580 case SC_SEND_DIAGNOSTIC: 2577 case SEND_DIAGNOSTIC:
2581 // Fall through 2578 // Fall through
2582 2579
2583 default: 2580 default:
@@ -3178,6 +3175,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3178 for (i = 0; i < fsg->nluns; ++i) { 3175 for (i = 0; i < fsg->nluns; ++i) {
3179 curlun = &fsg->luns[i]; 3176 curlun = &fsg->luns[i];
3180 if (curlun->registered) { 3177 if (curlun->registered) {
3178 device_remove_file(&curlun->dev, &dev_attr_nofua);
3181 device_remove_file(&curlun->dev, &dev_attr_ro); 3179 device_remove_file(&curlun->dev, &dev_attr_ro);
3182 device_remove_file(&curlun->dev, &dev_attr_file); 3180 device_remove_file(&curlun->dev, &dev_attr_file);
3183 fsg_lun_close(curlun); 3181 fsg_lun_close(curlun);
@@ -3213,7 +3211,6 @@ static int __init check_parameters(struct fsg_dev *fsg)
3213{ 3211{
3214 int prot; 3212 int prot;
3215 int gcnum; 3213 int gcnum;
3216 int i;
3217 3214
3218 /* Store the default values */ 3215 /* Store the default values */
3219 mod_data.transport_type = USB_PR_BULK; 3216 mod_data.transport_type = USB_PR_BULK;
@@ -3309,45 +3306,29 @@ static int __init check_parameters(struct fsg_dev *fsg)
3309 if ((*ch < '0' || *ch > '9') && 3306 if ((*ch < '0' || *ch > '9') &&
3310 (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */ 3307 (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
3311 WARNING(fsg, 3308 WARNING(fsg,
3312 "Invalid serial string character: %c; " 3309 "Invalid serial string character: %c\n",
3313 "Failing back to default\n",
3314 *ch); 3310 *ch);
3315 goto fill_serial; 3311 goto no_serial;
3316 } 3312 }
3317 } 3313 }
3318 if (len > 126 || 3314 if (len > 126 ||
3319 (mod_data.transport_type == USB_PR_BULK && len < 12) || 3315 (mod_data.transport_type == USB_PR_BULK && len < 12) ||
3320 (mod_data.transport_type != USB_PR_BULK && len > 12)) { 3316 (mod_data.transport_type != USB_PR_BULK && len > 12)) {
3321 WARNING(fsg, 3317 WARNING(fsg, "Invalid serial string length!\n");
3322 "Invalid serial string length; " 3318 goto no_serial;
3323 "Failing back to default\n");
3324 goto fill_serial;
3325 } 3319 }
3326 fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial; 3320 fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
3327 } else { 3321 } else {
3328 WARNING(fsg, 3322 WARNING(fsg, "No serial-number string provided!\n");
3329 "Userspace failed to provide serial number; " 3323 no_serial:
3330 "Failing back to default\n"); 3324 device_desc.iSerialNumber = 0;
3331fill_serial:
3332 /* Serial number not specified or invalid, make our own.
3333 * We just encode it from the driver version string,
3334 * 12 characters to comply with both CB[I] and BBB spec.
3335 * Warning : Two devices running the same kernel will have
3336 * the same fallback serial number. */
3337 for (i = 0; i < 12; i += 2) {
3338 unsigned char c = DRIVER_VERSION[i / 2];
3339
3340 if (!c)
3341 break;
3342 sprintf(&fsg_string_serial[i], "%02X", c);
3343 }
3344 } 3325 }
3345 3326
3346 return 0; 3327 return 0;
3347} 3328}
3348 3329
3349 3330
3350static int __ref fsg_bind(struct usb_gadget *gadget) 3331static int __init fsg_bind(struct usb_gadget *gadget)
3351{ 3332{
3352 struct fsg_dev *fsg = the_fsg; 3333 struct fsg_dev *fsg = the_fsg;
3353 int rc; 3334 int rc;
@@ -3607,7 +3588,6 @@ static struct usb_gadget_driver fsg_driver = {
3607 .speed = USB_SPEED_FULL, 3588 .speed = USB_SPEED_FULL,
3608#endif 3589#endif
3609 .function = (char *) fsg_string_product, 3590 .function = (char *) fsg_string_product,
3610 .bind = fsg_bind,
3611 .unbind = fsg_unbind, 3591 .unbind = fsg_unbind,
3612 .disconnect = fsg_disconnect, 3592 .disconnect = fsg_disconnect,
3613 .setup = fsg_setup, 3593 .setup = fsg_setup,
@@ -3649,7 +3629,7 @@ static int __init fsg_init(void)
3649 if ((rc = fsg_alloc()) != 0) 3629 if ((rc = fsg_alloc()) != 0)
3650 return rc; 3630 return rc;
3651 fsg = the_fsg; 3631 fsg = the_fsg;
3652 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) 3632 if ((rc = usb_gadget_probe_driver(&fsg_driver, fsg_bind)) != 0)
3653 kref_put(&fsg->ref, fsg_release); 3633 kref_put(&fsg->ref, fsg_release);
3654 return rc; 3634 return rc;
3655} 3635}
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index eafa6d2c5ed7..5bdbfe619853 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -22,6 +22,10 @@
22static struct clk *mxc_ahb_clk; 22static struct clk *mxc_ahb_clk;
23static struct clk *mxc_usb_clk; 23static struct clk *mxc_usb_clk;
24 24
25/* workaround ENGcm09152 for i.MX35 */
26#define USBPHYCTRL_OTGBASE_OFFSET 0x608
27#define USBPHYCTRL_EVDO (1 << 23)
28
25int fsl_udc_clk_init(struct platform_device *pdev) 29int fsl_udc_clk_init(struct platform_device *pdev)
26{ 30{
27 struct fsl_usb2_platform_data *pdata; 31 struct fsl_usb2_platform_data *pdata;
@@ -84,6 +88,17 @@ eenahb:
84void fsl_udc_clk_finalize(struct platform_device *pdev) 88void fsl_udc_clk_finalize(struct platform_device *pdev)
85{ 89{
86 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 90 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
91#if defined(CONFIG_ARCH_MX35)
92 unsigned int v;
93
94 /* workaround ENGcm09152 for i.MX35 */
95 if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
96 v = readl(MX35_IO_ADDRESS(MX35_OTG_BASE_ADDR +
97 USBPHYCTRL_OTGBASE_OFFSET));
98 writel(v | USBPHYCTRL_EVDO, MX35_IO_ADDRESS(MX35_OTG_BASE_ADDR +
99 USBPHYCTRL_OTGBASE_OFFSET));
100 }
101#endif
87 102
88 /* ULPI transceivers don't need usbpll */ 103 /* ULPI transceivers don't need usbpll */
89 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { 104 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index a5ea2c1d8c93..792d5ef40137 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2302,9 +2302,10 @@ static irqreturn_t qe_udc_irq(int irq, void *_udc)
2302} 2302}
2303 2303
2304/*------------------------------------------------------------------------- 2304/*-------------------------------------------------------------------------
2305 Gadget driver register and unregister. 2305 Gadget driver probe and unregister.
2306 --------------------------------------------------------------------------*/ 2306 --------------------------------------------------------------------------*/
2307int usb_gadget_register_driver(struct usb_gadget_driver *driver) 2307int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2308 int (*bind)(struct usb_gadget *))
2308{ 2309{
2309 int retval; 2310 int retval;
2310 unsigned long flags = 0; 2311 unsigned long flags = 0;
@@ -2315,8 +2316,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2315 2316
2316 if (!driver || (driver->speed != USB_SPEED_FULL 2317 if (!driver || (driver->speed != USB_SPEED_FULL
2317 && driver->speed != USB_SPEED_HIGH) 2318 && driver->speed != USB_SPEED_HIGH)
2318 || !driver->bind || !driver->disconnect 2319 || !bind || !driver->disconnect || !driver->setup)
2319 || !driver->setup)
2320 return -EINVAL; 2320 return -EINVAL;
2321 2321
2322 if (udc_controller->driver) 2322 if (udc_controller->driver)
@@ -2332,7 +2332,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2332 udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed); 2332 udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
2333 spin_unlock_irqrestore(&udc_controller->lock, flags); 2333 spin_unlock_irqrestore(&udc_controller->lock, flags);
2334 2334
2335 retval = driver->bind(&udc_controller->gadget); 2335 retval = bind(&udc_controller->gadget);
2336 if (retval) { 2336 if (retval) {
2337 dev_err(udc_controller->dev, "bind to %s --> %d", 2337 dev_err(udc_controller->dev, "bind to %s --> %d",
2338 driver->driver.name, retval); 2338 driver->driver.name, retval);
@@ -2353,7 +2353,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2353 udc_controller->gadget.name, driver->driver.name); 2353 udc_controller->gadget.name, driver->driver.name);
2354 return 0; 2354 return 0;
2355} 2355}
2356EXPORT_SYMBOL(usb_gadget_register_driver); 2356EXPORT_SYMBOL(usb_gadget_probe_driver);
2357 2357
2358int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2358int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2359{ 2359{
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 08a9a62a39e3..c16b402a876b 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -1765,7 +1765,8 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
1765 * Hook to gadget drivers 1765 * Hook to gadget drivers
1766 * Called by initialization code of gadget drivers 1766 * Called by initialization code of gadget drivers
1767*----------------------------------------------------------------*/ 1767*----------------------------------------------------------------*/
1768int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1768int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1769 int (*bind)(struct usb_gadget *))
1769{ 1770{
1770 int retval = -ENODEV; 1771 int retval = -ENODEV;
1771 unsigned long flags = 0; 1772 unsigned long flags = 0;
@@ -1775,8 +1776,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1775 1776
1776 if (!driver || (driver->speed != USB_SPEED_FULL 1777 if (!driver || (driver->speed != USB_SPEED_FULL
1777 && driver->speed != USB_SPEED_HIGH) 1778 && driver->speed != USB_SPEED_HIGH)
1778 || !driver->bind || !driver->disconnect 1779 || !bind || !driver->disconnect || !driver->setup)
1779 || !driver->setup)
1780 return -EINVAL; 1780 return -EINVAL;
1781 1781
1782 if (udc_controller->driver) 1782 if (udc_controller->driver)
@@ -1792,7 +1792,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1792 spin_unlock_irqrestore(&udc_controller->lock, flags); 1792 spin_unlock_irqrestore(&udc_controller->lock, flags);
1793 1793
1794 /* bind udc driver to gadget driver */ 1794 /* bind udc driver to gadget driver */
1795 retval = driver->bind(&udc_controller->gadget); 1795 retval = bind(&udc_controller->gadget);
1796 if (retval) { 1796 if (retval) {
1797 VDBG("bind to %s --> %d", driver->driver.name, retval); 1797 VDBG("bind to %s --> %d", driver->driver.name, retval);
1798 udc_controller->gadget.dev.driver = NULL; 1798 udc_controller->gadget.dev.driver = NULL;
@@ -1814,7 +1814,7 @@ out:
1814 retval); 1814 retval);
1815 return retval; 1815 return retval;
1816} 1816}
1817EXPORT_SYMBOL(usb_gadget_register_driver); 1817EXPORT_SYMBOL(usb_gadget_probe_driver);
1818 1818
1819/* Disconnect from gadget driver */ 1819/* Disconnect from gadget driver */
1820int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1820int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index a9474f8d5325..af75e3620849 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -52,9 +52,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
52MODULE_AUTHOR("Michal Nazarewicz"); 52MODULE_AUTHOR("Michal Nazarewicz");
53MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54 54
55 55#define GFS_VENDOR_ID 0x1d6b /* Linux Foundation */
56static unsigned short gfs_vendor_id = 0x0525; /* XXX NetChip */ 56#define GFS_PRODUCT_ID 0x0105 /* FunctionFS Gadget */
57static unsigned short gfs_product_id = 0xa4ac; /* XXX */
58 57
59static struct usb_device_descriptor gfs_dev_desc = { 58static struct usb_device_descriptor gfs_dev_desc = {
60 .bLength = sizeof gfs_dev_desc, 59 .bLength = sizeof gfs_dev_desc,
@@ -63,29 +62,16 @@ static struct usb_device_descriptor gfs_dev_desc = {
63 .bcdUSB = cpu_to_le16(0x0200), 62 .bcdUSB = cpu_to_le16(0x0200),
64 .bDeviceClass = USB_CLASS_PER_INTERFACE, 63 .bDeviceClass = USB_CLASS_PER_INTERFACE,
65 64
66 /* Vendor and product id can be overridden by module parameters. */ 65 .idVendor = cpu_to_le16(GFS_VENDOR_ID),
67 /* .idVendor = cpu_to_le16(gfs_vendor_id), */ 66 .idProduct = cpu_to_le16(GFS_PRODUCT_ID),
68 /* .idProduct = cpu_to_le16(gfs_product_id), */
69 /* .bcdDevice = f(hardware) */
70 /* .iManufacturer = DYNAMIC */
71 /* .iProduct = DYNAMIC */
72 /* NO SERIAL NUMBER */
73 .bNumConfigurations = 1,
74}; 67};
75 68
76#define GFS_MODULE_PARAM_DESC(name, field) \ 69module_param_named(bDeviceClass, gfs_dev_desc.bDeviceClass, byte, 0644);
77 MODULE_PARM_DESC(name, "Value of the " #field " field of the device descriptor sent to the host. Takes effect only prior to the user-space driver registering to the FunctionFS.") 70MODULE_PARM_DESC(bDeviceClass, "USB Device class");
78 71module_param_named(bDeviceSubClass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
79module_param_named(usb_class, gfs_dev_desc.bDeviceClass, byte, 0644); 72MODULE_PARM_DESC(bDeviceSubClass, "USB Device subclass");
80GFS_MODULE_PARAM_DESC(usb_class, bDeviceClass); 73module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
81module_param_named(usb_subclass, gfs_dev_desc.bDeviceSubClass, byte, 0644); 74MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
82GFS_MODULE_PARAM_DESC(usb_subclass, bDeviceSubClass);
83module_param_named(usb_protocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
84GFS_MODULE_PARAM_DESC(usb_protocol, bDeviceProtocol);
85module_param_named(usb_vendor, gfs_vendor_id, ushort, 0644);
86GFS_MODULE_PARAM_DESC(usb_vendor, idVendor);
87module_param_named(usb_product, gfs_product_id, ushort, 0644);
88GFS_MODULE_PARAM_DESC(usb_product, idProduct);
89 75
90 76
91 77
@@ -95,8 +81,10 @@ static const struct usb_descriptor_header *gfs_otg_desc[] = {
95 .bLength = sizeof(struct usb_otg_descriptor), 81 .bLength = sizeof(struct usb_otg_descriptor),
96 .bDescriptorType = USB_DT_OTG, 82 .bDescriptorType = USB_DT_OTG,
97 83
98 /* REVISIT SRP-only hardware is possible, although 84 /*
99 * it would not be called "OTG" ... */ 85 * REVISIT SRP-only hardware is possible, although
86 * it would not be called "OTG" ...
87 */
100 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, 88 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
101 }, 89 },
102 90
@@ -105,19 +93,7 @@ static const struct usb_descriptor_header *gfs_otg_desc[] = {
105 93
106/* string IDs are assigned dynamically */ 94/* string IDs are assigned dynamically */
107 95
108enum {
109 GFS_STRING_MANUFACTURER_IDX,
110 GFS_STRING_PRODUCT_IDX,
111 GFS_STRING_FIRST_CONFIG_IDX,
112};
113
114static char gfs_manufacturer[50];
115static const char gfs_driver_desc[] = DRIVER_DESC;
116static const char gfs_short_name[] = DRIVER_NAME;
117
118static struct usb_string gfs_strings[] = { 96static struct usb_string gfs_strings[] = {
119 [GFS_STRING_MANUFACTURER_IDX].s = gfs_manufacturer,
120 [GFS_STRING_PRODUCT_IDX].s = gfs_driver_desc,
121#ifdef CONFIG_USB_FUNCTIONFS_RNDIS 97#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
122 { .s = "FunctionFS + RNDIS" }, 98 { .s = "FunctionFS + RNDIS" },
123#endif 99#endif
@@ -168,11 +144,11 @@ static int gfs_unbind(struct usb_composite_dev *cdev);
168static int gfs_do_config(struct usb_configuration *c); 144static int gfs_do_config(struct usb_configuration *c);
169 145
170static struct usb_composite_driver gfs_driver = { 146static struct usb_composite_driver gfs_driver = {
171 .name = gfs_short_name, 147 .name = DRIVER_NAME,
172 .dev = &gfs_dev_desc, 148 .dev = &gfs_dev_desc,
173 .strings = gfs_dev_strings, 149 .strings = gfs_dev_strings,
174 .bind = gfs_bind,
175 .unbind = gfs_unbind, 150 .unbind = gfs_unbind,
151 .iProduct = DRIVER_DESC,
176}; 152};
177 153
178 154
@@ -210,7 +186,7 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
210 return -EBUSY; 186 return -EBUSY;
211 187
212 gfs_ffs_data = ffs; 188 gfs_ffs_data = ffs;
213 ret = usb_composite_register(&gfs_driver); 189 ret = usb_composite_probe(&gfs_driver, gfs_bind);
214 if (unlikely(ret < 0)) 190 if (unlikely(ret < 0))
215 clear_bit(0, &gfs_registered); 191 clear_bit(0, &gfs_registered);
216 return ret; 192 return ret;
@@ -245,20 +221,10 @@ static int gfs_bind(struct usb_composite_dev *cdev)
245 if (unlikely(ret < 0)) 221 if (unlikely(ret < 0))
246 goto error_quick; 222 goto error_quick;
247 223
248 gfs_dev_desc.idVendor = cpu_to_le16(gfs_vendor_id);
249 gfs_dev_desc.idProduct = cpu_to_le16(gfs_product_id);
250
251 snprintf(gfs_manufacturer, sizeof gfs_manufacturer, "%s %s with %s",
252 init_utsname()->sysname, init_utsname()->release,
253 cdev->gadget->name);
254
255 ret = usb_string_ids_tab(cdev, gfs_strings); 224 ret = usb_string_ids_tab(cdev, gfs_strings);
256 if (unlikely(ret < 0)) 225 if (unlikely(ret < 0))
257 goto error; 226 goto error;
258 227
259 gfs_dev_desc.iManufacturer = gfs_strings[GFS_STRING_MANUFACTURER_IDX].id;
260 gfs_dev_desc.iProduct = gfs_strings[GFS_STRING_PRODUCT_IDX].id;
261
262 ret = functionfs_bind(gfs_ffs_data, cdev); 228 ret = functionfs_bind(gfs_ffs_data, cdev);
263 if (unlikely(ret < 0)) 229 if (unlikely(ret < 0))
264 goto error; 230 goto error;
@@ -266,14 +232,12 @@ static int gfs_bind(struct usb_composite_dev *cdev)
266 for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) { 232 for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
267 struct gfs_configuration *c = gfs_configurations + i; 233 struct gfs_configuration *c = gfs_configurations + i;
268 234
269 ret = GFS_STRING_FIRST_CONFIG_IDX + i; 235 c->c.label = gfs_strings[i].s;
270 c->c.label = gfs_strings[ret].s; 236 c->c.iConfiguration = gfs_strings[i].id;
271 c->c.iConfiguration = gfs_strings[ret].id;
272 c->c.bind = gfs_do_config;
273 c->c.bConfigurationValue = 1 + i; 237 c->c.bConfigurationValue = 1 + i;
274 c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER; 238 c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER;
275 239
276 ret = usb_add_config(cdev, &c->c); 240 ret = usb_add_config(cdev, &c->c, gfs_do_config);
277 if (unlikely(ret < 0)) 241 if (unlikely(ret < 0))
278 goto error_unbind; 242 goto error_unbind;
279 } 243 }
@@ -293,13 +257,14 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
293{ 257{
294 ENTER(); 258 ENTER();
295 259
296 /* We may have been called in an error recovery frem 260 /*
261 * We may have been called in an error recovery from
297 * composite_bind() after gfs_unbind() failure so we need to 262 * composite_bind() after gfs_unbind() failure so we need to
298 * check if gfs_ffs_data is not NULL since gfs_bind() handles 263 * check if gfs_ffs_data is not NULL since gfs_bind() handles
299 * all error recovery itself. I'd rather we werent called 264 * all error recovery itself. I'd rather we werent called
300 * from composite on orror recovery, but what you're gonna 265 * from composite on orror recovery, but what you're gonna
301 * do...? */ 266 * do...?
302 267 */
303 if (gfs_ffs_data) { 268 if (gfs_ffs_data) {
304 gether_cleanup(); 269 gether_cleanup();
305 functionfs_unbind(gfs_ffs_data); 270 functionfs_unbind(gfs_ffs_data);
@@ -334,14 +299,16 @@ static int gfs_do_config(struct usb_configuration *c)
334 if (unlikely(ret < 0)) 299 if (unlikely(ret < 0))
335 return ret; 300 return ret;
336 301
337 /* After previous do_configs there may be some invalid 302 /*
303 * After previous do_configs there may be some invalid
338 * pointers in c->interface array. This happens every time 304 * pointers in c->interface array. This happens every time
339 * a user space function with fewer interfaces than a user 305 * a user space function with fewer interfaces than a user
340 * space function that was run before the new one is run. The 306 * space function that was run before the new one is run. The
341 * compasit's set_config() assumes that if there is no more 307 * compasit's set_config() assumes that if there is no more
342 * then MAX_CONFIG_INTERFACES interfaces in a configuration 308 * then MAX_CONFIG_INTERFACES interfaces in a configuration
343 * then there is a NULL pointer after the last interface in 309 * then there is a NULL pointer after the last interface in
344 * c->interface array. We need to make sure this is true. */ 310 * c->interface array. We need to make sure this is true.
311 */
345 if (c->next_interface_id < ARRAY_SIZE(c->interface)) 312 if (c->next_interface_id < ARRAY_SIZE(c->interface))
346 c->interface[c->next_interface_id] = NULL; 313 c->interface[c->next_interface_id] = NULL;
347 314
@@ -350,10 +317,12 @@ static int gfs_do_config(struct usb_configuration *c)
350 317
351 318
352#ifdef CONFIG_USB_FUNCTIONFS_ETH 319#ifdef CONFIG_USB_FUNCTIONFS_ETH
320
353static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) 321static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
354{ 322{
355 return can_support_ecm(c->cdev->gadget) 323 return can_support_ecm(c->cdev->gadget)
356 ? ecm_bind_config(c, ethaddr) 324 ? ecm_bind_config(c, ethaddr)
357 : geth_bind_config(c, ethaddr); 325 : geth_bind_config(c, ethaddr);
358} 326}
327
359#endif 328#endif
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 1b413a5cc3f6..0ab7e141d494 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -1157,7 +1157,7 @@ fail:
1157/* 1157/*
1158 * Creates an output endpoint, and initializes output ports. 1158 * Creates an output endpoint, and initializes output ports.
1159 */ 1159 */
1160static int __ref gmidi_bind(struct usb_gadget *gadget) 1160static int __init gmidi_bind(struct usb_gadget *gadget)
1161{ 1161{
1162 struct gmidi_device *dev; 1162 struct gmidi_device *dev;
1163 struct usb_ep *in_ep, *out_ep; 1163 struct usb_ep *in_ep, *out_ep;
@@ -1292,7 +1292,6 @@ static void gmidi_resume(struct usb_gadget *gadget)
1292static struct usb_gadget_driver gmidi_driver = { 1292static struct usb_gadget_driver gmidi_driver = {
1293 .speed = USB_SPEED_FULL, 1293 .speed = USB_SPEED_FULL,
1294 .function = (char *)longname, 1294 .function = (char *)longname,
1295 .bind = gmidi_bind,
1296 .unbind = gmidi_unbind, 1295 .unbind = gmidi_unbind,
1297 1296
1298 .setup = gmidi_setup, 1297 .setup = gmidi_setup,
@@ -1309,7 +1308,7 @@ static struct usb_gadget_driver gmidi_driver = {
1309 1308
1310static int __init gmidi_init(void) 1309static int __init gmidi_init(void)
1311{ 1310{
1312 return usb_gadget_register_driver(&gmidi_driver); 1311 return usb_gadget_probe_driver(&gmidi_driver, gmidi_bind);
1313} 1312}
1314module_init(gmidi_init); 1313module_init(gmidi_init);
1315 1314
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 1088d08c7ed8..48a760220baf 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1343,14 +1343,15 @@ static struct goku_udc *the_controller;
1343 * disconnect is reported. then a host may connect again, or 1343 * disconnect is reported. then a host may connect again, or
1344 * the driver might get unbound. 1344 * the driver might get unbound.
1345 */ 1345 */
1346int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1346int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1347 int (*bind)(struct usb_gadget *))
1347{ 1348{
1348 struct goku_udc *dev = the_controller; 1349 struct goku_udc *dev = the_controller;
1349 int retval; 1350 int retval;
1350 1351
1351 if (!driver 1352 if (!driver
1352 || driver->speed < USB_SPEED_FULL 1353 || driver->speed < USB_SPEED_FULL
1353 || !driver->bind 1354 || !bind
1354 || !driver->disconnect 1355 || !driver->disconnect
1355 || !driver->setup) 1356 || !driver->setup)
1356 return -EINVAL; 1357 return -EINVAL;
@@ -1363,7 +1364,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1363 driver->driver.bus = NULL; 1364 driver->driver.bus = NULL;
1364 dev->driver = driver; 1365 dev->driver = driver;
1365 dev->gadget.dev.driver = &driver->driver; 1366 dev->gadget.dev.driver = &driver->driver;
1366 retval = driver->bind(&dev->gadget); 1367 retval = bind(&dev->gadget);
1367 if (retval) { 1368 if (retval) {
1368 DBG(dev, "bind to driver %s --> error %d\n", 1369 DBG(dev, "bind to driver %s --> error %d\n",
1369 driver->driver.name, retval); 1370 driver->driver.name, retval);
@@ -1380,7 +1381,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1380 DBG(dev, "registered gadget driver '%s'\n", driver->driver.name); 1381 DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
1381 return 0; 1382 return 0;
1382} 1383}
1383EXPORT_SYMBOL(usb_gadget_register_driver); 1384EXPORT_SYMBOL(usb_gadget_probe_driver);
1384 1385
1385static void 1386static void
1386stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver) 1387stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
@@ -1744,7 +1745,8 @@ static void goku_remove(struct pci_dev *pdev)
1744 pci_resource_len (pdev, 0)); 1745 pci_resource_len (pdev, 0));
1745 if (dev->enabled) 1746 if (dev->enabled)
1746 pci_disable_device(pdev); 1747 pci_disable_device(pdev);
1747 device_unregister(&dev->gadget.dev); 1748 if (dev->registered)
1749 device_unregister(&dev->gadget.dev);
1748 1750
1749 pci_set_drvdata(pdev, NULL); 1751 pci_set_drvdata(pdev, NULL);
1750 dev->regs = NULL; 1752 dev->regs = NULL;
@@ -1774,7 +1776,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1774 if (!pdev->irq) { 1776 if (!pdev->irq) {
1775 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev)); 1777 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1776 retval = -ENODEV; 1778 retval = -ENODEV;
1777 goto done; 1779 goto err;
1778 } 1780 }
1779 1781
1780 /* alloc, and start init */ 1782 /* alloc, and start init */
@@ -1782,7 +1784,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1782 if (dev == NULL){ 1784 if (dev == NULL){
1783 pr_debug("enomem %s\n", pci_name(pdev)); 1785 pr_debug("enomem %s\n", pci_name(pdev));
1784 retval = -ENOMEM; 1786 retval = -ENOMEM;
1785 goto done; 1787 goto err;
1786 } 1788 }
1787 1789
1788 spin_lock_init(&dev->lock); 1790 spin_lock_init(&dev->lock);
@@ -1800,7 +1802,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1800 retval = pci_enable_device(pdev); 1802 retval = pci_enable_device(pdev);
1801 if (retval < 0) { 1803 if (retval < 0) {
1802 DBG(dev, "can't enable, %d\n", retval); 1804 DBG(dev, "can't enable, %d\n", retval);
1803 goto done; 1805 goto err;
1804 } 1806 }
1805 dev->enabled = 1; 1807 dev->enabled = 1;
1806 1808
@@ -1809,7 +1811,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1809 if (!request_mem_region(resource, len, driver_name)) { 1811 if (!request_mem_region(resource, len, driver_name)) {
1810 DBG(dev, "controller already in use\n"); 1812 DBG(dev, "controller already in use\n");
1811 retval = -EBUSY; 1813 retval = -EBUSY;
1812 goto done; 1814 goto err;
1813 } 1815 }
1814 dev->got_region = 1; 1816 dev->got_region = 1;
1815 1817
@@ -1817,7 +1819,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1817 if (base == NULL) { 1819 if (base == NULL) {
1818 DBG(dev, "can't map memory\n"); 1820 DBG(dev, "can't map memory\n");
1819 retval = -EFAULT; 1821 retval = -EFAULT;
1820 goto done; 1822 goto err;
1821 } 1823 }
1822 dev->regs = (struct goku_udc_regs __iomem *) base; 1824 dev->regs = (struct goku_udc_regs __iomem *) base;
1823 1825
@@ -1833,7 +1835,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1833 driver_name, dev) != 0) { 1835 driver_name, dev) != 0) {
1834 DBG(dev, "request interrupt %d failed\n", pdev->irq); 1836 DBG(dev, "request interrupt %d failed\n", pdev->irq);
1835 retval = -EBUSY; 1837 retval = -EBUSY;
1836 goto done; 1838 goto err;
1837 } 1839 }
1838 dev->got_irq = 1; 1840 dev->got_irq = 1;
1839 if (use_dma) 1841 if (use_dma)
@@ -1844,13 +1846,16 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1844 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev); 1846 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
1845#endif 1847#endif
1846 1848
1847 /* done */
1848 the_controller = dev; 1849 the_controller = dev;
1849 retval = device_register(&dev->gadget.dev); 1850 retval = device_register(&dev->gadget.dev);
1850 if (retval == 0) 1851 if (retval) {
1851 return 0; 1852 put_device(&dev->gadget.dev);
1853 goto err;
1854 }
1855 dev->registered = 1;
1856 return 0;
1852 1857
1853done: 1858err:
1854 if (dev) 1859 if (dev)
1855 goku_remove (pdev); 1860 goku_remove (pdev);
1856 return retval; 1861 return retval;
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
index 735495bf8411..2523e54097bd 100644
--- a/drivers/usb/gadget/hid.c
+++ b/drivers/usb/gadget/hid.c
@@ -127,7 +127,7 @@ static struct usb_gadget_strings *dev_strings[] = {
127 127
128/****************************** Configurations ******************************/ 128/****************************** Configurations ******************************/
129 129
130static int __ref do_config(struct usb_configuration *c) 130static int __init do_config(struct usb_configuration *c)
131{ 131{
132 struct hidg_func_node *e; 132 struct hidg_func_node *e;
133 int func = 0, status = 0; 133 int func = 0, status = 0;
@@ -148,7 +148,6 @@ static int __ref do_config(struct usb_configuration *c)
148 148
149static struct usb_configuration config_driver = { 149static struct usb_configuration config_driver = {
150 .label = "HID Gadget", 150 .label = "HID Gadget",
151 .bind = do_config,
152 .bConfigurationValue = 1, 151 .bConfigurationValue = 1,
153 /* .iConfiguration = DYNAMIC */ 152 /* .iConfiguration = DYNAMIC */
154 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 153 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -156,7 +155,7 @@ static struct usb_configuration config_driver = {
156 155
157/****************************** Gadget Bind ******************************/ 156/****************************** Gadget Bind ******************************/
158 157
159static int __ref hid_bind(struct usb_composite_dev *cdev) 158static int __init hid_bind(struct usb_composite_dev *cdev)
160{ 159{
161 struct usb_gadget *gadget = cdev->gadget; 160 struct usb_gadget *gadget = cdev->gadget;
162 struct list_head *tmp; 161 struct list_head *tmp;
@@ -201,7 +200,7 @@ static int __ref hid_bind(struct usb_composite_dev *cdev)
201 device_desc.iProduct = status; 200 device_desc.iProduct = status;
202 201
203 /* register our configuration */ 202 /* register our configuration */
204 status = usb_add_config(cdev, &config_driver); 203 status = usb_add_config(cdev, &config_driver, do_config);
205 if (status < 0) 204 if (status < 0)
206 return status; 205 return status;
207 206
@@ -256,7 +255,6 @@ static struct usb_composite_driver hidg_driver = {
256 .name = "g_hid", 255 .name = "g_hid",
257 .dev = &device_desc, 256 .dev = &device_desc,
258 .strings = dev_strings, 257 .strings = dev_strings,
259 .bind = hid_bind,
260 .unbind = __exit_p(hid_unbind), 258 .unbind = __exit_p(hid_unbind),
261}; 259};
262 260
@@ -282,7 +280,7 @@ static int __init hidg_init(void)
282 if (status < 0) 280 if (status < 0)
283 return status; 281 return status;
284 282
285 status = usb_composite_register(&hidg_driver); 283 status = usb_composite_probe(&hidg_driver, hid_bind);
286 if (status < 0) 284 if (status < 0)
287 platform_driver_unregister(&hidg_plat_driver); 285 platform_driver_unregister(&hidg_plat_driver);
288 286
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index e743122fcd93..ed0266462c57 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1319,14 +1319,15 @@ static struct imx_udc_struct controller = {
1319 * USB gadged driver functions 1319 * USB gadged driver functions
1320 ******************************************************************************* 1320 *******************************************************************************
1321 */ 1321 */
1322int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1322int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1323 int (*bind)(struct usb_gadget *))
1323{ 1324{
1324 struct imx_udc_struct *imx_usb = &controller; 1325 struct imx_udc_struct *imx_usb = &controller;
1325 int retval; 1326 int retval;
1326 1327
1327 if (!driver 1328 if (!driver
1328 || driver->speed < USB_SPEED_FULL 1329 || driver->speed < USB_SPEED_FULL
1329 || !driver->bind 1330 || !bind
1330 || !driver->disconnect 1331 || !driver->disconnect
1331 || !driver->setup) 1332 || !driver->setup)
1332 return -EINVAL; 1333 return -EINVAL;
@@ -1342,7 +1343,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1342 retval = device_add(&imx_usb->gadget.dev); 1343 retval = device_add(&imx_usb->gadget.dev);
1343 if (retval) 1344 if (retval)
1344 goto fail; 1345 goto fail;
1345 retval = driver->bind(&imx_usb->gadget); 1346 retval = bind(&imx_usb->gadget);
1346 if (retval) { 1347 if (retval) {
1347 D_ERR(imx_usb->dev, "<%s> bind to driver %s --> error %d\n", 1348 D_ERR(imx_usb->dev, "<%s> bind to driver %s --> error %d\n",
1348 __func__, driver->driver.name, retval); 1349 __func__, driver->driver.name, retval);
@@ -1362,7 +1363,7 @@ fail:
1362 imx_usb->gadget.dev.driver = NULL; 1363 imx_usb->gadget.dev.driver = NULL;
1363 return retval; 1364 return retval;
1364} 1365}
1365EXPORT_SYMBOL(usb_gadget_register_driver); 1366EXPORT_SYMBOL(usb_gadget_probe_driver);
1366 1367
1367int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1368int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1368{ 1369{
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3f1d771c8be5..d1d72d946b04 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1774,7 +1774,6 @@ static struct usb_gadget_driver gadgetfs_driver = {
1774 .speed = USB_SPEED_FULL, 1774 .speed = USB_SPEED_FULL,
1775#endif 1775#endif
1776 .function = (char *) driver_desc, 1776 .function = (char *) driver_desc,
1777 .bind = gadgetfs_bind,
1778 .unbind = gadgetfs_unbind, 1777 .unbind = gadgetfs_unbind,
1779 .setup = gadgetfs_setup, 1778 .setup = gadgetfs_setup,
1780 .disconnect = gadgetfs_disconnect, 1779 .disconnect = gadgetfs_disconnect,
@@ -1797,7 +1796,6 @@ static int gadgetfs_probe (struct usb_gadget *gadget)
1797 1796
1798static struct usb_gadget_driver probe_driver = { 1797static struct usb_gadget_driver probe_driver = {
1799 .speed = USB_SPEED_HIGH, 1798 .speed = USB_SPEED_HIGH,
1800 .bind = gadgetfs_probe,
1801 .unbind = gadgetfs_nop, 1799 .unbind = gadgetfs_nop,
1802 .setup = (void *)gadgetfs_nop, 1800 .setup = (void *)gadgetfs_nop,
1803 .disconnect = gadgetfs_nop, 1801 .disconnect = gadgetfs_nop,
@@ -1907,7 +1905,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1907 1905
1908 /* triggers gadgetfs_bind(); then we can enumerate. */ 1906 /* triggers gadgetfs_bind(); then we can enumerate. */
1909 spin_unlock_irq (&dev->lock); 1907 spin_unlock_irq (&dev->lock);
1910 value = usb_gadget_register_driver (&gadgetfs_driver); 1908 value = usb_gadget_probe_driver(&gadgetfs_driver, gadgetfs_bind);
1911 if (value != 0) { 1909 if (value != 0) {
1912 kfree (dev->buf); 1910 kfree (dev->buf);
1913 dev->buf = NULL; 1911 dev->buf = NULL;
@@ -2046,7 +2044,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2046 return -ESRCH; 2044 return -ESRCH;
2047 2045
2048 /* fake probe to determine $CHIP */ 2046 /* fake probe to determine $CHIP */
2049 (void) usb_gadget_register_driver (&probe_driver); 2047 (void) usb_gadget_probe_driver(&probe_driver, gadgetfs_probe);
2050 if (!CHIP) 2048 if (!CHIP)
2051 return -ENODEV; 2049 return -ENODEV;
2052 2050
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index c2d2a201f84b..b8ec954c0692 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -19,7 +19,7 @@
19 19
20 20
21/* #undef DEBUG */ 21/* #undef DEBUG */
22/* #undef VERBOSE */ 22/* #undef VERBOSE_DEBUG */
23 23
24#if defined(CONFIG_USB_LANGWELL_OTG) 24#if defined(CONFIG_USB_LANGWELL_OTG)
25#define OTG_TRANSCEIVER 25#define OTG_TRANSCEIVER
@@ -77,141 +77,110 @@ langwell_ep0_desc = {
77/*-------------------------------------------------------------------------*/ 77/*-------------------------------------------------------------------------*/
78/* debugging */ 78/* debugging */
79 79
80#ifdef DEBUG 80#ifdef VERBOSE_DEBUG
81#define DBG(dev, fmt, args...) \
82 pr_debug("%s %s: " fmt , driver_name, \
83 pci_name(dev->pdev), ## args)
84#else
85#define DBG(dev, fmt, args...) \
86 do { } while (0)
87#endif /* DEBUG */
88
89
90#ifdef VERBOSE
91#define VDBG DBG
92#else
93#define VDBG(dev, fmt, args...) \
94 do { } while (0)
95#endif /* VERBOSE */
96
97
98#define ERROR(dev, fmt, args...) \
99 pr_err("%s %s: " fmt , driver_name, \
100 pci_name(dev->pdev), ## args)
101
102#define WARNING(dev, fmt, args...) \
103 pr_warning("%s %s: " fmt , driver_name, \
104 pci_name(dev->pdev), ## args)
105
106#define INFO(dev, fmt, args...) \
107 pr_info("%s %s: " fmt , driver_name, \
108 pci_name(dev->pdev), ## args)
109
110
111#ifdef VERBOSE
112static inline void print_all_registers(struct langwell_udc *dev) 81static inline void print_all_registers(struct langwell_udc *dev)
113{ 82{
114 int i; 83 int i;
115 84
116 /* Capability Registers */ 85 /* Capability Registers */
117 printk(KERN_DEBUG "Capability Registers (offset: " 86 dev_dbg(&dev->pdev->dev,
118 "0x%04x, length: 0x%08x)\n", 87 "Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
119 CAP_REG_OFFSET, 88 CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
120 (u32)sizeof(struct langwell_cap_regs)); 89 dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
121 printk(KERN_DEBUG "caplength=0x%02x\n",
122 readb(&dev->cap_regs->caplength)); 90 readb(&dev->cap_regs->caplength));
123 printk(KERN_DEBUG "hciversion=0x%04x\n", 91 dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
124 readw(&dev->cap_regs->hciversion)); 92 readw(&dev->cap_regs->hciversion));
125 printk(KERN_DEBUG "hcsparams=0x%08x\n", 93 dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
126 readl(&dev->cap_regs->hcsparams)); 94 readl(&dev->cap_regs->hcsparams));
127 printk(KERN_DEBUG "hccparams=0x%08x\n", 95 dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
128 readl(&dev->cap_regs->hccparams)); 96 readl(&dev->cap_regs->hccparams));
129 printk(KERN_DEBUG "dciversion=0x%04x\n", 97 dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
130 readw(&dev->cap_regs->dciversion)); 98 readw(&dev->cap_regs->dciversion));
131 printk(KERN_DEBUG "dccparams=0x%08x\n", 99 dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
132 readl(&dev->cap_regs->dccparams)); 100 readl(&dev->cap_regs->dccparams));
133 101
134 /* Operational Registers */ 102 /* Operational Registers */
135 printk(KERN_DEBUG "Operational Registers (offset: " 103 dev_dbg(&dev->pdev->dev,
136 "0x%04x, length: 0x%08x)\n", 104 "Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
137 OP_REG_OFFSET, 105 OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
138 (u32)sizeof(struct langwell_op_regs)); 106 dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
139 printk(KERN_DEBUG "extsts=0x%08x\n",
140 readl(&dev->op_regs->extsts)); 107 readl(&dev->op_regs->extsts));
141 printk(KERN_DEBUG "extintr=0x%08x\n", 108 dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
142 readl(&dev->op_regs->extintr)); 109 readl(&dev->op_regs->extintr));
143 printk(KERN_DEBUG "usbcmd=0x%08x\n", 110 dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
144 readl(&dev->op_regs->usbcmd)); 111 readl(&dev->op_regs->usbcmd));
145 printk(KERN_DEBUG "usbsts=0x%08x\n", 112 dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
146 readl(&dev->op_regs->usbsts)); 113 readl(&dev->op_regs->usbsts));
147 printk(KERN_DEBUG "usbintr=0x%08x\n", 114 dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
148 readl(&dev->op_regs->usbintr)); 115 readl(&dev->op_regs->usbintr));
149 printk(KERN_DEBUG "frindex=0x%08x\n", 116 dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
150 readl(&dev->op_regs->frindex)); 117 readl(&dev->op_regs->frindex));
151 printk(KERN_DEBUG "ctrldssegment=0x%08x\n", 118 dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
152 readl(&dev->op_regs->ctrldssegment)); 119 readl(&dev->op_regs->ctrldssegment));
153 printk(KERN_DEBUG "deviceaddr=0x%08x\n", 120 dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
154 readl(&dev->op_regs->deviceaddr)); 121 readl(&dev->op_regs->deviceaddr));
155 printk(KERN_DEBUG "endpointlistaddr=0x%08x\n", 122 dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
156 readl(&dev->op_regs->endpointlistaddr)); 123 readl(&dev->op_regs->endpointlistaddr));
157 printk(KERN_DEBUG "ttctrl=0x%08x\n", 124 dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
158 readl(&dev->op_regs->ttctrl)); 125 readl(&dev->op_regs->ttctrl));
159 printk(KERN_DEBUG "burstsize=0x%08x\n", 126 dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
160 readl(&dev->op_regs->burstsize)); 127 readl(&dev->op_regs->burstsize));
161 printk(KERN_DEBUG "txfilltuning=0x%08x\n", 128 dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
162 readl(&dev->op_regs->txfilltuning)); 129 readl(&dev->op_regs->txfilltuning));
163 printk(KERN_DEBUG "txttfilltuning=0x%08x\n", 130 dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
164 readl(&dev->op_regs->txttfilltuning)); 131 readl(&dev->op_regs->txttfilltuning));
165 printk(KERN_DEBUG "ic_usb=0x%08x\n", 132 dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
166 readl(&dev->op_regs->ic_usb)); 133 readl(&dev->op_regs->ic_usb));
167 printk(KERN_DEBUG "ulpi_viewport=0x%08x\n", 134 dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
168 readl(&dev->op_regs->ulpi_viewport)); 135 readl(&dev->op_regs->ulpi_viewport));
169 printk(KERN_DEBUG "configflag=0x%08x\n", 136 dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
170 readl(&dev->op_regs->configflag)); 137 readl(&dev->op_regs->configflag));
171 printk(KERN_DEBUG "portsc1=0x%08x\n", 138 dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
172 readl(&dev->op_regs->portsc1)); 139 readl(&dev->op_regs->portsc1));
173 printk(KERN_DEBUG "devlc=0x%08x\n", 140 dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
174 readl(&dev->op_regs->devlc)); 141 readl(&dev->op_regs->devlc));
175 printk(KERN_DEBUG "otgsc=0x%08x\n", 142 dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
176 readl(&dev->op_regs->otgsc)); 143 readl(&dev->op_regs->otgsc));
177 printk(KERN_DEBUG "usbmode=0x%08x\n", 144 dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
178 readl(&dev->op_regs->usbmode)); 145 readl(&dev->op_regs->usbmode));
179 printk(KERN_DEBUG "endptnak=0x%08x\n", 146 dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
180 readl(&dev->op_regs->endptnak)); 147 readl(&dev->op_regs->endptnak));
181 printk(KERN_DEBUG "endptnaken=0x%08x\n", 148 dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
182 readl(&dev->op_regs->endptnaken)); 149 readl(&dev->op_regs->endptnaken));
183 printk(KERN_DEBUG "endptsetupstat=0x%08x\n", 150 dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
184 readl(&dev->op_regs->endptsetupstat)); 151 readl(&dev->op_regs->endptsetupstat));
185 printk(KERN_DEBUG "endptprime=0x%08x\n", 152 dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
186 readl(&dev->op_regs->endptprime)); 153 readl(&dev->op_regs->endptprime));
187 printk(KERN_DEBUG "endptflush=0x%08x\n", 154 dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
188 readl(&dev->op_regs->endptflush)); 155 readl(&dev->op_regs->endptflush));
189 printk(KERN_DEBUG "endptstat=0x%08x\n", 156 dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
190 readl(&dev->op_regs->endptstat)); 157 readl(&dev->op_regs->endptstat));
191 printk(KERN_DEBUG "endptcomplete=0x%08x\n", 158 dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
192 readl(&dev->op_regs->endptcomplete)); 159 readl(&dev->op_regs->endptcomplete));
193 160
194 for (i = 0; i < dev->ep_max / 2; i++) { 161 for (i = 0; i < dev->ep_max / 2; i++) {
195 printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n", 162 dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
196 i, readl(&dev->op_regs->endptctrl[i])); 163 i, readl(&dev->op_regs->endptctrl[i]));
197 } 164 }
198} 165}
199#endif /* VERBOSE */ 166#else
167
168#define print_all_registers(dev) do { } while (0)
169
170#endif /* VERBOSE_DEBUG */
200 171
201 172
202/*-------------------------------------------------------------------------*/ 173/*-------------------------------------------------------------------------*/
203 174
204#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out") 175#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
176 USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
205 177
206#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \ 178#define DIR_STRING(ep) (is_in(ep) ? "in" : "out")
207 USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
208 & USB_DIR_IN) == USB_DIR_IN)
209 179
210 180
211#ifdef DEBUG 181static char *type_string(const struct usb_endpoint_descriptor *desc)
212static char *type_string(u8 bmAttributes)
213{ 182{
214 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 183 switch (usb_endpoint_type(desc)) {
215 case USB_ENDPOINT_XFER_BULK: 184 case USB_ENDPOINT_XFER_BULK:
216 return "bulk"; 185 return "bulk";
217 case USB_ENDPOINT_XFER_ISOC: 186 case USB_ENDPOINT_XFER_ISOC:
@@ -222,7 +191,6 @@ static char *type_string(u8 bmAttributes)
222 191
223 return "control"; 192 return "control";
224} 193}
225#endif
226 194
227 195
228/* configure endpoint control registers */ 196/* configure endpoint control registers */
@@ -233,7 +201,7 @@ static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
233 u32 endptctrl; 201 u32 endptctrl;
234 202
235 dev = ep->dev; 203 dev = ep->dev;
236 VDBG(dev, "---> %s()\n", __func__); 204 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
237 205
238 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]); 206 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
239 if (is_in) { /* TX */ 207 if (is_in) { /* TX */
@@ -250,7 +218,7 @@ static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
250 218
251 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]); 219 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
252 220
253 VDBG(dev, "<--- %s()\n", __func__); 221 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
254} 222}
255 223
256 224
@@ -260,7 +228,7 @@ static void ep0_reset(struct langwell_udc *dev)
260 struct langwell_ep *ep; 228 struct langwell_ep *ep;
261 int i; 229 int i;
262 230
263 VDBG(dev, "---> %s()\n", __func__); 231 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
264 232
265 /* ep0 in and out */ 233 /* ep0 in and out */
266 for (i = 0; i < 2; i++) { 234 for (i = 0; i < 2; i++) {
@@ -274,17 +242,18 @@ static void ep0_reset(struct langwell_udc *dev)
274 ep->dqh->dqh_ios = 1; 242 ep->dqh->dqh_ios = 1;
275 ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE; 243 ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
276 244
277 /* FIXME: enable ep0-in HW zero length termination select */ 245 /* enable ep0-in HW zero length termination select */
278 if (is_in(ep)) 246 if (is_in(ep))
279 ep->dqh->dqh_zlt = 0; 247 ep->dqh->dqh_zlt = 0;
280 ep->dqh->dqh_mult = 0; 248 ep->dqh->dqh_mult = 0;
281 249
250 ep->dqh->dtd_next = DTD_TERM;
251
282 /* configure ep0 control registers */ 252 /* configure ep0 control registers */
283 ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL); 253 ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
284 } 254 }
285 255
286 VDBG(dev, "<--- %s()\n", __func__); 256 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
287 return;
288} 257}
289 258
290 259
@@ -300,12 +269,12 @@ static int langwell_ep_enable(struct usb_ep *_ep,
300 struct langwell_ep *ep; 269 struct langwell_ep *ep;
301 u16 max = 0; 270 u16 max = 0;
302 unsigned long flags; 271 unsigned long flags;
303 int retval = 0; 272 int i, retval = 0;
304 unsigned char zlt, ios = 0, mult = 0; 273 unsigned char zlt, ios = 0, mult = 0;
305 274
306 ep = container_of(_ep, struct langwell_ep, ep); 275 ep = container_of(_ep, struct langwell_ep, ep);
307 dev = ep->dev; 276 dev = ep->dev;
308 VDBG(dev, "---> %s()\n", __func__); 277 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
309 278
310 if (!_ep || !desc || ep->desc 279 if (!_ep || !desc || ep->desc
311 || desc->bDescriptorType != USB_DT_ENDPOINT) 280 || desc->bDescriptorType != USB_DT_ENDPOINT)
@@ -326,7 +295,7 @@ static int langwell_ep_enable(struct usb_ep *_ep,
326 * sanity check type, direction, address, and then 295 * sanity check type, direction, address, and then
327 * initialize the endpoint capabilities fields in dQH 296 * initialize the endpoint capabilities fields in dQH
328 */ 297 */
329 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 298 switch (usb_endpoint_type(desc)) {
330 case USB_ENDPOINT_XFER_CONTROL: 299 case USB_ENDPOINT_XFER_CONTROL:
331 ios = 1; 300 ios = 1;
332 break; 301 break;
@@ -386,33 +355,36 @@ static int langwell_ep_enable(struct usb_ep *_ep,
386 355
387 spin_lock_irqsave(&dev->lock, flags); 356 spin_lock_irqsave(&dev->lock, flags);
388 357
389 /* configure endpoint capabilities in dQH */
390 ep->dqh->dqh_ios = ios;
391 ep->dqh->dqh_mpl = cpu_to_le16(max);
392 ep->dqh->dqh_zlt = zlt;
393 ep->dqh->dqh_mult = mult;
394
395 ep->ep.maxpacket = max; 358 ep->ep.maxpacket = max;
396 ep->desc = desc; 359 ep->desc = desc;
397 ep->stopped = 0; 360 ep->stopped = 0;
398 ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 361 ep->ep_num = usb_endpoint_num(desc);
399 362
400 /* ep_type */ 363 /* ep_type */
401 ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 364 ep->ep_type = usb_endpoint_type(desc);
402 365
403 /* configure endpoint control registers */ 366 /* configure endpoint control registers */
404 ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type); 367 ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
405 368
406 DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n", 369 /* configure endpoint capabilities in dQH */
370 i = ep->ep_num * 2 + is_in(ep);
371 ep->dqh = &dev->ep_dqh[i];
372 ep->dqh->dqh_ios = ios;
373 ep->dqh->dqh_mpl = cpu_to_le16(max);
374 ep->dqh->dqh_zlt = zlt;
375 ep->dqh->dqh_mult = mult;
376 ep->dqh->dtd_next = DTD_TERM;
377
378 dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
407 _ep->name, 379 _ep->name,
408 ep->ep_num, 380 ep->ep_num,
409 DIR_STRING(desc->bEndpointAddress), 381 DIR_STRING(ep),
410 type_string(desc->bmAttributes), 382 type_string(desc),
411 max); 383 max);
412 384
413 spin_unlock_irqrestore(&dev->lock, flags); 385 spin_unlock_irqrestore(&dev->lock, flags);
414done: 386done:
415 VDBG(dev, "<--- %s()\n", __func__); 387 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
416 return retval; 388 return retval;
417} 389}
418 390
@@ -428,7 +400,7 @@ static void done(struct langwell_ep *ep, struct langwell_request *req,
428 struct langwell_dtd *curr_dtd, *next_dtd; 400 struct langwell_dtd *curr_dtd, *next_dtd;
429 int i; 401 int i;
430 402
431 VDBG(dev, "---> %s()\n", __func__); 403 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
432 404
433 /* remove the req from ep->queue */ 405 /* remove the req from ep->queue */
434 list_del_init(&req->queue); 406 list_del_init(&req->queue);
@@ -448,7 +420,8 @@ static void done(struct langwell_ep *ep, struct langwell_request *req,
448 } 420 }
449 421
450 if (req->mapped) { 422 if (req->mapped) {
451 dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length, 423 dma_unmap_single(&dev->pdev->dev,
424 req->req.dma, req->req.length,
452 is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 425 is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
453 req->req.dma = DMA_ADDR_INVALID; 426 req->req.dma = DMA_ADDR_INVALID;
454 req->mapped = 0; 427 req->mapped = 0;
@@ -458,9 +431,10 @@ static void done(struct langwell_ep *ep, struct langwell_request *req,
458 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 431 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
459 432
460 if (status != -ESHUTDOWN) 433 if (status != -ESHUTDOWN)
461 DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n", 434 dev_dbg(&dev->pdev->dev,
462 ep->ep.name, &req->req, status, 435 "complete %s, req %p, stat %d, len %u/%u\n",
463 req->req.actual, req->req.length); 436 ep->ep.name, &req->req, status,
437 req->req.actual, req->req.length);
464 438
465 /* don't modify queue heads during completion callback */ 439 /* don't modify queue heads during completion callback */
466 ep->stopped = 1; 440 ep->stopped = 1;
@@ -473,7 +447,7 @@ static void done(struct langwell_ep *ep, struct langwell_request *req,
473 spin_lock(&dev->lock); 447 spin_lock(&dev->lock);
474 ep->stopped = stopped; 448 ep->stopped = stopped;
475 449
476 VDBG(dev, "<--- %s()\n", __func__); 450 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
477} 451}
478 452
479 453
@@ -511,7 +485,7 @@ static int langwell_ep_disable(struct usb_ep *_ep)
511 485
512 ep = container_of(_ep, struct langwell_ep, ep); 486 ep = container_of(_ep, struct langwell_ep, ep);
513 dev = ep->dev; 487 dev = ep->dev;
514 VDBG(dev, "---> %s()\n", __func__); 488 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
515 489
516 if (!_ep || !ep->desc) 490 if (!_ep || !ep->desc)
517 return -EINVAL; 491 return -EINVAL;
@@ -535,8 +509,8 @@ static int langwell_ep_disable(struct usb_ep *_ep)
535 509
536 spin_unlock_irqrestore(&dev->lock, flags); 510 spin_unlock_irqrestore(&dev->lock, flags);
537 511
538 DBG(dev, "disabled %s\n", _ep->name); 512 dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
539 VDBG(dev, "<--- %s()\n", __func__); 513 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
540 514
541 return 0; 515 return 0;
542} 516}
@@ -555,7 +529,7 @@ static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
555 529
556 ep = container_of(_ep, struct langwell_ep, ep); 530 ep = container_of(_ep, struct langwell_ep, ep);
557 dev = ep->dev; 531 dev = ep->dev;
558 VDBG(dev, "---> %s()\n", __func__); 532 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
559 533
560 req = kzalloc(sizeof(*req), gfp_flags); 534 req = kzalloc(sizeof(*req), gfp_flags);
561 if (!req) 535 if (!req)
@@ -564,8 +538,8 @@ static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
564 req->req.dma = DMA_ADDR_INVALID; 538 req->req.dma = DMA_ADDR_INVALID;
565 INIT_LIST_HEAD(&req->queue); 539 INIT_LIST_HEAD(&req->queue);
566 540
567 VDBG(dev, "alloc request for %s\n", _ep->name); 541 dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
568 VDBG(dev, "<--- %s()\n", __func__); 542 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
569 return &req->req; 543 return &req->req;
570} 544}
571 545
@@ -580,7 +554,7 @@ static void langwell_free_request(struct usb_ep *_ep,
580 554
581 ep = container_of(_ep, struct langwell_ep, ep); 555 ep = container_of(_ep, struct langwell_ep, ep);
582 dev = ep->dev; 556 dev = ep->dev;
583 VDBG(dev, "---> %s()\n", __func__); 557 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
584 558
585 if (!_ep || !_req) 559 if (!_ep || !_req)
586 return; 560 return;
@@ -591,8 +565,8 @@ static void langwell_free_request(struct usb_ep *_ep,
591 if (_req) 565 if (_req)
592 kfree(req); 566 kfree(req);
593 567
594 VDBG(dev, "free request for %s\n", _ep->name); 568 dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
595 VDBG(dev, "<--- %s()\n", __func__); 569 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
596} 570}
597 571
598 572
@@ -608,23 +582,24 @@ static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
608 struct langwell_udc *dev; 582 struct langwell_udc *dev;
609 583
610 dev = ep->dev; 584 dev = ep->dev;
611 VDBG(dev, "---> %s()\n", __func__); 585 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
612 586
613 i = ep->ep_num * 2 + is_in(ep); 587 i = ep->ep_num * 2 + is_in(ep);
614 dqh = &dev->ep_dqh[i]; 588 dqh = &dev->ep_dqh[i];
615 589
616 if (ep->ep_num) 590 if (ep->ep_num)
617 VDBG(dev, "%s\n", ep->name); 591 dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
618 else 592 else
619 /* ep0 */ 593 /* ep0 */
620 VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out"); 594 dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
621 595
622 VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i])); 596 dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%08x\n",
597 i, (u32)&(dev->ep_dqh[i]));
623 598
624 bit_mask = is_in(ep) ? 599 bit_mask = is_in(ep) ?
625 (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num)); 600 (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
626 601
627 VDBG(dev, "bit_mask = 0x%08x\n", bit_mask); 602 dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
628 603
629 /* check if the pipe is empty */ 604 /* check if the pipe is empty */
630 if (!(list_empty(&ep->queue))) { 605 if (!(list_empty(&ep->queue))) {
@@ -665,14 +640,17 @@ static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
665 /* clear active and halt bit */ 640 /* clear active and halt bit */
666 dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED); 641 dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
667 dqh->dtd_status &= dtd_status; 642 dqh->dtd_status &= dtd_status;
668 VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status); 643 dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
644
645 /* ensure that updates to the dQH will occure before priming */
646 wmb();
669 647
670 /* write 1 to endptprime register to PRIME endpoint */ 648 /* write 1 to endptprime register to PRIME endpoint */
671 bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num); 649 bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
672 VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask); 650 dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
673 writel(bit_mask, &dev->op_regs->endptprime); 651 writel(bit_mask, &dev->op_regs->endptprime);
674out: 652out:
675 VDBG(dev, "<--- %s()\n", __func__); 653 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
676 return 0; 654 return 0;
677} 655}
678 656
@@ -687,7 +665,7 @@ static struct langwell_dtd *build_dtd(struct langwell_request *req,
687 int i; 665 int i;
688 666
689 dev = req->ep->dev; 667 dev = req->ep->dev;
690 VDBG(dev, "---> %s()\n", __func__); 668 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
691 669
692 /* the maximum transfer length, up to 16k bytes */ 670 /* the maximum transfer length, up to 16k bytes */
693 *length = min(req->req.length - req->req.actual, 671 *length = min(req->req.length - req->req.actual,
@@ -708,7 +686,7 @@ static struct langwell_dtd *build_dtd(struct langwell_request *req,
708 686
709 /* fill in total bytes with transfer size */ 687 /* fill in total bytes with transfer size */
710 dtd->dtd_total = cpu_to_le16(*length); 688 dtd->dtd_total = cpu_to_le16(*length);
711 VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total); 689 dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
712 690
713 /* set is_last flag if req->req.zero is set or not */ 691 /* set is_last flag if req->req.zero is set or not */
714 if (req->req.zero) { 692 if (req->req.zero) {
@@ -722,7 +700,7 @@ static struct langwell_dtd *build_dtd(struct langwell_request *req,
722 *is_last = 0; 700 *is_last = 0;
723 701
724 if (*is_last == 0) 702 if (*is_last == 0)
725 VDBG(dev, "multi-dtd request!\n"); 703 dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
726 704
727 /* set interrupt on complete bit for the last dTD */ 705 /* set interrupt on complete bit for the last dTD */
728 if (*is_last && !req->req.no_interrupt) 706 if (*is_last && !req->req.no_interrupt)
@@ -733,10 +711,12 @@ static struct langwell_dtd *build_dtd(struct langwell_request *req,
733 711
734 /* set the active bit of status field to 1 */ 712 /* set the active bit of status field to 1 */
735 dtd->dtd_status = DTD_STS_ACTIVE; 713 dtd->dtd_status = DTD_STS_ACTIVE;
736 VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status); 714 dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
715 dtd->dtd_status);
737 716
738 VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma); 717 dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
739 VDBG(dev, "<--- %s()\n", __func__); 718 *length, (int)*dma);
719 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
740 return dtd; 720 return dtd;
741} 721}
742 722
@@ -751,7 +731,7 @@ static int req_to_dtd(struct langwell_request *req)
751 dma_addr_t dma; 731 dma_addr_t dma;
752 732
753 dev = req->ep->dev; 733 dev = req->ep->dev;
754 VDBG(dev, "---> %s()\n", __func__); 734 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
755 do { 735 do {
756 dtd = build_dtd(req, &count, &dma, &is_last); 736 dtd = build_dtd(req, &count, &dma, &is_last);
757 if (dtd == NULL) 737 if (dtd == NULL)
@@ -773,7 +753,7 @@ static int req_to_dtd(struct langwell_request *req)
773 753
774 req->tail = dtd; 754 req->tail = dtd;
775 755
776 VDBG(dev, "<--- %s()\n", __func__); 756 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
777 return 0; 757 return 0;
778} 758}
779 759
@@ -803,9 +783,9 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
803 783
804 dev = ep->dev; 784 dev = ep->dev;
805 req->ep = ep; 785 req->ep = ep;
806 VDBG(dev, "---> %s()\n", __func__); 786 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
807 787
808 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 788 if (usb_endpoint_xfer_isoc(ep->desc)) {
809 if (req->req.length > ep->ep.maxpacket) 789 if (req->req.length > ep->ep.maxpacket)
810 return -EMSGSIZE; 790 return -EMSGSIZE;
811 is_iso = 1; 791 is_iso = 1;
@@ -818,7 +798,7 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
818 if (_req->dma == DMA_ADDR_INVALID) { 798 if (_req->dma == DMA_ADDR_INVALID) {
819 /* WORKAROUND: WARN_ON(size == 0) */ 799 /* WORKAROUND: WARN_ON(size == 0) */
820 if (_req->length == 0) { 800 if (_req->length == 0) {
821 VDBG(dev, "req->length: 0->1\n"); 801 dev_vdbg(&dev->pdev->dev, "req->length: 0->1\n");
822 zlflag = 1; 802 zlflag = 1;
823 _req->length++; 803 _req->length++;
824 } 804 }
@@ -827,24 +807,25 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
827 _req->buf, _req->length, 807 _req->buf, _req->length,
828 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 808 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
829 if (zlflag && (_req->length == 1)) { 809 if (zlflag && (_req->length == 1)) {
830 VDBG(dev, "req->length: 1->0\n"); 810 dev_vdbg(&dev->pdev->dev, "req->length: 1->0\n");
831 zlflag = 0; 811 zlflag = 0;
832 _req->length = 0; 812 _req->length = 0;
833 } 813 }
834 814
835 req->mapped = 1; 815 req->mapped = 1;
836 VDBG(dev, "req->mapped = 1\n"); 816 dev_vdbg(&dev->pdev->dev, "req->mapped = 1\n");
837 } else { 817 } else {
838 dma_sync_single_for_device(&dev->pdev->dev, 818 dma_sync_single_for_device(&dev->pdev->dev,
839 _req->dma, _req->length, 819 _req->dma, _req->length,
840 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 820 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
841 req->mapped = 0; 821 req->mapped = 0;
842 VDBG(dev, "req->mapped = 0\n"); 822 dev_vdbg(&dev->pdev->dev, "req->mapped = 0\n");
843 } 823 }
844 824
845 DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08llx\n", 825 dev_dbg(&dev->pdev->dev,
846 _ep->name, 826 "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
847 _req, _req->length, _req->buf, (unsigned long long)_req->dma); 827 _ep->name,
828 _req, _req->length, _req->buf, (int)_req->dma);
848 829
849 _req->status = -EINPROGRESS; 830 _req->status = -EINPROGRESS;
850 _req->actual = 0; 831 _req->actual = 0;
@@ -866,12 +847,12 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
866 847
867 if (likely(req != NULL)) { 848 if (likely(req != NULL)) {
868 list_add_tail(&req->queue, &ep->queue); 849 list_add_tail(&req->queue, &ep->queue);
869 VDBG(dev, "list_add_tail() \n"); 850 dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
870 } 851 }
871 852
872 spin_unlock_irqrestore(&dev->lock, flags); 853 spin_unlock_irqrestore(&dev->lock, flags);
873 854
874 VDBG(dev, "<--- %s()\n", __func__); 855 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
875 return 0; 856 return 0;
876} 857}
877 858
@@ -888,7 +869,7 @@ static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
888 869
889 ep = container_of(_ep, struct langwell_ep, ep); 870 ep = container_of(_ep, struct langwell_ep, ep);
890 dev = ep->dev; 871 dev = ep->dev;
891 VDBG(dev, "---> %s()\n", __func__); 872 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
892 873
893 if (!_ep || !ep->desc || !_req) 874 if (!_ep || !ep->desc || !_req)
894 return -EINVAL; 875 return -EINVAL;
@@ -924,7 +905,7 @@ static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
924 905
925 /* queue head may be partially complete. */ 906 /* queue head may be partially complete. */
926 if (ep->queue.next == &req->queue) { 907 if (ep->queue.next == &req->queue) {
927 DBG(dev, "unlink (%s) dma\n", _ep->name); 908 dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
928 _req->status = -ECONNRESET; 909 _req->status = -ECONNRESET;
929 langwell_ep_fifo_flush(&ep->ep); 910 langwell_ep_fifo_flush(&ep->ep);
930 911
@@ -963,7 +944,7 @@ done:
963 ep->stopped = stopped; 944 ep->stopped = stopped;
964 spin_unlock_irqrestore(&dev->lock, flags); 945 spin_unlock_irqrestore(&dev->lock, flags);
965 946
966 VDBG(dev, "<--- %s()\n", __func__); 947 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
967 return retval; 948 return retval;
968} 949}
969 950
@@ -976,7 +957,7 @@ static void ep_set_halt(struct langwell_ep *ep, int value)
976 u32 endptctrl = 0; 957 u32 endptctrl = 0;
977 int ep_num; 958 int ep_num;
978 struct langwell_udc *dev = ep->dev; 959 struct langwell_udc *dev = ep->dev;
979 VDBG(dev, "---> %s()\n", __func__); 960 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
980 961
981 ep_num = ep->ep_num; 962 ep_num = ep->ep_num;
982 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]); 963 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
@@ -1001,7 +982,7 @@ static void ep_set_halt(struct langwell_ep *ep, int value)
1001 982
1002 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]); 983 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
1003 984
1004 VDBG(dev, "<--- %s()\n", __func__); 985 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1005} 986}
1006 987
1007 988
@@ -1016,7 +997,7 @@ static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
1016 ep = container_of(_ep, struct langwell_ep, ep); 997 ep = container_of(_ep, struct langwell_ep, ep);
1017 dev = ep->dev; 998 dev = ep->dev;
1018 999
1019 VDBG(dev, "---> %s()\n", __func__); 1000 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1020 1001
1021 if (!_ep || !ep->desc) 1002 if (!_ep || !ep->desc)
1022 return -EINVAL; 1003 return -EINVAL;
@@ -1024,8 +1005,7 @@ static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
1024 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 1005 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1025 return -ESHUTDOWN; 1006 return -ESHUTDOWN;
1026 1007
1027 if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 1008 if (usb_endpoint_xfer_isoc(ep->desc))
1028 == USB_ENDPOINT_XFER_ISOC)
1029 return -EOPNOTSUPP; 1009 return -EOPNOTSUPP;
1030 1010
1031 spin_lock_irqsave(&dev->lock, flags); 1011 spin_lock_irqsave(&dev->lock, flags);
@@ -1036,7 +1016,7 @@ static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
1036 */ 1016 */
1037 if (!list_empty(&ep->queue) && is_in(ep) && value) { 1017 if (!list_empty(&ep->queue) && is_in(ep) && value) {
1038 /* IN endpoint FIFO holds bytes */ 1018 /* IN endpoint FIFO holds bytes */
1039 DBG(dev, "%s FIFO holds bytes\n", _ep->name); 1019 dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
1040 retval = -EAGAIN; 1020 retval = -EAGAIN;
1041 goto done; 1021 goto done;
1042 } 1022 }
@@ -1050,8 +1030,9 @@ static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
1050 } 1030 }
1051done: 1031done:
1052 spin_unlock_irqrestore(&dev->lock, flags); 1032 spin_unlock_irqrestore(&dev->lock, flags);
1053 DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear"); 1033 dev_dbg(&dev->pdev->dev, "%s %s halt\n",
1054 VDBG(dev, "<--- %s()\n", __func__); 1034 _ep->name, value ? "set" : "clear");
1035 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1055 return retval; 1036 return retval;
1056} 1037}
1057 1038
@@ -1065,12 +1046,12 @@ static int langwell_ep_set_wedge(struct usb_ep *_ep)
1065 ep = container_of(_ep, struct langwell_ep, ep); 1046 ep = container_of(_ep, struct langwell_ep, ep);
1066 dev = ep->dev; 1047 dev = ep->dev;
1067 1048
1068 VDBG(dev, "---> %s()\n", __func__); 1049 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1069 1050
1070 if (!_ep || !ep->desc) 1051 if (!_ep || !ep->desc)
1071 return -EINVAL; 1052 return -EINVAL;
1072 1053
1073 VDBG(dev, "<--- %s()\n", __func__); 1054 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1074 return usb_ep_set_halt(_ep); 1055 return usb_ep_set_halt(_ep);
1075} 1056}
1076 1057
@@ -1086,15 +1067,16 @@ static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1086 ep = container_of(_ep, struct langwell_ep, ep); 1067 ep = container_of(_ep, struct langwell_ep, ep);
1087 dev = ep->dev; 1068 dev = ep->dev;
1088 1069
1089 VDBG(dev, "---> %s()\n", __func__); 1070 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1090 1071
1091 if (!_ep || !ep->desc) { 1072 if (!_ep || !ep->desc) {
1092 VDBG(dev, "ep or ep->desc is NULL\n"); 1073 dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
1093 VDBG(dev, "<--- %s()\n", __func__); 1074 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1094 return; 1075 return;
1095 } 1076 }
1096 1077
1097 VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out"); 1078 dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
1079 _ep->name, DIR_STRING(ep));
1098 1080
1099 /* flush endpoint buffer */ 1081 /* flush endpoint buffer */
1100 if (ep->ep_num == 0) 1082 if (ep->ep_num == 0)
@@ -1110,14 +1092,14 @@ static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1110 writel(flush_bit, &dev->op_regs->endptflush); 1092 writel(flush_bit, &dev->op_regs->endptflush);
1111 while (readl(&dev->op_regs->endptflush)) { 1093 while (readl(&dev->op_regs->endptflush)) {
1112 if (time_after(jiffies, timeout)) { 1094 if (time_after(jiffies, timeout)) {
1113 ERROR(dev, "ep flush timeout\n"); 1095 dev_err(&dev->pdev->dev, "ep flush timeout\n");
1114 goto done; 1096 goto done;
1115 } 1097 }
1116 cpu_relax(); 1098 cpu_relax();
1117 } 1099 }
1118 } while (readl(&dev->op_regs->endptstat) & flush_bit); 1100 } while (readl(&dev->op_regs->endptstat) & flush_bit);
1119done: 1101done:
1120 VDBG(dev, "<--- %s()\n", __func__); 1102 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1121} 1103}
1122 1104
1123 1105
@@ -1167,31 +1149,59 @@ static int langwell_get_frame(struct usb_gadget *_gadget)
1167 return -ENODEV; 1149 return -ENODEV;
1168 1150
1169 dev = container_of(_gadget, struct langwell_udc, gadget); 1151 dev = container_of(_gadget, struct langwell_udc, gadget);
1170 VDBG(dev, "---> %s()\n", __func__); 1152 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1171 1153
1172 retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK; 1154 retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
1173 1155
1174 VDBG(dev, "<--- %s()\n", __func__); 1156 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1175 return retval; 1157 return retval;
1176} 1158}
1177 1159
1178 1160
1161/* enter or exit PHY low power state */
1162static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
1163{
1164 u32 devlc;
1165 u8 devlc_byte2;
1166 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1167
1168 devlc = readl(&dev->op_regs->devlc);
1169 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1170
1171 if (flag)
1172 devlc |= LPM_PHCD;
1173 else
1174 devlc &= ~LPM_PHCD;
1175
1176 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1177 devlc_byte2 = (devlc >> 16) & 0xff;
1178 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1179
1180 devlc = readl(&dev->op_regs->devlc);
1181 dev_vdbg(&dev->pdev->dev,
1182 "%s PHY low power suspend, devlc = 0x%08x\n",
1183 flag ? "enter" : "exit", devlc);
1184}
1185
1186
1179/* tries to wake up the host connected to this gadget */ 1187/* tries to wake up the host connected to this gadget */
1180static int langwell_wakeup(struct usb_gadget *_gadget) 1188static int langwell_wakeup(struct usb_gadget *_gadget)
1181{ 1189{
1182 struct langwell_udc *dev; 1190 struct langwell_udc *dev;
1183 u32 portsc1, devlc; 1191 u32 portsc1;
1184 unsigned long flags; 1192 unsigned long flags;
1185 1193
1186 if (!_gadget) 1194 if (!_gadget)
1187 return 0; 1195 return 0;
1188 1196
1189 dev = container_of(_gadget, struct langwell_udc, gadget); 1197 dev = container_of(_gadget, struct langwell_udc, gadget);
1190 VDBG(dev, "---> %s()\n", __func__); 1198 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1191 1199
1192 /* Remote Wakeup feature not enabled by host */ 1200 /* remote wakeup feature not enabled by host */
1193 if (!dev->remote_wakeup) 1201 if (!dev->remote_wakeup) {
1202 dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
1194 return -ENOTSUPP; 1203 return -ENOTSUPP;
1204 }
1195 1205
1196 spin_lock_irqsave(&dev->lock, flags); 1206 spin_lock_irqsave(&dev->lock, flags);
1197 1207
@@ -1201,27 +1211,23 @@ static int langwell_wakeup(struct usb_gadget *_gadget)
1201 return 0; 1211 return 0;
1202 } 1212 }
1203 1213
1204 /* LPM L1 to L0, remote wakeup */ 1214 /* LPM L1 to L0 or legacy remote wakeup */
1205 if (dev->lpm && dev->lpm_state == LPM_L1) { 1215 if (dev->lpm && dev->lpm_state == LPM_L1)
1206 portsc1 |= PORTS_SLP; 1216 dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
1207 writel(portsc1, &dev->op_regs->portsc1); 1217 else
1208 } 1218 dev_info(&dev->pdev->dev, "device remote wakeup\n");
1209
1210 /* force port resume */
1211 if (dev->usb_state == USB_STATE_SUSPENDED) {
1212 portsc1 |= PORTS_FPR;
1213 writel(portsc1, &dev->op_regs->portsc1);
1214 }
1215 1219
1216 /* exit PHY low power suspend */ 1220 /* exit PHY low power suspend */
1217 devlc = readl(&dev->op_regs->devlc); 1221 if (dev->pdev->device != 0x0829)
1218 VDBG(dev, "devlc = 0x%08x\n", devlc); 1222 langwell_phy_low_power(dev, 0);
1219 devlc &= ~LPM_PHCD; 1223
1220 writel(devlc, &dev->op_regs->devlc); 1224 /* force port resume */
1225 portsc1 |= PORTS_FPR;
1226 writel(portsc1, &dev->op_regs->portsc1);
1221 1227
1222 spin_unlock_irqrestore(&dev->lock, flags); 1228 spin_unlock_irqrestore(&dev->lock, flags);
1223 1229
1224 VDBG(dev, "<--- %s()\n", __func__); 1230 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1225 return 0; 1231 return 0;
1226} 1232}
1227 1233
@@ -1231,16 +1237,17 @@ static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1231{ 1237{
1232 struct langwell_udc *dev; 1238 struct langwell_udc *dev;
1233 unsigned long flags; 1239 unsigned long flags;
1234 u32 usbcmd; 1240 u32 usbcmd;
1235 1241
1236 if (!_gadget) 1242 if (!_gadget)
1237 return -ENODEV; 1243 return -ENODEV;
1238 1244
1239 dev = container_of(_gadget, struct langwell_udc, gadget); 1245 dev = container_of(_gadget, struct langwell_udc, gadget);
1240 VDBG(dev, "---> %s()\n", __func__); 1246 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1241 1247
1242 spin_lock_irqsave(&dev->lock, flags); 1248 spin_lock_irqsave(&dev->lock, flags);
1243 VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off"); 1249 dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
1250 is_active ? "on" : "off");
1244 1251
1245 dev->vbus_active = (is_active != 0); 1252 dev->vbus_active = (is_active != 0);
1246 if (dev->driver && dev->softconnected && dev->vbus_active) { 1253 if (dev->driver && dev->softconnected && dev->vbus_active) {
@@ -1255,7 +1262,7 @@ static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1255 1262
1256 spin_unlock_irqrestore(&dev->lock, flags); 1263 spin_unlock_irqrestore(&dev->lock, flags);
1257 1264
1258 VDBG(dev, "<--- %s()\n", __func__); 1265 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1259 return 0; 1266 return 0;
1260} 1267}
1261 1268
@@ -1269,15 +1276,15 @@ static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1269 return -ENODEV; 1276 return -ENODEV;
1270 1277
1271 dev = container_of(_gadget, struct langwell_udc, gadget); 1278 dev = container_of(_gadget, struct langwell_udc, gadget);
1272 VDBG(dev, "---> %s()\n", __func__); 1279 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1273 1280
1274 if (dev->transceiver) { 1281 if (dev->transceiver) {
1275 VDBG(dev, "otg_set_power\n"); 1282 dev_vdbg(&dev->pdev->dev, "otg_set_power\n");
1276 VDBG(dev, "<--- %s()\n", __func__); 1283 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1277 return otg_set_power(dev->transceiver, mA); 1284 return otg_set_power(dev->transceiver, mA);
1278 } 1285 }
1279 1286
1280 VDBG(dev, "<--- %s()\n", __func__); 1287 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1281 return -ENOTSUPP; 1288 return -ENOTSUPP;
1282} 1289}
1283 1290
@@ -1286,15 +1293,15 @@ static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1286static int langwell_pullup(struct usb_gadget *_gadget, int is_on) 1293static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1287{ 1294{
1288 struct langwell_udc *dev; 1295 struct langwell_udc *dev;
1289 u32 usbcmd; 1296 u32 usbcmd;
1290 unsigned long flags; 1297 unsigned long flags;
1291 1298
1292 if (!_gadget) 1299 if (!_gadget)
1293 return -ENODEV; 1300 return -ENODEV;
1294 1301
1295 dev = container_of(_gadget, struct langwell_udc, gadget); 1302 dev = container_of(_gadget, struct langwell_udc, gadget);
1296 1303
1297 VDBG(dev, "---> %s()\n", __func__); 1304 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1298 1305
1299 spin_lock_irqsave(&dev->lock, flags); 1306 spin_lock_irqsave(&dev->lock, flags);
1300 dev->softconnected = (is_on != 0); 1307 dev->softconnected = (is_on != 0);
@@ -1310,7 +1317,7 @@ static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1310 } 1317 }
1311 spin_unlock_irqrestore(&dev->lock, flags); 1318 spin_unlock_irqrestore(&dev->lock, flags);
1312 1319
1313 VDBG(dev, "<--- %s()\n", __func__); 1320 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1314 return 0; 1321 return 0;
1315} 1322}
1316 1323
@@ -1346,12 +1353,13 @@ static const struct usb_gadget_ops langwell_ops = {
1346static int langwell_udc_reset(struct langwell_udc *dev) 1353static int langwell_udc_reset(struct langwell_udc *dev)
1347{ 1354{
1348 u32 usbcmd, usbmode, devlc, endpointlistaddr; 1355 u32 usbcmd, usbmode, devlc, endpointlistaddr;
1356 u8 devlc_byte0, devlc_byte2;
1349 unsigned long timeout; 1357 unsigned long timeout;
1350 1358
1351 if (!dev) 1359 if (!dev)
1352 return -EINVAL; 1360 return -EINVAL;
1353 1361
1354 DBG(dev, "---> %s()\n", __func__); 1362 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1355 1363
1356 /* set controller to stop state */ 1364 /* set controller to stop state */
1357 usbcmd = readl(&dev->op_regs->usbcmd); 1365 usbcmd = readl(&dev->op_regs->usbcmd);
@@ -1367,7 +1375,7 @@ static int langwell_udc_reset(struct langwell_udc *dev)
1367 timeout = jiffies + RESET_TIMEOUT; 1375 timeout = jiffies + RESET_TIMEOUT;
1368 while (readl(&dev->op_regs->usbcmd) & CMD_RST) { 1376 while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
1369 if (time_after(jiffies, timeout)) { 1377 if (time_after(jiffies, timeout)) {
1370 ERROR(dev, "device reset timeout\n"); 1378 dev_err(&dev->pdev->dev, "device reset timeout\n");
1371 return -ETIMEDOUT; 1379 return -ETIMEDOUT;
1372 } 1380 }
1373 cpu_relax(); 1381 cpu_relax();
@@ -1382,7 +1390,7 @@ static int langwell_udc_reset(struct langwell_udc *dev)
1382 1390
1383 writel(usbmode, &dev->op_regs->usbmode); 1391 writel(usbmode, &dev->op_regs->usbmode);
1384 usbmode = readl(&dev->op_regs->usbmode); 1392 usbmode = readl(&dev->op_regs->usbmode);
1385 VDBG(dev, "usbmode=0x%08x\n", usbmode); 1393 dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
1386 1394
1387 /* Write-Clear setup status */ 1395 /* Write-Clear setup status */
1388 writel(0, &dev->op_regs->usbsts); 1396 writel(0, &dev->op_regs->usbsts);
@@ -1390,9 +1398,17 @@ static int langwell_udc_reset(struct langwell_udc *dev)
1390 /* if support USB LPM, ACK all LPM token */ 1398 /* if support USB LPM, ACK all LPM token */
1391 if (dev->lpm) { 1399 if (dev->lpm) {
1392 devlc = readl(&dev->op_regs->devlc); 1400 devlc = readl(&dev->op_regs->devlc);
1401 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1402 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1393 devlc &= ~LPM_STL; /* don't STALL LPM token */ 1403 devlc &= ~LPM_STL; /* don't STALL LPM token */
1394 devlc &= ~LPM_NYT_ACK; /* ACK LPM token */ 1404 devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
1395 writel(devlc, &dev->op_regs->devlc); 1405 devlc_byte0 = devlc & 0xff;
1406 devlc_byte2 = (devlc >> 16) & 0xff;
1407 writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
1408 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1409 devlc = readl(&dev->op_regs->devlc);
1410 dev_vdbg(&dev->pdev->dev,
1411 "ACK LPM token, devlc = 0x%08x\n", devlc);
1396 } 1412 }
1397 1413
1398 /* fill endpointlistaddr register */ 1414 /* fill endpointlistaddr register */
@@ -1400,10 +1416,11 @@ static int langwell_udc_reset(struct langwell_udc *dev)
1400 endpointlistaddr &= ENDPOINTLISTADDR_MASK; 1416 endpointlistaddr &= ENDPOINTLISTADDR_MASK;
1401 writel(endpointlistaddr, &dev->op_regs->endpointlistaddr); 1417 writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
1402 1418
1403 VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n", 1419 dev_vdbg(&dev->pdev->dev,
1404 dev->ep_dqh, endpointlistaddr, 1420 "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
1405 readl(&dev->op_regs->endpointlistaddr)); 1421 dev->ep_dqh, endpointlistaddr,
1406 DBG(dev, "<--- %s()\n", __func__); 1422 readl(&dev->op_regs->endpointlistaddr));
1423 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1407 return 0; 1424 return 0;
1408} 1425}
1409 1426
@@ -1415,7 +1432,7 @@ static int eps_reinit(struct langwell_udc *dev)
1415 char name[14]; 1432 char name[14];
1416 int i; 1433 int i;
1417 1434
1418 VDBG(dev, "---> %s()\n", __func__); 1435 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1419 1436
1420 /* initialize ep0 */ 1437 /* initialize ep0 */
1421 ep = &dev->ep[0]; 1438 ep = &dev->ep[0];
@@ -1449,11 +1466,9 @@ static int eps_reinit(struct langwell_udc *dev)
1449 1466
1450 INIT_LIST_HEAD(&ep->queue); 1467 INIT_LIST_HEAD(&ep->queue);
1451 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); 1468 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
1452
1453 ep->dqh = &dev->ep_dqh[i];
1454 } 1469 }
1455 1470
1456 VDBG(dev, "<--- %s()\n", __func__); 1471 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1457 return 0; 1472 return 0;
1458} 1473}
1459 1474
@@ -1462,7 +1477,7 @@ static int eps_reinit(struct langwell_udc *dev)
1462static void langwell_udc_start(struct langwell_udc *dev) 1477static void langwell_udc_start(struct langwell_udc *dev)
1463{ 1478{
1464 u32 usbintr, usbcmd; 1479 u32 usbintr, usbcmd;
1465 DBG(dev, "---> %s()\n", __func__); 1480 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1466 1481
1467 /* enable interrupts */ 1482 /* enable interrupts */
1468 usbintr = INTR_ULPIE /* ULPI */ 1483 usbintr = INTR_ULPIE /* ULPI */
@@ -1485,8 +1500,7 @@ static void langwell_udc_start(struct langwell_udc *dev)
1485 usbcmd |= CMD_RUNSTOP; 1500 usbcmd |= CMD_RUNSTOP;
1486 writel(usbcmd, &dev->op_regs->usbcmd); 1501 writel(usbcmd, &dev->op_regs->usbcmd);
1487 1502
1488 DBG(dev, "<--- %s()\n", __func__); 1503 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1489 return;
1490} 1504}
1491 1505
1492 1506
@@ -1495,7 +1509,7 @@ static void langwell_udc_stop(struct langwell_udc *dev)
1495{ 1509{
1496 u32 usbcmd; 1510 u32 usbcmd;
1497 1511
1498 DBG(dev, "---> %s()\n", __func__); 1512 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1499 1513
1500 /* disable all interrupts */ 1514 /* disable all interrupts */
1501 writel(0, &dev->op_regs->usbintr); 1515 writel(0, &dev->op_regs->usbintr);
@@ -1508,8 +1522,7 @@ static void langwell_udc_stop(struct langwell_udc *dev)
1508 usbcmd &= ~CMD_RUNSTOP; 1522 usbcmd &= ~CMD_RUNSTOP;
1509 writel(usbcmd, &dev->op_regs->usbcmd); 1523 writel(usbcmd, &dev->op_regs->usbcmd);
1510 1524
1511 DBG(dev, "<--- %s()\n", __func__); 1525 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1512 return;
1513} 1526}
1514 1527
1515 1528
@@ -1518,7 +1531,7 @@ static void stop_activity(struct langwell_udc *dev,
1518 struct usb_gadget_driver *driver) 1531 struct usb_gadget_driver *driver)
1519{ 1532{
1520 struct langwell_ep *ep; 1533 struct langwell_ep *ep;
1521 DBG(dev, "---> %s()\n", __func__); 1534 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1522 1535
1523 nuke(&dev->ep[0], -ESHUTDOWN); 1536 nuke(&dev->ep[0], -ESHUTDOWN);
1524 1537
@@ -1533,7 +1546,7 @@ static void stop_activity(struct langwell_udc *dev,
1533 spin_lock(&dev->lock); 1546 spin_lock(&dev->lock);
1534 } 1547 }
1535 1548
1536 DBG(dev, "<--- %s()\n", __func__); 1549 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1537} 1550}
1538 1551
1539 1552
@@ -1659,13 +1672,15 @@ static ssize_t show_langwell_udc(struct device *_dev,
1659 "Over-current Change: %s\n" 1672 "Over-current Change: %s\n"
1660 "Port Enable/Disable Change: %s\n" 1673 "Port Enable/Disable Change: %s\n"
1661 "Port Enabled/Disabled: %s\n" 1674 "Port Enabled/Disabled: %s\n"
1662 "Current Connect Status: %s\n\n", 1675 "Current Connect Status: %s\n"
1676 "LPM Suspend Status: %s\n\n",
1663 (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset", 1677 (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
1664 (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend", 1678 (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
1665 (tmp_reg & PORTS_OCC) ? "Detected" : "No", 1679 (tmp_reg & PORTS_OCC) ? "Detected" : "No",
1666 (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed", 1680 (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
1667 (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct", 1681 (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
1668 (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached"); 1682 (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached",
1683 (tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
1669 size -= t; 1684 size -= t;
1670 next += t; 1685 next += t;
1671 1686
@@ -1676,7 +1691,7 @@ static ssize_t show_langwell_udc(struct device *_dev,
1676 "Serial Transceiver : %d\n" 1691 "Serial Transceiver : %d\n"
1677 "Port Speed: %s\n" 1692 "Port Speed: %s\n"
1678 "Port Force Full Speed Connenct: %s\n" 1693 "Port Force Full Speed Connenct: %s\n"
1679 "PHY Low Power Suspend Clock Disable: %s\n" 1694 "PHY Low Power Suspend Clock: %s\n"
1680 "BmAttributes: %d\n\n", 1695 "BmAttributes: %d\n\n",
1681 LPM_PTS(tmp_reg), 1696 LPM_PTS(tmp_reg),
1682 (tmp_reg & LPM_STS) ? 1 : 0, 1697 (tmp_reg & LPM_STS) ? 1 : 0,
@@ -1797,6 +1812,36 @@ static ssize_t show_langwell_udc(struct device *_dev,
1797static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL); 1812static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1798 1813
1799 1814
1815/* device "remote_wakeup" sysfs attribute file */
1816static ssize_t store_remote_wakeup(struct device *_dev,
1817 struct device_attribute *attr, const char *buf, size_t count)
1818{
1819 struct langwell_udc *dev = the_controller;
1820 unsigned long flags;
1821 ssize_t rc = count;
1822
1823 if (count > 2)
1824 return -EINVAL;
1825
1826 if (count > 0 && buf[count-1] == '\n')
1827 ((char *) buf)[count-1] = 0;
1828
1829 if (buf[0] != '1')
1830 return -EINVAL;
1831
1832 /* force remote wakeup enabled in case gadget driver doesn't support */
1833 spin_lock_irqsave(&dev->lock, flags);
1834 dev->remote_wakeup = 1;
1835 dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
1836 spin_unlock_irqrestore(&dev->lock, flags);
1837
1838 langwell_wakeup(&dev->gadget);
1839
1840 return rc;
1841}
1842static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
1843
1844
1800/*-------------------------------------------------------------------------*/ 1845/*-------------------------------------------------------------------------*/
1801 1846
1802/* 1847/*
@@ -1807,7 +1852,8 @@ static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1807 * the driver might get unbound. 1852 * the driver might get unbound.
1808 */ 1853 */
1809 1854
1810int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1855int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1856 int (*bind)(struct usb_gadget *))
1811{ 1857{
1812 struct langwell_udc *dev = the_controller; 1858 struct langwell_udc *dev = the_controller;
1813 unsigned long flags; 1859 unsigned long flags;
@@ -1816,7 +1862,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1816 if (!dev) 1862 if (!dev)
1817 return -ENODEV; 1863 return -ENODEV;
1818 1864
1819 DBG(dev, "---> %s()\n", __func__); 1865 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1820 1866
1821 if (dev->driver) 1867 if (dev->driver)
1822 return -EBUSY; 1868 return -EBUSY;
@@ -1830,9 +1876,9 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1830 1876
1831 spin_unlock_irqrestore(&dev->lock, flags); 1877 spin_unlock_irqrestore(&dev->lock, flags);
1832 1878
1833 retval = driver->bind(&dev->gadget); 1879 retval = bind(&dev->gadget);
1834 if (retval) { 1880 if (retval) {
1835 DBG(dev, "bind to driver %s --> %d\n", 1881 dev_dbg(&dev->pdev->dev, "bind to driver %s --> %d\n",
1836 driver->driver.name, retval); 1882 driver->driver.name, retval);
1837 dev->driver = NULL; 1883 dev->driver = NULL;
1838 dev->gadget.dev.driver = NULL; 1884 dev->gadget.dev.driver = NULL;
@@ -1851,13 +1897,13 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1851 if (dev->got_irq) 1897 if (dev->got_irq)
1852 langwell_udc_start(dev); 1898 langwell_udc_start(dev);
1853 1899
1854 VDBG(dev, "After langwell_udc_start(), print all registers:\n"); 1900 dev_vdbg(&dev->pdev->dev,
1855#ifdef VERBOSE 1901 "After langwell_udc_start(), print all registers:\n");
1856 print_all_registers(dev); 1902 print_all_registers(dev);
1857#endif
1858 1903
1859 INFO(dev, "register driver: %s\n", driver->driver.name); 1904 dev_info(&dev->pdev->dev, "register driver: %s\n",
1860 VDBG(dev, "<--- %s()\n", __func__); 1905 driver->driver.name);
1906 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1861 return 0; 1907 return 0;
1862 1908
1863err_unbind: 1909err_unbind:
@@ -1865,10 +1911,10 @@ err_unbind:
1865 dev->gadget.dev.driver = NULL; 1911 dev->gadget.dev.driver = NULL;
1866 dev->driver = NULL; 1912 dev->driver = NULL;
1867 1913
1868 DBG(dev, "<--- %s()\n", __func__); 1914 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1869 return retval; 1915 return retval;
1870} 1916}
1871EXPORT_SYMBOL(usb_gadget_register_driver); 1917EXPORT_SYMBOL(usb_gadget_probe_driver);
1872 1918
1873 1919
1874/* unregister gadget driver */ 1920/* unregister gadget driver */
@@ -1880,11 +1926,15 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1880 if (!dev) 1926 if (!dev)
1881 return -ENODEV; 1927 return -ENODEV;
1882 1928
1883 DBG(dev, "---> %s()\n", __func__); 1929 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1884 1930
1885 if (unlikely(!driver || !driver->bind || !driver->unbind)) 1931 if (unlikely(!driver || !driver->unbind))
1886 return -EINVAL; 1932 return -EINVAL;
1887 1933
1934 /* exit PHY low power suspend */
1935 if (dev->pdev->device != 0x0829)
1936 langwell_phy_low_power(dev, 0);
1937
1888 /* unbind OTG transceiver */ 1938 /* unbind OTG transceiver */
1889 if (dev->transceiver) 1939 if (dev->transceiver)
1890 (void)otg_set_peripheral(dev->transceiver, 0); 1940 (void)otg_set_peripheral(dev->transceiver, 0);
@@ -1910,8 +1960,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1910 1960
1911 device_remove_file(&dev->pdev->dev, &dev_attr_function); 1961 device_remove_file(&dev->pdev->dev, &dev_attr_function);
1912 1962
1913 INFO(dev, "unregistered driver '%s'\n", driver->driver.name); 1963 dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
1914 DBG(dev, "<--- %s()\n", __func__); 1964 driver->driver.name);
1965 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1915 return 0; 1966 return 0;
1916} 1967}
1917EXPORT_SYMBOL(usb_gadget_unregister_driver); 1968EXPORT_SYMBOL(usb_gadget_unregister_driver);
@@ -1930,7 +1981,7 @@ static void setup_tripwire(struct langwell_udc *dev)
1930 unsigned long timeout; 1981 unsigned long timeout;
1931 struct langwell_dqh *dqh; 1982 struct langwell_dqh *dqh;
1932 1983
1933 VDBG(dev, "---> %s()\n", __func__); 1984 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1934 1985
1935 /* ep0 OUT dQH */ 1986 /* ep0 OUT dQH */
1936 dqh = &dev->ep_dqh[EP_DIR_OUT]; 1987 dqh = &dev->ep_dqh[EP_DIR_OUT];
@@ -1943,7 +1994,7 @@ static void setup_tripwire(struct langwell_udc *dev)
1943 timeout = jiffies + SETUPSTAT_TIMEOUT; 1994 timeout = jiffies + SETUPSTAT_TIMEOUT;
1944 while (readl(&dev->op_regs->endptsetupstat)) { 1995 while (readl(&dev->op_regs->endptsetupstat)) {
1945 if (time_after(jiffies, timeout)) { 1996 if (time_after(jiffies, timeout)) {
1946 ERROR(dev, "setup_tripwire timeout\n"); 1997 dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
1947 break; 1998 break;
1948 } 1999 }
1949 cpu_relax(); 2000 cpu_relax();
@@ -1963,7 +2014,7 @@ static void setup_tripwire(struct langwell_udc *dev)
1963 usbcmd = readl(&dev->op_regs->usbcmd); 2014 usbcmd = readl(&dev->op_regs->usbcmd);
1964 writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd); 2015 writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
1965 2016
1966 VDBG(dev, "<--- %s()\n", __func__); 2017 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1967} 2018}
1968 2019
1969 2020
@@ -1972,7 +2023,7 @@ static void ep0_stall(struct langwell_udc *dev)
1972{ 2023{
1973 u32 endptctrl; 2024 u32 endptctrl;
1974 2025
1975 VDBG(dev, "---> %s()\n", __func__); 2026 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1976 2027
1977 /* set TX and RX to stall */ 2028 /* set TX and RX to stall */
1978 endptctrl = readl(&dev->op_regs->endptctrl[0]); 2029 endptctrl = readl(&dev->op_regs->endptctrl[0]);
@@ -1983,7 +2034,7 @@ static void ep0_stall(struct langwell_udc *dev)
1983 dev->ep0_state = WAIT_FOR_SETUP; 2034 dev->ep0_state = WAIT_FOR_SETUP;
1984 dev->ep0_dir = USB_DIR_OUT; 2035 dev->ep0_dir = USB_DIR_OUT;
1985 2036
1986 VDBG(dev, "<--- %s()\n", __func__); 2037 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1987} 2038}
1988 2039
1989 2040
@@ -1994,7 +2045,7 @@ static int prime_status_phase(struct langwell_udc *dev, int dir)
1994 struct langwell_ep *ep; 2045 struct langwell_ep *ep;
1995 int status = 0; 2046 int status = 0;
1996 2047
1997 VDBG(dev, "---> %s()\n", __func__); 2048 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1998 2049
1999 if (dir == EP_DIR_IN) 2050 if (dir == EP_DIR_IN)
2000 dev->ep0_dir = USB_DIR_IN; 2051 dev->ep0_dir = USB_DIR_IN;
@@ -2019,11 +2070,11 @@ static int prime_status_phase(struct langwell_udc *dev, int dir)
2019 return -ENOMEM; 2070 return -ENOMEM;
2020 2071
2021 if (status) 2072 if (status)
2022 ERROR(dev, "can't queue ep0 status request\n"); 2073 dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
2023 2074
2024 list_add_tail(&req->queue, &ep->queue); 2075 list_add_tail(&req->queue, &ep->queue);
2025 2076
2026 VDBG(dev, "<--- %s()\n", __func__); 2077 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2027 return status; 2078 return status;
2028} 2079}
2029 2080
@@ -2032,11 +2083,11 @@ static int prime_status_phase(struct langwell_udc *dev, int dir)
2032static void set_address(struct langwell_udc *dev, u16 value, 2083static void set_address(struct langwell_udc *dev, u16 value,
2033 u16 index, u16 length) 2084 u16 index, u16 length)
2034{ 2085{
2035 VDBG(dev, "---> %s()\n", __func__); 2086 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2036 2087
2037 /* save the new address to device struct */ 2088 /* save the new address to device struct */
2038 dev->dev_addr = (u8) value; 2089 dev->dev_addr = (u8) value;
2039 VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr); 2090 dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
2040 2091
2041 /* update usb state */ 2092 /* update usb state */
2042 dev->usb_state = USB_STATE_ADDRESS; 2093 dev->usb_state = USB_STATE_ADDRESS;
@@ -2045,7 +2096,7 @@ static void set_address(struct langwell_udc *dev, u16 value,
2045 if (prime_status_phase(dev, EP_DIR_IN)) 2096 if (prime_status_phase(dev, EP_DIR_IN))
2046 ep0_stall(dev); 2097 ep0_stall(dev);
2047 2098
2048 VDBG(dev, "<--- %s()\n", __func__); 2099 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2049} 2100}
2050 2101
2051 2102
@@ -2054,7 +2105,7 @@ static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2054 u16 wIndex) 2105 u16 wIndex)
2055{ 2106{
2056 struct langwell_ep *ep; 2107 struct langwell_ep *ep;
2057 VDBG(dev, "---> %s()\n", __func__); 2108 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2058 2109
2059 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 2110 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2060 return &dev->ep[0]; 2111 return &dev->ep[0];
@@ -2073,7 +2124,7 @@ static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2073 return ep; 2124 return ep;
2074 } 2125 }
2075 2126
2076 VDBG(dev, "<--- %s()\n", __func__); 2127 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2077 return NULL; 2128 return NULL;
2078} 2129}
2079 2130
@@ -2085,7 +2136,7 @@ static int ep_is_stall(struct langwell_ep *ep)
2085 u32 endptctrl; 2136 u32 endptctrl;
2086 int retval; 2137 int retval;
2087 2138
2088 VDBG(dev, "---> %s()\n", __func__); 2139 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2089 2140
2090 endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]); 2141 endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
2091 if (is_in(ep)) 2142 if (is_in(ep))
@@ -2093,7 +2144,7 @@ static int ep_is_stall(struct langwell_ep *ep)
2093 else 2144 else
2094 retval = endptctrl & EPCTRL_RXS ? 1 : 0; 2145 retval = endptctrl & EPCTRL_RXS ? 1 : 0;
2095 2146
2096 VDBG(dev, "<--- %s()\n", __func__); 2147 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2097 return retval; 2148 return retval;
2098} 2149}
2099 2150
@@ -2107,14 +2158,13 @@ static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2107 u16 status_data = 0; /* 16 bits cpu view status data */ 2158 u16 status_data = 0; /* 16 bits cpu view status data */
2108 int status = 0; 2159 int status = 0;
2109 2160
2110 VDBG(dev, "---> %s()\n", __func__); 2161 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2111 2162
2112 ep = &dev->ep[0]; 2163 ep = &dev->ep[0];
2113 2164
2114 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) { 2165 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
2115 /* get device status */ 2166 /* get device status */
2116 status_data = 1 << USB_DEVICE_SELF_POWERED; 2167 status_data = dev->dev_status;
2117 status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
2118 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) { 2168 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
2119 /* get interface status */ 2169 /* get interface status */
2120 status_data = 0; 2170 status_data = 0;
@@ -2129,6 +2179,8 @@ static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2129 status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT; 2179 status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
2130 } 2180 }
2131 2181
2182 dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
2183
2132 dev->ep0_dir = USB_DIR_IN; 2184 dev->ep0_dir = USB_DIR_IN;
2133 2185
2134 /* borrow the per device status_req */ 2186 /* borrow the per device status_req */
@@ -2150,18 +2202,19 @@ static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2150 goto stall; 2202 goto stall;
2151 2203
2152 if (status) { 2204 if (status) {
2153 ERROR(dev, "response error on GET_STATUS request\n"); 2205 dev_err(&dev->pdev->dev,
2206 "response error on GET_STATUS request\n");
2154 goto stall; 2207 goto stall;
2155 } 2208 }
2156 2209
2157 list_add_tail(&req->queue, &ep->queue); 2210 list_add_tail(&req->queue, &ep->queue);
2158 dev->ep0_state = DATA_STATE_XMIT; 2211 dev->ep0_state = DATA_STATE_XMIT;
2159 2212
2160 VDBG(dev, "<--- %s()\n", __func__); 2213 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2161 return; 2214 return;
2162stall: 2215stall:
2163 ep0_stall(dev); 2216 ep0_stall(dev);
2164 VDBG(dev, "<--- %s()\n", __func__); 2217 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2165} 2218}
2166 2219
2167 2220
@@ -2173,12 +2226,12 @@ static void handle_setup_packet(struct langwell_udc *dev,
2173 u16 wIndex = le16_to_cpu(setup->wIndex); 2226 u16 wIndex = le16_to_cpu(setup->wIndex);
2174 u16 wLength = le16_to_cpu(setup->wLength); 2227 u16 wLength = le16_to_cpu(setup->wLength);
2175 2228
2176 VDBG(dev, "---> %s()\n", __func__); 2229 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2177 2230
2178 /* ep0 fifo flush */ 2231 /* ep0 fifo flush */
2179 nuke(&dev->ep[0], -ESHUTDOWN); 2232 nuke(&dev->ep[0], -ESHUTDOWN);
2180 2233
2181 DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", 2234 dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
2182 setup->bRequestType, setup->bRequest, 2235 setup->bRequestType, setup->bRequest,
2183 wValue, wIndex, wLength); 2236 wValue, wIndex, wLength);
2184 2237
@@ -2197,7 +2250,7 @@ static void handle_setup_packet(struct langwell_udc *dev,
2197 /* We process some stardard setup requests here */ 2250 /* We process some stardard setup requests here */
2198 switch (setup->bRequest) { 2251 switch (setup->bRequest) {
2199 case USB_REQ_GET_STATUS: 2252 case USB_REQ_GET_STATUS:
2200 DBG(dev, "SETUP: USB_REQ_GET_STATUS\n"); 2253 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
2201 /* get status, DATA and STATUS phase */ 2254 /* get status, DATA and STATUS phase */
2202 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) 2255 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2203 != (USB_DIR_IN | USB_TYPE_STANDARD)) 2256 != (USB_DIR_IN | USB_TYPE_STANDARD))
@@ -2206,7 +2259,7 @@ static void handle_setup_packet(struct langwell_udc *dev,
2206 goto end; 2259 goto end;
2207 2260
2208 case USB_REQ_SET_ADDRESS: 2261 case USB_REQ_SET_ADDRESS:
2209 DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n"); 2262 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
2210 /* STATUS phase */ 2263 /* STATUS phase */
2211 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD 2264 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
2212 | USB_RECIP_DEVICE)) 2265 | USB_RECIP_DEVICE))
@@ -2220,9 +2273,11 @@ static void handle_setup_packet(struct langwell_udc *dev,
2220 { 2273 {
2221 int rc = -EOPNOTSUPP; 2274 int rc = -EOPNOTSUPP;
2222 if (setup->bRequest == USB_REQ_SET_FEATURE) 2275 if (setup->bRequest == USB_REQ_SET_FEATURE)
2223 DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n"); 2276 dev_dbg(&dev->pdev->dev,
2277 "SETUP: USB_REQ_SET_FEATURE\n");
2224 else if (setup->bRequest == USB_REQ_CLEAR_FEATURE) 2278 else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
2225 DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n"); 2279 dev_dbg(&dev->pdev->dev,
2280 "SETUP: USB_REQ_CLEAR_FEATURE\n");
2226 2281
2227 if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK)) 2282 if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
2228 == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) { 2283 == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
@@ -2240,13 +2295,29 @@ static void handle_setup_packet(struct langwell_udc *dev,
2240 2295
2241 spin_unlock(&dev->lock); 2296 spin_unlock(&dev->lock);
2242 rc = langwell_ep_set_halt(&epn->ep, 2297 rc = langwell_ep_set_halt(&epn->ep,
2243 (setup->bRequest == USB_REQ_SET_FEATURE) 2298 (setup->bRequest == USB_REQ_SET_FEATURE)
2244 ? 1 : 0); 2299 ? 1 : 0);
2245 spin_lock(&dev->lock); 2300 spin_lock(&dev->lock);
2246 2301
2247 } else if ((setup->bRequestType & (USB_RECIP_MASK 2302 } else if ((setup->bRequestType & (USB_RECIP_MASK
2248 | USB_TYPE_MASK)) == (USB_RECIP_DEVICE 2303 | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
2249 | USB_TYPE_STANDARD)) { 2304 | USB_TYPE_STANDARD)) {
2305 rc = 0;
2306 switch (wValue) {
2307 case USB_DEVICE_REMOTE_WAKEUP:
2308 if (setup->bRequest == USB_REQ_SET_FEATURE) {
2309 dev->remote_wakeup = 1;
2310 dev->dev_status |= (1 << wValue);
2311 } else {
2312 dev->remote_wakeup = 0;
2313 dev->dev_status &= ~(1 << wValue);
2314 }
2315 break;
2316 default:
2317 rc = -EOPNOTSUPP;
2318 break;
2319 }
2320
2250 if (!gadget_is_otg(&dev->gadget)) 2321 if (!gadget_is_otg(&dev->gadget))
2251 break; 2322 break;
2252 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) { 2323 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
@@ -2262,7 +2333,6 @@ static void handle_setup_packet(struct langwell_udc *dev,
2262 dev->gadget.a_alt_hnp_support = 1; 2333 dev->gadget.a_alt_hnp_support = 1;
2263 else 2334 else
2264 break; 2335 break;
2265 rc = 0;
2266 } else 2336 } else
2267 break; 2337 break;
2268 2338
@@ -2274,31 +2344,38 @@ static void handle_setup_packet(struct langwell_udc *dev,
2274 } 2344 }
2275 2345
2276 case USB_REQ_GET_DESCRIPTOR: 2346 case USB_REQ_GET_DESCRIPTOR:
2277 DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n"); 2347 dev_dbg(&dev->pdev->dev,
2348 "SETUP: USB_REQ_GET_DESCRIPTOR\n");
2278 goto delegate; 2349 goto delegate;
2279 2350
2280 case USB_REQ_SET_DESCRIPTOR: 2351 case USB_REQ_SET_DESCRIPTOR:
2281 DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n"); 2352 dev_dbg(&dev->pdev->dev,
2353 "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
2282 goto delegate; 2354 goto delegate;
2283 2355
2284 case USB_REQ_GET_CONFIGURATION: 2356 case USB_REQ_GET_CONFIGURATION:
2285 DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n"); 2357 dev_dbg(&dev->pdev->dev,
2358 "SETUP: USB_REQ_GET_CONFIGURATION\n");
2286 goto delegate; 2359 goto delegate;
2287 2360
2288 case USB_REQ_SET_CONFIGURATION: 2361 case USB_REQ_SET_CONFIGURATION:
2289 DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n"); 2362 dev_dbg(&dev->pdev->dev,
2363 "SETUP: USB_REQ_SET_CONFIGURATION\n");
2290 goto delegate; 2364 goto delegate;
2291 2365
2292 case USB_REQ_GET_INTERFACE: 2366 case USB_REQ_GET_INTERFACE:
2293 DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n"); 2367 dev_dbg(&dev->pdev->dev,
2368 "SETUP: USB_REQ_GET_INTERFACE\n");
2294 goto delegate; 2369 goto delegate;
2295 2370
2296 case USB_REQ_SET_INTERFACE: 2371 case USB_REQ_SET_INTERFACE:
2297 DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n"); 2372 dev_dbg(&dev->pdev->dev,
2373 "SETUP: USB_REQ_SET_INTERFACE\n");
2298 goto delegate; 2374 goto delegate;
2299 2375
2300 case USB_REQ_SYNCH_FRAME: 2376 case USB_REQ_SYNCH_FRAME:
2301 DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n"); 2377 dev_dbg(&dev->pdev->dev,
2378 "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
2302 goto delegate; 2379 goto delegate;
2303 2380
2304 default: 2381 default:
@@ -2310,7 +2387,8 @@ delegate:
2310 /* DATA phase from gadget, STATUS phase from udc */ 2387 /* DATA phase from gadget, STATUS phase from udc */
2311 dev->ep0_dir = (setup->bRequestType & USB_DIR_IN) 2388 dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
2312 ? USB_DIR_IN : USB_DIR_OUT; 2389 ? USB_DIR_IN : USB_DIR_OUT;
2313 VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n", 2390 dev_vdbg(&dev->pdev->dev,
2391 "dev->ep0_dir = 0x%x, wLength = %d\n",
2314 dev->ep0_dir, wLength); 2392 dev->ep0_dir, wLength);
2315 spin_unlock(&dev->lock); 2393 spin_unlock(&dev->lock);
2316 if (dev->driver->setup(&dev->gadget, 2394 if (dev->driver->setup(&dev->gadget,
@@ -2322,7 +2400,8 @@ delegate:
2322 } else { 2400 } else {
2323 /* no DATA phase, IN STATUS phase from gadget */ 2401 /* no DATA phase, IN STATUS phase from gadget */
2324 dev->ep0_dir = USB_DIR_IN; 2402 dev->ep0_dir = USB_DIR_IN;
2325 VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n", 2403 dev_vdbg(&dev->pdev->dev,
2404 "dev->ep0_dir = 0x%x, wLength = %d\n",
2326 dev->ep0_dir, wLength); 2405 dev->ep0_dir, wLength);
2327 spin_unlock(&dev->lock); 2406 spin_unlock(&dev->lock);
2328 if (dev->driver->setup(&dev->gadget, 2407 if (dev->driver->setup(&dev->gadget,
@@ -2334,8 +2413,7 @@ delegate:
2334 break; 2413 break;
2335 } 2414 }
2336end: 2415end:
2337 VDBG(dev, "<--- %s()\n", __func__); 2416 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2338 return;
2339} 2417}
2340 2418
2341 2419
@@ -2359,23 +2437,27 @@ static int process_ep_req(struct langwell_udc *dev, int index,
2359 td_complete = 0; 2437 td_complete = 0;
2360 actual = curr_req->req.length; 2438 actual = curr_req->req.length;
2361 2439
2362 VDBG(dev, "---> %s()\n", __func__); 2440 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2363 2441
2364 for (i = 0; i < curr_req->dtd_count; i++) { 2442 for (i = 0; i < curr_req->dtd_count; i++) {
2365 remaining_length = le16_to_cpu(curr_dtd->dtd_total);
2366 actual -= remaining_length;
2367 2443
2368 /* command execution states by dTD */ 2444 /* command execution states by dTD */
2369 dtd_status = curr_dtd->dtd_status; 2445 dtd_status = curr_dtd->dtd_status;
2370 2446
2447 barrier();
2448 remaining_length = le16_to_cpu(curr_dtd->dtd_total);
2449 actual -= remaining_length;
2450
2371 if (!dtd_status) { 2451 if (!dtd_status) {
2372 /* transfers completed successfully */ 2452 /* transfers completed successfully */
2373 if (!remaining_length) { 2453 if (!remaining_length) {
2374 td_complete++; 2454 td_complete++;
2375 VDBG(dev, "dTD transmitted successfully\n"); 2455 dev_vdbg(&dev->pdev->dev,
2456 "dTD transmitted successfully\n");
2376 } else { 2457 } else {
2377 if (dir) { 2458 if (dir) {
2378 VDBG(dev, "TX dTD remains data\n"); 2459 dev_vdbg(&dev->pdev->dev,
2460 "TX dTD remains data\n");
2379 retval = -EPROTO; 2461 retval = -EPROTO;
2380 break; 2462 break;
2381 2463
@@ -2387,27 +2469,32 @@ static int process_ep_req(struct langwell_udc *dev, int index,
2387 } else { 2469 } else {
2388 /* transfers completed with errors */ 2470 /* transfers completed with errors */
2389 if (dtd_status & DTD_STS_ACTIVE) { 2471 if (dtd_status & DTD_STS_ACTIVE) {
2390 DBG(dev, "request not completed\n"); 2472 dev_dbg(&dev->pdev->dev,
2473 "dTD status ACTIVE dQH[%d]\n", index);
2391 retval = 1; 2474 retval = 1;
2392 return retval; 2475 return retval;
2393 } else if (dtd_status & DTD_STS_HALTED) { 2476 } else if (dtd_status & DTD_STS_HALTED) {
2394 ERROR(dev, "dTD error %08x dQH[%d]\n", 2477 dev_err(&dev->pdev->dev,
2395 dtd_status, index); 2478 "dTD error %08x dQH[%d]\n",
2479 dtd_status, index);
2396 /* clear the errors and halt condition */ 2480 /* clear the errors and halt condition */
2397 curr_dqh->dtd_status = 0; 2481 curr_dqh->dtd_status = 0;
2398 retval = -EPIPE; 2482 retval = -EPIPE;
2399 break; 2483 break;
2400 } else if (dtd_status & DTD_STS_DBE) { 2484 } else if (dtd_status & DTD_STS_DBE) {
2401 DBG(dev, "data buffer (overflow) error\n"); 2485 dev_dbg(&dev->pdev->dev,
2486 "data buffer (overflow) error\n");
2402 retval = -EPROTO; 2487 retval = -EPROTO;
2403 break; 2488 break;
2404 } else if (dtd_status & DTD_STS_TRE) { 2489 } else if (dtd_status & DTD_STS_TRE) {
2405 DBG(dev, "transaction(ISO) error\n"); 2490 dev_dbg(&dev->pdev->dev,
2491 "transaction(ISO) error\n");
2406 retval = -EILSEQ; 2492 retval = -EILSEQ;
2407 break; 2493 break;
2408 } else 2494 } else
2409 ERROR(dev, "unknown error (0x%x)!\n", 2495 dev_err(&dev->pdev->dev,
2410 dtd_status); 2496 "unknown error (0x%x)!\n",
2497 dtd_status);
2411 } 2498 }
2412 2499
2413 if (i != curr_req->dtd_count - 1) 2500 if (i != curr_req->dtd_count - 1)
@@ -2420,7 +2507,7 @@ static int process_ep_req(struct langwell_udc *dev, int index,
2420 2507
2421 curr_req->req.actual = actual; 2508 curr_req->req.actual = actual;
2422 2509
2423 VDBG(dev, "<--- %s()\n", __func__); 2510 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2424 return 0; 2511 return 0;
2425} 2512}
2426 2513
@@ -2430,7 +2517,7 @@ static void ep0_req_complete(struct langwell_udc *dev,
2430 struct langwell_ep *ep0, struct langwell_request *req) 2517 struct langwell_ep *ep0, struct langwell_request *req)
2431{ 2518{
2432 u32 new_addr; 2519 u32 new_addr;
2433 VDBG(dev, "---> %s()\n", __func__); 2520 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2434 2521
2435 if (dev->usb_state == USB_STATE_ADDRESS) { 2522 if (dev->usb_state == USB_STATE_ADDRESS) {
2436 /* set the new address */ 2523 /* set the new address */
@@ -2438,7 +2525,7 @@ static void ep0_req_complete(struct langwell_udc *dev,
2438 writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr); 2525 writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
2439 2526
2440 new_addr = USBADR(readl(&dev->op_regs->deviceaddr)); 2527 new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
2441 VDBG(dev, "new_addr = %d\n", new_addr); 2528 dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
2442 } 2529 }
2443 2530
2444 done(ep0, req, 0); 2531 done(ep0, req, 0);
@@ -2458,14 +2545,14 @@ static void ep0_req_complete(struct langwell_udc *dev,
2458 dev->ep0_state = WAIT_FOR_SETUP; 2545 dev->ep0_state = WAIT_FOR_SETUP;
2459 break; 2546 break;
2460 case WAIT_FOR_SETUP: 2547 case WAIT_FOR_SETUP:
2461 ERROR(dev, "unexpect ep0 packets\n"); 2548 dev_err(&dev->pdev->dev, "unexpect ep0 packets\n");
2462 break; 2549 break;
2463 default: 2550 default:
2464 ep0_stall(dev); 2551 ep0_stall(dev);
2465 break; 2552 break;
2466 } 2553 }
2467 2554
2468 VDBG(dev, "<--- %s()\n", __func__); 2555 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2469} 2556}
2470 2557
2471 2558
@@ -2477,16 +2564,17 @@ static void handle_trans_complete(struct langwell_udc *dev)
2477 struct langwell_ep *epn; 2564 struct langwell_ep *epn;
2478 struct langwell_request *curr_req, *temp_req; 2565 struct langwell_request *curr_req, *temp_req;
2479 2566
2480 VDBG(dev, "---> %s()\n", __func__); 2567 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2481 2568
2482 complete_bits = readl(&dev->op_regs->endptcomplete); 2569 complete_bits = readl(&dev->op_regs->endptcomplete);
2483 VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits); 2570 dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
2571 complete_bits);
2484 2572
2485 /* Write-Clear the bits in endptcomplete register */ 2573 /* Write-Clear the bits in endptcomplete register */
2486 writel(complete_bits, &dev->op_regs->endptcomplete); 2574 writel(complete_bits, &dev->op_regs->endptcomplete);
2487 2575
2488 if (!complete_bits) { 2576 if (!complete_bits) {
2489 DBG(dev, "complete_bits = 0\n"); 2577 dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
2490 goto done; 2578 goto done;
2491 } 2579 }
2492 2580
@@ -2506,23 +2594,25 @@ static void handle_trans_complete(struct langwell_udc *dev)
2506 epn = &dev->ep[i]; 2594 epn = &dev->ep[i];
2507 2595
2508 if (epn->name == NULL) { 2596 if (epn->name == NULL) {
2509 WARNING(dev, "invalid endpoint\n"); 2597 dev_warn(&dev->pdev->dev, "invalid endpoint\n");
2510 continue; 2598 continue;
2511 } 2599 }
2512 2600
2513 if (i < 2) 2601 if (i < 2)
2514 /* ep0 in and out */ 2602 /* ep0 in and out */
2515 DBG(dev, "%s-%s transfer completed\n", 2603 dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
2516 epn->name, 2604 epn->name,
2517 is_in(epn) ? "in" : "out"); 2605 is_in(epn) ? "in" : "out");
2518 else 2606 else
2519 DBG(dev, "%s transfer completed\n", epn->name); 2607 dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
2608 epn->name);
2520 2609
2521 /* process the req queue until an uncomplete request */ 2610 /* process the req queue until an uncomplete request */
2522 list_for_each_entry_safe(curr_req, temp_req, 2611 list_for_each_entry_safe(curr_req, temp_req,
2523 &epn->queue, queue) { 2612 &epn->queue, queue) {
2524 status = process_ep_req(dev, i, curr_req); 2613 status = process_ep_req(dev, i, curr_req);
2525 VDBG(dev, "%s req status: %d\n", epn->name, status); 2614 dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
2615 epn->name, status);
2526 2616
2527 if (status) 2617 if (status)
2528 break; 2618 break;
@@ -2540,8 +2630,7 @@ static void handle_trans_complete(struct langwell_udc *dev)
2540 } 2630 }
2541 } 2631 }
2542done: 2632done:
2543 VDBG(dev, "<--- %s()\n", __func__); 2633 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2544 return;
2545} 2634}
2546 2635
2547 2636
@@ -2551,14 +2640,14 @@ static void handle_port_change(struct langwell_udc *dev)
2551 u32 portsc1, devlc; 2640 u32 portsc1, devlc;
2552 u32 speed; 2641 u32 speed;
2553 2642
2554 VDBG(dev, "---> %s()\n", __func__); 2643 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2555 2644
2556 if (dev->bus_reset) 2645 if (dev->bus_reset)
2557 dev->bus_reset = 0; 2646 dev->bus_reset = 0;
2558 2647
2559 portsc1 = readl(&dev->op_regs->portsc1); 2648 portsc1 = readl(&dev->op_regs->portsc1);
2560 devlc = readl(&dev->op_regs->devlc); 2649 devlc = readl(&dev->op_regs->devlc);
2561 VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n", 2650 dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
2562 portsc1, devlc); 2651 portsc1, devlc);
2563 2652
2564 /* bus reset is finished */ 2653 /* bus reset is finished */
@@ -2579,25 +2668,22 @@ static void handle_port_change(struct langwell_udc *dev)
2579 dev->gadget.speed = USB_SPEED_UNKNOWN; 2668 dev->gadget.speed = USB_SPEED_UNKNOWN;
2580 break; 2669 break;
2581 } 2670 }
2582 VDBG(dev, "speed = %d, dev->gadget.speed = %d\n", 2671 dev_vdbg(&dev->pdev->dev,
2672 "speed = %d, dev->gadget.speed = %d\n",
2583 speed, dev->gadget.speed); 2673 speed, dev->gadget.speed);
2584 } 2674 }
2585 2675
2586 /* LPM L0 to L1 */ 2676 /* LPM L0 to L1 */
2587 if (dev->lpm && dev->lpm_state == LPM_L0) 2677 if (dev->lpm && dev->lpm_state == LPM_L0)
2588 if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) { 2678 if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
2589 INFO(dev, "LPM L0 to L1\n"); 2679 dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
2590 dev->lpm_state = LPM_L1; 2680 dev->lpm_state = LPM_L1;
2591 } 2681 }
2592 2682
2593 /* LPM L1 to L0, force resume or remote wakeup finished */ 2683 /* LPM L1 to L0, force resume or remote wakeup finished */
2594 if (dev->lpm && dev->lpm_state == LPM_L1) 2684 if (dev->lpm && dev->lpm_state == LPM_L1)
2595 if (!(portsc1 & PORTS_SUSP)) { 2685 if (!(portsc1 & PORTS_SUSP)) {
2596 if (portsc1 & PORTS_SLP) 2686 dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
2597 INFO(dev, "LPM L1 to L0, force resume\n");
2598 else
2599 INFO(dev, "LPM L1 to L0, remote wakeup\n");
2600
2601 dev->lpm_state = LPM_L0; 2687 dev->lpm_state = LPM_L0;
2602 } 2688 }
2603 2689
@@ -2605,7 +2691,7 @@ static void handle_port_change(struct langwell_udc *dev)
2605 if (!dev->resume_state) 2691 if (!dev->resume_state)
2606 dev->usb_state = USB_STATE_DEFAULT; 2692 dev->usb_state = USB_STATE_DEFAULT;
2607 2693
2608 VDBG(dev, "<--- %s()\n", __func__); 2694 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2609} 2695}
2610 2696
2611 2697
@@ -2617,7 +2703,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
2617 endptcomplete; 2703 endptcomplete;
2618 unsigned long timeout; 2704 unsigned long timeout;
2619 2705
2620 VDBG(dev, "---> %s()\n", __func__); 2706 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2621 2707
2622 /* Write-Clear the device address */ 2708 /* Write-Clear the device address */
2623 deviceaddr = readl(&dev->op_regs->deviceaddr); 2709 deviceaddr = readl(&dev->op_regs->deviceaddr);
@@ -2634,7 +2720,10 @@ static void handle_usb_reset(struct langwell_udc *dev)
2634 2720
2635 dev->ep0_dir = USB_DIR_OUT; 2721 dev->ep0_dir = USB_DIR_OUT;
2636 dev->ep0_state = WAIT_FOR_SETUP; 2722 dev->ep0_state = WAIT_FOR_SETUP;
2637 dev->remote_wakeup = 0; /* default to 0 on reset */ 2723
2724 /* remote wakeup reset to 0 when the device is reset */
2725 dev->remote_wakeup = 0;
2726 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
2638 dev->gadget.b_hnp_enable = 0; 2727 dev->gadget.b_hnp_enable = 0;
2639 dev->gadget.a_hnp_support = 0; 2728 dev->gadget.a_hnp_support = 0;
2640 dev->gadget.a_alt_hnp_support = 0; 2729 dev->gadget.a_alt_hnp_support = 0;
@@ -2651,7 +2740,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
2651 timeout = jiffies + PRIME_TIMEOUT; 2740 timeout = jiffies + PRIME_TIMEOUT;
2652 while (readl(&dev->op_regs->endptprime)) { 2741 while (readl(&dev->op_regs->endptprime)) {
2653 if (time_after(jiffies, timeout)) { 2742 if (time_after(jiffies, timeout)) {
2654 ERROR(dev, "USB reset timeout\n"); 2743 dev_err(&dev->pdev->dev, "USB reset timeout\n");
2655 break; 2744 break;
2656 } 2745 }
2657 cpu_relax(); 2746 cpu_relax();
@@ -2661,7 +2750,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
2661 writel((u32) ~0, &dev->op_regs->endptflush); 2750 writel((u32) ~0, &dev->op_regs->endptflush);
2662 2751
2663 if (readl(&dev->op_regs->portsc1) & PORTS_PR) { 2752 if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
2664 VDBG(dev, "USB bus reset\n"); 2753 dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
2665 /* bus is reseting */ 2754 /* bus is reseting */
2666 dev->bus_reset = 1; 2755 dev->bus_reset = 1;
2667 2756
@@ -2669,7 +2758,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
2669 stop_activity(dev, dev->driver); 2758 stop_activity(dev, dev->driver);
2670 dev->usb_state = USB_STATE_DEFAULT; 2759 dev->usb_state = USB_STATE_DEFAULT;
2671 } else { 2760 } else {
2672 VDBG(dev, "device controller reset\n"); 2761 dev_vdbg(&dev->pdev->dev, "device controller reset\n");
2673 /* controller reset */ 2762 /* controller reset */
2674 langwell_udc_reset(dev); 2763 langwell_udc_reset(dev);
2675 2764
@@ -2691,15 +2780,14 @@ static void handle_usb_reset(struct langwell_udc *dev)
2691 dev->lotg->hsm.b_hnp_enable = 0; 2780 dev->lotg->hsm.b_hnp_enable = 0;
2692#endif 2781#endif
2693 2782
2694 VDBG(dev, "<--- %s()\n", __func__); 2783 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2695} 2784}
2696 2785
2697 2786
2698/* USB bus suspend/resume interrupt */ 2787/* USB bus suspend/resume interrupt */
2699static void handle_bus_suspend(struct langwell_udc *dev) 2788static void handle_bus_suspend(struct langwell_udc *dev)
2700{ 2789{
2701 u32 devlc; 2790 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2702 DBG(dev, "---> %s()\n", __func__);
2703 2791
2704 dev->resume_state = dev->usb_state; 2792 dev->resume_state = dev->usb_state;
2705 dev->usb_state = USB_STATE_SUSPENDED; 2793 dev->usb_state = USB_STATE_SUSPENDED;
@@ -2733,33 +2821,29 @@ static void handle_bus_suspend(struct langwell_udc *dev)
2733 spin_unlock(&dev->lock); 2821 spin_unlock(&dev->lock);
2734 dev->driver->suspend(&dev->gadget); 2822 dev->driver->suspend(&dev->gadget);
2735 spin_lock(&dev->lock); 2823 spin_lock(&dev->lock);
2736 DBG(dev, "suspend %s\n", dev->driver->driver.name); 2824 dev_dbg(&dev->pdev->dev, "suspend %s\n",
2825 dev->driver->driver.name);
2737 } 2826 }
2738 } 2827 }
2739 2828
2740 /* enter PHY low power suspend */ 2829 /* enter PHY low power suspend */
2741 devlc = readl(&dev->op_regs->devlc); 2830 if (dev->pdev->device != 0x0829)
2742 VDBG(dev, "devlc = 0x%08x\n", devlc); 2831 langwell_phy_low_power(dev, 0);
2743 devlc |= LPM_PHCD;
2744 writel(devlc, &dev->op_regs->devlc);
2745 2832
2746 DBG(dev, "<--- %s()\n", __func__); 2833 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2747} 2834}
2748 2835
2749 2836
2750static void handle_bus_resume(struct langwell_udc *dev) 2837static void handle_bus_resume(struct langwell_udc *dev)
2751{ 2838{
2752 u32 devlc; 2839 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2753 DBG(dev, "---> %s()\n", __func__);
2754 2840
2755 dev->usb_state = dev->resume_state; 2841 dev->usb_state = dev->resume_state;
2756 dev->resume_state = 0; 2842 dev->resume_state = 0;
2757 2843
2758 /* exit PHY low power suspend */ 2844 /* exit PHY low power suspend */
2759 devlc = readl(&dev->op_regs->devlc); 2845 if (dev->pdev->device != 0x0829)
2760 VDBG(dev, "devlc = 0x%08x\n", devlc); 2846 langwell_phy_low_power(dev, 0);
2761 devlc &= ~LPM_PHCD;
2762 writel(devlc, &dev->op_regs->devlc);
2763 2847
2764#ifdef OTG_TRANSCEIVER 2848#ifdef OTG_TRANSCEIVER
2765 if (dev->lotg->otg.default_a == 0) 2849 if (dev->lotg->otg.default_a == 0)
@@ -2772,11 +2856,12 @@ static void handle_bus_resume(struct langwell_udc *dev)
2772 spin_unlock(&dev->lock); 2856 spin_unlock(&dev->lock);
2773 dev->driver->resume(&dev->gadget); 2857 dev->driver->resume(&dev->gadget);
2774 spin_lock(&dev->lock); 2858 spin_lock(&dev->lock);
2775 DBG(dev, "resume %s\n", dev->driver->driver.name); 2859 dev_dbg(&dev->pdev->dev, "resume %s\n",
2860 dev->driver->driver.name);
2776 } 2861 }
2777 } 2862 }
2778 2863
2779 DBG(dev, "<--- %s()\n", __func__); 2864 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2780} 2865}
2781 2866
2782 2867
@@ -2789,11 +2874,11 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
2789 irq_sts, 2874 irq_sts,
2790 portsc1; 2875 portsc1;
2791 2876
2792 VDBG(dev, "---> %s()\n", __func__); 2877 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2793 2878
2794 if (dev->stopped) { 2879 if (dev->stopped) {
2795 VDBG(dev, "handle IRQ_NONE\n"); 2880 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2796 VDBG(dev, "<--- %s()\n", __func__); 2881 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2797 return IRQ_NONE; 2882 return IRQ_NONE;
2798 } 2883 }
2799 2884
@@ -2806,12 +2891,13 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
2806 usbintr = readl(&dev->op_regs->usbintr); 2891 usbintr = readl(&dev->op_regs->usbintr);
2807 2892
2808 irq_sts = usbsts & usbintr; 2893 irq_sts = usbsts & usbintr;
2809 VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n", 2894 dev_vdbg(&dev->pdev->dev,
2895 "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
2810 usbsts, usbintr, irq_sts); 2896 usbsts, usbintr, irq_sts);
2811 2897
2812 if (!irq_sts) { 2898 if (!irq_sts) {
2813 VDBG(dev, "handle IRQ_NONE\n"); 2899 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2814 VDBG(dev, "<--- %s()\n", __func__); 2900 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2815 spin_unlock(&dev->lock); 2901 spin_unlock(&dev->lock);
2816 return IRQ_NONE; 2902 return IRQ_NONE;
2817 } 2903 }
@@ -2827,12 +2913,13 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
2827 2913
2828 /* USB interrupt */ 2914 /* USB interrupt */
2829 if (irq_sts & STS_UI) { 2915 if (irq_sts & STS_UI) {
2830 VDBG(dev, "USB interrupt\n"); 2916 dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
2831 2917
2832 /* setup packet received from ep0 */ 2918 /* setup packet received from ep0 */
2833 if (readl(&dev->op_regs->endptsetupstat) 2919 if (readl(&dev->op_regs->endptsetupstat)
2834 & EP0SETUPSTAT_MASK) { 2920 & EP0SETUPSTAT_MASK) {
2835 VDBG(dev, "USB SETUP packet received interrupt\n"); 2921 dev_vdbg(&dev->pdev->dev,
2922 "USB SETUP packet received interrupt\n");
2836 /* setup tripwire semaphone */ 2923 /* setup tripwire semaphone */
2837 setup_tripwire(dev); 2924 setup_tripwire(dev);
2838 handle_setup_packet(dev, &dev->local_setup_buff); 2925 handle_setup_packet(dev, &dev->local_setup_buff);
@@ -2840,7 +2927,8 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
2840 2927
2841 /* USB transfer completion */ 2928 /* USB transfer completion */
2842 if (readl(&dev->op_regs->endptcomplete)) { 2929 if (readl(&dev->op_regs->endptcomplete)) {
2843 VDBG(dev, "USB transfer completion interrupt\n"); 2930 dev_vdbg(&dev->pdev->dev,
2931 "USB transfer completion interrupt\n");
2844 handle_trans_complete(dev); 2932 handle_trans_complete(dev);
2845 } 2933 }
2846 } 2934 }
@@ -2848,36 +2936,36 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
2848 /* SOF received interrupt (for ISO transfer) */ 2936 /* SOF received interrupt (for ISO transfer) */
2849 if (irq_sts & STS_SRI) { 2937 if (irq_sts & STS_SRI) {
2850 /* FIXME */ 2938 /* FIXME */
2851 /* VDBG(dev, "SOF received interrupt\n"); */ 2939 /* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
2852 } 2940 }
2853 2941
2854 /* port change detect interrupt */ 2942 /* port change detect interrupt */
2855 if (irq_sts & STS_PCI) { 2943 if (irq_sts & STS_PCI) {
2856 VDBG(dev, "port change detect interrupt\n"); 2944 dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
2857 handle_port_change(dev); 2945 handle_port_change(dev);
2858 } 2946 }
2859 2947
2860 /* suspend interrrupt */ 2948 /* suspend interrrupt */
2861 if (irq_sts & STS_SLI) { 2949 if (irq_sts & STS_SLI) {
2862 VDBG(dev, "suspend interrupt\n"); 2950 dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
2863 handle_bus_suspend(dev); 2951 handle_bus_suspend(dev);
2864 } 2952 }
2865 2953
2866 /* USB reset interrupt */ 2954 /* USB reset interrupt */
2867 if (irq_sts & STS_URI) { 2955 if (irq_sts & STS_URI) {
2868 VDBG(dev, "USB reset interrupt\n"); 2956 dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
2869 handle_usb_reset(dev); 2957 handle_usb_reset(dev);
2870 } 2958 }
2871 2959
2872 /* USB error or system error interrupt */ 2960 /* USB error or system error interrupt */
2873 if (irq_sts & (STS_UEI | STS_SEI)) { 2961 if (irq_sts & (STS_UEI | STS_SEI)) {
2874 /* FIXME */ 2962 /* FIXME */
2875 WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts); 2963 dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
2876 } 2964 }
2877 2965
2878 spin_unlock(&dev->lock); 2966 spin_unlock(&dev->lock);
2879 2967
2880 VDBG(dev, "<--- %s()\n", __func__); 2968 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2881 return IRQ_HANDLED; 2969 return IRQ_HANDLED;
2882} 2970}
2883 2971
@@ -2889,15 +2977,59 @@ static void gadget_release(struct device *_dev)
2889{ 2977{
2890 struct langwell_udc *dev = the_controller; 2978 struct langwell_udc *dev = the_controller;
2891 2979
2892 DBG(dev, "---> %s()\n", __func__); 2980 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2893 2981
2894 complete(dev->done); 2982 complete(dev->done);
2895 2983
2896 DBG(dev, "<--- %s()\n", __func__); 2984 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2897 kfree(dev); 2985 kfree(dev);
2898} 2986}
2899 2987
2900 2988
2989/* enable SRAM caching if SRAM detected */
2990static void sram_init(struct langwell_udc *dev)
2991{
2992 struct pci_dev *pdev = dev->pdev;
2993
2994 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2995
2996 dev->sram_addr = pci_resource_start(pdev, 1);
2997 dev->sram_size = pci_resource_len(pdev, 1);
2998 dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
2999 dev->sram_addr, dev->sram_size);
3000 dev->got_sram = 1;
3001
3002 if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
3003 dev_warn(&dev->pdev->dev, "SRAM request failed\n");
3004 dev->got_sram = 0;
3005 } else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
3006 dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
3007 dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
3008 pci_release_region(pdev, 1);
3009 dev->got_sram = 0;
3010 }
3011
3012 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3013}
3014
3015
3016/* release SRAM caching */
3017static void sram_deinit(struct langwell_udc *dev)
3018{
3019 struct pci_dev *pdev = dev->pdev;
3020
3021 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3022
3023 dma_release_declared_memory(&pdev->dev);
3024 pci_release_region(pdev, 1);
3025
3026 dev->got_sram = 0;
3027
3028 dev_info(&dev->pdev->dev, "release SRAM caching\n");
3029 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3030}
3031
3032
2901/* tear down the binding between this driver and the pci device */ 3033/* tear down the binding between this driver and the pci device */
2902static void langwell_udc_remove(struct pci_dev *pdev) 3034static void langwell_udc_remove(struct pci_dev *pdev)
2903{ 3035{
@@ -2906,23 +3038,29 @@ static void langwell_udc_remove(struct pci_dev *pdev)
2906 DECLARE_COMPLETION(done); 3038 DECLARE_COMPLETION(done);
2907 3039
2908 BUG_ON(dev->driver); 3040 BUG_ON(dev->driver);
2909 DBG(dev, "---> %s()\n", __func__); 3041 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2910 3042
2911 dev->done = &done; 3043 dev->done = &done;
2912 3044
2913 /* free memory allocated in probe */ 3045#ifndef OTG_TRANSCEIVER
3046 /* free dTD dma_pool and dQH */
2914 if (dev->dtd_pool) 3047 if (dev->dtd_pool)
2915 dma_pool_destroy(dev->dtd_pool); 3048 dma_pool_destroy(dev->dtd_pool);
2916 3049
3050 if (dev->ep_dqh)
3051 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3052 dev->ep_dqh, dev->ep_dqh_dma);
3053
3054 /* release SRAM caching */
3055 if (dev->has_sram && dev->got_sram)
3056 sram_deinit(dev);
3057#endif
3058
2917 if (dev->status_req) { 3059 if (dev->status_req) {
2918 kfree(dev->status_req->req.buf); 3060 kfree(dev->status_req->req.buf);
2919 kfree(dev->status_req); 3061 kfree(dev->status_req);
2920 } 3062 }
2921 3063
2922 if (dev->ep_dqh)
2923 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
2924 dev->ep_dqh, dev->ep_dqh_dma);
2925
2926 kfree(dev->ep); 3064 kfree(dev->ep);
2927 3065
2928 /* diable IRQ handler */ 3066 /* diable IRQ handler */
@@ -2949,11 +3087,12 @@ static void langwell_udc_remove(struct pci_dev *pdev)
2949 3087
2950 dev->cap_regs = NULL; 3088 dev->cap_regs = NULL;
2951 3089
2952 INFO(dev, "unbind\n"); 3090 dev_info(&dev->pdev->dev, "unbind\n");
2953 DBG(dev, "<--- %s()\n", __func__); 3091 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2954 3092
2955 device_unregister(&dev->gadget.dev); 3093 device_unregister(&dev->gadget.dev);
2956 device_remove_file(&pdev->dev, &dev_attr_langwell_udc); 3094 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3095 device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
2957 3096
2958#ifndef OTG_TRANSCEIVER 3097#ifndef OTG_TRANSCEIVER
2959 pci_set_drvdata(pdev, NULL); 3098 pci_set_drvdata(pdev, NULL);
@@ -2997,7 +3136,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
2997 spin_lock_init(&dev->lock); 3136 spin_lock_init(&dev->lock);
2998 3137
2999 dev->pdev = pdev; 3138 dev->pdev = pdev;
3000 DBG(dev, "---> %s()\n", __func__); 3139 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3001 3140
3002#ifdef OTG_TRANSCEIVER 3141#ifdef OTG_TRANSCEIVER
3003 /* PCI device is already enabled by otg_transceiver driver */ 3142 /* PCI device is already enabled by otg_transceiver driver */
@@ -3022,7 +3161,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3022 resource = pci_resource_start(pdev, 0); 3161 resource = pci_resource_start(pdev, 0);
3023 len = pci_resource_len(pdev, 0); 3162 len = pci_resource_len(pdev, 0);
3024 if (!request_mem_region(resource, len, driver_name)) { 3163 if (!request_mem_region(resource, len, driver_name)) {
3025 ERROR(dev, "controller already in use\n"); 3164 dev_err(&dev->pdev->dev, "controller already in use\n");
3026 retval = -EBUSY; 3165 retval = -EBUSY;
3027 goto error; 3166 goto error;
3028 } 3167 }
@@ -3031,33 +3170,43 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3031 base = ioremap_nocache(resource, len); 3170 base = ioremap_nocache(resource, len);
3032#endif 3171#endif
3033 if (base == NULL) { 3172 if (base == NULL) {
3034 ERROR(dev, "can't map memory\n"); 3173 dev_err(&dev->pdev->dev, "can't map memory\n");
3035 retval = -EFAULT; 3174 retval = -EFAULT;
3036 goto error; 3175 goto error;
3037 } 3176 }
3038 3177
3039 dev->cap_regs = (struct langwell_cap_regs __iomem *) base; 3178 dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
3040 VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs); 3179 dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
3041 dev->op_regs = (struct langwell_op_regs __iomem *) 3180 dev->op_regs = (struct langwell_op_regs __iomem *)
3042 (base + OP_REG_OFFSET); 3181 (base + OP_REG_OFFSET);
3043 VDBG(dev, "dev->op_regs: %p\n", dev->op_regs); 3182 dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
3044 3183
3045 /* irq setup after old hardware is cleaned up */ 3184 /* irq setup after old hardware is cleaned up */
3046 if (!pdev->irq) { 3185 if (!pdev->irq) {
3047 ERROR(dev, "No IRQ. Check PCI setup!\n"); 3186 dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
3048 retval = -ENODEV; 3187 retval = -ENODEV;
3049 goto error; 3188 goto error;
3050 } 3189 }
3051 3190
3191 dev->has_sram = 1;
3192 dev->got_sram = 0;
3193 dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
3194
3052#ifndef OTG_TRANSCEIVER 3195#ifndef OTG_TRANSCEIVER
3053 INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n", 3196 /* enable SRAM caching if detected */
3197 if (dev->has_sram && !dev->got_sram)
3198 sram_init(dev);
3199
3200 dev_info(&dev->pdev->dev,
3201 "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
3054 pdev->irq, resource, len, base); 3202 pdev->irq, resource, len, base);
3055 /* enables bus-mastering for device dev */ 3203 /* enables bus-mastering for device dev */
3056 pci_set_master(pdev); 3204 pci_set_master(pdev);
3057 3205
3058 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, 3206 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3059 driver_name, dev) != 0) { 3207 driver_name, dev) != 0) {
3060 ERROR(dev, "request interrupt %d failed\n", pdev->irq); 3208 dev_err(&dev->pdev->dev,
3209 "request interrupt %d failed\n", pdev->irq);
3061 retval = -EBUSY; 3210 retval = -EBUSY;
3062 goto error; 3211 goto error;
3063 } 3212 }
@@ -3071,32 +3220,34 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3071 dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0; 3220 dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
3072 dev->dciversion = readw(&dev->cap_regs->dciversion); 3221 dev->dciversion = readw(&dev->cap_regs->dciversion);
3073 dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0; 3222 dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
3074 VDBG(dev, "dev->lpm: %d\n", dev->lpm); 3223 dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
3075 VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion); 3224 dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
3076 VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams)); 3225 dev->dciversion);
3077 VDBG(dev, "dev->devcap: %d\n", dev->devcap); 3226 dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
3227 readl(&dev->cap_regs->dccparams));
3228 dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
3078 if (!dev->devcap) { 3229 if (!dev->devcap) {
3079 ERROR(dev, "can't support device mode\n"); 3230 dev_err(&dev->pdev->dev, "can't support device mode\n");
3080 retval = -ENODEV; 3231 retval = -ENODEV;
3081 goto error; 3232 goto error;
3082 } 3233 }
3083 3234
3084 /* a pair of endpoints (out/in) for each address */ 3235 /* a pair of endpoints (out/in) for each address */
3085 dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2; 3236 dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
3086 VDBG(dev, "dev->ep_max: %d\n", dev->ep_max); 3237 dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
3087 3238
3088 /* allocate endpoints memory */ 3239 /* allocate endpoints memory */
3089 dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max, 3240 dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
3090 GFP_KERNEL); 3241 GFP_KERNEL);
3091 if (!dev->ep) { 3242 if (!dev->ep) {
3092 ERROR(dev, "allocate endpoints memory failed\n"); 3243 dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
3093 retval = -ENOMEM; 3244 retval = -ENOMEM;
3094 goto error; 3245 goto error;
3095 } 3246 }
3096 3247
3097 /* allocate device dQH memory */ 3248 /* allocate device dQH memory */
3098 size = dev->ep_max * sizeof(struct langwell_dqh); 3249 size = dev->ep_max * sizeof(struct langwell_dqh);
3099 VDBG(dev, "orig size = %d\n", size); 3250 dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
3100 if (size < DQH_ALIGNMENT) 3251 if (size < DQH_ALIGNMENT)
3101 size = DQH_ALIGNMENT; 3252 size = DQH_ALIGNMENT;
3102 else if ((size % DQH_ALIGNMENT) != 0) { 3253 else if ((size % DQH_ALIGNMENT) != 0) {
@@ -3106,17 +3257,18 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3106 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size, 3257 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3107 &dev->ep_dqh_dma, GFP_KERNEL); 3258 &dev->ep_dqh_dma, GFP_KERNEL);
3108 if (!dev->ep_dqh) { 3259 if (!dev->ep_dqh) {
3109 ERROR(dev, "allocate dQH memory failed\n"); 3260 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3110 retval = -ENOMEM; 3261 retval = -ENOMEM;
3111 goto error; 3262 goto error;
3112 } 3263 }
3113 dev->ep_dqh_size = size; 3264 dev->ep_dqh_size = size;
3114 VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size); 3265 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
3115 3266
3116 /* initialize ep0 status request structure */ 3267 /* initialize ep0 status request structure */
3117 dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL); 3268 dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
3118 if (!dev->status_req) { 3269 if (!dev->status_req) {
3119 ERROR(dev, "allocate status_req memory failed\n"); 3270 dev_err(&dev->pdev->dev,
3271 "allocate status_req memory failed\n");
3120 retval = -ENOMEM; 3272 retval = -ENOMEM;
3121 goto error; 3273 goto error;
3122 } 3274 }
@@ -3129,7 +3281,10 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3129 dev->resume_state = USB_STATE_NOTATTACHED; 3281 dev->resume_state = USB_STATE_NOTATTACHED;
3130 dev->usb_state = USB_STATE_POWERED; 3282 dev->usb_state = USB_STATE_POWERED;
3131 dev->ep0_dir = USB_DIR_OUT; 3283 dev->ep0_dir = USB_DIR_OUT;
3132 dev->remote_wakeup = 0; /* default to 0 on reset */ 3284
3285 /* remote wakeup reset to 0 when the device is reset */
3286 dev->remote_wakeup = 0;
3287 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
3133 3288
3134#ifndef OTG_TRANSCEIVER 3289#ifndef OTG_TRANSCEIVER
3135 /* reset device controller */ 3290 /* reset device controller */
@@ -3174,18 +3329,20 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3174 } 3329 }
3175 3330
3176 /* done */ 3331 /* done */
3177 INFO(dev, "%s\n", driver_desc); 3332 dev_info(&dev->pdev->dev, "%s\n", driver_desc);
3178 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); 3333 dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
3179 INFO(dev, "Driver version: " DRIVER_VERSION "\n"); 3334 dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
3180 INFO(dev, "Support (max) %d endpoints\n", dev->ep_max); 3335 dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
3181 INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion); 3336 dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
3182 INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host"); 3337 dev->dciversion);
3183 INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No"); 3338 dev_info(&dev->pdev->dev, "Controller mode: %s\n",
3184 3339 dev->devcap ? "Device" : "Host");
3185 VDBG(dev, "After langwell_udc_probe(), print all registers:\n"); 3340 dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
3186#ifdef VERBOSE 3341 dev->lpm ? "Yes" : "No");
3342
3343 dev_vdbg(&dev->pdev->dev,
3344 "After langwell_udc_probe(), print all registers:\n");
3187 print_all_registers(dev); 3345 print_all_registers(dev);
3188#endif
3189 3346
3190 the_controller = dev; 3347 the_controller = dev;
3191 3348
@@ -3197,12 +3354,18 @@ static int langwell_udc_probe(struct pci_dev *pdev,
3197 if (retval) 3354 if (retval)
3198 goto error; 3355 goto error;
3199 3356
3200 VDBG(dev, "<--- %s()\n", __func__); 3357 retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
3358 if (retval)
3359 goto error_attr1;
3360
3361 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3201 return 0; 3362 return 0;
3202 3363
3364error_attr1:
3365 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3203error: 3366error:
3204 if (dev) { 3367 if (dev) {
3205 DBG(dev, "<--- %s()\n", __func__); 3368 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3206 langwell_udc_remove(pdev); 3369 langwell_udc_remove(pdev);
3207 } 3370 }
3208 3371
@@ -3214,9 +3377,8 @@ error:
3214static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state) 3377static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3215{ 3378{
3216 struct langwell_udc *dev = the_controller; 3379 struct langwell_udc *dev = the_controller;
3217 u32 devlc;
3218 3380
3219 DBG(dev, "---> %s()\n", __func__); 3381 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3220 3382
3221 /* disable interrupt and set controller to stop state */ 3383 /* disable interrupt and set controller to stop state */
3222 langwell_udc_stop(dev); 3384 langwell_udc_stop(dev);
@@ -3226,20 +3388,34 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3226 free_irq(pdev->irq, dev); 3388 free_irq(pdev->irq, dev);
3227 dev->got_irq = 0; 3389 dev->got_irq = 0;
3228 3390
3229
3230 /* save PCI state */ 3391 /* save PCI state */
3231 pci_save_state(pdev); 3392 pci_save_state(pdev);
3232 3393
3394 spin_lock_irq(&dev->lock);
3395 /* stop all usb activities */
3396 stop_activity(dev, dev->driver);
3397 spin_unlock_irq(&dev->lock);
3398
3399 /* free dTD dma_pool and dQH */
3400 if (dev->dtd_pool)
3401 dma_pool_destroy(dev->dtd_pool);
3402
3403 if (dev->ep_dqh)
3404 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3405 dev->ep_dqh, dev->ep_dqh_dma);
3406
3407 /* release SRAM caching */
3408 if (dev->has_sram && dev->got_sram)
3409 sram_deinit(dev);
3410
3233 /* set device power state */ 3411 /* set device power state */
3234 pci_set_power_state(pdev, PCI_D3hot); 3412 pci_set_power_state(pdev, PCI_D3hot);
3235 3413
3236 /* enter PHY low power suspend */ 3414 /* enter PHY low power suspend */
3237 devlc = readl(&dev->op_regs->devlc); 3415 if (dev->pdev->device != 0x0829)
3238 VDBG(dev, "devlc = 0x%08x\n", devlc); 3416 langwell_phy_low_power(dev, 1);
3239 devlc |= LPM_PHCD;
3240 writel(devlc, &dev->op_regs->devlc);
3241 3417
3242 DBG(dev, "<--- %s()\n", __func__); 3418 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3243 return 0; 3419 return 0;
3244} 3420}
3245 3421
@@ -3248,27 +3424,58 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3248static int langwell_udc_resume(struct pci_dev *pdev) 3424static int langwell_udc_resume(struct pci_dev *pdev)
3249{ 3425{
3250 struct langwell_udc *dev = the_controller; 3426 struct langwell_udc *dev = the_controller;
3251 u32 devlc; 3427 size_t size;
3252 3428
3253 DBG(dev, "---> %s()\n", __func__); 3429 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3254 3430
3255 /* exit PHY low power suspend */ 3431 /* exit PHY low power suspend */
3256 devlc = readl(&dev->op_regs->devlc); 3432 if (dev->pdev->device != 0x0829)
3257 VDBG(dev, "devlc = 0x%08x\n", devlc); 3433 langwell_phy_low_power(dev, 0);
3258 devlc &= ~LPM_PHCD;
3259 writel(devlc, &dev->op_regs->devlc);
3260 3434
3261 /* set device D0 power state */ 3435 /* set device D0 power state */
3262 pci_set_power_state(pdev, PCI_D0); 3436 pci_set_power_state(pdev, PCI_D0);
3263 3437
3438 /* enable SRAM caching if detected */
3439 if (dev->has_sram && !dev->got_sram)
3440 sram_init(dev);
3441
3442 /* allocate device dQH memory */
3443 size = dev->ep_max * sizeof(struct langwell_dqh);
3444 dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
3445 if (size < DQH_ALIGNMENT)
3446 size = DQH_ALIGNMENT;
3447 else if ((size % DQH_ALIGNMENT) != 0) {
3448 size += DQH_ALIGNMENT + 1;
3449 size &= ~(DQH_ALIGNMENT - 1);
3450 }
3451 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3452 &dev->ep_dqh_dma, GFP_KERNEL);
3453 if (!dev->ep_dqh) {
3454 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3455 return -ENOMEM;
3456 }
3457 dev->ep_dqh_size = size;
3458 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
3459
3460 /* create dTD dma_pool resource */
3461 dev->dtd_pool = dma_pool_create("langwell_dtd",
3462 &dev->pdev->dev,
3463 sizeof(struct langwell_dtd),
3464 DTD_ALIGNMENT,
3465 DMA_BOUNDARY);
3466
3467 if (!dev->dtd_pool)
3468 return -ENOMEM;
3469
3264 /* restore PCI state */ 3470 /* restore PCI state */
3265 pci_restore_state(pdev); 3471 pci_restore_state(pdev);
3266 3472
3267 /* enable IRQ handler */ 3473 /* enable IRQ handler */
3268 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev) 3474 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3269 != 0) { 3475 driver_name, dev) != 0) {
3270 ERROR(dev, "request interrupt %d failed\n", pdev->irq); 3476 dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
3271 return -1; 3477 pdev->irq);
3478 return -EBUSY;
3272 } 3479 }
3273 dev->got_irq = 1; 3480 dev->got_irq = 1;
3274 3481
@@ -3290,7 +3497,7 @@ static int langwell_udc_resume(struct pci_dev *pdev)
3290 dev->ep0_state = WAIT_FOR_SETUP; 3497 dev->ep0_state = WAIT_FOR_SETUP;
3291 dev->ep0_dir = USB_DIR_OUT; 3498 dev->ep0_dir = USB_DIR_OUT;
3292 3499
3293 DBG(dev, "<--- %s()\n", __func__); 3500 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3294 return 0; 3501 return 0;
3295} 3502}
3296 3503
@@ -3301,15 +3508,15 @@ static void langwell_udc_shutdown(struct pci_dev *pdev)
3301 struct langwell_udc *dev = the_controller; 3508 struct langwell_udc *dev = the_controller;
3302 u32 usbmode; 3509 u32 usbmode;
3303 3510
3304 DBG(dev, "---> %s()\n", __func__); 3511 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3305 3512
3306 /* reset controller mode to IDLE */ 3513 /* reset controller mode to IDLE */
3307 usbmode = readl(&dev->op_regs->usbmode); 3514 usbmode = readl(&dev->op_regs->usbmode);
3308 DBG(dev, "usbmode = 0x%08x\n", usbmode); 3515 dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
3309 usbmode &= (~3 | MODE_IDLE); 3516 usbmode &= (~3 | MODE_IDLE);
3310 writel(usbmode, &dev->op_regs->usbmode); 3517 writel(usbmode, &dev->op_regs->usbmode);
3311 3518
3312 DBG(dev, "<--- %s()\n", __func__); 3519 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3313} 3520}
3314 3521
3315/*-------------------------------------------------------------------------*/ 3522/*-------------------------------------------------------------------------*/
@@ -3324,7 +3531,6 @@ static const struct pci_device_id pci_ids[] = { {
3324}, { /* end: all zeroes */ } 3531}, { /* end: all zeroes */ }
3325}; 3532};
3326 3533
3327
3328MODULE_DEVICE_TABLE(pci, pci_ids); 3534MODULE_DEVICE_TABLE(pci, pci_ids);
3329 3535
3330 3536
@@ -3343,12 +3549,6 @@ static struct pci_driver langwell_pci_driver = {
3343}; 3549};
3344 3550
3345 3551
3346MODULE_DESCRIPTION(DRIVER_DESC);
3347MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3348MODULE_VERSION(DRIVER_VERSION);
3349MODULE_LICENSE("GPL");
3350
3351
3352static int __init init(void) 3552static int __init init(void)
3353{ 3553{
3354#ifdef OTG_TRANSCEIVER 3554#ifdef OTG_TRANSCEIVER
@@ -3370,3 +3570,9 @@ static void __exit cleanup(void)
3370} 3570}
3371module_exit(cleanup); 3571module_exit(cleanup);
3372 3572
3573
3574MODULE_DESCRIPTION(DRIVER_DESC);
3575MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3576MODULE_VERSION(DRIVER_VERSION);
3577MODULE_LICENSE("GPL");
3578
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
index 9719934e1c08..f1d9c1bb04f3 100644
--- a/drivers/usb/gadget/langwell_udc.h
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -18,11 +18,7 @@
18 */ 18 */
19 19
20#include <linux/usb/langwell_udc.h> 20#include <linux/usb/langwell_udc.h>
21
22#if defined(CONFIG_USB_LANGWELL_OTG)
23#include <linux/usb/langwell_otg.h> 21#include <linux/usb/langwell_otg.h>
24#endif
25
26 22
27/*-------------------------------------------------------------------------*/ 23/*-------------------------------------------------------------------------*/
28 24
@@ -199,7 +195,9 @@ struct langwell_udc {
199 vbus_active:1, 195 vbus_active:1,
200 suspended:1, 196 suspended:1,
201 stopped:1, 197 stopped:1,
202 lpm:1; /* LPM capability */ 198 lpm:1, /* LPM capability */
199 has_sram:1, /* SRAM caching */
200 got_sram:1;
203 201
204 /* pci state used to access those endpoints */ 202 /* pci state used to access those endpoints */
205 struct pci_dev *pdev; 203 struct pci_dev *pdev;
@@ -224,5 +222,12 @@ struct langwell_udc {
224 222
225 /* make sure release() is done */ 223 /* make sure release() is done */
226 struct completion *done; 224 struct completion *done;
225
226 /* for private SRAM caching */
227 unsigned int sram_addr;
228 unsigned int sram_size;
229
230 /* device status data for get_status request */
231 u16 dev_status;
227}; 232};
228 233
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index fded3fca793b..6b58bd8ce623 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -408,7 +408,8 @@ static void udc_enable(struct lh7a40x_udc *dev)
408/* 408/*
409 Register entry point for the peripheral controller driver. 409 Register entry point for the peripheral controller driver.
410*/ 410*/
411int usb_gadget_register_driver(struct usb_gadget_driver *driver) 411int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
412 int (*bind)(struct usb_gadget *))
412{ 413{
413 struct lh7a40x_udc *dev = the_controller; 414 struct lh7a40x_udc *dev = the_controller;
414 int retval; 415 int retval;
@@ -417,7 +418,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
417 418
418 if (!driver 419 if (!driver
419 || driver->speed != USB_SPEED_FULL 420 || driver->speed != USB_SPEED_FULL
420 || !driver->bind 421 || !bind
421 || !driver->disconnect 422 || !driver->disconnect
422 || !driver->setup) 423 || !driver->setup)
423 return -EINVAL; 424 return -EINVAL;
@@ -431,7 +432,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
431 dev->gadget.dev.driver = &driver->driver; 432 dev->gadget.dev.driver = &driver->driver;
432 433
433 device_add(&dev->gadget.dev); 434 device_add(&dev->gadget.dev);
434 retval = driver->bind(&dev->gadget); 435 retval = bind(&dev->gadget);
435 if (retval) { 436 if (retval) {
436 printk(KERN_WARNING "%s: bind to driver %s --> error %d\n", 437 printk(KERN_WARNING "%s: bind to driver %s --> error %d\n",
437 dev->gadget.name, driver->driver.name, retval); 438 dev->gadget.name, driver->driver.name, retval);
@@ -453,8 +454,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
453 454
454 return 0; 455 return 0;
455} 456}
456 457EXPORT_SYMBOL(usb_gadget_probe_driver);
457EXPORT_SYMBOL(usb_gadget_register_driver);
458 458
459/* 459/*
460 Unregister entry point for the peripheral controller driver. 460 Unregister entry point for the peripheral controller driver.
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index e03058fe23cb..51b19f3027e7 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1454,14 +1454,15 @@ static struct usb_ep_ops m66592_ep_ops = {
1454/*-------------------------------------------------------------------------*/ 1454/*-------------------------------------------------------------------------*/
1455static struct m66592 *the_controller; 1455static struct m66592 *the_controller;
1456 1456
1457int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1457int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1458 int (*bind)(struct usb_gadget *))
1458{ 1459{
1459 struct m66592 *m66592 = the_controller; 1460 struct m66592 *m66592 = the_controller;
1460 int retval; 1461 int retval;
1461 1462
1462 if (!driver 1463 if (!driver
1463 || driver->speed != USB_SPEED_HIGH 1464 || driver->speed != USB_SPEED_HIGH
1464 || !driver->bind 1465 || !bind
1465 || !driver->setup) 1466 || !driver->setup)
1466 return -EINVAL; 1467 return -EINVAL;
1467 if (!m66592) 1468 if (!m66592)
@@ -1480,7 +1481,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1480 goto error; 1481 goto error;
1481 } 1482 }
1482 1483
1483 retval = driver->bind (&m66592->gadget); 1484 retval = bind(&m66592->gadget);
1484 if (retval) { 1485 if (retval) {
1485 pr_err("bind to driver error (%d)\n", retval); 1486 pr_err("bind to driver error (%d)\n", retval);
1486 device_del(&m66592->gadget.dev); 1487 device_del(&m66592->gadget.dev);
@@ -1505,7 +1506,7 @@ error:
1505 1506
1506 return retval; 1507 return retval;
1507} 1508}
1508EXPORT_SYMBOL(usb_gadget_register_driver); 1509EXPORT_SYMBOL(usb_gadget_probe_driver);
1509 1510
1510int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1511int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1511{ 1512{
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 585f2559484d..0769179dbdb0 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -75,10 +75,6 @@ static struct usb_device_descriptor msg_device_desc = {
75 /* Vendor and product id can be overridden by module parameters. */ 75 /* Vendor and product id can be overridden by module parameters. */
76 .idVendor = cpu_to_le16(FSG_VENDOR_ID), 76 .idVendor = cpu_to_le16(FSG_VENDOR_ID),
77 .idProduct = cpu_to_le16(FSG_PRODUCT_ID), 77 .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
78 /* .bcdDevice = f(hardware) */
79 /* .iManufacturer = DYNAMIC */
80 /* .iProduct = DYNAMIC */
81 /* NO SERIAL NUMBER */
82 .bNumConfigurations = 1, 78 .bNumConfigurations = 1,
83}; 79};
84 80
@@ -86,7 +82,8 @@ static struct usb_otg_descriptor otg_descriptor = {
86 .bLength = sizeof otg_descriptor, 82 .bLength = sizeof otg_descriptor,
87 .bDescriptorType = USB_DT_OTG, 83 .bDescriptorType = USB_DT_OTG,
88 84
89 /* REVISIT SRP-only hardware is possible, although 85 /*
86 * REVISIT SRP-only hardware is possible, although
90 * it would not be called "OTG" ... 87 * it would not be called "OTG" ...
91 */ 88 */
92 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, 89 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
@@ -98,33 +95,6 @@ static const struct usb_descriptor_header *otg_desc[] = {
98}; 95};
99 96
100 97
101/* string IDs are assigned dynamically */
102
103#define STRING_MANUFACTURER_IDX 0
104#define STRING_PRODUCT_IDX 1
105#define STRING_CONFIGURATION_IDX 2
106
107static char manufacturer[50];
108
109static struct usb_string strings_dev[] = {
110 [STRING_MANUFACTURER_IDX].s = manufacturer,
111 [STRING_PRODUCT_IDX].s = DRIVER_DESC,
112 [STRING_CONFIGURATION_IDX].s = "Self Powered",
113 { } /* end of list */
114};
115
116static struct usb_gadget_strings stringtab_dev = {
117 .language = 0x0409, /* en-us */
118 .strings = strings_dev,
119};
120
121static struct usb_gadget_strings *dev_strings[] = {
122 &stringtab_dev,
123 NULL,
124};
125
126
127
128/****************************** Configurations ******************************/ 98/****************************** Configurations ******************************/
129 99
130static struct fsg_module_parameters mod_data = { 100static struct fsg_module_parameters mod_data = {
@@ -141,7 +111,7 @@ static int msg_thread_exits(struct fsg_common *common)
141 return 0; 111 return 0;
142} 112}
143 113
144static int __ref msg_do_config(struct usb_configuration *c) 114static int __init msg_do_config(struct usb_configuration *c)
145{ 115{
146 static const struct fsg_operations ops = { 116 static const struct fsg_operations ops = {
147 .thread_exits = msg_thread_exits, 117 .thread_exits = msg_thread_exits,
@@ -171,54 +141,23 @@ static int __ref msg_do_config(struct usb_configuration *c)
171 141
172static struct usb_configuration msg_config_driver = { 142static struct usb_configuration msg_config_driver = {
173 .label = "Linux File-Backed Storage", 143 .label = "Linux File-Backed Storage",
174 .bind = msg_do_config,
175 .bConfigurationValue = 1, 144 .bConfigurationValue = 1,
176 /* .iConfiguration = DYNAMIC */
177 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 145 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
178}; 146};
179 147
180 148
181
182/****************************** Gadget Bind ******************************/ 149/****************************** Gadget Bind ******************************/
183 150
184 151static int __init msg_bind(struct usb_composite_dev *cdev)
185static int __ref msg_bind(struct usb_composite_dev *cdev)
186{ 152{
187 struct usb_gadget *gadget = cdev->gadget;
188 int status; 153 int status;
189 154
190 /* Allocate string descriptor numbers ... note that string 155 status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
191 * contents can be overridden by the composite_dev glue.
192 */
193
194 /* device descriptor strings: manufacturer, product */
195 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
196 init_utsname()->sysname, init_utsname()->release,
197 gadget->name);
198 status = usb_string_id(cdev);
199 if (status < 0) 156 if (status < 0)
200 return status; 157 return status;
201 strings_dev[STRING_MANUFACTURER_IDX].id = status;
202 msg_device_desc.iManufacturer = status;
203 158
204 status = usb_string_id(cdev); 159 dev_info(&cdev->gadget->dev,
205 if (status < 0) 160 DRIVER_DESC ", version: " DRIVER_VERSION "\n");
206 return status;
207 strings_dev[STRING_PRODUCT_IDX].id = status;
208 msg_device_desc.iProduct = status;
209
210 status = usb_string_id(cdev);
211 if (status < 0)
212 return status;
213 strings_dev[STRING_CONFIGURATION_IDX].id = status;
214 msg_config_driver.iConfiguration = status;
215
216 /* register our second configuration */
217 status = usb_add_config(cdev, &msg_config_driver);
218 if (status < 0)
219 return status;
220
221 dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
222 set_bit(0, &msg_registered); 161 set_bit(0, &msg_registered);
223 return 0; 162 return 0;
224} 163}
@@ -226,12 +165,11 @@ static int __ref msg_bind(struct usb_composite_dev *cdev)
226 165
227/****************************** Some noise ******************************/ 166/****************************** Some noise ******************************/
228 167
229
230static struct usb_composite_driver msg_driver = { 168static struct usb_composite_driver msg_driver = {
231 .name = "g_mass_storage", 169 .name = "g_mass_storage",
232 .dev = &msg_device_desc, 170 .dev = &msg_device_desc,
233 .strings = dev_strings, 171 .iProduct = DRIVER_DESC,
234 .bind = msg_bind, 172 .needs_serial = 1,
235}; 173};
236 174
237MODULE_DESCRIPTION(DRIVER_DESC); 175MODULE_DESCRIPTION(DRIVER_DESC);
@@ -240,7 +178,7 @@ MODULE_LICENSE("GPL");
240 178
241static int __init msg_init(void) 179static int __init msg_init(void)
242{ 180{
243 return usb_composite_register(&msg_driver); 181 return usb_composite_probe(&msg_driver, msg_bind);
244} 182}
245module_init(msg_init); 183module_init(msg_init);
246 184
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 795d76232167..d9feced348e3 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -74,8 +74,8 @@ MODULE_LICENSE("GPL");
74 74
75/***************************** Device Descriptor ****************************/ 75/***************************** Device Descriptor ****************************/
76 76
77#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */ 77#define MULTI_VENDOR_NUM 0x1d6b /* Linux Foundation */
78#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */ 78#define MULTI_PRODUCT_NUM 0x0104 /* Multifunction Composite Gadget */
79 79
80 80
81enum { 81enum {
@@ -121,8 +121,6 @@ static const struct usb_descriptor_header *otg_desc[] = {
121 121
122 122
123enum { 123enum {
124 MULTI_STRING_MANUFACTURER_IDX,
125 MULTI_STRING_PRODUCT_IDX,
126#ifdef CONFIG_USB_G_MULTI_RNDIS 124#ifdef CONFIG_USB_G_MULTI_RNDIS
127 MULTI_STRING_RNDIS_CONFIG_IDX, 125 MULTI_STRING_RNDIS_CONFIG_IDX,
128#endif 126#endif
@@ -131,11 +129,7 @@ enum {
131#endif 129#endif
132}; 130};
133 131
134static char manufacturer[50];
135
136static struct usb_string strings_dev[] = { 132static struct usb_string strings_dev[] = {
137 [MULTI_STRING_MANUFACTURER_IDX].s = manufacturer,
138 [MULTI_STRING_PRODUCT_IDX].s = DRIVER_DESC,
139#ifdef CONFIG_USB_G_MULTI_RNDIS 133#ifdef CONFIG_USB_G_MULTI_RNDIS
140 [MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS", 134 [MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS",
141#endif 135#endif
@@ -170,7 +164,7 @@ static u8 hostaddr[ETH_ALEN];
170 164
171#ifdef USB_ETH_RNDIS 165#ifdef USB_ETH_RNDIS
172 166
173static __ref int rndis_do_config(struct usb_configuration *c) 167static __init int rndis_do_config(struct usb_configuration *c)
174{ 168{
175 int ret; 169 int ret;
176 170
@@ -197,7 +191,6 @@ static __ref int rndis_do_config(struct usb_configuration *c)
197static int rndis_config_register(struct usb_composite_dev *cdev) 191static int rndis_config_register(struct usb_composite_dev *cdev)
198{ 192{
199 static struct usb_configuration config = { 193 static struct usb_configuration config = {
200 .bind = rndis_do_config,
201 .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM, 194 .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
202 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 195 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
203 }; 196 };
@@ -205,7 +198,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
205 config.label = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s; 198 config.label = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s;
206 config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id; 199 config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id;
207 200
208 return usb_add_config(cdev, &config); 201 return usb_add_config(cdev, &config, rndis_do_config);
209} 202}
210 203
211#else 204#else
@@ -222,7 +215,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
222 215
223#ifdef CONFIG_USB_G_MULTI_CDC 216#ifdef CONFIG_USB_G_MULTI_CDC
224 217
225static __ref int cdc_do_config(struct usb_configuration *c) 218static __init int cdc_do_config(struct usb_configuration *c)
226{ 219{
227 int ret; 220 int ret;
228 221
@@ -249,7 +242,6 @@ static __ref int cdc_do_config(struct usb_configuration *c)
249static int cdc_config_register(struct usb_composite_dev *cdev) 242static int cdc_config_register(struct usb_composite_dev *cdev)
250{ 243{
251 static struct usb_configuration config = { 244 static struct usb_configuration config = {
252 .bind = cdc_do_config,
253 .bConfigurationValue = MULTI_CDC_CONFIG_NUM, 245 .bConfigurationValue = MULTI_CDC_CONFIG_NUM,
254 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 246 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
255 }; 247 };
@@ -257,7 +249,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
257 config.label = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s; 249 config.label = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s;
258 config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id; 250 config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id;
259 251
260 return usb_add_config(cdev, &config); 252 return usb_add_config(cdev, &config, cdc_do_config);
261} 253}
262 254
263#else 255#else
@@ -314,20 +306,11 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
314 device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); 306 device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
315 } 307 }
316 308
317 /* allocate string descriptor numbers */ 309 /* allocate string IDs */
318 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
319 init_utsname()->sysname, init_utsname()->release,
320 gadget->name);
321
322 status = usb_string_ids_tab(cdev, strings_dev); 310 status = usb_string_ids_tab(cdev, strings_dev);
323 if (unlikely(status < 0)) 311 if (unlikely(status < 0))
324 goto fail2; 312 goto fail2;
325 313
326 device_desc.iManufacturer =
327 strings_dev[MULTI_STRING_MANUFACTURER_IDX].id;
328 device_desc.iProduct =
329 strings_dev[MULTI_STRING_PRODUCT_IDX].id;
330
331 /* register configurations */ 314 /* register configurations */
332 status = rndis_config_register(cdev); 315 status = rndis_config_register(cdev);
333 if (unlikely(status < 0)) 316 if (unlikely(status < 0))
@@ -368,14 +351,15 @@ static struct usb_composite_driver multi_driver = {
368 .name = "g_multi", 351 .name = "g_multi",
369 .dev = &device_desc, 352 .dev = &device_desc,
370 .strings = dev_strings, 353 .strings = dev_strings,
371 .bind = multi_bind,
372 .unbind = __exit_p(multi_unbind), 354 .unbind = __exit_p(multi_unbind),
355 .iProduct = DRIVER_DESC,
356 .needs_serial = 1,
373}; 357};
374 358
375 359
376static int __init multi_init(void) 360static int __init multi_init(void)
377{ 361{
378 return usb_composite_register(&multi_driver); 362 return usb_composite_probe(&multi_driver, multi_bind);
379} 363}
380module_init(multi_init); 364module_init(multi_init);
381 365
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 9498be87a724..d09155b25d73 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1929,7 +1929,8 @@ static void ep0_start (struct net2280 *dev)
1929 * disconnect is reported. then a host may connect again, or 1929 * disconnect is reported. then a host may connect again, or
1930 * the driver might get unbound. 1930 * the driver might get unbound.
1931 */ 1931 */
1932int usb_gadget_register_driver (struct usb_gadget_driver *driver) 1932int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1933 int (*bind)(struct usb_gadget *))
1933{ 1934{
1934 struct net2280 *dev = the_controller; 1935 struct net2280 *dev = the_controller;
1935 int retval; 1936 int retval;
@@ -1941,8 +1942,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1941 */ 1942 */
1942 if (!driver 1943 if (!driver
1943 || driver->speed != USB_SPEED_HIGH 1944 || driver->speed != USB_SPEED_HIGH
1944 || !driver->bind 1945 || !bind || !driver->setup)
1945 || !driver->setup)
1946 return -EINVAL; 1946 return -EINVAL;
1947 if (!dev) 1947 if (!dev)
1948 return -ENODEV; 1948 return -ENODEV;
@@ -1957,7 +1957,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1957 driver->driver.bus = NULL; 1957 driver->driver.bus = NULL;
1958 dev->driver = driver; 1958 dev->driver = driver;
1959 dev->gadget.dev.driver = &driver->driver; 1959 dev->gadget.dev.driver = &driver->driver;
1960 retval = driver->bind (&dev->gadget); 1960 retval = bind(&dev->gadget);
1961 if (retval) { 1961 if (retval) {
1962 DEBUG (dev, "bind to driver %s --> %d\n", 1962 DEBUG (dev, "bind to driver %s --> %d\n",
1963 driver->driver.name, retval); 1963 driver->driver.name, retval);
@@ -1993,7 +1993,7 @@ err_unbind:
1993 dev->driver = NULL; 1993 dev->driver = NULL;
1994 return retval; 1994 return retval;
1995} 1995}
1996EXPORT_SYMBOL (usb_gadget_register_driver); 1996EXPORT_SYMBOL(usb_gadget_probe_driver);
1997 1997
1998static void 1998static void
1999stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) 1999stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
index 7d6b66a85724..b5364f9d7cd2 100644
--- a/drivers/usb/gadget/nokia.c
+++ b/drivers/usb/gadget/nokia.c
@@ -135,7 +135,6 @@ static int __init nokia_bind_config(struct usb_configuration *c)
135 135
136static struct usb_configuration nokia_config_500ma_driver = { 136static struct usb_configuration nokia_config_500ma_driver = {
137 .label = "Bus Powered", 137 .label = "Bus Powered",
138 .bind = nokia_bind_config,
139 .bConfigurationValue = 1, 138 .bConfigurationValue = 1,
140 /* .iConfiguration = DYNAMIC */ 139 /* .iConfiguration = DYNAMIC */
141 .bmAttributes = USB_CONFIG_ATT_ONE, 140 .bmAttributes = USB_CONFIG_ATT_ONE,
@@ -144,7 +143,6 @@ static struct usb_configuration nokia_config_500ma_driver = {
144 143
145static struct usb_configuration nokia_config_100ma_driver = { 144static struct usb_configuration nokia_config_100ma_driver = {
146 .label = "Self Powered", 145 .label = "Self Powered",
147 .bind = nokia_bind_config,
148 .bConfigurationValue = 2, 146 .bConfigurationValue = 2,
149 /* .iConfiguration = DYNAMIC */ 147 /* .iConfiguration = DYNAMIC */
150 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 148 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
@@ -206,11 +204,13 @@ static int __init nokia_bind(struct usb_composite_dev *cdev)
206 } 204 }
207 205
208 /* finaly register the configuration */ 206 /* finaly register the configuration */
209 status = usb_add_config(cdev, &nokia_config_500ma_driver); 207 status = usb_add_config(cdev, &nokia_config_500ma_driver,
208 nokia_bind_config);
210 if (status < 0) 209 if (status < 0)
211 goto err_usb; 210 goto err_usb;
212 211
213 status = usb_add_config(cdev, &nokia_config_100ma_driver); 212 status = usb_add_config(cdev, &nokia_config_100ma_driver,
213 nokia_bind_config);
214 if (status < 0) 214 if (status < 0)
215 goto err_usb; 215 goto err_usb;
216 216
@@ -241,13 +241,12 @@ static struct usb_composite_driver nokia_driver = {
241 .name = "g_nokia", 241 .name = "g_nokia",
242 .dev = &device_desc, 242 .dev = &device_desc,
243 .strings = dev_strings, 243 .strings = dev_strings,
244 .bind = nokia_bind,
245 .unbind = __exit_p(nokia_unbind), 244 .unbind = __exit_p(nokia_unbind),
246}; 245};
247 246
248static int __init nokia_init(void) 247static int __init nokia_init(void)
249{ 248{
250 return usb_composite_register(&nokia_driver); 249 return usb_composite_probe(&nokia_driver, nokia_bind);
251} 250}
252module_init(nokia_init); 251module_init(nokia_init);
253 252
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index f81e4f025f23..61d3ca6619bb 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2102,7 +2102,8 @@ static inline int machine_without_vbus_sense(void)
2102 ); 2102 );
2103} 2103}
2104 2104
2105int usb_gadget_register_driver (struct usb_gadget_driver *driver) 2105int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2106 int (*bind)(struct usb_gadget *))
2106{ 2107{
2107 int status = -ENODEV; 2108 int status = -ENODEV;
2108 struct omap_ep *ep; 2109 struct omap_ep *ep;
@@ -2114,8 +2115,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2114 if (!driver 2115 if (!driver
2115 // FIXME if otg, check: driver->is_otg 2116 // FIXME if otg, check: driver->is_otg
2116 || driver->speed < USB_SPEED_FULL 2117 || driver->speed < USB_SPEED_FULL
2117 || !driver->bind 2118 || !bind || !driver->setup)
2118 || !driver->setup)
2119 return -EINVAL; 2119 return -EINVAL;
2120 2120
2121 spin_lock_irqsave(&udc->lock, flags); 2121 spin_lock_irqsave(&udc->lock, flags);
@@ -2145,7 +2145,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2145 if (udc->dc_clk != NULL) 2145 if (udc->dc_clk != NULL)
2146 omap_udc_enable_clock(1); 2146 omap_udc_enable_clock(1);
2147 2147
2148 status = driver->bind (&udc->gadget); 2148 status = bind(&udc->gadget);
2149 if (status) { 2149 if (status) {
2150 DBG("bind to %s --> %d\n", driver->driver.name, status); 2150 DBG("bind to %s --> %d\n", driver->driver.name, status);
2151 udc->gadget.dev.driver = NULL; 2151 udc->gadget.dev.driver = NULL;
@@ -2186,7 +2186,7 @@ done:
2186 omap_udc_enable_clock(0); 2186 omap_udc_enable_clock(0);
2187 return status; 2187 return status;
2188} 2188}
2189EXPORT_SYMBOL(usb_gadget_register_driver); 2189EXPORT_SYMBOL(usb_gadget_probe_driver);
2190 2190
2191int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) 2191int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2192{ 2192{
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 327a92a137b4..2fc8636316c5 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1348,7 +1348,7 @@ printer_unbind(struct usb_gadget *gadget)
1348 set_gadget_data(gadget, NULL); 1348 set_gadget_data(gadget, NULL);
1349} 1349}
1350 1350
1351static int __ref 1351static int __init
1352printer_bind(struct usb_gadget *gadget) 1352printer_bind(struct usb_gadget *gadget)
1353{ 1353{
1354 struct printer_dev *dev; 1354 struct printer_dev *dev;
@@ -1544,7 +1544,6 @@ static struct usb_gadget_driver printer_driver = {
1544 .speed = DEVSPEED, 1544 .speed = DEVSPEED,
1545 1545
1546 .function = (char *) driver_desc, 1546 .function = (char *) driver_desc,
1547 .bind = printer_bind,
1548 .unbind = printer_unbind, 1547 .unbind = printer_unbind,
1549 1548
1550 .setup = printer_setup, 1549 .setup = printer_setup,
@@ -1580,11 +1579,11 @@ init(void)
1580 return status; 1579 return status;
1581 } 1580 }
1582 1581
1583 status = usb_gadget_register_driver(&printer_driver); 1582 status = usb_gadget_probe_driver(&printer_driver, printer_bind);
1584 if (status) { 1583 if (status) {
1585 class_destroy(usb_gadget_class); 1584 class_destroy(usb_gadget_class);
1586 unregister_chrdev_region(g_printer_devno, 1); 1585 unregister_chrdev_region(g_printer_devno, 1);
1587 DBG(dev, "usb_gadget_register_driver %x\n", status); 1586 DBG(dev, "usb_gadget_probe_driver %x\n", status);
1588 } 1587 }
1589 1588
1590 return status; 1589 return status;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index be5fb34d9602..b37f92cb71bc 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1280,14 +1280,15 @@ static void udc_enable (struct pxa25x_udc *dev)
1280 * disconnect is reported. then a host may connect again, or 1280 * disconnect is reported. then a host may connect again, or
1281 * the driver might get unbound. 1281 * the driver might get unbound.
1282 */ 1282 */
1283int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1283int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1284 int (*bind)(struct usb_gadget *))
1284{ 1285{
1285 struct pxa25x_udc *dev = the_controller; 1286 struct pxa25x_udc *dev = the_controller;
1286 int retval; 1287 int retval;
1287 1288
1288 if (!driver 1289 if (!driver
1289 || driver->speed < USB_SPEED_FULL 1290 || driver->speed < USB_SPEED_FULL
1290 || !driver->bind 1291 || !bind
1291 || !driver->disconnect 1292 || !driver->disconnect
1292 || !driver->setup) 1293 || !driver->setup)
1293 return -EINVAL; 1294 return -EINVAL;
@@ -1308,7 +1309,7 @@ fail:
1308 dev->gadget.dev.driver = NULL; 1309 dev->gadget.dev.driver = NULL;
1309 return retval; 1310 return retval;
1310 } 1311 }
1311 retval = driver->bind(&dev->gadget); 1312 retval = bind(&dev->gadget);
1312 if (retval) { 1313 if (retval) {
1313 DMSG("bind to driver %s --> error %d\n", 1314 DMSG("bind to driver %s --> error %d\n",
1314 driver->driver.name, retval); 1315 driver->driver.name, retval);
@@ -1338,7 +1339,7 @@ fail:
1338bind_fail: 1339bind_fail:
1339 return retval; 1340 return retval;
1340} 1341}
1341EXPORT_SYMBOL(usb_gadget_register_driver); 1342EXPORT_SYMBOL(usb_gadget_probe_driver);
1342 1343
1343static void 1344static void
1344stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) 1345stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 980762453a9c..027d66f81620 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1792,8 +1792,9 @@ static void udc_enable(struct pxa_udc *udc)
1792} 1792}
1793 1793
1794/** 1794/**
1795 * usb_gadget_register_driver - Register gadget driver 1795 * usb_gadget_probe_driver - Register gadget driver
1796 * @driver: gadget driver 1796 * @driver: gadget driver
1797 * @bind: bind function
1797 * 1798 *
1798 * When a driver is successfully registered, it will receive control requests 1799 * When a driver is successfully registered, it will receive control requests
1799 * including set_configuration(), which enables non-control requests. Then 1800 * including set_configuration(), which enables non-control requests. Then
@@ -1805,12 +1806,13 @@ static void udc_enable(struct pxa_udc *udc)
1805 * 1806 *
1806 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise 1807 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1807 */ 1808 */
1808int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1809int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1810 int (*bind)(struct usb_gadget *))
1809{ 1811{
1810 struct pxa_udc *udc = the_controller; 1812 struct pxa_udc *udc = the_controller;
1811 int retval; 1813 int retval;
1812 1814
1813 if (!driver || driver->speed < USB_SPEED_FULL || !driver->bind 1815 if (!driver || driver->speed < USB_SPEED_FULL || !bind
1814 || !driver->disconnect || !driver->setup) 1816 || !driver->disconnect || !driver->setup)
1815 return -EINVAL; 1817 return -EINVAL;
1816 if (!udc) 1818 if (!udc)
@@ -1828,7 +1830,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1828 dev_err(udc->dev, "device_add error %d\n", retval); 1830 dev_err(udc->dev, "device_add error %d\n", retval);
1829 goto add_fail; 1831 goto add_fail;
1830 } 1832 }
1831 retval = driver->bind(&udc->gadget); 1833 retval = bind(&udc->gadget);
1832 if (retval) { 1834 if (retval) {
1833 dev_err(udc->dev, "bind to driver %s --> error %d\n", 1835 dev_err(udc->dev, "bind to driver %s --> error %d\n",
1834 driver->driver.name, retval); 1836 driver->driver.name, retval);
@@ -1859,7 +1861,7 @@ add_fail:
1859 udc->gadget.dev.driver = NULL; 1861 udc->gadget.dev.driver = NULL;
1860 return retval; 1862 return retval;
1861} 1863}
1862EXPORT_SYMBOL(usb_gadget_register_driver); 1864EXPORT_SYMBOL(usb_gadget_probe_driver);
1863 1865
1864 1866
1865/** 1867/**
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 2456ccd9965e..20d43da319ae 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -42,6 +42,7 @@ static const char *r8a66597_ep_name[] = {
42 "ep8", "ep9", 42 "ep8", "ep9",
43}; 43};
44 44
45static void init_controller(struct r8a66597 *r8a66597);
45static void disable_controller(struct r8a66597 *r8a66597); 46static void disable_controller(struct r8a66597 *r8a66597);
46static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req); 47static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
47static void irq_packet_write(struct r8a66597_ep *ep, 48static void irq_packet_write(struct r8a66597_ep *ep,
@@ -104,6 +105,8 @@ __acquires(r8a66597->lock)
104 spin_lock(&r8a66597->lock); 105 spin_lock(&r8a66597->lock);
105 106
106 disable_controller(r8a66597); 107 disable_controller(r8a66597);
108 init_controller(r8a66597);
109 r8a66597_bset(r8a66597, VBSE, INTENB0);
107 INIT_LIST_HEAD(&r8a66597->ep[0].queue); 110 INIT_LIST_HEAD(&r8a66597->ep[0].queue);
108} 111}
109 112
@@ -274,7 +277,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
274 } 277 }
275 278
276 if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) { 279 if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
277 pr_err(KERN_ERR "r8a66597 pipe memory is insufficient\n"); 280 pr_err("r8a66597 pipe memory is insufficient\n");
278 return -ENOMEM; 281 return -ENOMEM;
279 } 282 }
280 283
@@ -1405,14 +1408,15 @@ static struct usb_ep_ops r8a66597_ep_ops = {
1405/*-------------------------------------------------------------------------*/ 1408/*-------------------------------------------------------------------------*/
1406static struct r8a66597 *the_controller; 1409static struct r8a66597 *the_controller;
1407 1410
1408int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1411int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1412 int (*bind)(struct usb_gadget *))
1409{ 1413{
1410 struct r8a66597 *r8a66597 = the_controller; 1414 struct r8a66597 *r8a66597 = the_controller;
1411 int retval; 1415 int retval;
1412 1416
1413 if (!driver 1417 if (!driver
1414 || driver->speed != USB_SPEED_HIGH 1418 || driver->speed != USB_SPEED_HIGH
1415 || !driver->bind 1419 || !bind
1416 || !driver->setup) 1420 || !driver->setup)
1417 return -EINVAL; 1421 return -EINVAL;
1418 if (!r8a66597) 1422 if (!r8a66597)
@@ -1431,7 +1435,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1431 goto error; 1435 goto error;
1432 } 1436 }
1433 1437
1434 retval = driver->bind(&r8a66597->gadget); 1438 retval = bind(&r8a66597->gadget);
1435 if (retval) { 1439 if (retval) {
1436 printk(KERN_ERR "bind to driver error (%d)\n", retval); 1440 printk(KERN_ERR "bind to driver error (%d)\n", retval);
1437 device_del(&r8a66597->gadget.dev); 1441 device_del(&r8a66597->gadget.dev);
@@ -1456,7 +1460,7 @@ error:
1456 1460
1457 return retval; 1461 return retval;
1458} 1462}
1459EXPORT_SYMBOL(usb_gadget_register_driver); 1463EXPORT_SYMBOL(usb_gadget_probe_driver);
1460 1464
1461int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1465int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1462{ 1466{
diff --git a/drivers/usb/gadget/r8a66597-udc.h b/drivers/usb/gadget/r8a66597-udc.h
index f763b5190afa..5fc22e09a0f1 100644
--- a/drivers/usb/gadget/r8a66597-udc.h
+++ b/drivers/usb/gadget/r8a66597-udc.h
@@ -136,7 +136,7 @@ static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
136 int len) 136 int len)
137{ 137{
138 void __iomem *fifoaddr = r8a66597->reg + offset; 138 void __iomem *fifoaddr = r8a66597->reg + offset;
139 unsigned int data; 139 unsigned int data = 0;
140 int i; 140 int i;
141 141
142 if (r8a66597->pdata->on_chip) { 142 if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 972d5ddd1e18..5b314041dfa9 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -61,17 +61,17 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
61#define RNDIS_MAX_CONFIGS 1 61#define RNDIS_MAX_CONFIGS 1
62 62
63 63
64static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS]; 64static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
65 65
66/* Driver Version */ 66/* Driver Version */
67static const __le32 rndis_driver_version = cpu_to_le32 (1); 67static const __le32 rndis_driver_version = cpu_to_le32(1);
68 68
69/* Function Prototypes */ 69/* Function Prototypes */
70static rndis_resp_t *rndis_add_response (int configNr, u32 length); 70static rndis_resp_t *rndis_add_response(int configNr, u32 length);
71 71
72 72
73/* supported OIDs */ 73/* supported OIDs */
74static const u32 oid_supported_list [] = 74static const u32 oid_supported_list[] =
75{ 75{
76 /* the general stuff */ 76 /* the general stuff */
77 OID_GEN_SUPPORTED_LIST, 77 OID_GEN_SUPPORTED_LIST,
@@ -161,21 +161,20 @@ static const u32 oid_supported_list [] =
161 161
162 162
163/* NDIS Functions */ 163/* NDIS Functions */
164static int 164static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
165gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, 165 unsigned buf_len, rndis_resp_t *r)
166 rndis_resp_t *r)
167{ 166{
168 int retval = -ENOTSUPP; 167 int retval = -ENOTSUPP;
169 u32 length = 4; /* usually */ 168 u32 length = 4; /* usually */
170 __le32 *outbuf; 169 __le32 *outbuf;
171 int i, count; 170 int i, count;
172 rndis_query_cmplt_type *resp; 171 rndis_query_cmplt_type *resp;
173 struct net_device *net; 172 struct net_device *net;
174 struct rtnl_link_stats64 temp; 173 struct rtnl_link_stats64 temp;
175 const struct rtnl_link_stats64 *stats; 174 const struct rtnl_link_stats64 *stats;
176 175
177 if (!r) return -ENOMEM; 176 if (!r) return -ENOMEM;
178 resp = (rndis_query_cmplt_type *) r->buf; 177 resp = (rndis_query_cmplt_type *)r->buf;
179 178
180 if (!resp) return -ENOMEM; 179 if (!resp) return -ENOMEM;
181 180
@@ -191,8 +190,8 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
191 } 190 }
192 191
193 /* response goes here, right after the header */ 192 /* response goes here, right after the header */
194 outbuf = (__le32 *) &resp[1]; 193 outbuf = (__le32 *)&resp[1];
195 resp->InformationBufferOffset = cpu_to_le32 (16); 194 resp->InformationBufferOffset = cpu_to_le32(16);
196 195
197 net = rndis_per_dev_params[configNr].dev; 196 net = rndis_per_dev_params[configNr].dev;
198 stats = dev_get_stats(net, &temp); 197 stats = dev_get_stats(net, &temp);
@@ -204,10 +203,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
204 /* mandatory */ 203 /* mandatory */
205 case OID_GEN_SUPPORTED_LIST: 204 case OID_GEN_SUPPORTED_LIST:
206 pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__); 205 pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__);
207 length = sizeof (oid_supported_list); 206 length = sizeof(oid_supported_list);
208 count = length / sizeof (u32); 207 count = length / sizeof(u32);
209 for (i = 0; i < count; i++) 208 for (i = 0; i < count; i++)
210 outbuf[i] = cpu_to_le32 (oid_supported_list[i]); 209 outbuf[i] = cpu_to_le32(oid_supported_list[i]);
211 retval = 0; 210 retval = 0;
212 break; 211 break;
213 212
@@ -220,14 +219,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
220 * reddite ergo quae sunt Caesaris Caesari 219 * reddite ergo quae sunt Caesaris Caesari
221 * et quae sunt Dei Deo! 220 * et quae sunt Dei Deo!
222 */ 221 */
223 *outbuf = cpu_to_le32 (0); 222 *outbuf = cpu_to_le32(0);
224 retval = 0; 223 retval = 0;
225 break; 224 break;
226 225
227 /* mandatory */ 226 /* mandatory */
228 case OID_GEN_MEDIA_SUPPORTED: 227 case OID_GEN_MEDIA_SUPPORTED:
229 pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__); 228 pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__);
230 *outbuf = cpu_to_le32 (rndis_per_dev_params [configNr].medium); 229 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
231 retval = 0; 230 retval = 0;
232 break; 231 break;
233 232
@@ -235,16 +234,16 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
235 case OID_GEN_MEDIA_IN_USE: 234 case OID_GEN_MEDIA_IN_USE:
236 pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__); 235 pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__);
237 /* one medium, one transport... (maybe you do it better) */ 236 /* one medium, one transport... (maybe you do it better) */
238 *outbuf = cpu_to_le32 (rndis_per_dev_params [configNr].medium); 237 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
239 retval = 0; 238 retval = 0;
240 break; 239 break;
241 240
242 /* mandatory */ 241 /* mandatory */
243 case OID_GEN_MAXIMUM_FRAME_SIZE: 242 case OID_GEN_MAXIMUM_FRAME_SIZE:
244 pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__); 243 pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
245 if (rndis_per_dev_params [configNr].dev) { 244 if (rndis_per_dev_params[configNr].dev) {
246 *outbuf = cpu_to_le32 ( 245 *outbuf = cpu_to_le32(
247 rndis_per_dev_params [configNr].dev->mtu); 246 rndis_per_dev_params[configNr].dev->mtu);
248 retval = 0; 247 retval = 0;
249 } 248 }
250 break; 249 break;
@@ -253,21 +252,21 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
253 case OID_GEN_LINK_SPEED: 252 case OID_GEN_LINK_SPEED:
254 if (rndis_debug > 1) 253 if (rndis_debug > 1)
255 pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__); 254 pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__);
256 if (rndis_per_dev_params [configNr].media_state 255 if (rndis_per_dev_params[configNr].media_state
257 == NDIS_MEDIA_STATE_DISCONNECTED) 256 == NDIS_MEDIA_STATE_DISCONNECTED)
258 *outbuf = cpu_to_le32 (0); 257 *outbuf = cpu_to_le32(0);
259 else 258 else
260 *outbuf = cpu_to_le32 ( 259 *outbuf = cpu_to_le32(
261 rndis_per_dev_params [configNr].speed); 260 rndis_per_dev_params[configNr].speed);
262 retval = 0; 261 retval = 0;
263 break; 262 break;
264 263
265 /* mandatory */ 264 /* mandatory */
266 case OID_GEN_TRANSMIT_BLOCK_SIZE: 265 case OID_GEN_TRANSMIT_BLOCK_SIZE:
267 pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__); 266 pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
268 if (rndis_per_dev_params [configNr].dev) { 267 if (rndis_per_dev_params[configNr].dev) {
269 *outbuf = cpu_to_le32 ( 268 *outbuf = cpu_to_le32(
270 rndis_per_dev_params [configNr].dev->mtu); 269 rndis_per_dev_params[configNr].dev->mtu);
271 retval = 0; 270 retval = 0;
272 } 271 }
273 break; 272 break;
@@ -275,9 +274,9 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
275 /* mandatory */ 274 /* mandatory */
276 case OID_GEN_RECEIVE_BLOCK_SIZE: 275 case OID_GEN_RECEIVE_BLOCK_SIZE:
277 pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__); 276 pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
278 if (rndis_per_dev_params [configNr].dev) { 277 if (rndis_per_dev_params[configNr].dev) {
279 *outbuf = cpu_to_le32 ( 278 *outbuf = cpu_to_le32(
280 rndis_per_dev_params [configNr].dev->mtu); 279 rndis_per_dev_params[configNr].dev->mtu);
281 retval = 0; 280 retval = 0;
282 } 281 }
283 break; 282 break;
@@ -285,18 +284,20 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
285 /* mandatory */ 284 /* mandatory */
286 case OID_GEN_VENDOR_ID: 285 case OID_GEN_VENDOR_ID:
287 pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__); 286 pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__);
288 *outbuf = cpu_to_le32 ( 287 *outbuf = cpu_to_le32(
289 rndis_per_dev_params [configNr].vendorID); 288 rndis_per_dev_params[configNr].vendorID);
290 retval = 0; 289 retval = 0;
291 break; 290 break;
292 291
293 /* mandatory */ 292 /* mandatory */
294 case OID_GEN_VENDOR_DESCRIPTION: 293 case OID_GEN_VENDOR_DESCRIPTION:
295 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); 294 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
296 if ( rndis_per_dev_params [configNr].vendorDescr ) { 295 if (rndis_per_dev_params[configNr].vendorDescr) {
297 length = strlen (rndis_per_dev_params [configNr].vendorDescr); 296 length = strlen(rndis_per_dev_params[configNr].
298 memcpy (outbuf, 297 vendorDescr);
299 rndis_per_dev_params [configNr].vendorDescr, length); 298 memcpy(outbuf,
299 rndis_per_dev_params[configNr].vendorDescr,
300 length);
300 } else { 301 } else {
301 outbuf[0] = 0; 302 outbuf[0] = 0;
302 } 303 }
@@ -313,7 +314,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
313 /* mandatory */ 314 /* mandatory */
314 case OID_GEN_CURRENT_PACKET_FILTER: 315 case OID_GEN_CURRENT_PACKET_FILTER:
315 pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__); 316 pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
316 *outbuf = cpu_to_le32 (*rndis_per_dev_params[configNr].filter); 317 *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
317 retval = 0; 318 retval = 0;
318 break; 319 break;
319 320
@@ -328,14 +329,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
328 case OID_GEN_MEDIA_CONNECT_STATUS: 329 case OID_GEN_MEDIA_CONNECT_STATUS:
329 if (rndis_debug > 1) 330 if (rndis_debug > 1)
330 pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__); 331 pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
331 *outbuf = cpu_to_le32 (rndis_per_dev_params [configNr] 332 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
332 .media_state); 333 .media_state);
333 retval = 0; 334 retval = 0;
334 break; 335 break;
335 336
336 case OID_GEN_PHYSICAL_MEDIUM: 337 case OID_GEN_PHYSICAL_MEDIUM:
337 pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__); 338 pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__);
338 *outbuf = cpu_to_le32 (0); 339 *outbuf = cpu_to_le32(0);
339 retval = 0; 340 retval = 0;
340 break; 341 break;
341 342
@@ -409,10 +410,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
409 /* mandatory */ 410 /* mandatory */
410 case OID_802_3_PERMANENT_ADDRESS: 411 case OID_802_3_PERMANENT_ADDRESS:
411 pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__); 412 pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__);
412 if (rndis_per_dev_params [configNr].dev) { 413 if (rndis_per_dev_params[configNr].dev) {
413 length = ETH_ALEN; 414 length = ETH_ALEN;
414 memcpy (outbuf, 415 memcpy(outbuf,
415 rndis_per_dev_params [configNr].host_mac, 416 rndis_per_dev_params[configNr].host_mac,
416 length); 417 length);
417 retval = 0; 418 retval = 0;
418 } 419 }
@@ -421,9 +422,9 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
421 /* mandatory */ 422 /* mandatory */
422 case OID_802_3_CURRENT_ADDRESS: 423 case OID_802_3_CURRENT_ADDRESS:
423 pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__); 424 pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__);
424 if (rndis_per_dev_params [configNr].dev) { 425 if (rndis_per_dev_params[configNr].dev) {
425 length = ETH_ALEN; 426 length = ETH_ALEN;
426 memcpy (outbuf, 427 memcpy(outbuf,
427 rndis_per_dev_params [configNr].host_mac, 428 rndis_per_dev_params [configNr].host_mac,
428 length); 429 length);
429 retval = 0; 430 retval = 0;
@@ -434,7 +435,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
434 case OID_802_3_MULTICAST_LIST: 435 case OID_802_3_MULTICAST_LIST:
435 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); 436 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
436 /* Multicast base address only */ 437 /* Multicast base address only */
437 *outbuf = cpu_to_le32 (0xE0000000); 438 *outbuf = cpu_to_le32(0xE0000000);
438 retval = 0; 439 retval = 0;
439 break; 440 break;
440 441
@@ -442,7 +443,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
442 case OID_802_3_MAXIMUM_LIST_SIZE: 443 case OID_802_3_MAXIMUM_LIST_SIZE:
443 pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); 444 pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
444 /* Multicast base address only */ 445 /* Multicast base address only */
445 *outbuf = cpu_to_le32 (1); 446 *outbuf = cpu_to_le32(1);
446 retval = 0; 447 retval = 0;
447 break; 448 break;
448 449
@@ -466,14 +467,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
466 /* mandatory */ 467 /* mandatory */
467 case OID_802_3_XMIT_ONE_COLLISION: 468 case OID_802_3_XMIT_ONE_COLLISION:
468 pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__); 469 pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__);
469 *outbuf = cpu_to_le32 (0); 470 *outbuf = cpu_to_le32(0);
470 retval = 0; 471 retval = 0;
471 break; 472 break;
472 473
473 /* mandatory */ 474 /* mandatory */
474 case OID_802_3_XMIT_MORE_COLLISIONS: 475 case OID_802_3_XMIT_MORE_COLLISIONS:
475 pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); 476 pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
476 *outbuf = cpu_to_le32 (0); 477 *outbuf = cpu_to_le32(0);
477 retval = 0; 478 retval = 0;
478 break; 479 break;
479 480
@@ -484,22 +485,22 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
484 if (retval < 0) 485 if (retval < 0)
485 length = 0; 486 length = 0;
486 487
487 resp->InformationBufferLength = cpu_to_le32 (length); 488 resp->InformationBufferLength = cpu_to_le32(length);
488 r->length = length + sizeof *resp; 489 r->length = length + sizeof(*resp);
489 resp->MessageLength = cpu_to_le32 (r->length); 490 resp->MessageLength = cpu_to_le32(r->length);
490 return retval; 491 return retval;
491} 492}
492 493
493static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, 494static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
494 rndis_resp_t *r) 495 rndis_resp_t *r)
495{ 496{
496 rndis_set_cmplt_type *resp; 497 rndis_set_cmplt_type *resp;
497 int i, retval = -ENOTSUPP; 498 int i, retval = -ENOTSUPP;
498 struct rndis_params *params; 499 struct rndis_params *params;
499 500
500 if (!r) 501 if (!r)
501 return -ENOMEM; 502 return -ENOMEM;
502 resp = (rndis_set_cmplt_type *) r->buf; 503 resp = (rndis_set_cmplt_type *)r->buf;
503 if (!resp) 504 if (!resp)
504 return -ENOMEM; 505 return -ENOMEM;
505 506
@@ -514,7 +515,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
514 } 515 }
515 } 516 }
516 517
517 params = &rndis_per_dev_params [configNr]; 518 params = &rndis_per_dev_params[configNr];
518 switch (OID) { 519 switch (OID) {
519 case OID_GEN_CURRENT_PACKET_FILTER: 520 case OID_GEN_CURRENT_PACKET_FILTER:
520 521
@@ -537,11 +538,11 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
537 params->state = RNDIS_DATA_INITIALIZED; 538 params->state = RNDIS_DATA_INITIALIZED;
538 netif_carrier_on(params->dev); 539 netif_carrier_on(params->dev);
539 if (netif_running(params->dev)) 540 if (netif_running(params->dev))
540 netif_wake_queue (params->dev); 541 netif_wake_queue(params->dev);
541 } else { 542 } else {
542 params->state = RNDIS_INITIALIZED; 543 params->state = RNDIS_INITIALIZED;
543 netif_carrier_off (params->dev); 544 netif_carrier_off(params->dev);
544 netif_stop_queue (params->dev); 545 netif_stop_queue(params->dev);
545 } 546 }
546 break; 547 break;
547 548
@@ -563,48 +564,47 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
563 * Response Functions 564 * Response Functions
564 */ 565 */
565 566
566static int rndis_init_response (int configNr, rndis_init_msg_type *buf) 567static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
567{ 568{
568 rndis_init_cmplt_type *resp; 569 rndis_init_cmplt_type *resp;
569 rndis_resp_t *r; 570 rndis_resp_t *r;
570 struct rndis_params *params = rndis_per_dev_params + configNr; 571 struct rndis_params *params = rndis_per_dev_params + configNr;
571 572
572 if (!params->dev) 573 if (!params->dev)
573 return -ENOTSUPP; 574 return -ENOTSUPP;
574 575
575 r = rndis_add_response (configNr, sizeof (rndis_init_cmplt_type)); 576 r = rndis_add_response(configNr, sizeof(rndis_init_cmplt_type));
576 if (!r) 577 if (!r)
577 return -ENOMEM; 578 return -ENOMEM;
578 resp = (rndis_init_cmplt_type *) r->buf; 579 resp = (rndis_init_cmplt_type *)r->buf;
579 580
580 resp->MessageType = cpu_to_le32 ( 581 resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT);
581 REMOTE_NDIS_INITIALIZE_CMPLT); 582 resp->MessageLength = cpu_to_le32(52);
582 resp->MessageLength = cpu_to_le32 (52);
583 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 583 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
584 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS); 584 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
585 resp->MajorVersion = cpu_to_le32 (RNDIS_MAJOR_VERSION); 585 resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION);
586 resp->MinorVersion = cpu_to_le32 (RNDIS_MINOR_VERSION); 586 resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
587 resp->DeviceFlags = cpu_to_le32 (RNDIS_DF_CONNECTIONLESS); 587 resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
588 resp->Medium = cpu_to_le32 (RNDIS_MEDIUM_802_3); 588 resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
589 resp->MaxPacketsPerTransfer = cpu_to_le32 (1); 589 resp->MaxPacketsPerTransfer = cpu_to_le32(1);
590 resp->MaxTransferSize = cpu_to_le32 ( 590 resp->MaxTransferSize = cpu_to_le32(
591 params->dev->mtu 591 params->dev->mtu
592 + sizeof (struct ethhdr) 592 + sizeof(struct ethhdr)
593 + sizeof (struct rndis_packet_msg_type) 593 + sizeof(struct rndis_packet_msg_type)
594 + 22); 594 + 22);
595 resp->PacketAlignmentFactor = cpu_to_le32 (0); 595 resp->PacketAlignmentFactor = cpu_to_le32(0);
596 resp->AFListOffset = cpu_to_le32 (0); 596 resp->AFListOffset = cpu_to_le32(0);
597 resp->AFListSize = cpu_to_le32 (0); 597 resp->AFListSize = cpu_to_le32(0);
598 598
599 params->resp_avail(params->v); 599 params->resp_avail(params->v);
600 return 0; 600 return 0;
601} 601}
602 602
603static int rndis_query_response (int configNr, rndis_query_msg_type *buf) 603static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
604{ 604{
605 rndis_query_cmplt_type *resp; 605 rndis_query_cmplt_type *resp;
606 rndis_resp_t *r; 606 rndis_resp_t *r;
607 struct rndis_params *params = rndis_per_dev_params + configNr; 607 struct rndis_params *params = rndis_per_dev_params + configNr;
608 608
609 /* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */ 609 /* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */
610 if (!params->dev) 610 if (!params->dev)
@@ -616,47 +616,46 @@ static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
616 * rndis_query_cmplt_type followed by data. 616 * rndis_query_cmplt_type followed by data.
617 * oid_supported_list is the largest data reply 617 * oid_supported_list is the largest data reply
618 */ 618 */
619 r = rndis_add_response (configNr, 619 r = rndis_add_response(configNr,
620 sizeof (oid_supported_list) + sizeof(rndis_query_cmplt_type)); 620 sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
621 if (!r) 621 if (!r)
622 return -ENOMEM; 622 return -ENOMEM;
623 resp = (rndis_query_cmplt_type *) r->buf; 623 resp = (rndis_query_cmplt_type *)r->buf;
624 624
625 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT); 625 resp->MessageType = cpu_to_le32(REMOTE_NDIS_QUERY_CMPLT);
626 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 626 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
627 627
628 if (gen_ndis_query_resp (configNr, le32_to_cpu (buf->OID), 628 if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
629 le32_to_cpu(buf->InformationBufferOffset) 629 le32_to_cpu(buf->InformationBufferOffset)
630 + 8 + (u8 *) buf, 630 + 8 + (u8 *)buf,
631 le32_to_cpu(buf->InformationBufferLength), 631 le32_to_cpu(buf->InformationBufferLength),
632 r)) { 632 r)) {
633 /* OID not supported */ 633 /* OID not supported */
634 resp->Status = cpu_to_le32 ( 634 resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
635 RNDIS_STATUS_NOT_SUPPORTED); 635 resp->MessageLength = cpu_to_le32(sizeof *resp);
636 resp->MessageLength = cpu_to_le32 (sizeof *resp); 636 resp->InformationBufferLength = cpu_to_le32(0);
637 resp->InformationBufferLength = cpu_to_le32 (0); 637 resp->InformationBufferOffset = cpu_to_le32(0);
638 resp->InformationBufferOffset = cpu_to_le32 (0);
639 } else 638 } else
640 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS); 639 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
641 640
642 params->resp_avail(params->v); 641 params->resp_avail(params->v);
643 return 0; 642 return 0;
644} 643}
645 644
646static int rndis_set_response (int configNr, rndis_set_msg_type *buf) 645static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
647{ 646{
648 u32 BufLength, BufOffset; 647 u32 BufLength, BufOffset;
649 rndis_set_cmplt_type *resp; 648 rndis_set_cmplt_type *resp;
650 rndis_resp_t *r; 649 rndis_resp_t *r;
651 struct rndis_params *params = rndis_per_dev_params + configNr; 650 struct rndis_params *params = rndis_per_dev_params + configNr;
652 651
653 r = rndis_add_response (configNr, sizeof (rndis_set_cmplt_type)); 652 r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type));
654 if (!r) 653 if (!r)
655 return -ENOMEM; 654 return -ENOMEM;
656 resp = (rndis_set_cmplt_type *) r->buf; 655 resp = (rndis_set_cmplt_type *)r->buf;
657 656
658 BufLength = le32_to_cpu (buf->InformationBufferLength); 657 BufLength = le32_to_cpu(buf->InformationBufferLength);
659 BufOffset = le32_to_cpu (buf->InformationBufferOffset); 658 BufOffset = le32_to_cpu(buf->InformationBufferOffset);
660 659
661#ifdef VERBOSE_DEBUG 660#ifdef VERBOSE_DEBUG
662 pr_debug("%s: Length: %d\n", __func__, BufLength); 661 pr_debug("%s: Length: %d\n", __func__, BufLength);
@@ -670,59 +669,59 @@ static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
670 pr_debug("\n"); 669 pr_debug("\n");
671#endif 670#endif
672 671
673 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_SET_CMPLT); 672 resp->MessageType = cpu_to_le32(REMOTE_NDIS_SET_CMPLT);
674 resp->MessageLength = cpu_to_le32 (16); 673 resp->MessageLength = cpu_to_le32(16);
675 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 674 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
676 if (gen_ndis_set_resp (configNr, le32_to_cpu (buf->OID), 675 if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
677 ((u8 *) buf) + 8 + BufOffset, BufLength, r)) 676 ((u8 *)buf) + 8 + BufOffset, BufLength, r))
678 resp->Status = cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED); 677 resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
679 else 678 else
680 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS); 679 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
681 680
682 params->resp_avail(params->v); 681 params->resp_avail(params->v);
683 return 0; 682 return 0;
684} 683}
685 684
686static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf) 685static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
687{ 686{
688 rndis_reset_cmplt_type *resp; 687 rndis_reset_cmplt_type *resp;
689 rndis_resp_t *r; 688 rndis_resp_t *r;
690 struct rndis_params *params = rndis_per_dev_params + configNr; 689 struct rndis_params *params = rndis_per_dev_params + configNr;
691 690
692 r = rndis_add_response (configNr, sizeof (rndis_reset_cmplt_type)); 691 r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
693 if (!r) 692 if (!r)
694 return -ENOMEM; 693 return -ENOMEM;
695 resp = (rndis_reset_cmplt_type *) r->buf; 694 resp = (rndis_reset_cmplt_type *)r->buf;
696 695
697 resp->MessageType = cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT); 696 resp->MessageType = cpu_to_le32(REMOTE_NDIS_RESET_CMPLT);
698 resp->MessageLength = cpu_to_le32 (16); 697 resp->MessageLength = cpu_to_le32(16);
699 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS); 698 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
700 /* resent information */ 699 /* resent information */
701 resp->AddressingReset = cpu_to_le32 (1); 700 resp->AddressingReset = cpu_to_le32(1);
702 701
703 params->resp_avail(params->v); 702 params->resp_avail(params->v);
704 return 0; 703 return 0;
705} 704}
706 705
707static int rndis_keepalive_response (int configNr, 706static int rndis_keepalive_response(int configNr,
708 rndis_keepalive_msg_type *buf) 707 rndis_keepalive_msg_type *buf)
709{ 708{
710 rndis_keepalive_cmplt_type *resp; 709 rndis_keepalive_cmplt_type *resp;
711 rndis_resp_t *r; 710 rndis_resp_t *r;
712 struct rndis_params *params = rndis_per_dev_params + configNr; 711 struct rndis_params *params = rndis_per_dev_params + configNr;
713 712
714 /* host "should" check only in RNDIS_DATA_INITIALIZED state */ 713 /* host "should" check only in RNDIS_DATA_INITIALIZED state */
715 714
716 r = rndis_add_response (configNr, sizeof (rndis_keepalive_cmplt_type)); 715 r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type));
717 if (!r) 716 if (!r)
718 return -ENOMEM; 717 return -ENOMEM;
719 resp = (rndis_keepalive_cmplt_type *) r->buf; 718 resp = (rndis_keepalive_cmplt_type *)r->buf;
720 719
721 resp->MessageType = cpu_to_le32 ( 720 resp->MessageType = cpu_to_le32(
722 REMOTE_NDIS_KEEPALIVE_CMPLT); 721 REMOTE_NDIS_KEEPALIVE_CMPLT);
723 resp->MessageLength = cpu_to_le32 (16); 722 resp->MessageLength = cpu_to_le32(16);
724 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 723 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
725 resp->Status = cpu_to_le32 (RNDIS_STATUS_SUCCESS); 724 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
726 725
727 params->resp_avail(params->v); 726 params->resp_avail(params->v);
728 return 0; 727 return 0;
@@ -732,86 +731,85 @@ static int rndis_keepalive_response (int configNr,
732/* 731/*
733 * Device to Host Comunication 732 * Device to Host Comunication
734 */ 733 */
735static int rndis_indicate_status_msg (int configNr, u32 status) 734static int rndis_indicate_status_msg(int configNr, u32 status)
736{ 735{
737 rndis_indicate_status_msg_type *resp; 736 rndis_indicate_status_msg_type *resp;
738 rndis_resp_t *r; 737 rndis_resp_t *r;
739 struct rndis_params *params = rndis_per_dev_params + configNr; 738 struct rndis_params *params = rndis_per_dev_params + configNr;
740 739
741 if (params->state == RNDIS_UNINITIALIZED) 740 if (params->state == RNDIS_UNINITIALIZED)
742 return -ENOTSUPP; 741 return -ENOTSUPP;
743 742
744 r = rndis_add_response (configNr, 743 r = rndis_add_response(configNr,
745 sizeof (rndis_indicate_status_msg_type)); 744 sizeof(rndis_indicate_status_msg_type));
746 if (!r) 745 if (!r)
747 return -ENOMEM; 746 return -ENOMEM;
748 resp = (rndis_indicate_status_msg_type *) r->buf; 747 resp = (rndis_indicate_status_msg_type *)r->buf;
749 748
750 resp->MessageType = cpu_to_le32 ( 749 resp->MessageType = cpu_to_le32(REMOTE_NDIS_INDICATE_STATUS_MSG);
751 REMOTE_NDIS_INDICATE_STATUS_MSG); 750 resp->MessageLength = cpu_to_le32(20);
752 resp->MessageLength = cpu_to_le32 (20); 751 resp->Status = cpu_to_le32(status);
753 resp->Status = cpu_to_le32 (status); 752 resp->StatusBufferLength = cpu_to_le32(0);
754 resp->StatusBufferLength = cpu_to_le32 (0); 753 resp->StatusBufferOffset = cpu_to_le32(0);
755 resp->StatusBufferOffset = cpu_to_le32 (0);
756 754
757 params->resp_avail(params->v); 755 params->resp_avail(params->v);
758 return 0; 756 return 0;
759} 757}
760 758
761int rndis_signal_connect (int configNr) 759int rndis_signal_connect(int configNr)
762{ 760{
763 rndis_per_dev_params [configNr].media_state 761 rndis_per_dev_params[configNr].media_state
764 = NDIS_MEDIA_STATE_CONNECTED; 762 = NDIS_MEDIA_STATE_CONNECTED;
765 return rndis_indicate_status_msg (configNr, 763 return rndis_indicate_status_msg(configNr,
766 RNDIS_STATUS_MEDIA_CONNECT); 764 RNDIS_STATUS_MEDIA_CONNECT);
767} 765}
768 766
769int rndis_signal_disconnect (int configNr) 767int rndis_signal_disconnect(int configNr)
770{ 768{
771 rndis_per_dev_params [configNr].media_state 769 rndis_per_dev_params[configNr].media_state
772 = NDIS_MEDIA_STATE_DISCONNECTED; 770 = NDIS_MEDIA_STATE_DISCONNECTED;
773 return rndis_indicate_status_msg (configNr, 771 return rndis_indicate_status_msg(configNr,
774 RNDIS_STATUS_MEDIA_DISCONNECT); 772 RNDIS_STATUS_MEDIA_DISCONNECT);
775} 773}
776 774
777void rndis_uninit (int configNr) 775void rndis_uninit(int configNr)
778{ 776{
779 u8 *buf; 777 u8 *buf;
780 u32 length; 778 u32 length;
781 779
782 if (configNr >= RNDIS_MAX_CONFIGS) 780 if (configNr >= RNDIS_MAX_CONFIGS)
783 return; 781 return;
784 rndis_per_dev_params [configNr].state = RNDIS_UNINITIALIZED; 782 rndis_per_dev_params[configNr].state = RNDIS_UNINITIALIZED;
785 783
786 /* drain the response queue */ 784 /* drain the response queue */
787 while ((buf = rndis_get_next_response(configNr, &length))) 785 while ((buf = rndis_get_next_response(configNr, &length)))
788 rndis_free_response(configNr, buf); 786 rndis_free_response(configNr, buf);
789} 787}
790 788
791void rndis_set_host_mac (int configNr, const u8 *addr) 789void rndis_set_host_mac(int configNr, const u8 *addr)
792{ 790{
793 rndis_per_dev_params [configNr].host_mac = addr; 791 rndis_per_dev_params[configNr].host_mac = addr;
794} 792}
795 793
796/* 794/*
797 * Message Parser 795 * Message Parser
798 */ 796 */
799int rndis_msg_parser (u8 configNr, u8 *buf) 797int rndis_msg_parser(u8 configNr, u8 *buf)
800{ 798{
801 u32 MsgType, MsgLength; 799 u32 MsgType, MsgLength;
802 __le32 *tmp; 800 __le32 *tmp;
803 struct rndis_params *params; 801 struct rndis_params *params;
804 802
805 if (!buf) 803 if (!buf)
806 return -ENOMEM; 804 return -ENOMEM;
807 805
808 tmp = (__le32 *) buf; 806 tmp = (__le32 *)buf;
809 MsgType = get_unaligned_le32(tmp++); 807 MsgType = get_unaligned_le32(tmp++);
810 MsgLength = get_unaligned_le32(tmp++); 808 MsgLength = get_unaligned_le32(tmp++);
811 809
812 if (configNr >= RNDIS_MAX_CONFIGS) 810 if (configNr >= RNDIS_MAX_CONFIGS)
813 return -ENOTSUPP; 811 return -ENOTSUPP;
814 params = &rndis_per_dev_params [configNr]; 812 params = &rndis_per_dev_params[configNr];
815 813
816 /* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for 814 /* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for
817 * rx/tx statistics and link status, in addition to KEEPALIVE traffic 815 * rx/tx statistics and link status, in addition to KEEPALIVE traffic
@@ -822,41 +820,41 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
822 switch (MsgType) { 820 switch (MsgType) {
823 case REMOTE_NDIS_INITIALIZE_MSG: 821 case REMOTE_NDIS_INITIALIZE_MSG:
824 pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n", 822 pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
825 __func__ ); 823 __func__);
826 params->state = RNDIS_INITIALIZED; 824 params->state = RNDIS_INITIALIZED;
827 return rndis_init_response (configNr, 825 return rndis_init_response(configNr,
828 (rndis_init_msg_type *) buf); 826 (rndis_init_msg_type *)buf);
829 827
830 case REMOTE_NDIS_HALT_MSG: 828 case REMOTE_NDIS_HALT_MSG:
831 pr_debug("%s: REMOTE_NDIS_HALT_MSG\n", 829 pr_debug("%s: REMOTE_NDIS_HALT_MSG\n",
832 __func__ ); 830 __func__);
833 params->state = RNDIS_UNINITIALIZED; 831 params->state = RNDIS_UNINITIALIZED;
834 if (params->dev) { 832 if (params->dev) {
835 netif_carrier_off (params->dev); 833 netif_carrier_off(params->dev);
836 netif_stop_queue (params->dev); 834 netif_stop_queue(params->dev);
837 } 835 }
838 return 0; 836 return 0;
839 837
840 case REMOTE_NDIS_QUERY_MSG: 838 case REMOTE_NDIS_QUERY_MSG:
841 return rndis_query_response (configNr, 839 return rndis_query_response(configNr,
842 (rndis_query_msg_type *) buf); 840 (rndis_query_msg_type *)buf);
843 841
844 case REMOTE_NDIS_SET_MSG: 842 case REMOTE_NDIS_SET_MSG:
845 return rndis_set_response (configNr, 843 return rndis_set_response(configNr,
846 (rndis_set_msg_type *) buf); 844 (rndis_set_msg_type *)buf);
847 845
848 case REMOTE_NDIS_RESET_MSG: 846 case REMOTE_NDIS_RESET_MSG:
849 pr_debug("%s: REMOTE_NDIS_RESET_MSG\n", 847 pr_debug("%s: REMOTE_NDIS_RESET_MSG\n",
850 __func__ ); 848 __func__);
851 return rndis_reset_response (configNr, 849 return rndis_reset_response(configNr,
852 (rndis_reset_msg_type *) buf); 850 (rndis_reset_msg_type *)buf);
853 851
854 case REMOTE_NDIS_KEEPALIVE_MSG: 852 case REMOTE_NDIS_KEEPALIVE_MSG:
855 /* For USB: host does this every 5 seconds */ 853 /* For USB: host does this every 5 seconds */
856 if (rndis_debug > 1) 854 if (rndis_debug > 1)
857 pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n", 855 pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
858 __func__ ); 856 __func__);
859 return rndis_keepalive_response (configNr, 857 return rndis_keepalive_response(configNr,
860 (rndis_keepalive_msg_type *) 858 (rndis_keepalive_msg_type *)
861 buf); 859 buf);
862 860
@@ -866,7 +864,7 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
866 * suspending itself. 864 * suspending itself.
867 */ 865 */
868 pr_warning("%s: unknown RNDIS message 0x%08X len %d\n", 866 pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
869 __func__ , MsgType, MsgLength); 867 __func__, MsgType, MsgLength);
870 { 868 {
871 unsigned i; 869 unsigned i;
872 for (i = 0; i < MsgLength; i += 16) { 870 for (i = 0; i < MsgLength; i += 16) {
@@ -901,10 +899,10 @@ int rndis_register(void (*resp_avail)(void *v), void *v)
901 return -EINVAL; 899 return -EINVAL;
902 900
903 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { 901 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
904 if (!rndis_per_dev_params [i].used) { 902 if (!rndis_per_dev_params[i].used) {
905 rndis_per_dev_params [i].used = 1; 903 rndis_per_dev_params[i].used = 1;
906 rndis_per_dev_params [i].resp_avail = resp_avail; 904 rndis_per_dev_params[i].resp_avail = resp_avail;
907 rndis_per_dev_params [i].v = v; 905 rndis_per_dev_params[i].v = v;
908 pr_debug("%s: configNr = %d\n", __func__, i); 906 pr_debug("%s: configNr = %d\n", __func__, i);
909 return i; 907 return i;
910 } 908 }
@@ -914,12 +912,12 @@ int rndis_register(void (*resp_avail)(void *v), void *v)
914 return -ENODEV; 912 return -ENODEV;
915} 913}
916 914
917void rndis_deregister (int configNr) 915void rndis_deregister(int configNr)
918{ 916{
919 pr_debug("%s: \n", __func__); 917 pr_debug("%s:\n", __func__);
920 918
921 if (configNr >= RNDIS_MAX_CONFIGS) return; 919 if (configNr >= RNDIS_MAX_CONFIGS) return;
922 rndis_per_dev_params [configNr].used = 0; 920 rndis_per_dev_params[configNr].used = 0;
923 921
924 return; 922 return;
925} 923}
@@ -931,76 +929,76 @@ int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
931 return -EINVAL; 929 return -EINVAL;
932 if (configNr >= RNDIS_MAX_CONFIGS) return -1; 930 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
933 931
934 rndis_per_dev_params [configNr].dev = dev; 932 rndis_per_dev_params[configNr].dev = dev;
935 rndis_per_dev_params [configNr].filter = cdc_filter; 933 rndis_per_dev_params[configNr].filter = cdc_filter;
936 934
937 return 0; 935 return 0;
938} 936}
939 937
940int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr) 938int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
941{ 939{
942 pr_debug("%s:\n", __func__); 940 pr_debug("%s:\n", __func__);
943 if (!vendorDescr) return -1; 941 if (!vendorDescr) return -1;
944 if (configNr >= RNDIS_MAX_CONFIGS) return -1; 942 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
945 943
946 rndis_per_dev_params [configNr].vendorID = vendorID; 944 rndis_per_dev_params[configNr].vendorID = vendorID;
947 rndis_per_dev_params [configNr].vendorDescr = vendorDescr; 945 rndis_per_dev_params[configNr].vendorDescr = vendorDescr;
948 946
949 return 0; 947 return 0;
950} 948}
951 949
952int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed) 950int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
953{ 951{
954 pr_debug("%s: %u %u\n", __func__, medium, speed); 952 pr_debug("%s: %u %u\n", __func__, medium, speed);
955 if (configNr >= RNDIS_MAX_CONFIGS) return -1; 953 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
956 954
957 rndis_per_dev_params [configNr].medium = medium; 955 rndis_per_dev_params[configNr].medium = medium;
958 rndis_per_dev_params [configNr].speed = speed; 956 rndis_per_dev_params[configNr].speed = speed;
959 957
960 return 0; 958 return 0;
961} 959}
962 960
963void rndis_add_hdr (struct sk_buff *skb) 961void rndis_add_hdr(struct sk_buff *skb)
964{ 962{
965 struct rndis_packet_msg_type *header; 963 struct rndis_packet_msg_type *header;
966 964
967 if (!skb) 965 if (!skb)
968 return; 966 return;
969 header = (void *) skb_push (skb, sizeof *header); 967 header = (void *)skb_push(skb, sizeof(*header));
970 memset (header, 0, sizeof *header); 968 memset(header, 0, sizeof *header);
971 header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG); 969 header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG);
972 header->MessageLength = cpu_to_le32(skb->len); 970 header->MessageLength = cpu_to_le32(skb->len);
973 header->DataOffset = cpu_to_le32 (36); 971 header->DataOffset = cpu_to_le32(36);
974 header->DataLength = cpu_to_le32(skb->len - sizeof *header); 972 header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
975} 973}
976 974
977void rndis_free_response (int configNr, u8 *buf) 975void rndis_free_response(int configNr, u8 *buf)
978{ 976{
979 rndis_resp_t *r; 977 rndis_resp_t *r;
980 struct list_head *act, *tmp; 978 struct list_head *act, *tmp;
981 979
982 list_for_each_safe (act, tmp, 980 list_for_each_safe(act, tmp,
983 &(rndis_per_dev_params [configNr].resp_queue)) 981 &(rndis_per_dev_params[configNr].resp_queue))
984 { 982 {
985 r = list_entry (act, rndis_resp_t, list); 983 r = list_entry(act, rndis_resp_t, list);
986 if (r && r->buf == buf) { 984 if (r && r->buf == buf) {
987 list_del (&r->list); 985 list_del(&r->list);
988 kfree (r); 986 kfree(r);
989 } 987 }
990 } 988 }
991} 989}
992 990
993u8 *rndis_get_next_response (int configNr, u32 *length) 991u8 *rndis_get_next_response(int configNr, u32 *length)
994{ 992{
995 rndis_resp_t *r; 993 rndis_resp_t *r;
996 struct list_head *act, *tmp; 994 struct list_head *act, *tmp;
997 995
998 if (!length) return NULL; 996 if (!length) return NULL;
999 997
1000 list_for_each_safe (act, tmp, 998 list_for_each_safe(act, tmp,
1001 &(rndis_per_dev_params [configNr].resp_queue)) 999 &(rndis_per_dev_params[configNr].resp_queue))
1002 { 1000 {
1003 r = list_entry (act, rndis_resp_t, list); 1001 r = list_entry(act, rndis_resp_t, list);
1004 if (!r->send) { 1002 if (!r->send) {
1005 r->send = 1; 1003 r->send = 1;
1006 *length = r->length; 1004 *length = r->length;
@@ -1011,20 +1009,20 @@ u8 *rndis_get_next_response (int configNr, u32 *length)
1011 return NULL; 1009 return NULL;
1012} 1010}
1013 1011
1014static rndis_resp_t *rndis_add_response (int configNr, u32 length) 1012static rndis_resp_t *rndis_add_response(int configNr, u32 length)
1015{ 1013{
1016 rndis_resp_t *r; 1014 rndis_resp_t *r;
1017 1015
1018 /* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... */ 1016 /* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... */
1019 r = kmalloc (sizeof (rndis_resp_t) + length, GFP_ATOMIC); 1017 r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC);
1020 if (!r) return NULL; 1018 if (!r) return NULL;
1021 1019
1022 r->buf = (u8 *) (r + 1); 1020 r->buf = (u8 *)(r + 1);
1023 r->length = length; 1021 r->length = length;
1024 r->send = 0; 1022 r->send = 0;
1025 1023
1026 list_add_tail (&r->list, 1024 list_add_tail(&r->list,
1027 &(rndis_per_dev_params [configNr].resp_queue)); 1025 &(rndis_per_dev_params[configNr].resp_queue));
1028 return r; 1026 return r;
1029} 1027}
1030 1028
@@ -1033,7 +1031,7 @@ int rndis_rm_hdr(struct gether *port,
1033 struct sk_buff_head *list) 1031 struct sk_buff_head *list)
1034{ 1032{
1035 /* tmp points to a struct rndis_packet_msg_type */ 1033 /* tmp points to a struct rndis_packet_msg_type */
1036 __le32 *tmp = (void *) skb->data; 1034 __le32 *tmp = (void *)skb->data;
1037 1035
1038 /* MessageType, MessageLength */ 1036 /* MessageType, MessageLength */
1039 if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG) 1037 if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
@@ -1054,7 +1052,7 @@ int rndis_rm_hdr(struct gether *port,
1054 return 0; 1052 return 0;
1055} 1053}
1056 1054
1057#ifdef CONFIG_USB_GADGET_DEBUG_FILES 1055#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1058 1056
1059static int rndis_proc_show(struct seq_file *m, void *v) 1057static int rndis_proc_show(struct seq_file *m, void *v)
1060{ 1058{
@@ -1087,7 +1085,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
1087} 1085}
1088 1086
1089static ssize_t rndis_proc_write(struct file *file, const char __user *buffer, 1087static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
1090 size_t count, loff_t *ppos) 1088 size_t count, loff_t *ppos)
1091{ 1089{
1092 rndis_params *p = PDE(file->f_path.dentry->d_inode)->data; 1090 rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
1093 u32 speed = 0; 1091 u32 speed = 0;
@@ -1109,11 +1107,11 @@ static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
1109 case '8': 1107 case '8':
1110 case '9': 1108 case '9':
1111 fl_speed = 1; 1109 fl_speed = 1;
1112 speed = speed*10 + c - '0'; 1110 speed = speed * 10 + c - '0';
1113 break; 1111 break;
1114 case 'C': 1112 case 'C':
1115 case 'c': 1113 case 'c':
1116 rndis_signal_connect (p->confignr); 1114 rndis_signal_connect(p->confignr);
1117 break; 1115 break;
1118 case 'D': 1116 case 'D':
1119 case 'd': 1117 case 'd':
@@ -1145,11 +1143,11 @@ static const struct file_operations rndis_proc_fops = {
1145 .write = rndis_proc_write, 1143 .write = rndis_proc_write,
1146}; 1144};
1147 1145
1148#define NAME_TEMPLATE "driver/rndis-%03d" 1146#define NAME_TEMPLATE "driver/rndis-%03d"
1149 1147
1150static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; 1148static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
1151 1149
1152#endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1150#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1153 1151
1154 1152
1155int rndis_init(void) 1153int rndis_init(void)
@@ -1160,42 +1158,40 @@ int rndis_init(void)
1160#ifdef CONFIG_USB_GADGET_DEBUG_FILES 1158#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1161 char name [20]; 1159 char name [20];
1162 1160
1163 sprintf (name, NAME_TEMPLATE, i); 1161 sprintf(name, NAME_TEMPLATE, i);
1164 if (!(rndis_connect_state [i] 1162 rndis_connect_state[i] = proc_create_data(name, 0660, NULL,
1165 = proc_create_data(name, 0660, NULL,
1166 &rndis_proc_fops, 1163 &rndis_proc_fops,
1167 (void *)(rndis_per_dev_params + i)))) 1164 (void *)(rndis_per_dev_params + i));
1168 { 1165 if (!rndis_connect_state[i]) {
1169 pr_debug("%s :remove entries", __func__); 1166 pr_debug("%s: remove entries", __func__);
1170 while (i) { 1167 while (i) {
1171 sprintf (name, NAME_TEMPLATE, --i); 1168 sprintf(name, NAME_TEMPLATE, --i);
1172 remove_proc_entry (name, NULL); 1169 remove_proc_entry(name, NULL);
1173 } 1170 }
1174 pr_debug("\n"); 1171 pr_debug("\n");
1175 return -EIO; 1172 return -EIO;
1176 } 1173 }
1177#endif 1174#endif
1178 rndis_per_dev_params [i].confignr = i; 1175 rndis_per_dev_params[i].confignr = i;
1179 rndis_per_dev_params [i].used = 0; 1176 rndis_per_dev_params[i].used = 0;
1180 rndis_per_dev_params [i].state = RNDIS_UNINITIALIZED; 1177 rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED;
1181 rndis_per_dev_params [i].media_state 1178 rndis_per_dev_params[i].media_state
1182 = NDIS_MEDIA_STATE_DISCONNECTED; 1179 = NDIS_MEDIA_STATE_DISCONNECTED;
1183 INIT_LIST_HEAD (&(rndis_per_dev_params [i].resp_queue)); 1180 INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
1184 } 1181 }
1185 1182
1186 return 0; 1183 return 0;
1187} 1184}
1188 1185
1189void rndis_exit (void) 1186void rndis_exit(void)
1190{ 1187{
1191#ifdef CONFIG_USB_GADGET_DEBUG_FILES 1188#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1192 u8 i; 1189 u8 i;
1193 char name [20]; 1190 char name[20];
1194 1191
1195 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { 1192 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
1196 sprintf (name, NAME_TEMPLATE, i); 1193 sprintf(name, NAME_TEMPLATE, i);
1197 remove_proc_entry (name, NULL); 1194 remove_proc_entry(name, NULL);
1198 } 1195 }
1199#endif 1196#endif
1200} 1197}
1201
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a229744a8c7d..ef825c3baed9 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2523,7 +2523,8 @@ static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
2523 return 0; 2523 return 0;
2524} 2524}
2525 2525
2526int usb_gadget_register_driver(struct usb_gadget_driver *driver) 2526int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2527 int (*bind)(struct usb_gadget *))
2527{ 2528{
2528 struct s3c_hsotg *hsotg = our_hsotg; 2529 struct s3c_hsotg *hsotg = our_hsotg;
2529 int ret; 2530 int ret;
@@ -2543,7 +2544,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2543 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 2544 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2544 } 2545 }
2545 2546
2546 if (!driver->bind || !driver->setup) { 2547 if (!bind || !driver->setup) {
2547 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 2548 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2548 return -EINVAL; 2549 return -EINVAL;
2549 } 2550 }
@@ -2562,7 +2563,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2562 goto err; 2563 goto err;
2563 } 2564 }
2564 2565
2565 ret = driver->bind(&hsotg->gadget); 2566 ret = bind(&hsotg->gadget);
2566 if (ret) { 2567 if (ret) {
2567 dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name); 2568 dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
2568 2569
@@ -2687,7 +2688,7 @@ err:
2687 hsotg->gadget.dev.driver = NULL; 2688 hsotg->gadget.dev.driver = NULL;
2688 return ret; 2689 return ret;
2689} 2690}
2690EXPORT_SYMBOL(usb_gadget_register_driver); 2691EXPORT_SYMBOL(usb_gadget_probe_driver);
2691 2692
2692int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2693int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2693{ 2694{
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index ea2b3c7ebee5..c2448950a8d8 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1632,15 +1632,15 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
1632} 1632}
1633 1633
1634/* 1634/*
1635 * usb_gadget_register_driver 1635 * usb_gadget_probe_driver
1636 */ 1636 */
1637int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1637int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1638 int (*bind)(struct usb_gadget *))
1638{ 1639{
1639 struct s3c2410_udc *udc = the_controller; 1640 struct s3c2410_udc *udc = the_controller;
1640 int retval; 1641 int retval;
1641 1642
1642 dprintk(DEBUG_NORMAL, "usb_gadget_register_driver() '%s'\n", 1643 dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
1643 driver->driver.name);
1644 1644
1645 /* Sanity checks */ 1645 /* Sanity checks */
1646 if (!udc) 1646 if (!udc)
@@ -1649,10 +1649,9 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1649 if (udc->driver) 1649 if (udc->driver)
1650 return -EBUSY; 1650 return -EBUSY;
1651 1651
1652 if (!driver->bind || !driver->setup 1652 if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
1653 || driver->speed < USB_SPEED_FULL) {
1654 printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n", 1653 printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
1655 driver->bind, driver->setup, driver->speed); 1654 bind, driver->setup, driver->speed);
1656 return -EINVAL; 1655 return -EINVAL;
1657 } 1656 }
1658#if defined(MODULE) 1657#if defined(MODULE)
@@ -1675,7 +1674,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1675 dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n", 1674 dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
1676 driver->driver.name); 1675 driver->driver.name);
1677 1676
1678 if ((retval = driver->bind (&udc->gadget)) != 0) { 1677 if ((retval = bind(&udc->gadget)) != 0) {
1679 device_del(&udc->gadget.dev); 1678 device_del(&udc->gadget.dev);
1680 goto register_error; 1679 goto register_error;
1681 } 1680 }
@@ -1690,6 +1689,7 @@ register_error:
1690 udc->gadget.dev.driver = NULL; 1689 udc->gadget.dev.driver = NULL;
1691 return retval; 1690 return retval;
1692} 1691}
1692EXPORT_SYMBOL(usb_gadget_probe_driver);
1693 1693
1694/* 1694/*
1695 * usb_gadget_unregister_driver 1695 * usb_gadget_unregister_driver
@@ -2049,7 +2049,6 @@ static void __exit udc_exit(void)
2049} 2049}
2050 2050
2051EXPORT_SYMBOL(usb_gadget_unregister_driver); 2051EXPORT_SYMBOL(usb_gadget_unregister_driver);
2052EXPORT_SYMBOL(usb_gadget_register_driver);
2053 2052
2054module_init(udc_init); 2053module_init(udc_init);
2055module_exit(udc_exit); 2054module_exit(udc_exit);
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index b22eedbc7dc5..1ac57a973aa9 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -137,7 +137,7 @@ MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
137 137
138/*-------------------------------------------------------------------------*/ 138/*-------------------------------------------------------------------------*/
139 139
140static int __ref serial_bind_config(struct usb_configuration *c) 140static int __init serial_bind_config(struct usb_configuration *c)
141{ 141{
142 unsigned i; 142 unsigned i;
143 int status = 0; 143 int status = 0;
@@ -155,13 +155,12 @@ static int __ref serial_bind_config(struct usb_configuration *c)
155 155
156static struct usb_configuration serial_config_driver = { 156static struct usb_configuration serial_config_driver = {
157 /* .label = f(use_acm) */ 157 /* .label = f(use_acm) */
158 .bind = serial_bind_config,
159 /* .bConfigurationValue = f(use_acm) */ 158 /* .bConfigurationValue = f(use_acm) */
160 /* .iConfiguration = DYNAMIC */ 159 /* .iConfiguration = DYNAMIC */
161 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 160 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
162}; 161};
163 162
164static int __ref gs_bind(struct usb_composite_dev *cdev) 163static int __init gs_bind(struct usb_composite_dev *cdev)
165{ 164{
166 int gcnum; 165 int gcnum;
167 struct usb_gadget *gadget = cdev->gadget; 166 struct usb_gadget *gadget = cdev->gadget;
@@ -225,7 +224,8 @@ static int __ref gs_bind(struct usb_composite_dev *cdev)
225 } 224 }
226 225
227 /* register our configuration */ 226 /* register our configuration */
228 status = usb_add_config(cdev, &serial_config_driver); 227 status = usb_add_config(cdev, &serial_config_driver,
228 serial_bind_config);
229 if (status < 0) 229 if (status < 0)
230 goto fail; 230 goto fail;
231 231
@@ -242,7 +242,6 @@ static struct usb_composite_driver gserial_driver = {
242 .name = "g_serial", 242 .name = "g_serial",
243 .dev = &device_desc, 243 .dev = &device_desc,
244 .strings = dev_strings, 244 .strings = dev_strings,
245 .bind = gs_bind,
246}; 245};
247 246
248static int __init init(void) 247static int __init init(void)
@@ -271,7 +270,7 @@ static int __init init(void)
271 } 270 }
272 strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label; 271 strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
273 272
274 return usb_composite_register(&gserial_driver); 273 return usb_composite_probe(&gserial_driver, gs_bind);
275} 274}
276module_init(init); 275module_init(init);
277 276
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 484acfb1a7c5..3b513bafaf2a 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -26,7 +26,6 @@
26 * be defined (each of type pointer to char): 26 * be defined (each of type pointer to char):
27 * - fsg_string_manufacturer -- name of the manufacturer 27 * - fsg_string_manufacturer -- name of the manufacturer
28 * - fsg_string_product -- name of the product 28 * - fsg_string_product -- name of the product
29 * - fsg_string_serial -- product's serial
30 * - fsg_string_config -- name of the configuration 29 * - fsg_string_config -- name of the configuration
31 * - fsg_string_interface -- name of the interface 30 * - fsg_string_interface -- name of the interface
32 * The first four are only needed when FSG_DESCRIPTORS_DEVICE_STRINGS 31 * The first four are only needed when FSG_DESCRIPTORS_DEVICE_STRINGS
@@ -54,6 +53,8 @@
54 */ 53 */
55 54
56 55
56#include <linux/usb/storage.h>
57#include <scsi/scsi.h>
57#include <asm/unaligned.h> 58#include <asm/unaligned.h>
58 59
59 60
@@ -153,23 +154,6 @@
153 154
154/*-------------------------------------------------------------------------*/ 155/*-------------------------------------------------------------------------*/
155 156
156/* SCSI device types */
157#define TYPE_DISK 0x00
158#define TYPE_CDROM 0x05
159
160/* USB protocol value = the transport method */
161#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */
162#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */
163#define USB_PR_BULK 0x50 /* Bulk-only */
164
165/* USB subclass value = the protocol encapsulation */
166#define USB_SC_RBC 0x01 /* Reduced Block Commands (flash) */
167#define USB_SC_8020 0x02 /* SFF-8020i, MMC-2, ATAPI (CD-ROM) */
168#define USB_SC_QIC 0x03 /* QIC-157 (tape) */
169#define USB_SC_UFI 0x04 /* UFI (floppy) */
170#define USB_SC_8070 0x05 /* SFF-8070i (removable) */
171#define USB_SC_SCSI 0x06 /* Transparent SCSI */
172
173/* Bulk-only data structures */ 157/* Bulk-only data structures */
174 158
175/* Command Block Wrapper */ 159/* Command Block Wrapper */
@@ -221,33 +205,6 @@ struct interrupt_data {
221/* Length of a SCSI Command Data Block */ 205/* Length of a SCSI Command Data Block */
222#define MAX_COMMAND_SIZE 16 206#define MAX_COMMAND_SIZE 16
223 207
224/* SCSI commands that we recognize */
225#define SC_FORMAT_UNIT 0x04
226#define SC_INQUIRY 0x12
227#define SC_MODE_SELECT_6 0x15
228#define SC_MODE_SELECT_10 0x55
229#define SC_MODE_SENSE_6 0x1a
230#define SC_MODE_SENSE_10 0x5a
231#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
232#define SC_READ_6 0x08
233#define SC_READ_10 0x28
234#define SC_READ_12 0xa8
235#define SC_READ_CAPACITY 0x25
236#define SC_READ_FORMAT_CAPACITIES 0x23
237#define SC_READ_HEADER 0x44
238#define SC_READ_TOC 0x43
239#define SC_RELEASE 0x17
240#define SC_REQUEST_SENSE 0x03
241#define SC_RESERVE 0x16
242#define SC_SEND_DIAGNOSTIC 0x1d
243#define SC_START_STOP_UNIT 0x1b
244#define SC_SYNCHRONIZE_CACHE 0x35
245#define SC_TEST_UNIT_READY 0x00
246#define SC_VERIFY 0x2f
247#define SC_WRITE_6 0x0a
248#define SC_WRITE_10 0x2a
249#define SC_WRITE_12 0xaa
250
251/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */ 208/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
252#define SS_NO_SENSE 0 209#define SS_NO_SENSE 0
253#define SS_COMMUNICATION_FAILURE 0x040800 210#define SS_COMMUNICATION_FAILURE 0x040800
@@ -552,7 +509,7 @@ static struct usb_string fsg_strings[] = {
552#ifndef FSG_NO_DEVICE_STRINGS 509#ifndef FSG_NO_DEVICE_STRINGS
553 {FSG_STRING_MANUFACTURER, fsg_string_manufacturer}, 510 {FSG_STRING_MANUFACTURER, fsg_string_manufacturer},
554 {FSG_STRING_PRODUCT, fsg_string_product}, 511 {FSG_STRING_PRODUCT, fsg_string_product},
555 {FSG_STRING_SERIAL, fsg_string_serial}, 512 {FSG_STRING_SERIAL, ""},
556 {FSG_STRING_CONFIG, fsg_string_config}, 513 {FSG_STRING_CONFIG, fsg_string_config},
557#endif 514#endif
558 {FSG_STRING_INTERFACE, fsg_string_interface}, 515 {FSG_STRING_INTERFACE, fsg_string_interface},
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index de1deb7a3c63..a5a0fdb808c7 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -308,7 +308,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
308 * USB configuration 308 * USB configuration
309 */ 309 */
310 310
311static int __ref 311static int __init
312webcam_config_bind(struct usb_configuration *c) 312webcam_config_bind(struct usb_configuration *c)
313{ 313{
314 return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls, 314 return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
@@ -317,7 +317,6 @@ webcam_config_bind(struct usb_configuration *c)
317 317
318static struct usb_configuration webcam_config_driver = { 318static struct usb_configuration webcam_config_driver = {
319 .label = webcam_config_label, 319 .label = webcam_config_label,
320 .bind = webcam_config_bind,
321 .bConfigurationValue = 1, 320 .bConfigurationValue = 1,
322 .iConfiguration = 0, /* dynamic */ 321 .iConfiguration = 0, /* dynamic */
323 .bmAttributes = USB_CONFIG_ATT_SELFPOWER, 322 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
@@ -330,7 +329,7 @@ webcam_unbind(struct usb_composite_dev *cdev)
330 return 0; 329 return 0;
331} 330}
332 331
333static int __ref 332static int __init
334webcam_bind(struct usb_composite_dev *cdev) 333webcam_bind(struct usb_composite_dev *cdev)
335{ 334{
336 int ret; 335 int ret;
@@ -354,7 +353,8 @@ webcam_bind(struct usb_composite_dev *cdev)
354 webcam_config_driver.iConfiguration = ret; 353 webcam_config_driver.iConfiguration = ret;
355 354
356 /* Register our configuration. */ 355 /* Register our configuration. */
357 if ((ret = usb_add_config(cdev, &webcam_config_driver)) < 0) 356 if ((ret = usb_add_config(cdev, &webcam_config_driver,
357 webcam_config_bind)) < 0)
358 goto error; 358 goto error;
359 359
360 INFO(cdev, "Webcam Video Gadget\n"); 360 INFO(cdev, "Webcam Video Gadget\n");
@@ -373,14 +373,13 @@ static struct usb_composite_driver webcam_driver = {
373 .name = "g_webcam", 373 .name = "g_webcam",
374 .dev = &webcam_device_descriptor, 374 .dev = &webcam_device_descriptor,
375 .strings = webcam_device_strings, 375 .strings = webcam_device_strings,
376 .bind = webcam_bind,
377 .unbind = webcam_unbind, 376 .unbind = webcam_unbind,
378}; 377};
379 378
380static int __init 379static int __init
381webcam_init(void) 380webcam_init(void)
382{ 381{
383 return usb_composite_register(&webcam_driver); 382 return usb_composite_probe(&webcam_driver, webcam_bind);
384} 383}
385 384
386static void __exit 385static void __exit
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index cf353920bb1c..6d16db9d9d2d 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -264,7 +264,7 @@ static void zero_resume(struct usb_composite_dev *cdev)
264 264
265/*-------------------------------------------------------------------------*/ 265/*-------------------------------------------------------------------------*/
266 266
267static int __ref zero_bind(struct usb_composite_dev *cdev) 267static int __init zero_bind(struct usb_composite_dev *cdev)
268{ 268{
269 int gcnum; 269 int gcnum;
270 struct usb_gadget *gadget = cdev->gadget; 270 struct usb_gadget *gadget = cdev->gadget;
@@ -340,7 +340,6 @@ static struct usb_composite_driver zero_driver = {
340 .name = "zero", 340 .name = "zero",
341 .dev = &device_desc, 341 .dev = &device_desc,
342 .strings = dev_strings, 342 .strings = dev_strings,
343 .bind = zero_bind,
344 .unbind = zero_unbind, 343 .unbind = zero_unbind,
345 .suspend = zero_suspend, 344 .suspend = zero_suspend,
346 .resume = zero_resume, 345 .resume = zero_resume,
@@ -351,7 +350,7 @@ MODULE_LICENSE("GPL");
351 350
352static int __init init(void) 351static int __init init(void)
353{ 352{
354 return usb_composite_register(&zero_driver); 353 return usb_composite_probe(&zero_driver, zero_bind);
355} 354}
356module_init(init); 355module_init(init);
357 356
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2d926cec0725..bf2e7d234533 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -93,12 +93,14 @@ config USB_EHCI_TT_NEWSCHED
93 93
94config USB_EHCI_BIG_ENDIAN_MMIO 94config USB_EHCI_BIG_ENDIAN_MMIO
95 bool 95 bool
96 depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) 96 depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || \
97 XPS_USB_HCD_XILINX || PPC_MPC512x)
97 default y 98 default y
98 99
99config USB_EHCI_BIG_ENDIAN_DESC 100config USB_EHCI_BIG_ENDIAN_DESC
100 bool 101 bool
101 depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) 102 depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
103 PPC_MPC512x)
102 default y 104 default y
103 105
104config XPS_USB_HCD_XILINX 106config XPS_USB_HCD_XILINX
@@ -112,10 +114,14 @@ config XPS_USB_HCD_XILINX
112 support both high speed and full speed devices, or high speed 114 support both high speed and full speed devices, or high speed
113 devices only. 115 devices only.
114 116
117config USB_FSL_MPH_DR_OF
118 tristate
119
115config USB_EHCI_FSL 120config USB_EHCI_FSL
116 bool "Support for Freescale on-chip EHCI USB controller" 121 bool "Support for Freescale on-chip EHCI USB controller"
117 depends on USB_EHCI_HCD && FSL_SOC 122 depends on USB_EHCI_HCD && FSL_SOC
118 select USB_EHCI_ROOT_HUB_TT 123 select USB_EHCI_ROOT_HUB_TT
124 select USB_FSL_MPH_DR_OF
119 ---help--- 125 ---help---
120 Variation of ARC USB block used in some Freescale chips. 126 Variation of ARC USB block used in some Freescale chips.
121 127
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index b6315aa47f7a..91c5a1bd1026 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -2,17 +2,17 @@
2# Makefile for USB Host Controller Drivers 2# Makefile for USB Host Controller Drivers
3# 3#
4 4
5ifeq ($(CONFIG_USB_DEBUG),y) 5ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
6 EXTRA_CFLAGS += -DDEBUG 6
7endif 7isp1760-y := isp1760-hcd.o isp1760-if.o
8 8
9isp1760-objs := isp1760-hcd.o isp1760-if.o 9fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
10fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \ 10fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
11 fhci-tds.o fhci-sched.o 11
12ifeq ($(CONFIG_FHCI_DEBUG),y) 12fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
13fhci-objs += fhci-dbg.o 13
14endif 14xhci-hcd-y := xhci.o xhci-mem.o xhci-pci.o
15xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o 15xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
16 16
17obj-$(CONFIG_USB_WHCI_HCD) += whci/ 17obj-$(CONFIG_USB_WHCI_HCD) += whci/
18 18
@@ -33,4 +33,4 @@ obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o 34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
35obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o 35obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
36 36obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index a416421abfa2..86e42892016d 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -116,13 +116,33 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
116 goto err3; 116 goto err3;
117 } 117 }
118 118
119 /* Enable USB controller */ 119 pdata->regs = hcd->regs;
120 temp = in_be32(hcd->regs + 0x500);
121 out_be32(hcd->regs + 0x500, temp | 0x4);
122 120
123 /* Set to Host mode */ 121 /*
124 temp = in_le32(hcd->regs + 0x1a8); 122 * do platform specific init: check the clock, grab/config pins, etc.
125 out_le32(hcd->regs + 0x1a8, temp | 0x3); 123 */
124 if (pdata->init && pdata->init(pdev)) {
125 retval = -ENODEV;
126 goto err3;
127 }
128
129 /*
130 * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
131 * flag for 83xx or 8536 system interface registers.
132 */
133 if (pdata->big_endian_mmio)
134 temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
135 else
136 temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
137
138 if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
139 pdata->have_sysif_regs = 1;
140
141 /* Enable USB controller, 83xx or 8536 */
142 if (pdata->have_sysif_regs)
143 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
144
145 /* Don't need to set host mode here. It will be done by tdi_reset() */
126 146
127 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 147 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
128 if (retval != 0) 148 if (retval != 0)
@@ -137,6 +157,8 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
137 usb_put_hcd(hcd); 157 usb_put_hcd(hcd);
138 err1: 158 err1:
139 dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval); 159 dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
160 if (pdata->exit)
161 pdata->exit(pdev);
140 return retval; 162 return retval;
141} 163}
142 164
@@ -154,17 +176,30 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
154static void usb_hcd_fsl_remove(struct usb_hcd *hcd, 176static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
155 struct platform_device *pdev) 177 struct platform_device *pdev)
156{ 178{
179 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
180
157 usb_remove_hcd(hcd); 181 usb_remove_hcd(hcd);
182
183 /*
184 * do platform specific un-initialization:
185 * release iomux pins, disable clock, etc.
186 */
187 if (pdata->exit)
188 pdata->exit(pdev);
158 iounmap(hcd->regs); 189 iounmap(hcd->regs);
159 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 190 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
160 usb_put_hcd(hcd); 191 usb_put_hcd(hcd);
161} 192}
162 193
163static void mpc83xx_setup_phy(struct ehci_hcd *ehci, 194static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
164 enum fsl_usb2_phy_modes phy_mode, 195 enum fsl_usb2_phy_modes phy_mode,
165 unsigned int port_offset) 196 unsigned int port_offset)
166{ 197{
167 u32 portsc = 0; 198 u32 portsc;
199
200 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
201 portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
202
168 switch (phy_mode) { 203 switch (phy_mode) {
169 case FSL_USB2_PHY_ULPI: 204 case FSL_USB2_PHY_ULPI:
170 portsc |= PORT_PTS_ULPI; 205 portsc |= PORT_PTS_ULPI;
@@ -184,20 +219,21 @@ static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
184 ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]); 219 ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
185} 220}
186 221
187static void mpc83xx_usb_setup(struct usb_hcd *hcd) 222static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
188{ 223{
189 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 224 struct usb_hcd *hcd = ehci_to_hcd(ehci);
190 struct fsl_usb2_platform_data *pdata; 225 struct fsl_usb2_platform_data *pdata;
191 void __iomem *non_ehci = hcd->regs; 226 void __iomem *non_ehci = hcd->regs;
192 u32 temp; 227 u32 temp;
193 228
194 pdata = 229 pdata = hcd->self.controller->platform_data;
195 (struct fsl_usb2_platform_data *)hcd->self.controller-> 230
196 platform_data;
197 /* Enable PHY interface in the control reg. */ 231 /* Enable PHY interface in the control reg. */
198 temp = in_be32(non_ehci + FSL_SOC_USB_CTRL); 232 if (pdata->have_sysif_regs) {
199 out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004); 233 temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
200 out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b); 234 out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
235 out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
236 }
201 237
202#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 238#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
203 /* 239 /*
@@ -214,7 +250,7 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
214 250
215 if ((pdata->operating_mode == FSL_USB2_DR_HOST) || 251 if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
216 (pdata->operating_mode == FSL_USB2_DR_OTG)) 252 (pdata->operating_mode == FSL_USB2_DR_OTG))
217 mpc83xx_setup_phy(ehci, pdata->phy_mode, 0); 253 ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
218 254
219 if (pdata->operating_mode == FSL_USB2_MPH_HOST) { 255 if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
220 unsigned int chip, rev, svr; 256 unsigned int chip, rev, svr;
@@ -228,27 +264,27 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
228 ehci->has_fsl_port_bug = 1; 264 ehci->has_fsl_port_bug = 1;
229 265
230 if (pdata->port_enables & FSL_USB2_PORT0_ENABLED) 266 if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
231 mpc83xx_setup_phy(ehci, pdata->phy_mode, 0); 267 ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
232 if (pdata->port_enables & FSL_USB2_PORT1_ENABLED) 268 if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
233 mpc83xx_setup_phy(ehci, pdata->phy_mode, 1); 269 ehci_fsl_setup_phy(ehci, pdata->phy_mode, 1);
234 } 270 }
235 271
236 /* put controller in host mode. */ 272 if (pdata->have_sysif_regs) {
237 ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
238#ifdef CONFIG_PPC_85xx 273#ifdef CONFIG_PPC_85xx
239 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008); 274 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
240 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080); 275 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
241#else 276#else
242 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c); 277 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
243 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040); 278 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
244#endif 279#endif
245 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001); 280 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
281 }
246} 282}
247 283
248/* called after powerup, by probe or system-pm "wakeup" */ 284/* called after powerup, by probe or system-pm "wakeup" */
249static int ehci_fsl_reinit(struct ehci_hcd *ehci) 285static int ehci_fsl_reinit(struct ehci_hcd *ehci)
250{ 286{
251 mpc83xx_usb_setup(ehci_to_hcd(ehci)); 287 ehci_fsl_usb_setup(ehci);
252 ehci_port_power(ehci, 0); 288 ehci_port_power(ehci, 0);
253 289
254 return 0; 290 return 0;
@@ -259,6 +295,11 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
259{ 295{
260 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 296 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
261 int retval; 297 int retval;
298 struct fsl_usb2_platform_data *pdata;
299
300 pdata = hcd->self.controller->platform_data;
301 ehci->big_endian_desc = pdata->big_endian_desc;
302 ehci->big_endian_mmio = pdata->big_endian_mmio;
262 303
263 /* EHCI registers start at offset 0x100 */ 304 /* EHCI registers start at offset 0x100 */
264 ehci->caps = hcd->regs + 0x100; 305 ehci->caps = hcd->regs + 0x100;
@@ -270,6 +311,8 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
270 /* cache this readonly data; minimize chip reads */ 311 /* cache this readonly data; minimize chip reads */
271 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); 312 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
272 313
314 hcd->has_tt = 1;
315
273 retval = ehci_halt(ehci); 316 retval = ehci_halt(ehci);
274 if (retval) 317 if (retval)
275 return retval; 318 return retval;
@@ -279,8 +322,6 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
279 if (retval) 322 if (retval)
280 return retval; 323 return retval;
281 324
282 hcd->has_tt = 1;
283
284 ehci->sbrn = 0x20; 325 ehci->sbrn = 0x20;
285 326
286 ehci_reset(ehci); 327 ehci_reset(ehci);
@@ -372,7 +413,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
372 * generic hardware linkage 413 * generic hardware linkage
373 */ 414 */
374 .irq = ehci_irq, 415 .irq = ehci_irq,
375 .flags = HCD_USB2, 416 .flags = HCD_USB2 | HCD_MEMORY,
376 417
377 /* 418 /*
378 * basic lifecycle operations 419 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index b5e59db53347..2c8353795226 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,4 +1,4 @@
1/* Copyright (c) 2005 freescale semiconductor 1/* Copyright (C) 2005-2010 Freescale Semiconductor, Inc.
2 * Copyright (c) 2005 MontaVista Software 2 * Copyright (c) 2005 MontaVista Software
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,9 @@
19#define _EHCI_FSL_H 19#define _EHCI_FSL_H
20 20
21/* offsets for the non-ehci registers in the FSL SOC USB controller */ 21/* offsets for the non-ehci registers in the FSL SOC USB controller */
22#define FSL_SOC_USB_ID 0x0
23#define ID_MSK 0x3f
24#define NID_MSK 0x3f00
22#define FSL_SOC_USB_ULPIVP 0x170 25#define FSL_SOC_USB_ULPIVP 0x170
23#define FSL_SOC_USB_PORTSC1 0x184 26#define FSL_SOC_USB_PORTSC1 0x184
24#define PORT_PTS_MSK (3<<30) 27#define PORT_PTS_MSK (3<<30)
@@ -27,7 +30,14 @@
27#define PORT_PTS_SERIAL (3<<30) 30#define PORT_PTS_SERIAL (3<<30)
28#define PORT_PTS_PTW (1<<28) 31#define PORT_PTS_PTW (1<<28)
29#define FSL_SOC_USB_PORTSC2 0x188 32#define FSL_SOC_USB_PORTSC2 0x188
30#define FSL_SOC_USB_USBMODE 0x1a8 33
34#define FSL_SOC_USB_USBGENCTRL 0x200
35#define USBGENCTRL_PPP (1 << 3)
36#define USBGENCTRL_PFP (1 << 2)
37#define FSL_SOC_USB_ISIPHYCTRL 0x204
38#define ISIPHYCTRL_PXE (1)
39#define ISIPHYCTRL_PHYE (1 << 4)
40
31#define FSL_SOC_USB_SNOOP1 0x400 /* NOTE: big-endian */ 41#define FSL_SOC_USB_SNOOP1 0x400 /* NOTE: big-endian */
32#define FSL_SOC_USB_SNOOP2 0x404 /* NOTE: big-endian */ 42#define FSL_SOC_USB_SNOOP2 0x404 /* NOTE: big-endian */
33#define FSL_SOC_USB_AGECNTTHRSH 0x408 /* NOTE: big-endian */ 43#define FSL_SOC_USB_AGECNTTHRSH 0x408 /* NOTE: big-endian */
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 34a928d3b7d2..15fe3ecd203b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -194,6 +194,17 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
194 return -ETIMEDOUT; 194 return -ETIMEDOUT;
195} 195}
196 196
197/* check TDI/ARC silicon is in host mode */
198static int tdi_in_host_mode (struct ehci_hcd *ehci)
199{
200 u32 __iomem *reg_ptr;
201 u32 tmp;
202
203 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
204 tmp = ehci_readl(ehci, reg_ptr);
205 return (tmp & 3) == USBMODE_CM_HC;
206}
207
197/* force HC to halt state from unknown (EHCI spec section 2.3) */ 208/* force HC to halt state from unknown (EHCI spec section 2.3) */
198static int ehci_halt (struct ehci_hcd *ehci) 209static int ehci_halt (struct ehci_hcd *ehci)
199{ 210{
@@ -202,6 +213,10 @@ static int ehci_halt (struct ehci_hcd *ehci)
202 /* disable any irqs left enabled by previous code */ 213 /* disable any irqs left enabled by previous code */
203 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 214 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
204 215
216 if (ehci_is_TDI(ehci) && tdi_in_host_mode(ehci) == 0) {
217 return 0;
218 }
219
205 if ((temp & STS_HALT) != 0) 220 if ((temp & STS_HALT) != 0)
206 return 0; 221 return 0;
207 222
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 1f3f01eacaf0..d36e4e75e08d 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -40,7 +40,7 @@ static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
40{ 40{
41 memset (qtd, 0, sizeof *qtd); 41 memset (qtd, 0, sizeof *qtd);
42 qtd->qtd_dma = dma; 42 qtd->qtd_dma = dma;
43 qtd->hw_token = cpu_to_le32 (QTD_STS_HALT); 43 qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
44 qtd->hw_next = EHCI_LIST_END(ehci); 44 qtd->hw_next = EHCI_LIST_END(ehci);
45 qtd->hw_alt_next = EHCI_LIST_END(ehci); 45 qtd->hw_alt_next = EHCI_LIST_END(ehci);
46 INIT_LIST_HEAD (&qtd->qtd_list); 46 INIT_LIST_HEAD (&qtd->qtd_list);
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a8ad8ac120a2..ac9c4d7c44af 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -26,9 +26,6 @@
26#include <mach/mxc_ehci.h> 26#include <mach/mxc_ehci.h>
27 27
28#define ULPI_VIEWPORT_OFFSET 0x170 28#define ULPI_VIEWPORT_OFFSET 0x170
29#define PORTSC_OFFSET 0x184
30#define USBMODE_OFFSET 0x1a8
31#define USBMODE_CM_HOST 3
32 29
33struct ehci_mxc_priv { 30struct ehci_mxc_priv {
34 struct clk *usbclk, *ahbclk; 31 struct clk *usbclk, *ahbclk;
@@ -51,6 +48,8 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
51 /* cache this readonly data; minimize chip reads */ 48 /* cache this readonly data; minimize chip reads */
52 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); 49 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
53 50
51 hcd->has_tt = 1;
52
54 retval = ehci_halt(ehci); 53 retval = ehci_halt(ehci);
55 if (retval) 54 if (retval)
56 return retval; 55 return retval;
@@ -60,8 +59,6 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
60 if (retval) 59 if (retval)
61 return retval; 60 return retval;
62 61
63 hcd->has_tt = 1;
64
65 ehci->sbrn = 0x20; 62 ehci->sbrn = 0x20;
66 63
67 ehci_reset(ehci); 64 ehci_reset(ehci);
@@ -191,12 +188,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
191 clk_enable(priv->ahbclk); 188 clk_enable(priv->ahbclk);
192 } 189 }
193 190
194 /* set USBMODE to host mode */
195 temp = readl(hcd->regs + USBMODE_OFFSET);
196 writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET);
197
198 /* set up the PORTSCx register */ 191 /* set up the PORTSCx register */
199 writel(pdata->portsc, hcd->regs + PORTSC_OFFSET); 192 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
200 mdelay(10); 193 mdelay(10);
201 194
202 /* setup specific usb hw */ 195 /* setup specific usb hw */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
new file mode 100644
index 000000000000..574b99ea0700
--- /dev/null
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -0,0 +1,308 @@
1/*
2 * Setup platform devices needed by the Freescale multi-port host
3 * and/or dual-role USB controller modules based on the description
4 * in flat device tree.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/fsl_devices.h>
15#include <linux/err.h>
16#include <linux/io.h>
17#include <linux/of_platform.h>
18#include <linux/clk.h>
19
20struct fsl_usb2_dev_data {
21 char *dr_mode; /* controller mode */
22 char *drivers[3]; /* drivers to instantiate for this mode */
23 enum fsl_usb2_operating_modes op_mode; /* operating mode */
24};
25
26struct fsl_usb2_dev_data dr_mode_data[] __devinitdata = {
27 {
28 .dr_mode = "host",
29 .drivers = { "fsl-ehci", NULL, NULL, },
30 .op_mode = FSL_USB2_DR_HOST,
31 },
32 {
33 .dr_mode = "otg",
34 .drivers = { "fsl-usb2-otg", "fsl-ehci", "fsl-usb2-udc", },
35 .op_mode = FSL_USB2_DR_OTG,
36 },
37 {
38 .dr_mode = "peripheral",
39 .drivers = { "fsl-usb2-udc", NULL, NULL, },
40 .op_mode = FSL_USB2_DR_DEVICE,
41 },
42};
43
44struct fsl_usb2_dev_data * __devinit get_dr_mode_data(struct device_node *np)
45{
46 const unsigned char *prop;
47 int i;
48
49 prop = of_get_property(np, "dr_mode", NULL);
50 if (prop) {
51 for (i = 0; i < ARRAY_SIZE(dr_mode_data); i++) {
52 if (!strcmp(prop, dr_mode_data[i].dr_mode))
53 return &dr_mode_data[i];
54 }
55 }
56 pr_warn("%s: Invalid 'dr_mode' property, fallback to host mode\n",
57 np->full_name);
58 return &dr_mode_data[0]; /* mode not specified, use host */
59}
60
61static enum fsl_usb2_phy_modes __devinit determine_usb_phy(const char *phy_type)
62{
63 if (!phy_type)
64 return FSL_USB2_PHY_NONE;
65 if (!strcasecmp(phy_type, "ulpi"))
66 return FSL_USB2_PHY_ULPI;
67 if (!strcasecmp(phy_type, "utmi"))
68 return FSL_USB2_PHY_UTMI;
69 if (!strcasecmp(phy_type, "utmi_wide"))
70 return FSL_USB2_PHY_UTMI_WIDE;
71 if (!strcasecmp(phy_type, "serial"))
72 return FSL_USB2_PHY_SERIAL;
73
74 return FSL_USB2_PHY_NONE;
75}
76
77struct platform_device * __devinit fsl_usb2_device_register(
78 struct platform_device *ofdev,
79 struct fsl_usb2_platform_data *pdata,
80 const char *name, int id)
81{
82 struct platform_device *pdev;
83 const struct resource *res = ofdev->resource;
84 unsigned int num = ofdev->num_resources;
85 int retval;
86
87 pdev = platform_device_alloc(name, id);
88 if (!pdev) {
89 retval = -ENOMEM;
90 goto error;
91 }
92
93 pdev->dev.parent = &ofdev->dev;
94
95 pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
96 pdev->dev.dma_mask = &pdev->archdata.dma_mask;
97 *pdev->dev.dma_mask = *ofdev->dev.dma_mask;
98
99 retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
100 if (retval)
101 goto error;
102
103 if (num) {
104 retval = platform_device_add_resources(pdev, res, num);
105 if (retval)
106 goto error;
107 }
108
109 retval = platform_device_add(pdev);
110 if (retval)
111 goto error;
112
113 return pdev;
114
115error:
116 platform_device_put(pdev);
117 return ERR_PTR(retval);
118}
119
120static const struct of_device_id fsl_usb2_mph_dr_of_match[];
121
122static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
123{
124 struct device_node *np = ofdev->dev.of_node;
125 struct platform_device *usb_dev;
126 struct fsl_usb2_platform_data data, *pdata;
127 struct fsl_usb2_dev_data *dev_data;
128 const struct of_device_id *match;
129 const unsigned char *prop;
130 static unsigned int idx;
131 int i;
132
133 if (!of_device_is_available(np))
134 return -ENODEV;
135
136 match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
137 if (!match)
138 return -ENODEV;
139
140 pdata = &data;
141 if (match->data)
142 memcpy(pdata, match->data, sizeof(data));
143 else
144 memset(pdata, 0, sizeof(data));
145
146 dev_data = get_dr_mode_data(np);
147
148 if (of_device_is_compatible(np, "fsl-usb2-mph")) {
149 if (of_get_property(np, "port0", NULL))
150 pdata->port_enables |= FSL_USB2_PORT0_ENABLED;
151
152 if (of_get_property(np, "port1", NULL))
153 pdata->port_enables |= FSL_USB2_PORT1_ENABLED;
154
155 pdata->operating_mode = FSL_USB2_MPH_HOST;
156 } else {
157 if (of_get_property(np, "fsl,invert-drvvbus", NULL))
158 pdata->invert_drvvbus = 1;
159
160 if (of_get_property(np, "fsl,invert-pwr-fault", NULL))
161 pdata->invert_pwr_fault = 1;
162
163 /* setup mode selected in the device tree */
164 pdata->operating_mode = dev_data->op_mode;
165 }
166
167 prop = of_get_property(np, "phy_type", NULL);
168 pdata->phy_mode = determine_usb_phy(prop);
169
170 for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
171 if (!dev_data->drivers[i])
172 continue;
173 usb_dev = fsl_usb2_device_register(ofdev, pdata,
174 dev_data->drivers[i], idx);
175 if (IS_ERR(usb_dev)) {
176 dev_err(&ofdev->dev, "Can't register usb device\n");
177 return PTR_ERR(usb_dev);
178 }
179 }
180 idx++;
181 return 0;
182}
183
184static int __devexit __unregister_subdev(struct device *dev, void *d)
185{
186 platform_device_unregister(to_platform_device(dev));
187 return 0;
188}
189
190static int __devexit fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
191{
192 device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
193 return 0;
194}
195
196#ifdef CONFIG_PPC_MPC512x
197
198#define USBGENCTRL 0x200 /* NOTE: big endian */
199#define GC_WU_INT_CLR (1 << 5) /* Wakeup int clear */
200#define GC_ULPI_SEL (1 << 4) /* ULPI i/f select (usb0 only)*/
201#define GC_PPP (1 << 3) /* Inv. Port Power Polarity */
202#define GC_PFP (1 << 2) /* Inv. Power Fault Polarity */
203#define GC_WU_ULPI_EN (1 << 1) /* Wakeup on ULPI event */
204#define GC_WU_IE (1 << 1) /* Wakeup interrupt enable */
205
206#define ISIPHYCTRL 0x204 /* NOTE: big endian */
207#define PHYCTRL_PHYE (1 << 4) /* On-chip UTMI PHY enable */
208#define PHYCTRL_BSENH (1 << 3) /* Bit Stuff Enable High */
209#define PHYCTRL_BSEN (1 << 2) /* Bit Stuff Enable */
210#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
211#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
212
213int fsl_usb2_mpc5121_init(struct platform_device *pdev)
214{
215 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
216 struct clk *clk;
217 char clk_name[10];
218 int base, clk_num;
219
220 base = pdev->resource->start & 0xf000;
221 if (base == 0x3000)
222 clk_num = 1;
223 else if (base == 0x4000)
224 clk_num = 2;
225 else
226 return -ENODEV;
227
228 snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
229 clk = clk_get(&pdev->dev, clk_name);
230 if (IS_ERR(clk)) {
231 dev_err(&pdev->dev, "failed to get clk\n");
232 return PTR_ERR(clk);
233 }
234
235 clk_enable(clk);
236 pdata->clk = clk;
237
238 if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
239 u32 reg = 0;
240
241 if (pdata->invert_drvvbus)
242 reg |= GC_PPP;
243
244 if (pdata->invert_pwr_fault)
245 reg |= GC_PFP;
246
247 out_be32(pdata->regs + ISIPHYCTRL, PHYCTRL_PHYE | PHYCTRL_PXE);
248 out_be32(pdata->regs + USBGENCTRL, reg);
249 }
250 return 0;
251}
252
253static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
254{
255 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
256
257 pdata->regs = NULL;
258
259 if (pdata->clk) {
260 clk_disable(pdata->clk);
261 clk_put(pdata->clk);
262 }
263}
264
265struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
266 .big_endian_desc = 1,
267 .big_endian_mmio = 1,
268 .es = 1,
269 .le_setup_buf = 1,
270 .init = fsl_usb2_mpc5121_init,
271 .exit = fsl_usb2_mpc5121_exit,
272};
273#endif /* CONFIG_PPC_MPC512x */
274
275static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
276 { .compatible = "fsl-usb2-mph", },
277 { .compatible = "fsl-usb2-dr", },
278#ifdef CONFIG_PPC_MPC512x
279 { .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
280#endif
281 {},
282};
283
284static struct platform_driver fsl_usb2_mph_dr_driver = {
285 .driver = {
286 .name = "fsl-usb2-mph-dr",
287 .owner = THIS_MODULE,
288 .of_match_table = fsl_usb2_mph_dr_of_match,
289 },
290 .probe = fsl_usb2_mph_dr_of_probe,
291 .remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
292};
293
294static int __init fsl_usb2_mph_dr_init(void)
295{
296 return platform_driver_register(&fsl_usb2_mph_dr_driver);
297}
298module_init(fsl_usb2_mph_dr_init);
299
300static void __exit fsl_usb2_mph_dr_exit(void)
301{
302 platform_driver_unregister(&fsl_usb2_mph_dr_driver);
303}
304module_exit(fsl_usb2_mph_dr_exit);
305
306MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
307MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
308MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 3e5630369c31..1dfb2c8f7707 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -57,6 +57,7 @@
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/usb.h> 58#include <linux/usb.h>
59#include <linux/usb/hcd.h> 59#include <linux/usb/hcd.h>
60#include <linux/dma-mapping.h>
60 61
61#include "imx21-hcd.h" 62#include "imx21-hcd.h"
62 63
@@ -136,9 +137,18 @@ static int imx21_hc_get_frame(struct usb_hcd *hcd)
136 return wrap_frame(readl(imx21->regs + USBH_FRMNUB)); 137 return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
137} 138}
138 139
140static inline bool unsuitable_for_dma(dma_addr_t addr)
141{
142 return (addr & 3) != 0;
143}
139 144
140#include "imx21-dbg.c" 145#include "imx21-dbg.c"
141 146
147static void nonisoc_urb_completed_for_etd(
148 struct imx21 *imx21, struct etd_priv *etd, int status);
149static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
150static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
151
142/* =========================================== */ 152/* =========================================== */
143/* ETD management */ 153/* ETD management */
144/* =========================================== */ 154/* =========================================== */
@@ -185,7 +195,8 @@ static void reset_etd(struct imx21 *imx21, int num)
185 etd_writel(imx21, num, i, 0); 195 etd_writel(imx21, num, i, 0);
186 etd->urb = NULL; 196 etd->urb = NULL;
187 etd->ep = NULL; 197 etd->ep = NULL;
188 etd->td = NULL;; 198 etd->td = NULL;
199 etd->bounce_buffer = NULL;
189} 200}
190 201
191static void free_etd(struct imx21 *imx21, int num) 202static void free_etd(struct imx21 *imx21, int num)
@@ -221,26 +232,94 @@ static void setup_etd_dword0(struct imx21 *imx21,
221 ((u32) maxpacket << DW0_MAXPKTSIZ)); 232 ((u32) maxpacket << DW0_MAXPKTSIZ));
222} 233}
223 234
224static void activate_etd(struct imx21 *imx21, 235/**
225 int etd_num, dma_addr_t dma, u8 dir) 236 * Copy buffer to data controller data memory.
237 * We cannot use memcpy_toio() because the hardware requires 32bit writes
238 */
239static void copy_to_dmem(
240 struct imx21 *imx21, int dmem_offset, void *src, int count)
241{
242 void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
243 u32 word = 0;
244 u8 *p = src;
245 int byte = 0;
246 int i;
247
248 for (i = 0; i < count; i++) {
249 byte = i % 4;
250 word += (*p++ << (byte * 8));
251 if (byte == 3) {
252 writel(word, dmem);
253 dmem += 4;
254 word = 0;
255 }
256 }
257
258 if (count && byte != 3)
259 writel(word, dmem);
260}
261
262static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
226{ 263{
227 u32 etd_mask = 1 << etd_num; 264 u32 etd_mask = 1 << etd_num;
228 struct etd_priv *etd = &imx21->etd[etd_num]; 265 struct etd_priv *etd = &imx21->etd[etd_num];
229 266
267 if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
268 /* For non aligned isoc the condition below is always true */
269 if (etd->len <= etd->dmem_size) {
270 /* Fits into data memory, use PIO */
271 if (dir != TD_DIR_IN) {
272 copy_to_dmem(imx21,
273 etd->dmem_offset,
274 etd->cpu_buffer, etd->len);
275 }
276 etd->dma_handle = 0;
277
278 } else {
279 /* Too big for data memory, use bounce buffer */
280 enum dma_data_direction dmadir;
281
282 if (dir == TD_DIR_IN) {
283 dmadir = DMA_FROM_DEVICE;
284 etd->bounce_buffer = kmalloc(etd->len,
285 GFP_ATOMIC);
286 } else {
287 dmadir = DMA_TO_DEVICE;
288 etd->bounce_buffer = kmemdup(etd->cpu_buffer,
289 etd->len,
290 GFP_ATOMIC);
291 }
292 if (!etd->bounce_buffer) {
293 dev_err(imx21->dev, "failed bounce alloc\n");
294 goto err_bounce_alloc;
295 }
296
297 etd->dma_handle =
298 dma_map_single(imx21->dev,
299 etd->bounce_buffer,
300 etd->len,
301 dmadir);
302 if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
303 dev_err(imx21->dev, "failed bounce map\n");
304 goto err_bounce_map;
305 }
306 }
307 }
308
230 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask); 309 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
231 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask); 310 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
232 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); 311 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
233 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); 312 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
234 313
235 if (dma) { 314 if (etd->dma_handle) {
236 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask); 315 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
237 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask); 316 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
238 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask); 317 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
239 writel(dma, imx21->regs + USB_ETDSMSA(etd_num)); 318 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
240 set_register_bits(imx21, USB_ETDDMAEN, etd_mask); 319 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
241 } else { 320 } else {
242 if (dir != TD_DIR_IN) { 321 if (dir != TD_DIR_IN) {
243 /* need to set for ZLP */ 322 /* need to set for ZLP and PIO */
244 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); 323 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
245 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); 324 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
246 } 325 }
@@ -263,6 +342,14 @@ static void activate_etd(struct imx21 *imx21,
263 342
264 etd->active_count = 1; 343 etd->active_count = 1;
265 writel(etd_mask, imx21->regs + USBH_ETDENSET); 344 writel(etd_mask, imx21->regs + USBH_ETDENSET);
345 return;
346
347err_bounce_map:
348 kfree(etd->bounce_buffer);
349
350err_bounce_alloc:
351 free_dmem(imx21, etd);
352 nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
266} 353}
267 354
268/* =========================================== */ 355/* =========================================== */
@@ -323,16 +410,23 @@ static void activate_queued_etd(struct imx21 *imx21,
323 etd_writel(imx21, etd_num, 1, 410 etd_writel(imx21, etd_num, 1,
324 ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset); 411 ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
325 412
413 etd->dmem_offset = dmem_offset;
326 urb_priv->active = 1; 414 urb_priv->active = 1;
327 activate_etd(imx21, etd_num, etd->dma_handle, dir); 415 activate_etd(imx21, etd_num, dir);
328} 416}
329 417
330static void free_dmem(struct imx21 *imx21, int offset) 418static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
331{ 419{
332 struct imx21_dmem_area *area; 420 struct imx21_dmem_area *area;
333 struct etd_priv *etd, *tmp; 421 struct etd_priv *tmp;
334 int found = 0; 422 int found = 0;
423 int offset;
335 424
425 if (!etd->dmem_size)
426 return;
427 etd->dmem_size = 0;
428
429 offset = etd->dmem_offset;
336 list_for_each_entry(area, &imx21->dmem_list, list) { 430 list_for_each_entry(area, &imx21->dmem_list, list) {
337 if (area->offset == offset) { 431 if (area->offset == offset) {
338 debug_dmem_freed(imx21, area->size); 432 debug_dmem_freed(imx21, area->size);
@@ -378,20 +472,23 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
378/* =========================================== */ 472/* =========================================== */
379/* End handling */ 473/* End handling */
380/* =========================================== */ 474/* =========================================== */
381static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
382 475
383/* Endpoint now idle - release it's ETD(s) or asssign to queued request */ 476/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
384static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv) 477static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
385{ 478{
386 int etd_num;
387 int i; 479 int i;
388 480
389 for (i = 0; i < NUM_ISO_ETDS; i++) { 481 for (i = 0; i < NUM_ISO_ETDS; i++) {
390 etd_num = ep_priv->etd[i]; 482 int etd_num = ep_priv->etd[i];
483 struct etd_priv *etd;
391 if (etd_num < 0) 484 if (etd_num < 0)
392 continue; 485 continue;
393 486
487 etd = &imx21->etd[etd_num];
394 ep_priv->etd[i] = -1; 488 ep_priv->etd[i] = -1;
489
490 free_dmem(imx21, etd); /* for isoc */
491
395 if (list_empty(&imx21->queue_for_etd)) { 492 if (list_empty(&imx21->queue_for_etd)) {
396 free_etd(imx21, etd_num); 493 free_etd(imx21, etd_num);
397 continue; 494 continue;
@@ -437,6 +534,24 @@ __acquires(imx21->lock)
437 ep_idle(imx21, ep_priv); 534 ep_idle(imx21, ep_priv);
438} 535}
439 536
537static void nonisoc_urb_completed_for_etd(
538 struct imx21 *imx21, struct etd_priv *etd, int status)
539{
540 struct usb_host_endpoint *ep = etd->ep;
541
542 urb_done(imx21->hcd, etd->urb, status);
543 etd->urb = NULL;
544
545 if (!list_empty(&ep->urb_list)) {
546 struct urb *urb = list_first_entry(
547 &ep->urb_list, struct urb, urb_list);
548
549 dev_vdbg(imx21->dev, "next URB %p\n", urb);
550 schedule_nonisoc_etd(imx21, urb);
551 }
552}
553
554
440/* =========================================== */ 555/* =========================================== */
441/* ISOC Handling ... */ 556/* ISOC Handling ... */
442/* =========================================== */ 557/* =========================================== */
@@ -489,6 +604,8 @@ too_late:
489 etd->ep = td->ep; 604 etd->ep = td->ep;
490 etd->urb = td->urb; 605 etd->urb = td->urb;
491 etd->len = td->len; 606 etd->len = td->len;
607 etd->dma_handle = td->dma_handle;
608 etd->cpu_buffer = td->cpu_buffer;
492 609
493 debug_isoc_submitted(imx21, cur_frame, td); 610 debug_isoc_submitted(imx21, cur_frame, td);
494 611
@@ -502,16 +619,17 @@ too_late:
502 (TD_NOTACCESSED << DW3_COMPCODE0) | 619 (TD_NOTACCESSED << DW3_COMPCODE0) |
503 (td->len << DW3_PKTLEN0)); 620 (td->len << DW3_PKTLEN0));
504 621
505 activate_etd(imx21, etd_num, td->data, dir); 622 activate_etd(imx21, etd_num, dir);
506 } 623 }
507} 624}
508 625
509static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) 626static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
510{ 627{
511 struct imx21 *imx21 = hcd_to_imx21(hcd); 628 struct imx21 *imx21 = hcd_to_imx21(hcd);
512 int etd_mask = 1 << etd_num; 629 int etd_mask = 1 << etd_num;
513 struct urb_priv *urb_priv = urb->hcpriv;
514 struct etd_priv *etd = imx21->etd + etd_num; 630 struct etd_priv *etd = imx21->etd + etd_num;
631 struct urb *urb = etd->urb;
632 struct urb_priv *urb_priv = urb->hcpriv;
515 struct td *td = etd->td; 633 struct td *td = etd->td;
516 struct usb_host_endpoint *ep = etd->ep; 634 struct usb_host_endpoint *ep = etd->ep;
517 int isoc_index = td->isoc_index; 635 int isoc_index = td->isoc_index;
@@ -545,8 +663,13 @@ static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
545 bytes_xfrd, td->len, urb, etd_num, isoc_index); 663 bytes_xfrd, td->len, urb, etd_num, isoc_index);
546 } 664 }
547 665
548 if (dir_in) 666 if (dir_in) {
549 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); 667 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
668 if (!etd->dma_handle)
669 memcpy_fromio(etd->cpu_buffer,
670 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
671 bytes_xfrd);
672 }
550 673
551 urb->actual_length += bytes_xfrd; 674 urb->actual_length += bytes_xfrd;
552 urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd; 675 urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
@@ -569,30 +692,43 @@ static struct ep_priv *alloc_isoc_ep(
569 int i; 692 int i;
570 693
571 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); 694 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
572 if (ep_priv == NULL) 695 if (!ep_priv)
573 return NULL; 696 return NULL;
574 697
575 /* Allocate the ETDs */ 698 for (i = 0; i < NUM_ISO_ETDS; i++)
576 for (i = 0; i < NUM_ISO_ETDS; i++) { 699 ep_priv->etd[i] = -1;
577 ep_priv->etd[i] = alloc_etd(imx21);
578 if (ep_priv->etd[i] < 0) {
579 int j;
580 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
581 for (j = 0; j < i; j++)
582 free_etd(imx21, ep_priv->etd[j]);
583 goto alloc_etd_failed;
584 }
585 imx21->etd[ep_priv->etd[i]].ep = ep;
586 }
587 700
588 INIT_LIST_HEAD(&ep_priv->td_list); 701 INIT_LIST_HEAD(&ep_priv->td_list);
589 ep_priv->ep = ep; 702 ep_priv->ep = ep;
590 ep->hcpriv = ep_priv; 703 ep->hcpriv = ep_priv;
591 return ep_priv; 704 return ep_priv;
705}
706
707static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
708{
709 int i, j;
710 int etd_num;
711
712 /* Allocate the ETDs if required */
713 for (i = 0; i < NUM_ISO_ETDS; i++) {
714 if (ep_priv->etd[i] < 0) {
715 etd_num = alloc_etd(imx21);
716 if (etd_num < 0)
717 goto alloc_etd_failed;
718
719 ep_priv->etd[i] = etd_num;
720 imx21->etd[etd_num].ep = ep_priv->ep;
721 }
722 }
723 return 0;
592 724
593alloc_etd_failed: 725alloc_etd_failed:
594 kfree(ep_priv); 726 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
595 return NULL; 727 for (j = 0; j < i; j++) {
728 free_etd(imx21, ep_priv->etd[j]);
729 ep_priv->etd[j] = -1;
730 }
731 return -ENOMEM;
596} 732}
597 733
598static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, 734static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
@@ -632,6 +768,10 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
632 ep_priv = ep->hcpriv; 768 ep_priv = ep->hcpriv;
633 } 769 }
634 770
771 ret = alloc_isoc_etds(imx21, ep_priv);
772 if (ret)
773 goto alloc_etd_failed;
774
635 ret = usb_hcd_link_urb_to_ep(hcd, urb); 775 ret = usb_hcd_link_urb_to_ep(hcd, urb);
636 if (ret) 776 if (ret)
637 goto link_failed; 777 goto link_failed;
@@ -688,12 +828,14 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
688 /* set up transfers */ 828 /* set up transfers */
689 td = urb_priv->isoc_td; 829 td = urb_priv->isoc_td;
690 for (i = 0; i < urb->number_of_packets; i++, td++) { 830 for (i = 0; i < urb->number_of_packets; i++, td++) {
831 unsigned int offset = urb->iso_frame_desc[i].offset;
691 td->ep = ep; 832 td->ep = ep;
692 td->urb = urb; 833 td->urb = urb;
693 td->len = urb->iso_frame_desc[i].length; 834 td->len = urb->iso_frame_desc[i].length;
694 td->isoc_index = i; 835 td->isoc_index = i;
695 td->frame = wrap_frame(urb->start_frame + urb->interval * i); 836 td->frame = wrap_frame(urb->start_frame + urb->interval * i);
696 td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset; 837 td->dma_handle = urb->transfer_dma + offset;
838 td->cpu_buffer = urb->transfer_buffer + offset;
697 list_add_tail(&td->list, &ep_priv->td_list); 839 list_add_tail(&td->list, &ep_priv->td_list);
698 } 840 }
699 841
@@ -711,6 +853,7 @@ alloc_dmem_failed:
711 usb_hcd_unlink_urb_from_ep(hcd, urb); 853 usb_hcd_unlink_urb_from_ep(hcd, urb);
712 854
713link_failed: 855link_failed:
856alloc_etd_failed:
714alloc_ep_failed: 857alloc_ep_failed:
715 spin_unlock_irqrestore(&imx21->lock, flags); 858 spin_unlock_irqrestore(&imx21->lock, flags);
716 kfree(urb_priv->isoc_td); 859 kfree(urb_priv->isoc_td);
@@ -734,9 +877,7 @@ static void dequeue_isoc_urb(struct imx21 *imx21,
734 struct etd_priv *etd = imx21->etd + etd_num; 877 struct etd_priv *etd = imx21->etd + etd_num;
735 878
736 reset_etd(imx21, etd_num); 879 reset_etd(imx21, etd_num);
737 if (etd->dmem_size) 880 free_dmem(imx21, etd);
738 free_dmem(imx21, etd->dmem_offset);
739 etd->dmem_size = 0;
740 } 881 }
741 } 882 }
742 } 883 }
@@ -761,7 +902,6 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
761 int state = urb_priv->state; 902 int state = urb_priv->state;
762 int etd_num = ep_priv->etd[0]; 903 int etd_num = ep_priv->etd[0];
763 struct etd_priv *etd; 904 struct etd_priv *etd;
764 int dmem_offset;
765 u32 count; 905 u32 count;
766 u16 etd_buf_size; 906 u16 etd_buf_size;
767 u16 maxpacket; 907 u16 maxpacket;
@@ -786,13 +926,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
786 if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) { 926 if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
787 if (state == US_CTRL_SETUP) { 927 if (state == US_CTRL_SETUP) {
788 dir = TD_DIR_SETUP; 928 dir = TD_DIR_SETUP;
929 if (unsuitable_for_dma(urb->setup_dma))
930 unmap_urb_setup_for_dma(imx21->hcd, urb);
789 etd->dma_handle = urb->setup_dma; 931 etd->dma_handle = urb->setup_dma;
932 etd->cpu_buffer = urb->setup_packet;
790 bufround = 0; 933 bufround = 0;
791 count = 8; 934 count = 8;
792 datatoggle = TD_TOGGLE_DATA0; 935 datatoggle = TD_TOGGLE_DATA0;
793 } else { /* US_CTRL_ACK */ 936 } else { /* US_CTRL_ACK */
794 dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT; 937 dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
795 etd->dma_handle = urb->transfer_dma;
796 bufround = 0; 938 bufround = 0;
797 count = 0; 939 count = 0;
798 datatoggle = TD_TOGGLE_DATA1; 940 datatoggle = TD_TOGGLE_DATA1;
@@ -800,7 +942,11 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
800 } else { 942 } else {
801 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN; 943 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
802 bufround = (dir == TD_DIR_IN) ? 1 : 0; 944 bufround = (dir == TD_DIR_IN) ? 1 : 0;
945 if (unsuitable_for_dma(urb->transfer_dma))
946 unmap_urb_for_dma(imx21->hcd, urb);
947
803 etd->dma_handle = urb->transfer_dma; 948 etd->dma_handle = urb->transfer_dma;
949 etd->cpu_buffer = urb->transfer_buffer;
804 if (usb_pipebulk(pipe) && (state == US_BULK0)) 950 if (usb_pipebulk(pipe) && (state == US_BULK0))
805 count = 0; 951 count = 0;
806 else 952 else
@@ -855,8 +1001,8 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
855 1001
856 /* allocate x and y buffer space at once */ 1002 /* allocate x and y buffer space at once */
857 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket; 1003 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
858 dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); 1004 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
859 if (dmem_offset < 0) { 1005 if (etd->dmem_offset < 0) {
860 /* Setup everything we can in HW and update when we get DMEM */ 1006 /* Setup everything we can in HW and update when we get DMEM */
861 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16); 1007 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
862 1008
@@ -867,26 +1013,26 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
867 } 1013 }
868 1014
869 etd_writel(imx21, etd_num, 1, 1015 etd_writel(imx21, etd_num, 1,
870 (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | 1016 (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
871 (u32) dmem_offset); 1017 (u32) etd->dmem_offset);
872 1018
873 urb_priv->active = 1; 1019 urb_priv->active = 1;
874 1020
875 /* enable the ETD to kick off transfer */ 1021 /* enable the ETD to kick off transfer */
876 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n", 1022 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
877 etd_num, count, dir != TD_DIR_IN ? "out" : "in"); 1023 etd_num, count, dir != TD_DIR_IN ? "out" : "in");
878 activate_etd(imx21, etd_num, etd->dma_handle, dir); 1024 activate_etd(imx21, etd_num, dir);
879 1025
880} 1026}
881 1027
882static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) 1028static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
883{ 1029{
884 struct imx21 *imx21 = hcd_to_imx21(hcd); 1030 struct imx21 *imx21 = hcd_to_imx21(hcd);
885 struct etd_priv *etd = &imx21->etd[etd_num]; 1031 struct etd_priv *etd = &imx21->etd[etd_num];
1032 struct urb *urb = etd->urb;
886 u32 etd_mask = 1 << etd_num; 1033 u32 etd_mask = 1 << etd_num;
887 struct urb_priv *urb_priv = urb->hcpriv; 1034 struct urb_priv *urb_priv = urb->hcpriv;
888 int dir; 1035 int dir;
889 u16 xbufaddr;
890 int cc; 1036 int cc;
891 u32 bytes_xfrd; 1037 u32 bytes_xfrd;
892 int etd_done; 1038 int etd_done;
@@ -894,7 +1040,6 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
894 disactivate_etd(imx21, etd_num); 1040 disactivate_etd(imx21, etd_num);
895 1041
896 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3; 1042 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
897 xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
898 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf; 1043 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
899 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); 1044 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
900 1045
@@ -906,8 +1051,21 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
906 if (dir == TD_DIR_IN) { 1051 if (dir == TD_DIR_IN) {
907 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); 1052 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
908 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); 1053 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1054
1055 if (etd->bounce_buffer) {
1056 memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1057 dma_unmap_single(imx21->dev,
1058 etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1059 } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
1060 memcpy_fromio(etd->cpu_buffer,
1061 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1062 bytes_xfrd);
1063 }
909 } 1064 }
910 free_dmem(imx21, xbufaddr); 1065
1066 kfree(etd->bounce_buffer);
1067 etd->bounce_buffer = NULL;
1068 free_dmem(imx21, etd);
911 1069
912 urb->error_count = 0; 1070 urb->error_count = 0;
913 if (!(urb->transfer_flags & URB_SHORT_NOT_OK) 1071 if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -964,24 +1122,15 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
964 break; 1122 break;
965 } 1123 }
966 1124
967 if (!etd_done) { 1125 if (etd_done)
1126 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1127 else {
968 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state); 1128 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
969 schedule_nonisoc_etd(imx21, urb); 1129 schedule_nonisoc_etd(imx21, urb);
970 } else {
971 struct usb_host_endpoint *ep = urb->ep;
972
973 urb_done(hcd, urb, cc_to_error[cc]);
974 etd->urb = NULL;
975
976 if (!list_empty(&ep->urb_list)) {
977 urb = list_first_entry(&ep->urb_list,
978 struct urb, urb_list);
979 dev_vdbg(imx21->dev, "next URB %p\n", urb);
980 schedule_nonisoc_etd(imx21, urb);
981 }
982 } 1130 }
983} 1131}
984 1132
1133
985static struct ep_priv *alloc_ep(void) 1134static struct ep_priv *alloc_ep(void)
986{ 1135{
987 int i; 1136 int i;
@@ -1007,7 +1156,6 @@ static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1007 struct etd_priv *etd; 1156 struct etd_priv *etd;
1008 int ret; 1157 int ret;
1009 unsigned long flags; 1158 unsigned long flags;
1010 int new_ep = 0;
1011 1159
1012 dev_vdbg(imx21->dev, 1160 dev_vdbg(imx21->dev,
1013 "enqueue urb=%p ep=%p len=%d " 1161 "enqueue urb=%p ep=%p len=%d "
@@ -1035,7 +1183,6 @@ static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1035 } 1183 }
1036 ep->hcpriv = ep_priv; 1184 ep->hcpriv = ep_priv;
1037 ep_priv->ep = ep; 1185 ep_priv->ep = ep;
1038 new_ep = 1;
1039 } 1186 }
1040 1187
1041 ret = usb_hcd_link_urb_to_ep(hcd, urb); 1188 ret = usb_hcd_link_urb_to_ep(hcd, urb);
@@ -1124,9 +1271,13 @@ static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1124 } else if (urb_priv->active) { 1271 } else if (urb_priv->active) {
1125 int etd_num = ep_priv->etd[0]; 1272 int etd_num = ep_priv->etd[0];
1126 if (etd_num != -1) { 1273 if (etd_num != -1) {
1274 struct etd_priv *etd = &imx21->etd[etd_num];
1275
1127 disactivate_etd(imx21, etd_num); 1276 disactivate_etd(imx21, etd_num);
1128 free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff); 1277 free_dmem(imx21, etd);
1129 imx21->etd[etd_num].urb = NULL; 1278 etd->urb = NULL;
1279 kfree(etd->bounce_buffer);
1280 etd->bounce_buffer = NULL;
1130 } 1281 }
1131 } 1282 }
1132 1283
@@ -1226,9 +1377,9 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1226 } 1377 }
1227 1378
1228 if (usb_pipeisoc(etd->urb->pipe)) 1379 if (usb_pipeisoc(etd->urb->pipe))
1229 isoc_etd_done(hcd, etd->urb, etd_num); 1380 isoc_etd_done(hcd, etd_num);
1230 else 1381 else
1231 nonisoc_etd_done(hcd, etd->urb, etd_num); 1382 nonisoc_etd_done(hcd, etd_num);
1232 } 1383 }
1233 1384
1234 /* only enable SOF interrupt if it may be needed for the kludge */ 1385 /* only enable SOF interrupt if it may be needed for the kludge */
@@ -1696,6 +1847,7 @@ static int imx21_probe(struct platform_device *pdev)
1696 } 1847 }
1697 1848
1698 imx21 = hcd_to_imx21(hcd); 1849 imx21 = hcd_to_imx21(hcd);
1850 imx21->hcd = hcd;
1699 imx21->dev = &pdev->dev; 1851 imx21->dev = &pdev->dev;
1700 imx21->pdata = pdev->dev.platform_data; 1852 imx21->pdata = pdev->dev.platform_data;
1701 if (!imx21->pdata) 1853 if (!imx21->pdata)
@@ -1754,7 +1906,7 @@ failed_clock_set:
1754failed_clock_get: 1906failed_clock_get:
1755 iounmap(imx21->regs); 1907 iounmap(imx21->regs);
1756failed_ioremap: 1908failed_ioremap:
1757 release_mem_region(res->start, res->end - res->start); 1909 release_mem_region(res->start, resource_size(res));
1758failed_request_mem: 1910failed_request_mem:
1759 remove_debug_files(imx21); 1911 remove_debug_files(imx21);
1760 usb_put_hcd(hcd); 1912 usb_put_hcd(hcd);
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 1b0d913780a5..87b29fd971b4 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -250,6 +250,7 @@
250#define USBCTRL_USB_BYP (1 << 2) 250#define USBCTRL_USB_BYP (1 << 2)
251#define USBCTRL_HOST1_TXEN_OE (1 << 1) 251#define USBCTRL_HOST1_TXEN_OE (1 << 1)
252 252
253#define USBOTG_DMEM 0x1000
253 254
254/* Values in TD blocks */ 255/* Values in TD blocks */
255#define TD_DIR_SETUP 0 256#define TD_DIR_SETUP 0
@@ -346,8 +347,8 @@ struct td {
346 struct list_head list; 347 struct list_head list;
347 struct urb *urb; 348 struct urb *urb;
348 struct usb_host_endpoint *ep; 349 struct usb_host_endpoint *ep;
349 dma_addr_t data; 350 dma_addr_t dma_handle;
350 unsigned long buf_addr; 351 void *cpu_buffer;
351 int len; 352 int len;
352 int frame; 353 int frame;
353 int isoc_index; 354 int isoc_index;
@@ -360,6 +361,8 @@ struct etd_priv {
360 struct td *td; 361 struct td *td;
361 struct list_head queue; 362 struct list_head queue;
362 dma_addr_t dma_handle; 363 dma_addr_t dma_handle;
364 void *cpu_buffer;
365 void *bounce_buffer;
363 int alloc; 366 int alloc;
364 int len; 367 int len;
365 int dmem_size; 368 int dmem_size;
@@ -412,6 +415,7 @@ struct debug_isoc_trace {
412struct imx21 { 415struct imx21 {
413 spinlock_t lock; 416 spinlock_t lock;
414 struct device *dev; 417 struct device *dev;
418 struct usb_hcd *hcd;
415 struct mx21_usbh_platform_data *pdata; 419 struct mx21_usbh_platform_data *pdata;
416 struct list_head dmem_list; 420 struct list_head dmem_list;
417 struct list_head queue_for_etd; /* eps queued due to etd shortage */ 421 struct list_head queue_for_etd; /* eps queued due to etd shortage */
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index d9e82123de2a..0da7fc05f453 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1557,8 +1557,6 @@ static int isp116x_remove(struct platform_device *pdev)
1557 return 0; 1557 return 0;
1558} 1558}
1559 1559
1560#define resource_len(r) (((r)->end - (r)->start) + 1)
1561
1562static int __devinit isp116x_probe(struct platform_device *pdev) 1560static int __devinit isp116x_probe(struct platform_device *pdev)
1563{ 1561{
1564 struct usb_hcd *hcd; 1562 struct usb_hcd *hcd;
@@ -1597,7 +1595,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
1597 ret = -EBUSY; 1595 ret = -EBUSY;
1598 goto err1; 1596 goto err1;
1599 } 1597 }
1600 addr_reg = ioremap(addr->start, resource_len(addr)); 1598 addr_reg = ioremap(addr->start, resource_size(addr));
1601 if (addr_reg == NULL) { 1599 if (addr_reg == NULL) {
1602 ret = -ENOMEM; 1600 ret = -ENOMEM;
1603 goto err2; 1601 goto err2;
@@ -1606,7 +1604,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
1606 ret = -EBUSY; 1604 ret = -EBUSY;
1607 goto err3; 1605 goto err3;
1608 } 1606 }
1609 data_reg = ioremap(data->start, resource_len(data)); 1607 data_reg = ioremap(data->start, resource_size(data));
1610 if (data_reg == NULL) { 1608 if (data_reg == NULL) {
1611 ret = -ENOMEM; 1609 ret = -ENOMEM;
1612 goto err4; 1610 goto err4;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 0587ad4ce5c2..8196fa11fec4 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1676,13 +1676,6 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1676 switch (wValue) { 1676 switch (wValue) {
1677 case USB_PORT_FEAT_SUSPEND: 1677 case USB_PORT_FEAT_SUSPEND:
1678 _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); 1678 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1679#ifdef CONFIG_USB_OTG
1680 if (ohci->hcd.self.otg_port == (wIndex + 1) &&
1681 ohci->hcd.self.b_hnp_enable) {
1682 start_hnp(ohci);
1683 break;
1684 }
1685#endif
1686 spin_lock_irqsave(&isp1362_hcd->lock, flags); 1679 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1687 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); 1680 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1688 isp1362_hcd->rhport[wIndex] = 1681 isp1362_hcd->rhport[wIndex] =
@@ -2656,8 +2649,6 @@ static struct hc_driver isp1362_hc_driver = {
2656 2649
2657/*-------------------------------------------------------------------------*/ 2650/*-------------------------------------------------------------------------*/
2658 2651
2659#define resource_len(r) (((r)->end - (r)->start) + 1)
2660
2661static int __devexit isp1362_remove(struct platform_device *pdev) 2652static int __devexit isp1362_remove(struct platform_device *pdev)
2662{ 2653{
2663 struct usb_hcd *hcd = platform_get_drvdata(pdev); 2654 struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -2679,12 +2670,12 @@ static int __devexit isp1362_remove(struct platform_device *pdev)
2679 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2670 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2680 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); 2671 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2681 if (res) 2672 if (res)
2682 release_mem_region(res->start, resource_len(res)); 2673 release_mem_region(res->start, resource_size(res));
2683 2674
2684 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2675 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2685 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); 2676 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2686 if (res) 2677 if (res)
2687 release_mem_region(res->start, resource_len(res)); 2678 release_mem_region(res->start, resource_size(res));
2688 2679
2689 DBG(0, "%s: put_hcd\n", __func__); 2680 DBG(0, "%s: put_hcd\n", __func__);
2690 usb_put_hcd(hcd); 2681 usb_put_hcd(hcd);
@@ -2730,21 +2721,21 @@ static int __init isp1362_probe(struct platform_device *pdev)
2730 goto err1; 2721 goto err1;
2731 } 2722 }
2732 2723
2733 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { 2724 if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
2734 retval = -EBUSY; 2725 retval = -EBUSY;
2735 goto err1; 2726 goto err1;
2736 } 2727 }
2737 addr_reg = ioremap(addr->start, resource_len(addr)); 2728 addr_reg = ioremap(addr->start, resource_size(addr));
2738 if (addr_reg == NULL) { 2729 if (addr_reg == NULL) {
2739 retval = -ENOMEM; 2730 retval = -ENOMEM;
2740 goto err2; 2731 goto err2;
2741 } 2732 }
2742 2733
2743 if (!request_mem_region(data->start, resource_len(data), hcd_name)) { 2734 if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
2744 retval = -EBUSY; 2735 retval = -EBUSY;
2745 goto err3; 2736 goto err3;
2746 } 2737 }
2747 data_reg = ioremap(data->start, resource_len(data)); 2738 data_reg = ioremap(data->start, resource_size(data));
2748 if (data_reg == NULL) { 2739 if (data_reg == NULL) {
2749 retval = -ENOMEM; 2740 retval = -ENOMEM;
2750 goto err4; 2741 goto err4;
@@ -2802,13 +2793,13 @@ static int __init isp1362_probe(struct platform_device *pdev)
2802 iounmap(data_reg); 2793 iounmap(data_reg);
2803 err4: 2794 err4:
2804 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); 2795 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2805 release_mem_region(data->start, resource_len(data)); 2796 release_mem_region(data->start, resource_size(data));
2806 err3: 2797 err3:
2807 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg); 2798 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2808 iounmap(addr_reg); 2799 iounmap(addr_reg);
2809 err2: 2800 err2:
2810 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); 2801 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
2811 release_mem_region(addr->start, resource_len(addr)); 2802 release_mem_region(addr->start, resource_size(addr));
2812 err1: 2803 err1:
2813 pr_err("%s: init error, %d\n", __func__, retval); 2804 pr_err("%s: init error, %d\n", __func__, retval);
2814 2805
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c3b4ccc7337b..3b5785032a10 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -398,7 +398,14 @@ ohci_shutdown (struct usb_hcd *hcd)
398 398
399 ohci = hcd_to_ohci (hcd); 399 ohci = hcd_to_ohci (hcd);
400 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 400 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
401 ohci_usb_reset (ohci); 401 ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
402
403 /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
404 ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
405 OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
406 OHCI_CTRL_RWC);
407 ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
408
402 /* flush the writes */ 409 /* flush the writes */
403 (void) ohci_readl (ohci, &ohci->regs->control); 410 (void) ohci_readl (ohci, &ohci->regs->control);
404} 411}
@@ -1270,6 +1277,9 @@ static void __exit ohci_hcd_mod_exit(void)
1270#ifdef PLATFORM_DRIVER 1277#ifdef PLATFORM_DRIVER
1271 platform_driver_unregister(&PLATFORM_DRIVER); 1278 platform_driver_unregister(&PLATFORM_DRIVER);
1272#endif 1279#endif
1280#ifdef OMAP3_PLATFORM_DRIVER
1281 platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
1282#endif
1273#ifdef PS3_SYSTEM_BUS_DRIVER 1283#ifdef PS3_SYSTEM_BUS_DRIVER
1274 ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); 1284 ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1275#endif 1285#endif
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 6bdc8b25a6a1..36ee9a666e93 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -201,6 +201,20 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
201 return 0; 201 return 0;
202} 202}
203 203
204/* nVidia controllers continue to drive Reset signalling on the bus
205 * even after system shutdown, wasting power. This flag tells the
206 * shutdown routine to leave the controller OPERATIONAL instead of RESET.
207 */
208static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
209{
210 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
211
212 ohci->flags |= OHCI_QUIRK_SHUTDOWN;
213 ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
214
215 return 0;
216}
217
204/* 218/*
205 * The hardware normally enables the A-link power management feature, which 219 * The hardware normally enables the A-link power management feature, which
206 * lets the system lower the power consumption in idle states. 220 * lets the system lower the power consumption in idle states.
@@ -332,6 +346,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
332 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), 346 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
333 .driver_data = (unsigned long)ohci_quirk_amd700, 347 .driver_data = (unsigned long)ohci_quirk_amd700,
334 }, 348 },
349 {
350 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
351 .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
352 },
335 353
336 /* FIXME for some of the early AMD 760 southbridges, OHCI 354 /* FIXME for some of the early AMD 760 southbridges, OHCI
337 * won't work at all. blacklist them. 355 * won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index 60f03cc7ec4f..0b35d22cc70e 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -77,7 +77,6 @@ static const struct hc_driver ohci_sh_hc_driver = {
77 77
78/*-------------------------------------------------------------------------*/ 78/*-------------------------------------------------------------------------*/
79 79
80#define resource_len(r) (((r)->end - (r)->start) + 1)
81static int ohci_hcd_sh_probe(struct platform_device *pdev) 80static int ohci_hcd_sh_probe(struct platform_device *pdev)
82{ 81{
83 struct resource *res = NULL; 82 struct resource *res = NULL;
@@ -109,7 +108,7 @@ static int ohci_hcd_sh_probe(struct platform_device *pdev)
109 108
110 hcd->regs = (void __iomem *)res->start; 109 hcd->regs = (void __iomem *)res->start;
111 hcd->rsrc_start = res->start; 110 hcd->rsrc_start = res->start;
112 hcd->rsrc_len = resource_len(res); 111 hcd->rsrc_len = resource_size(res);
113 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); 112 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
114 if (ret != 0) { 113 if (ret != 0) {
115 err("Failed to add hcd"); 114 err("Failed to add hcd");
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index cff23637cfcc..041d30f30c10 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -168,7 +168,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
168 168
169 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 169 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
170 if (retval) 170 if (retval)
171 goto err4; 171 goto err5;
172 172
173 /* enable power and unmask interrupts */ 173 /* enable power and unmask interrupts */
174 174
@@ -176,6 +176,8 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
176 sm501_modify_reg(dev->parent, SM501_IRQ_MASK, 1 << 6, 0); 176 sm501_modify_reg(dev->parent, SM501_IRQ_MASK, 1 << 6, 0);
177 177
178 return 0; 178 return 0;
179err5:
180 iounmap(hcd->regs);
179err4: 181err4:
180 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 182 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
181err3: 183err3:
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 5bf15fed0d9f..51facb985c84 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,6 +403,7 @@ struct ohci_hcd {
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ 404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
406#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
406 // there are also chip quirks/bugs in init logic 407 // there are also chip quirks/bugs in init logic
407 408
408 struct work_struct nec_work; /* Worker for NEC quirk */ 409 struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index d9c85a292737..d32c3eae99cb 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3696,7 +3696,7 @@ static void oxu_configuration(struct platform_device *pdev, void *base)
3696static int oxu_verify_id(struct platform_device *pdev, void *base) 3696static int oxu_verify_id(struct platform_device *pdev, void *base)
3697{ 3697{
3698 u32 id; 3698 u32 id;
3699 char *bo[] = { 3699 static const char * const bo[] = {
3700 "reserved", 3700 "reserved",
3701 "128-pin LQFP", 3701 "128-pin LQFP",
3702 "84-pin TFBGA", 3702 "84-pin TFBGA",
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 83b5f9cea85a..464ed977b45d 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -169,6 +169,7 @@ static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
169static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) 169static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
170{ 170{
171 void __iomem *base; 171 void __iomem *base;
172 u32 control;
172 173
173 if (!mmio_resource_enabled(pdev, 0)) 174 if (!mmio_resource_enabled(pdev, 0))
174 return; 175 return;
@@ -177,10 +178,14 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
177 if (base == NULL) 178 if (base == NULL)
178 return; 179 return;
179 180
181 control = readl(base + OHCI_CONTROL);
182
180/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ 183/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
181#ifndef __hppa__ 184#ifdef __hppa__
182{ 185#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
183 u32 control = readl(base + OHCI_CONTROL); 186#else
187#define OHCI_CTRL_MASK OHCI_CTRL_RWC
188
184 if (control & OHCI_CTRL_IR) { 189 if (control & OHCI_CTRL_IR) {
185 int wait_time = 500; /* arbitrary; 5 seconds */ 190 int wait_time = 500; /* arbitrary; 5 seconds */
186 writel(OHCI_INTR_OC, base + OHCI_INTRENABLE); 191 writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
@@ -194,13 +199,12 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
194 dev_warn(&pdev->dev, "OHCI: BIOS handoff failed" 199 dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
195 " (BIOS bug?) %08x\n", 200 " (BIOS bug?) %08x\n",
196 readl(base + OHCI_CONTROL)); 201 readl(base + OHCI_CONTROL));
197
198 /* reset controller, preserving RWC */
199 writel(control & OHCI_CTRL_RWC, base + OHCI_CONTROL);
200 } 202 }
201}
202#endif 203#endif
203 204
205 /* reset controller, preserving RWC (and possibly IR) */
206 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
207
204 /* 208 /*
205 * disable interrupts 209 * disable interrupts
206 */ 210 */
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 95d0f5adfdcf..25563e9a90bc 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -227,7 +227,7 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
227 int odd = len & 0x0001; 227 int odd = len & 0x0001;
228 228
229 len = len / 2; 229 len = len / 2;
230 ioread16_rep(fifoaddr, buf, len); 230 iowrite16_rep(fifoaddr, buf, len);
231 if (unlikely(odd)) { 231 if (unlikely(odd)) {
232 buf = &buf[len]; 232 buf = &buf[len];
233 iowrite8((unsigned char)*buf, fifoaddr); 233 iowrite8((unsigned char)*buf, fifoaddr);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index d3ade4018487..2090b45eb606 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -917,10 +917,13 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
917 unsigned long destination, status; 917 unsigned long destination, status;
918 int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); 918 int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
919 int len = urb->transfer_buffer_length; 919 int len = urb->transfer_buffer_length;
920 dma_addr_t data = urb->transfer_dma; 920 int this_sg_len;
921 dma_addr_t data;
921 __le32 *plink; 922 __le32 *plink;
922 struct urb_priv *urbp = urb->hcpriv; 923 struct urb_priv *urbp = urb->hcpriv;
923 unsigned int toggle; 924 unsigned int toggle;
925 struct scatterlist *sg;
926 int i;
924 927
925 if (len < 0) 928 if (len < 0)
926 return -EINVAL; 929 return -EINVAL;
@@ -937,12 +940,26 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
937 if (usb_pipein(urb->pipe)) 940 if (usb_pipein(urb->pipe))
938 status |= TD_CTRL_SPD; 941 status |= TD_CTRL_SPD;
939 942
943 i = urb->num_sgs;
944 if (len > 0 && i > 0) {
945 sg = urb->sg;
946 data = sg_dma_address(sg);
947
948 /* urb->transfer_buffer_length may be smaller than the
949 * size of the scatterlist (or vice versa)
950 */
951 this_sg_len = min_t(int, sg_dma_len(sg), len);
952 } else {
953 sg = NULL;
954 data = urb->transfer_dma;
955 this_sg_len = len;
956 }
940 /* 957 /*
941 * Build the DATA TDs 958 * Build the DATA TDs
942 */ 959 */
943 plink = NULL; 960 plink = NULL;
944 td = qh->dummy_td; 961 td = qh->dummy_td;
945 do { /* Allow zero length packets */ 962 for (;;) { /* Allow zero length packets */
946 int pktsze = maxsze; 963 int pktsze = maxsze;
947 964
948 if (len <= pktsze) { /* The last packet */ 965 if (len <= pktsze) { /* The last packet */
@@ -965,10 +982,18 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
965 plink = &td->link; 982 plink = &td->link;
966 status |= TD_CTRL_ACTIVE; 983 status |= TD_CTRL_ACTIVE;
967 984
985 toggle ^= 1;
968 data += pktsze; 986 data += pktsze;
987 this_sg_len -= pktsze;
969 len -= maxsze; 988 len -= maxsze;
970 toggle ^= 1; 989 if (this_sg_len <= 0) {
971 } while (len > 0); 990 if (--i <= 0 || len <= 0)
991 break;
992 sg = sg_next(sg);
993 data = sg_dma_address(sg);
994 this_sg_len = min_t(int, sg_dma_len(sg), len);
995 }
996 }
972 997
973 /* 998 /*
974 * URB_ZERO_PACKET means adding a 0-length packet, if direction 999 * URB_ZERO_PACKET means adding a 0-length packet, if direction
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
index 11e5040b8337..26df0138079e 100644
--- a/drivers/usb/host/whci/Kbuild
+++ b/drivers/usb/host/whci/Kbuild
@@ -3,7 +3,7 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
3whci-hcd-y := \ 3whci-hcd-y := \
4 asl.o \ 4 asl.o \
5 debug.o \ 5 debug.o \
6 hcd.o \ 6 hcd.o \
7 hw.o \ 7 hw.o \
8 init.o \ 8 init.o \
9 int.o \ 9 int.o \
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a1a7a9795536..fef5a1f9d483 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -24,6 +24,10 @@
24 24
25#include "xhci.h" 25#include "xhci.h"
26 26
27#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
28#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
29 PORT_RC | PORT_PLC | PORT_PE)
30
27static void xhci_hub_descriptor(struct xhci_hcd *xhci, 31static void xhci_hub_descriptor(struct xhci_hcd *xhci,
28 struct usb_hub_descriptor *desc) 32 struct usb_hub_descriptor *desc)
29{ 33{
@@ -123,12 +127,105 @@ static unsigned int xhci_port_speed(unsigned int port_status)
123 * writing a 0 clears the bit and writing a 1 sets the bit (RWS). 127 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
124 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. 128 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
125 */ 129 */
126static u32 xhci_port_state_to_neutral(u32 state) 130u32 xhci_port_state_to_neutral(u32 state)
127{ 131{
128 /* Save read-only status and port state */ 132 /* Save read-only status and port state */
129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); 133 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
130} 134}
131 135
136/*
137 * find slot id based on port number.
138 */
139int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port)
140{
141 int slot_id;
142 int i;
143
144 slot_id = 0;
145 for (i = 0; i < MAX_HC_SLOTS; i++) {
146 if (!xhci->devs[i])
147 continue;
148 if (xhci->devs[i]->port == port) {
149 slot_id = i;
150 break;
151 }
152 }
153
154 return slot_id;
155}
156
157/*
158 * Stop device
159 * It issues stop endpoint command for EP 0 to 30. And wait the last command
160 * to complete.
161 * suspend will set to 1, if suspend bit need to set in command.
162 */
163static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
164{
165 struct xhci_virt_device *virt_dev;
166 struct xhci_command *cmd;
167 unsigned long flags;
168 int timeleft;
169 int ret;
170 int i;
171
172 ret = 0;
173 virt_dev = xhci->devs[slot_id];
174 cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
175 if (!cmd) {
176 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
177 return -ENOMEM;
178 }
179
180 spin_lock_irqsave(&xhci->lock, flags);
181 for (i = LAST_EP_INDEX; i > 0; i--) {
182 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
183 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
184 }
185 cmd->command_trb = xhci->cmd_ring->enqueue;
186 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
187 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
188 xhci_ring_cmd_db(xhci);
189 spin_unlock_irqrestore(&xhci->lock, flags);
190
191 /* Wait for last stop endpoint command to finish */
192 timeleft = wait_for_completion_interruptible_timeout(
193 cmd->completion,
194 USB_CTRL_SET_TIMEOUT);
195 if (timeleft <= 0) {
196 xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
197 timeleft == 0 ? "Timeout" : "Signal");
198 spin_lock_irqsave(&xhci->lock, flags);
199 /* The timeout might have raced with the event ring handler, so
200 * only delete from the list if the item isn't poisoned.
201 */
202 if (cmd->cmd_list.next != LIST_POISON1)
203 list_del(&cmd->cmd_list);
204 spin_unlock_irqrestore(&xhci->lock, flags);
205 ret = -ETIME;
206 goto command_cleanup;
207 }
208
209command_cleanup:
210 xhci_free_command(xhci, cmd);
211 return ret;
212}
213
214/*
215 * Ring device, it rings the all doorbells unconditionally.
216 */
217void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
218{
219 int i;
220
221 for (i = 0; i < LAST_EP_INDEX + 1; i++)
222 if (xhci->devs[slot_id]->eps[i].ring &&
223 xhci->devs[slot_id]->eps[i].ring->dequeue)
224 xhci_ring_ep_doorbell(xhci, slot_id, i, 0);
225
226 return;
227}
228
132static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, 229static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
133 u32 __iomem *addr, u32 port_status) 230 u32 __iomem *addr, u32 port_status)
134{ 231{
@@ -162,6 +259,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
162 status = PORT_PEC; 259 status = PORT_PEC;
163 port_change_bit = "enable/disable"; 260 port_change_bit = "enable/disable";
164 break; 261 break;
262 case USB_PORT_FEAT_C_SUSPEND:
263 status = PORT_PLC;
264 port_change_bit = "suspend/resume";
265 break;
165 default: 266 default:
166 /* Should never happen */ 267 /* Should never happen */
167 return; 268 return;
@@ -179,9 +280,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
179 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 280 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
180 int ports; 281 int ports;
181 unsigned long flags; 282 unsigned long flags;
182 u32 temp, status; 283 u32 temp, temp1, status;
183 int retval = 0; 284 int retval = 0;
184 u32 __iomem *addr; 285 u32 __iomem *addr;
286 int slot_id;
185 287
186 ports = HCS_MAX_PORTS(xhci->hcs_params1); 288 ports = HCS_MAX_PORTS(xhci->hcs_params1);
187 289
@@ -211,9 +313,49 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
211 if ((temp & PORT_OCC)) 313 if ((temp & PORT_OCC))
212 status |= USB_PORT_STAT_C_OVERCURRENT << 16; 314 status |= USB_PORT_STAT_C_OVERCURRENT << 16;
213 /* 315 /*
214 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific 316 * FIXME ignoring reset and USB 2.1/3.0 specific
215 * changes 317 * changes
216 */ 318 */
319 if ((temp & PORT_PLS_MASK) == XDEV_U3
320 && (temp & PORT_POWER))
321 status |= 1 << USB_PORT_FEAT_SUSPEND;
322 if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
323 if ((temp & PORT_RESET) || !(temp & PORT_PE))
324 goto error;
325 if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
326 xhci->resume_done[wIndex])) {
327 xhci_dbg(xhci, "Resume USB2 port %d\n",
328 wIndex + 1);
329 xhci->resume_done[wIndex] = 0;
330 temp1 = xhci_port_state_to_neutral(temp);
331 temp1 &= ~PORT_PLS_MASK;
332 temp1 |= PORT_LINK_STROBE | XDEV_U0;
333 xhci_writel(xhci, temp1, addr);
334
335 xhci_dbg(xhci, "set port %d resume\n",
336 wIndex + 1);
337 slot_id = xhci_find_slot_id_by_port(xhci,
338 wIndex + 1);
339 if (!slot_id) {
340 xhci_dbg(xhci, "slot_id is zero\n");
341 goto error;
342 }
343 xhci_ring_device(xhci, slot_id);
344 xhci->port_c_suspend[wIndex >> 5] |=
345 1 << (wIndex & 31);
346 xhci->suspended_ports[wIndex >> 5] &=
347 ~(1 << (wIndex & 31));
348 }
349 }
350 if ((temp & PORT_PLS_MASK) == XDEV_U0
351 && (temp & PORT_POWER)
352 && (xhci->suspended_ports[wIndex >> 5] &
353 (1 << (wIndex & 31)))) {
354 xhci->suspended_ports[wIndex >> 5] &=
355 ~(1 << (wIndex & 31));
356 xhci->port_c_suspend[wIndex >> 5] |=
357 1 << (wIndex & 31);
358 }
217 if (temp & PORT_CONNECT) { 359 if (temp & PORT_CONNECT) {
218 status |= USB_PORT_STAT_CONNECTION; 360 status |= USB_PORT_STAT_CONNECTION;
219 status |= xhci_port_speed(temp); 361 status |= xhci_port_speed(temp);
@@ -226,6 +368,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
226 status |= USB_PORT_STAT_RESET; 368 status |= USB_PORT_STAT_RESET;
227 if (temp & PORT_POWER) 369 if (temp & PORT_POWER)
228 status |= USB_PORT_STAT_POWER; 370 status |= USB_PORT_STAT_POWER;
371 if (xhci->port_c_suspend[wIndex >> 5] & (1 << (wIndex & 31)))
372 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
229 xhci_dbg(xhci, "Get port status returned 0x%x\n", status); 373 xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
230 put_unaligned(cpu_to_le32(status), (__le32 *) buf); 374 put_unaligned(cpu_to_le32(status), (__le32 *) buf);
231 break; 375 break;
@@ -238,6 +382,42 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
238 temp = xhci_readl(xhci, addr); 382 temp = xhci_readl(xhci, addr);
239 temp = xhci_port_state_to_neutral(temp); 383 temp = xhci_port_state_to_neutral(temp);
240 switch (wValue) { 384 switch (wValue) {
385 case USB_PORT_FEAT_SUSPEND:
386 temp = xhci_readl(xhci, addr);
387 /* In spec software should not attempt to suspend
388 * a port unless the port reports that it is in the
389 * enabled (PED = ‘1’,PLS < ‘3’) state.
390 */
391 if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
392 || (temp & PORT_PLS_MASK) >= XDEV_U3) {
393 xhci_warn(xhci, "USB core suspending device "
394 "not in U0/U1/U2.\n");
395 goto error;
396 }
397
398 slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
399 if (!slot_id) {
400 xhci_warn(xhci, "slot_id is zero\n");
401 goto error;
402 }
403 /* unlock to execute stop endpoint commands */
404 spin_unlock_irqrestore(&xhci->lock, flags);
405 xhci_stop_device(xhci, slot_id, 1);
406 spin_lock_irqsave(&xhci->lock, flags);
407
408 temp = xhci_port_state_to_neutral(temp);
409 temp &= ~PORT_PLS_MASK;
410 temp |= PORT_LINK_STROBE | XDEV_U3;
411 xhci_writel(xhci, temp, addr);
412
413 spin_unlock_irqrestore(&xhci->lock, flags);
414 msleep(10); /* wait device to enter */
415 spin_lock_irqsave(&xhci->lock, flags);
416
417 temp = xhci_readl(xhci, addr);
418 xhci->suspended_ports[wIndex >> 5] |=
419 1 << (wIndex & (31));
420 break;
241 case USB_PORT_FEAT_POWER: 421 case USB_PORT_FEAT_POWER:
242 /* 422 /*
243 * Turn on ports, even if there isn't per-port switching. 423 * Turn on ports, even if there isn't per-port switching.
@@ -271,6 +451,52 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
271 temp = xhci_readl(xhci, addr); 451 temp = xhci_readl(xhci, addr);
272 temp = xhci_port_state_to_neutral(temp); 452 temp = xhci_port_state_to_neutral(temp);
273 switch (wValue) { 453 switch (wValue) {
454 case USB_PORT_FEAT_SUSPEND:
455 temp = xhci_readl(xhci, addr);
456 xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
457 xhci_dbg(xhci, "PORTSC %04x\n", temp);
458 if (temp & PORT_RESET)
459 goto error;
460 if (temp & XDEV_U3) {
461 if ((temp & PORT_PE) == 0)
462 goto error;
463 if (DEV_SUPERSPEED(temp)) {
464 temp = xhci_port_state_to_neutral(temp);
465 temp &= ~PORT_PLS_MASK;
466 temp |= PORT_LINK_STROBE | XDEV_U0;
467 xhci_writel(xhci, temp, addr);
468 xhci_readl(xhci, addr);
469 } else {
470 temp = xhci_port_state_to_neutral(temp);
471 temp &= ~PORT_PLS_MASK;
472 temp |= PORT_LINK_STROBE | XDEV_RESUME;
473 xhci_writel(xhci, temp, addr);
474
475 spin_unlock_irqrestore(&xhci->lock,
476 flags);
477 msleep(20);
478 spin_lock_irqsave(&xhci->lock, flags);
479
480 temp = xhci_readl(xhci, addr);
481 temp = xhci_port_state_to_neutral(temp);
482 temp &= ~PORT_PLS_MASK;
483 temp |= PORT_LINK_STROBE | XDEV_U0;
484 xhci_writel(xhci, temp, addr);
485 }
486 xhci->port_c_suspend[wIndex >> 5] |=
487 1 << (wIndex & 31);
488 }
489
490 slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
491 if (!slot_id) {
492 xhci_dbg(xhci, "slot_id is zero\n");
493 goto error;
494 }
495 xhci_ring_device(xhci, slot_id);
496 break;
497 case USB_PORT_FEAT_C_SUSPEND:
498 xhci->port_c_suspend[wIndex >> 5] &=
499 ~(1 << (wIndex & 31));
274 case USB_PORT_FEAT_C_RESET: 500 case USB_PORT_FEAT_C_RESET:
275 case USB_PORT_FEAT_C_CONNECTION: 501 case USB_PORT_FEAT_C_CONNECTION:
276 case USB_PORT_FEAT_C_OVER_CURRENT: 502 case USB_PORT_FEAT_C_OVER_CURRENT:
@@ -306,6 +532,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
306{ 532{
307 unsigned long flags; 533 unsigned long flags;
308 u32 temp, status; 534 u32 temp, status;
535 u32 mask;
309 int i, retval; 536 int i, retval;
310 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 537 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
311 int ports; 538 int ports;
@@ -318,13 +545,18 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
318 memset(buf, 0, retval); 545 memset(buf, 0, retval);
319 status = 0; 546 status = 0;
320 547
548 mask = PORT_CSC | PORT_PEC | PORT_OCC;
549
321 spin_lock_irqsave(&xhci->lock, flags); 550 spin_lock_irqsave(&xhci->lock, flags);
322 /* For each port, did anything change? If so, set that bit in buf. */ 551 /* For each port, did anything change? If so, set that bit in buf. */
323 for (i = 0; i < ports; i++) { 552 for (i = 0; i < ports; i++) {
324 addr = &xhci->op_regs->port_status_base + 553 addr = &xhci->op_regs->port_status_base +
325 NUM_PORT_REGS*i; 554 NUM_PORT_REGS*i;
326 temp = xhci_readl(xhci, addr); 555 temp = xhci_readl(xhci, addr);
327 if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) { 556 if ((temp & mask) != 0 ||
557 (xhci->port_c_suspend[i >> 5] & 1 << (i & 31)) ||
558 (xhci->resume_done[i] && time_after_eq(
559 jiffies, xhci->resume_done[i]))) {
328 buf[(i + 1) / 8] |= 1 << (i + 1) % 8; 560 buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
329 status = 1; 561 status = 1;
330 } 562 }
@@ -332,3 +564,182 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
332 spin_unlock_irqrestore(&xhci->lock, flags); 564 spin_unlock_irqrestore(&xhci->lock, flags);
333 return status ? retval : 0; 565 return status ? retval : 0;
334} 566}
567
568#ifdef CONFIG_PM
569
570int xhci_bus_suspend(struct usb_hcd *hcd)
571{
572 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
573 int port;
574 unsigned long flags;
575
576 xhci_dbg(xhci, "suspend root hub\n");
577
578 spin_lock_irqsave(&xhci->lock, flags);
579
580 if (hcd->self.root_hub->do_remote_wakeup) {
581 port = HCS_MAX_PORTS(xhci->hcs_params1);
582 while (port--) {
583 if (xhci->resume_done[port] != 0) {
584 spin_unlock_irqrestore(&xhci->lock, flags);
585 xhci_dbg(xhci, "suspend failed because "
586 "port %d is resuming\n",
587 port + 1);
588 return -EBUSY;
589 }
590 }
591 }
592
593 port = HCS_MAX_PORTS(xhci->hcs_params1);
594 xhci->bus_suspended = 0;
595 while (port--) {
596 /* suspend the port if the port is not suspended */
597 u32 __iomem *addr;
598 u32 t1, t2;
599 int slot_id;
600
601 addr = &xhci->op_regs->port_status_base +
602 NUM_PORT_REGS * (port & 0xff);
603 t1 = xhci_readl(xhci, addr);
604 t2 = xhci_port_state_to_neutral(t1);
605
606 if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
607 xhci_dbg(xhci, "port %d not suspended\n", port);
608 slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
609 if (slot_id) {
610 spin_unlock_irqrestore(&xhci->lock, flags);
611 xhci_stop_device(xhci, slot_id, 1);
612 spin_lock_irqsave(&xhci->lock, flags);
613 }
614 t2 &= ~PORT_PLS_MASK;
615 t2 |= PORT_LINK_STROBE | XDEV_U3;
616 set_bit(port, &xhci->bus_suspended);
617 }
618 if (hcd->self.root_hub->do_remote_wakeup) {
619 if (t1 & PORT_CONNECT) {
620 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
621 t2 &= ~PORT_WKCONN_E;
622 } else {
623 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
624 t2 &= ~PORT_WKDISC_E;
625 }
626 } else
627 t2 &= ~PORT_WAKE_BITS;
628
629 t1 = xhci_port_state_to_neutral(t1);
630 if (t1 != t2)
631 xhci_writel(xhci, t2, addr);
632
633 if (DEV_HIGHSPEED(t1)) {
634 /* enable remote wake up for USB 2.0 */
635 u32 __iomem *addr;
636 u32 tmp;
637
638 addr = &xhci->op_regs->port_power_base +
639 NUM_PORT_REGS * (port & 0xff);
640 tmp = xhci_readl(xhci, addr);
641 tmp |= PORT_RWE;
642 xhci_writel(xhci, tmp, addr);
643 }
644 }
645 hcd->state = HC_STATE_SUSPENDED;
646 xhci->next_statechange = jiffies + msecs_to_jiffies(10);
647 spin_unlock_irqrestore(&xhci->lock, flags);
648 return 0;
649}
650
651int xhci_bus_resume(struct usb_hcd *hcd)
652{
653 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
654 int port;
655 u32 temp;
656 unsigned long flags;
657
658 xhci_dbg(xhci, "resume root hub\n");
659
660 if (time_before(jiffies, xhci->next_statechange))
661 msleep(5);
662
663 spin_lock_irqsave(&xhci->lock, flags);
664 if (!HCD_HW_ACCESSIBLE(hcd)) {
665 spin_unlock_irqrestore(&xhci->lock, flags);
666 return -ESHUTDOWN;
667 }
668
669 /* delay the irqs */
670 temp = xhci_readl(xhci, &xhci->op_regs->command);
671 temp &= ~CMD_EIE;
672 xhci_writel(xhci, temp, &xhci->op_regs->command);
673
674 port = HCS_MAX_PORTS(xhci->hcs_params1);
675 while (port--) {
676 /* Check whether need resume ports. If needed
677 resume port and disable remote wakeup */
678 u32 __iomem *addr;
679 u32 temp;
680 int slot_id;
681
682 addr = &xhci->op_regs->port_status_base +
683 NUM_PORT_REGS * (port & 0xff);
684 temp = xhci_readl(xhci, addr);
685 if (DEV_SUPERSPEED(temp))
686 temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
687 else
688 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
689 if (test_bit(port, &xhci->bus_suspended) &&
690 (temp & PORT_PLS_MASK)) {
691 if (DEV_SUPERSPEED(temp)) {
692 temp = xhci_port_state_to_neutral(temp);
693 temp &= ~PORT_PLS_MASK;
694 temp |= PORT_LINK_STROBE | XDEV_U0;
695 xhci_writel(xhci, temp, addr);
696 } else {
697 temp = xhci_port_state_to_neutral(temp);
698 temp &= ~PORT_PLS_MASK;
699 temp |= PORT_LINK_STROBE | XDEV_RESUME;
700 xhci_writel(xhci, temp, addr);
701
702 spin_unlock_irqrestore(&xhci->lock, flags);
703 msleep(20);
704 spin_lock_irqsave(&xhci->lock, flags);
705
706 temp = xhci_readl(xhci, addr);
707 temp = xhci_port_state_to_neutral(temp);
708 temp &= ~PORT_PLS_MASK;
709 temp |= PORT_LINK_STROBE | XDEV_U0;
710 xhci_writel(xhci, temp, addr);
711 }
712 slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
713 if (slot_id)
714 xhci_ring_device(xhci, slot_id);
715 } else
716 xhci_writel(xhci, temp, addr);
717
718 if (DEV_HIGHSPEED(temp)) {
719 /* disable remote wake up for USB 2.0 */
720 u32 __iomem *addr;
721 u32 tmp;
722
723 addr = &xhci->op_regs->port_power_base +
724 NUM_PORT_REGS * (port & 0xff);
725 tmp = xhci_readl(xhci, addr);
726 tmp &= ~PORT_RWE;
727 xhci_writel(xhci, tmp, addr);
728 }
729 }
730
731 (void) xhci_readl(xhci, &xhci->op_regs->command);
732
733 xhci->next_statechange = jiffies + msecs_to_jiffies(5);
734 hcd->state = HC_STATE_RUNNING;
735 /* re-enable irqs */
736 temp = xhci_readl(xhci, &xhci->op_regs->command);
737 temp |= CMD_EIE;
738 xhci_writel(xhci, temp, &xhci->op_regs->command);
739 temp = xhci_readl(xhci, &xhci->op_regs->command);
740
741 spin_unlock_irqrestore(&xhci->lock, flags);
742 return 0;
743}
744
745#endif /* CONFIG_PM */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 4e51343ddffc..202770676da3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -778,6 +778,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
778 778
779 init_completion(&dev->cmd_completion); 779 init_completion(&dev->cmd_completion);
780 INIT_LIST_HEAD(&dev->cmd_list); 780 INIT_LIST_HEAD(&dev->cmd_list);
781 dev->udev = udev;
781 782
782 /* Point to output device context in dcbaa. */ 783 /* Point to output device context in dcbaa. */
783 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; 784 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
@@ -866,6 +867,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
866 top_dev = top_dev->parent) 867 top_dev = top_dev->parent)
867 /* Found device below root hub */; 868 /* Found device below root hub */;
868 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 869 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
870 dev->port = top_dev->portnum;
869 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 871 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
870 872
871 /* Is this a LS/FS device under a HS hub? */ 873 /* Is this a LS/FS device under a HS hub? */
@@ -1443,6 +1445,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1443 scratchpad_free(xhci); 1445 scratchpad_free(xhci);
1444 xhci->page_size = 0; 1446 xhci->page_size = 0;
1445 xhci->page_shift = 0; 1447 xhci->page_shift = 0;
1448 xhci->bus_suspended = 0;
1446} 1449}
1447 1450
1448static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1451static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
@@ -1801,6 +1804,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1801 init_completion(&xhci->addr_dev); 1804 init_completion(&xhci->addr_dev);
1802 for (i = 0; i < MAX_HC_SLOTS; ++i) 1805 for (i = 0; i < MAX_HC_SLOTS; ++i)
1803 xhci->devs[i] = NULL; 1806 xhci->devs[i] = NULL;
1807 for (i = 0; i < MAX_HC_PORTS; ++i)
1808 xhci->resume_done[i] = 0;
1804 1809
1805 if (scratchpad_alloc(xhci, flags)) 1810 if (scratchpad_alloc(xhci, flags))
1806 goto fail; 1811 goto fail;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f7efe025beda..bb668a894ab9 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -116,6 +116,30 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
116 return xhci_pci_reinit(xhci, pdev); 116 return xhci_pci_reinit(xhci, pdev);
117} 117}
118 118
119#ifdef CONFIG_PM
120static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
121{
122 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
123 int retval = 0;
124
125 if (hcd->state != HC_STATE_SUSPENDED)
126 return -EINVAL;
127
128 retval = xhci_suspend(xhci);
129
130 return retval;
131}
132
133static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
134{
135 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
136 int retval = 0;
137
138 retval = xhci_resume(xhci, hibernated);
139 return retval;
140}
141#endif /* CONFIG_PM */
142
119static const struct hc_driver xhci_pci_hc_driver = { 143static const struct hc_driver xhci_pci_hc_driver = {
120 .description = hcd_name, 144 .description = hcd_name,
121 .product_desc = "xHCI Host Controller", 145 .product_desc = "xHCI Host Controller",
@@ -132,7 +156,10 @@ static const struct hc_driver xhci_pci_hc_driver = {
132 */ 156 */
133 .reset = xhci_pci_setup, 157 .reset = xhci_pci_setup,
134 .start = xhci_run, 158 .start = xhci_run,
135 /* suspend and resume implemented later */ 159#ifdef CONFIG_PM
160 .pci_suspend = xhci_pci_suspend,
161 .pci_resume = xhci_pci_resume,
162#endif
136 .stop = xhci_stop, 163 .stop = xhci_stop,
137 .shutdown = xhci_shutdown, 164 .shutdown = xhci_shutdown,
138 165
@@ -152,7 +179,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
152 .reset_bandwidth = xhci_reset_bandwidth, 179 .reset_bandwidth = xhci_reset_bandwidth,
153 .address_device = xhci_address_device, 180 .address_device = xhci_address_device,
154 .update_hub_device = xhci_update_hub_device, 181 .update_hub_device = xhci_update_hub_device,
155 .reset_device = xhci_reset_device, 182 .reset_device = xhci_discover_or_reset_device,
156 183
157 /* 184 /*
158 * scheduling support 185 * scheduling support
@@ -162,6 +189,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
162 /* Root hub support */ 189 /* Root hub support */
163 .hub_control = xhci_hub_control, 190 .hub_control = xhci_hub_control,
164 .hub_status_data = xhci_hub_status_data, 191 .hub_status_data = xhci_hub_status_data,
192 .bus_suspend = xhci_bus_suspend,
193 .bus_resume = xhci_bus_resume,
165}; 194};
166 195
167/*-------------------------------------------------------------------------*/ 196/*-------------------------------------------------------------------------*/
@@ -186,6 +215,11 @@ static struct pci_driver xhci_pci_driver = {
186 /* suspend and resume implemented later */ 215 /* suspend and resume implemented later */
187 216
188 .shutdown = usb_hcd_pci_shutdown, 217 .shutdown = usb_hcd_pci_shutdown,
218#ifdef CONFIG_PM_SLEEP
219 .driver = {
220 .pm = &usb_hcd_pci_pm_ops
221 },
222#endif
189}; 223};
190 224
191int xhci_register_pci(void) 225int xhci_register_pci(void)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 48e60d166ff0..9f3115e729b1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -68,6 +68,10 @@
68#include <linux/slab.h> 68#include <linux/slab.h>
69#include "xhci.h" 69#include "xhci.h"
70 70
71static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
72 struct xhci_virt_device *virt_dev,
73 struct xhci_event_cmd *event);
74
71/* 75/*
72 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA 76 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
73 * address of the TRB. 77 * address of the TRB.
@@ -313,7 +317,7 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
313 xhci_readl(xhci, &xhci->dba->doorbell[0]); 317 xhci_readl(xhci, &xhci->dba->doorbell[0]);
314} 318}
315 319
316static void ring_ep_doorbell(struct xhci_hcd *xhci, 320void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
317 unsigned int slot_id, 321 unsigned int slot_id,
318 unsigned int ep_index, 322 unsigned int ep_index,
319 unsigned int stream_id) 323 unsigned int stream_id)
@@ -353,7 +357,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
353 /* A ring has pending URBs if its TD list is not empty */ 357 /* A ring has pending URBs if its TD list is not empty */
354 if (!(ep->ep_state & EP_HAS_STREAMS)) { 358 if (!(ep->ep_state & EP_HAS_STREAMS)) {
355 if (!(list_empty(&ep->ring->td_list))) 359 if (!(list_empty(&ep->ring->td_list)))
356 ring_ep_doorbell(xhci, slot_id, ep_index, 0); 360 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
357 return; 361 return;
358 } 362 }
359 363
@@ -361,7 +365,8 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
361 stream_id++) { 365 stream_id++) {
362 struct xhci_stream_info *stream_info = ep->stream_info; 366 struct xhci_stream_info *stream_info = ep->stream_info;
363 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) 367 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
364 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 368 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
369 stream_id);
365 } 370 }
366} 371}
367 372
@@ -626,10 +631,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
626 * bit cleared) so that the HW will skip over them. 631 * bit cleared) so that the HW will skip over them.
627 */ 632 */
628static void handle_stopped_endpoint(struct xhci_hcd *xhci, 633static void handle_stopped_endpoint(struct xhci_hcd *xhci,
629 union xhci_trb *trb) 634 union xhci_trb *trb, struct xhci_event_cmd *event)
630{ 635{
631 unsigned int slot_id; 636 unsigned int slot_id;
632 unsigned int ep_index; 637 unsigned int ep_index;
638 struct xhci_virt_device *virt_dev;
633 struct xhci_ring *ep_ring; 639 struct xhci_ring *ep_ring;
634 struct xhci_virt_ep *ep; 640 struct xhci_virt_ep *ep;
635 struct list_head *entry; 641 struct list_head *entry;
@@ -638,6 +644,21 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
638 644
639 struct xhci_dequeue_state deq_state; 645 struct xhci_dequeue_state deq_state;
640 646
647 if (unlikely(TRB_TO_SUSPEND_PORT(
648 xhci->cmd_ring->dequeue->generic.field[3]))) {
649 slot_id = TRB_TO_SLOT_ID(
650 xhci->cmd_ring->dequeue->generic.field[3]);
651 virt_dev = xhci->devs[slot_id];
652 if (virt_dev)
653 handle_cmd_in_cmd_wait_list(xhci, virt_dev,
654 event);
655 else
656 xhci_warn(xhci, "Stop endpoint command "
657 "completion for disabled slot %u\n",
658 slot_id);
659 return;
660 }
661
641 memset(&deq_state, 0, sizeof(deq_state)); 662 memset(&deq_state, 0, sizeof(deq_state));
642 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 663 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
643 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 664 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
@@ -1091,7 +1112,7 @@ bandwidth_change:
1091 complete(&xhci->addr_dev); 1112 complete(&xhci->addr_dev);
1092 break; 1113 break;
1093 case TRB_TYPE(TRB_STOP_RING): 1114 case TRB_TYPE(TRB_STOP_RING):
1094 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue); 1115 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1095 break; 1116 break;
1096 case TRB_TYPE(TRB_SET_DEQ): 1117 case TRB_TYPE(TRB_SET_DEQ):
1097 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue); 1118 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
@@ -1144,17 +1165,72 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
1144static void handle_port_status(struct xhci_hcd *xhci, 1165static void handle_port_status(struct xhci_hcd *xhci,
1145 union xhci_trb *event) 1166 union xhci_trb *event)
1146{ 1167{
1168 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1147 u32 port_id; 1169 u32 port_id;
1170 u32 temp, temp1;
1171 u32 __iomem *addr;
1172 int ports;
1173 int slot_id;
1148 1174
1149 /* Port status change events always have a successful completion code */ 1175 /* Port status change events always have a successful completion code */
1150 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { 1176 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
1151 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); 1177 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1152 xhci->error_bitmask |= 1 << 8; 1178 xhci->error_bitmask |= 1 << 8;
1153 } 1179 }
1154 /* FIXME: core doesn't care about all port link state changes yet */
1155 port_id = GET_PORT_ID(event->generic.field[0]); 1180 port_id = GET_PORT_ID(event->generic.field[0]);
1156 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); 1181 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1157 1182
1183 ports = HCS_MAX_PORTS(xhci->hcs_params1);
1184 if ((port_id <= 0) || (port_id > ports)) {
1185 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1186 goto cleanup;
1187 }
1188
1189 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
1190 temp = xhci_readl(xhci, addr);
1191 if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
1192 xhci_dbg(xhci, "resume root hub\n");
1193 usb_hcd_resume_root_hub(hcd);
1194 }
1195
1196 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1197 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1198
1199 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1200 if (!(temp1 & CMD_RUN)) {
1201 xhci_warn(xhci, "xHC is not running.\n");
1202 goto cleanup;
1203 }
1204
1205 if (DEV_SUPERSPEED(temp)) {
1206 xhci_dbg(xhci, "resume SS port %d\n", port_id);
1207 temp = xhci_port_state_to_neutral(temp);
1208 temp &= ~PORT_PLS_MASK;
1209 temp |= PORT_LINK_STROBE | XDEV_U0;
1210 xhci_writel(xhci, temp, addr);
1211 slot_id = xhci_find_slot_id_by_port(xhci, port_id);
1212 if (!slot_id) {
1213 xhci_dbg(xhci, "slot_id is zero\n");
1214 goto cleanup;
1215 }
1216 xhci_ring_device(xhci, slot_id);
1217 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1218 /* Clear PORT_PLC */
1219 temp = xhci_readl(xhci, addr);
1220 temp = xhci_port_state_to_neutral(temp);
1221 temp |= PORT_PLC;
1222 xhci_writel(xhci, temp, addr);
1223 } else {
1224 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1225 xhci->resume_done[port_id - 1] = jiffies +
1226 msecs_to_jiffies(20);
1227 mod_timer(&hcd->rh_timer,
1228 xhci->resume_done[port_id - 1]);
1229 /* Do the rest in GetPortStatus */
1230 }
1231 }
1232
1233cleanup:
1158 /* Update event ring dequeue pointer before dropping the lock */ 1234 /* Update event ring dequeue pointer before dropping the lock */
1159 inc_deq(xhci, xhci->event_ring, true); 1235 inc_deq(xhci, xhci->event_ring, true);
1160 1236
@@ -2347,7 +2423,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2347 */ 2423 */
2348 wmb(); 2424 wmb();
2349 start_trb->field[3] |= start_cycle; 2425 start_trb->field[3] |= start_cycle;
2350 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2426 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2351} 2427}
2352 2428
2353/* 2429/*
@@ -2931,7 +3007,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2931 wmb(); 3007 wmb();
2932 start_trb->field[3] |= start_cycle; 3008 start_trb->field[3] |= start_cycle;
2933 3009
2934 ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id); 3010 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
2935 return 0; 3011 return 0;
2936} 3012}
2937 3013
@@ -3108,15 +3184,20 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3108 false); 3184 false);
3109} 3185}
3110 3186
3187/*
3188 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3189 * activity on an endpoint that is about to be suspended.
3190 */
3111int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, 3191int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3112 unsigned int ep_index) 3192 unsigned int ep_index, int suspend)
3113{ 3193{
3114 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3194 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3115 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3195 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3116 u32 type = TRB_TYPE(TRB_STOP_RING); 3196 u32 type = TRB_TYPE(TRB_STOP_RING);
3197 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3117 3198
3118 return queue_command(xhci, 0, 0, 0, 3199 return queue_command(xhci, 0, 0, 0,
3119 trb_slot_id | trb_ep_index | type, false); 3200 trb_slot_id | trb_ep_index | type | trb_suspend, false);
3120} 3201}
3121 3202
3122/* Set Transfer Ring Dequeue Pointer command. 3203/* Set Transfer Ring Dequeue Pointer command.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d5c550ea3e68..5d7d4e951ea4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -551,6 +551,218 @@ void xhci_shutdown(struct usb_hcd *hcd)
551 xhci_readl(xhci, &xhci->op_regs->status)); 551 xhci_readl(xhci, &xhci->op_regs->status));
552} 552}
553 553
554#ifdef CONFIG_PM
555static void xhci_save_registers(struct xhci_hcd *xhci)
556{
557 xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
558 xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
559 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
560 xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
561 xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
562 xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
563 xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
564 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
565 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
566}
567
568static void xhci_restore_registers(struct xhci_hcd *xhci)
569{
570 xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
571 xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
572 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
573 xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
574 xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
575 xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
576 xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
577 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
578}
579
580/*
581 * Stop HC (not bus-specific)
582 *
583 * This is called when the machine transition into S3/S4 mode.
584 *
585 */
586int xhci_suspend(struct xhci_hcd *xhci)
587{
588 int rc = 0;
589 struct usb_hcd *hcd = xhci_to_hcd(xhci);
590 u32 command;
591
592 spin_lock_irq(&xhci->lock);
593 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
594 /* step 1: stop endpoint */
595 /* skipped assuming that port suspend has done */
596
597 /* step 2: clear Run/Stop bit */
598 command = xhci_readl(xhci, &xhci->op_regs->command);
599 command &= ~CMD_RUN;
600 xhci_writel(xhci, command, &xhci->op_regs->command);
601 if (handshake(xhci, &xhci->op_regs->status,
602 STS_HALT, STS_HALT, 100*100)) {
603 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
604 spin_unlock_irq(&xhci->lock);
605 return -ETIMEDOUT;
606 }
607
608 /* step 3: save registers */
609 xhci_save_registers(xhci);
610
611 /* step 4: set CSS flag */
612 command = xhci_readl(xhci, &xhci->op_regs->command);
613 command |= CMD_CSS;
614 xhci_writel(xhci, command, &xhci->op_regs->command);
615 if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
616 xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
617 spin_unlock_irq(&xhci->lock);
618 return -ETIMEDOUT;
619 }
620 /* step 5: remove core well power */
621 xhci_cleanup_msix(xhci);
622 spin_unlock_irq(&xhci->lock);
623
624 return rc;
625}
626
627/*
628 * start xHC (not bus-specific)
629 *
630 * This is called when the machine transition from S3/S4 mode.
631 *
632 */
633int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
634{
635 u32 command, temp = 0;
636 struct usb_hcd *hcd = xhci_to_hcd(xhci);
637 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
638 u64 val_64;
639 int old_state, retval;
640
641 old_state = hcd->state;
642 if (time_before(jiffies, xhci->next_statechange))
643 msleep(100);
644
645 spin_lock_irq(&xhci->lock);
646
647 if (!hibernated) {
648 /* step 1: restore register */
649 xhci_restore_registers(xhci);
650 /* step 2: initialize command ring buffer */
651 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
652 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
653 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
654 xhci->cmd_ring->dequeue) &
655 (u64) ~CMD_RING_RSVD_BITS) |
656 xhci->cmd_ring->cycle_state;
657 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
658 (long unsigned long) val_64);
659 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
660 /* step 3: restore state and start state*/
661 /* step 3: set CRS flag */
662 command = xhci_readl(xhci, &xhci->op_regs->command);
663 command |= CMD_CRS;
664 xhci_writel(xhci, command, &xhci->op_regs->command);
665 if (handshake(xhci, &xhci->op_regs->status,
666 STS_RESTORE, 0, 10*100)) {
667 xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
668 spin_unlock_irq(&xhci->lock);
669 return -ETIMEDOUT;
670 }
671 temp = xhci_readl(xhci, &xhci->op_regs->status);
672 }
673
674 /* If restore operation fails, re-initialize the HC during resume */
675 if ((temp & STS_SRE) || hibernated) {
676 usb_root_hub_lost_power(hcd->self.root_hub);
677
678 xhci_dbg(xhci, "Stop HCD\n");
679 xhci_halt(xhci);
680 xhci_reset(xhci);
681 if (hibernated)
682 xhci_cleanup_msix(xhci);
683 spin_unlock_irq(&xhci->lock);
684
685#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
686 /* Tell the event ring poll function not to reschedule */
687 xhci->zombie = 1;
688 del_timer_sync(&xhci->event_ring_timer);
689#endif
690
691 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
692 temp = xhci_readl(xhci, &xhci->op_regs->status);
693 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
694 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
695 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
696 &xhci->ir_set->irq_pending);
697 xhci_print_ir_set(xhci, xhci->ir_set, 0);
698
699 xhci_dbg(xhci, "cleaning up memory\n");
700 xhci_mem_cleanup(xhci);
701 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
702 xhci_readl(xhci, &xhci->op_regs->status));
703
704 xhci_dbg(xhci, "Initialize the HCD\n");
705 retval = xhci_init(hcd);
706 if (retval)
707 return retval;
708
709 xhci_dbg(xhci, "Start the HCD\n");
710 retval = xhci_run(hcd);
711 if (!retval)
712 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
713 hcd->state = HC_STATE_SUSPENDED;
714 return retval;
715 }
716
717 /* Re-setup MSI-X */
718 if (hcd->irq)
719 free_irq(hcd->irq, hcd);
720 hcd->irq = -1;
721
722 retval = xhci_setup_msix(xhci);
723 if (retval)
724 /* fall back to msi*/
725 retval = xhci_setup_msi(xhci);
726
727 if (retval) {
728 /* fall back to legacy interrupt*/
729 retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
730 hcd->irq_descr, hcd);
731 if (retval) {
732 xhci_err(xhci, "request interrupt %d failed\n",
733 pdev->irq);
734 return retval;
735 }
736 hcd->irq = pdev->irq;
737 }
738
739 /* step 4: set Run/Stop bit */
740 command = xhci_readl(xhci, &xhci->op_regs->command);
741 command |= CMD_RUN;
742 xhci_writel(xhci, command, &xhci->op_regs->command);
743 handshake(xhci, &xhci->op_regs->status, STS_HALT,
744 0, 250 * 1000);
745
746 /* step 5: walk topology and initialize portsc,
747 * portpmsc and portli
748 */
749 /* this is done in bus_resume */
750
751 /* step 6: restart each of the previously
752 * Running endpoints by ringing their doorbells
753 */
754
755 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
756 if (!hibernated)
757 hcd->state = old_state;
758 else
759 hcd->state = HC_STATE_SUSPENDED;
760
761 spin_unlock_irq(&xhci->lock);
762 return 0;
763}
764#endif /* CONFIG_PM */
765
554/*-------------------------------------------------------------------------*/ 766/*-------------------------------------------------------------------------*/
555 767
556/** 768/**
@@ -607,7 +819,11 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
607 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 819 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
608 */ 820 */
609int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 821int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
610 struct usb_host_endpoint *ep, int check_ep, const char *func) { 822 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
823 const char *func) {
824 struct xhci_hcd *xhci;
825 struct xhci_virt_device *virt_dev;
826
611 if (!hcd || (check_ep && !ep) || !udev) { 827 if (!hcd || (check_ep && !ep) || !udev) {
612 printk(KERN_DEBUG "xHCI %s called with invalid args\n", 828 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
613 func); 829 func);
@@ -618,11 +834,24 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
618 func); 834 func);
619 return 0; 835 return 0;
620 } 836 }
621 if (!udev->slot_id) { 837
622 printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", 838 if (check_virt_dev) {
623 func); 839 xhci = hcd_to_xhci(hcd);
624 return -EINVAL; 840 if (!udev->slot_id || !xhci->devs
841 || !xhci->devs[udev->slot_id]) {
842 printk(KERN_DEBUG "xHCI %s called with unaddressed "
843 "device\n", func);
844 return -EINVAL;
845 }
846
847 virt_dev = xhci->devs[udev->slot_id];
848 if (virt_dev->udev != udev) {
849 printk(KERN_DEBUG "xHCI %s called with udev and "
850 "virt_dev does not match\n", func);
851 return -EINVAL;
852 }
625 } 853 }
854
626 return 1; 855 return 1;
627} 856}
628 857
@@ -704,18 +933,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
704 struct urb_priv *urb_priv; 933 struct urb_priv *urb_priv;
705 int size, i; 934 int size, i;
706 935
707 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) 936 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
937 true, true, __func__) <= 0)
708 return -EINVAL; 938 return -EINVAL;
709 939
710 slot_id = urb->dev->slot_id; 940 slot_id = urb->dev->slot_id;
711 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 941 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
712 942
713 if (!xhci->devs || !xhci->devs[slot_id]) {
714 if (!in_interrupt())
715 dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
716 ret = -EINVAL;
717 goto exit;
718 }
719 if (!HCD_HW_ACCESSIBLE(hcd)) { 943 if (!HCD_HW_ACCESSIBLE(hcd)) {
720 if (!in_interrupt()) 944 if (!in_interrupt())
721 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); 945 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
@@ -956,7 +1180,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
956 ep->stop_cmd_timer.expires = jiffies + 1180 ep->stop_cmd_timer.expires = jiffies +
957 XHCI_STOP_EP_CMD_TIMEOUT * HZ; 1181 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
958 add_timer(&ep->stop_cmd_timer); 1182 add_timer(&ep->stop_cmd_timer);
959 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 1183 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
960 xhci_ring_cmd_db(xhci); 1184 xhci_ring_cmd_db(xhci);
961 } 1185 }
962done: 1186done:
@@ -991,7 +1215,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
991 u32 new_add_flags, new_drop_flags, new_slot_info; 1215 u32 new_add_flags, new_drop_flags, new_slot_info;
992 int ret; 1216 int ret;
993 1217
994 ret = xhci_check_args(hcd, udev, ep, 1, __func__); 1218 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
995 if (ret <= 0) 1219 if (ret <= 0)
996 return ret; 1220 return ret;
997 xhci = hcd_to_xhci(hcd); 1221 xhci = hcd_to_xhci(hcd);
@@ -1004,12 +1228,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1004 return 0; 1228 return 0;
1005 } 1229 }
1006 1230
1007 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1008 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1009 __func__);
1010 return -EINVAL;
1011 }
1012
1013 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1231 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1014 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1232 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1015 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1233 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
@@ -1078,7 +1296,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1078 u32 new_add_flags, new_drop_flags, new_slot_info; 1296 u32 new_add_flags, new_drop_flags, new_slot_info;
1079 int ret = 0; 1297 int ret = 0;
1080 1298
1081 ret = xhci_check_args(hcd, udev, ep, 1, __func__); 1299 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1082 if (ret <= 0) { 1300 if (ret <= 0) {
1083 /* So we won't queue a reset ep command for a root hub */ 1301 /* So we won't queue a reset ep command for a root hub */
1084 ep->hcpriv = NULL; 1302 ep->hcpriv = NULL;
@@ -1098,12 +1316,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1098 return 0; 1316 return 0;
1099 } 1317 }
1100 1318
1101 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1102 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1103 __func__);
1104 return -EINVAL;
1105 }
1106
1107 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1319 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1108 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1320 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1109 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1321 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
@@ -1346,16 +1558,11 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1346 struct xhci_input_control_ctx *ctrl_ctx; 1558 struct xhci_input_control_ctx *ctrl_ctx;
1347 struct xhci_slot_ctx *slot_ctx; 1559 struct xhci_slot_ctx *slot_ctx;
1348 1560
1349 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 1561 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1350 if (ret <= 0) 1562 if (ret <= 0)
1351 return ret; 1563 return ret;
1352 xhci = hcd_to_xhci(hcd); 1564 xhci = hcd_to_xhci(hcd);
1353 1565
1354 if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
1355 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1356 __func__);
1357 return -EINVAL;
1358 }
1359 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1566 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1360 virt_dev = xhci->devs[udev->slot_id]; 1567 virt_dev = xhci->devs[udev->slot_id];
1361 1568
@@ -1405,16 +1612,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1405 struct xhci_virt_device *virt_dev; 1612 struct xhci_virt_device *virt_dev;
1406 int i, ret; 1613 int i, ret;
1407 1614
1408 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 1615 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1409 if (ret <= 0) 1616 if (ret <= 0)
1410 return; 1617 return;
1411 xhci = hcd_to_xhci(hcd); 1618 xhci = hcd_to_xhci(hcd);
1412 1619
1413 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1414 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1415 __func__);
1416 return;
1417 }
1418 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1620 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1419 virt_dev = xhci->devs[udev->slot_id]; 1621 virt_dev = xhci->devs[udev->slot_id];
1420 /* Free any rings allocated for added endpoints */ 1622 /* Free any rings allocated for added endpoints */
@@ -1575,7 +1777,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1575 1777
1576 if (!ep) 1778 if (!ep)
1577 return -EINVAL; 1779 return -EINVAL;
1578 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__); 1780 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
1579 if (ret <= 0) 1781 if (ret <= 0)
1580 return -EINVAL; 1782 return -EINVAL;
1581 if (ep->ss_ep_comp.bmAttributes == 0) { 1783 if (ep->ss_ep_comp.bmAttributes == 0) {
@@ -1953,8 +2155,13 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1953 * Wait for the Reset Device command to finish. Remove all structures 2155 * Wait for the Reset Device command to finish. Remove all structures
1954 * associated with the endpoints that were disabled. Clear the input device 2156 * associated with the endpoints that were disabled. Clear the input device
1955 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 2157 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
2158 *
2159 * If the virt_dev to be reset does not exist or does not match the udev,
2160 * it means the device is lost, possibly due to the xHC restore error and
2161 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
2162 * re-allocate the device.
1956 */ 2163 */
1957int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 2164int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1958{ 2165{
1959 int ret, i; 2166 int ret, i;
1960 unsigned long flags; 2167 unsigned long flags;
@@ -1965,16 +2172,35 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1965 int timeleft; 2172 int timeleft;
1966 int last_freed_endpoint; 2173 int last_freed_endpoint;
1967 2174
1968 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 2175 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
1969 if (ret <= 0) 2176 if (ret <= 0)
1970 return ret; 2177 return ret;
1971 xhci = hcd_to_xhci(hcd); 2178 xhci = hcd_to_xhci(hcd);
1972 slot_id = udev->slot_id; 2179 slot_id = udev->slot_id;
1973 virt_dev = xhci->devs[slot_id]; 2180 virt_dev = xhci->devs[slot_id];
1974 if (!virt_dev) { 2181 if (!virt_dev) {
1975 xhci_dbg(xhci, "%s called with invalid slot ID %u\n", 2182 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
1976 __func__, slot_id); 2183 "not exist. Re-allocate the device\n", slot_id);
1977 return -EINVAL; 2184 ret = xhci_alloc_dev(hcd, udev);
2185 if (ret == 1)
2186 return 0;
2187 else
2188 return -EINVAL;
2189 }
2190
2191 if (virt_dev->udev != udev) {
2192 /* If the virt_dev and the udev does not match, this virt_dev
2193 * may belong to another udev.
2194 * Re-allocate the device.
2195 */
2196 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2197 "not match the udev. Re-allocate the device\n",
2198 slot_id);
2199 ret = xhci_alloc_dev(hcd, udev);
2200 if (ret == 1)
2201 return 0;
2202 else
2203 return -EINVAL;
1978 } 2204 }
1979 2205
1980 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 2206 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
@@ -2077,13 +2303,13 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2077 struct xhci_virt_device *virt_dev; 2303 struct xhci_virt_device *virt_dev;
2078 unsigned long flags; 2304 unsigned long flags;
2079 u32 state; 2305 u32 state;
2080 int i; 2306 int i, ret;
2081 2307
2082 if (udev->slot_id == 0) 2308 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2309 if (ret <= 0)
2083 return; 2310 return;
2311
2084 virt_dev = xhci->devs[udev->slot_id]; 2312 virt_dev = xhci->devs[udev->slot_id];
2085 if (!virt_dev)
2086 return;
2087 2313
2088 /* Stop any wayward timer functions (which may grab the lock) */ 2314 /* Stop any wayward timer functions (which may grab the lock) */
2089 for (i = 0; i < 31; ++i) { 2315 for (i = 0; i < 31; ++i) {
@@ -2191,12 +2417,17 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2191 2417
2192 virt_dev = xhci->devs[udev->slot_id]; 2418 virt_dev = xhci->devs[udev->slot_id];
2193 2419
2194 /* If this is a Set Address to an unconfigured device, setup ep 0 */ 2420 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2195 if (!udev->config) 2421 /*
2422 * If this is the first Set Address since device plug-in or
2423 * virt_device realloaction after a resume with an xHCI power loss,
2424 * then set up the slot context.
2425 */
2426 if (!slot_ctx->dev_info)
2196 xhci_setup_addressable_virt_dev(xhci, udev); 2427 xhci_setup_addressable_virt_dev(xhci, udev);
2428 /* Otherwise, update the control endpoint ring enqueue pointer. */
2197 else 2429 else
2198 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 2430 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2199 /* Otherwise, assume the core has the device configured how it wants */
2200 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2431 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2201 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 2432 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2202 2433
@@ -2268,15 +2499,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2268 * address given back to us by the HC. 2499 * address given back to us by the HC.
2269 */ 2500 */
2270 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 2501 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2271 udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; 2502 /* Use kernel assigned address for devices; store xHC assigned
2503 * address locally. */
2504 virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
2272 /* Zero the input context control for later use */ 2505 /* Zero the input context control for later use */
2273 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2506 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2274 ctrl_ctx->add_flags = 0; 2507 ctrl_ctx->add_flags = 0;
2275 ctrl_ctx->drop_flags = 0; 2508 ctrl_ctx->drop_flags = 0;
2276 2509
2277 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 2510 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
2278 /* XXX Meh, not sure if anyone else but choose_address uses this. */
2279 set_bit(udev->devnum, udev->bus->devmap.devicemap);
2280 2511
2281 return 0; 2512 return 0;
2282} 2513}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 34a60d9f056a..93d3bf4d213c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -191,7 +191,7 @@ struct xhci_op_regs {
191/* bits 4:6 are reserved (and should be preserved on writes). */ 191/* bits 4:6 are reserved (and should be preserved on writes). */
192/* light reset (port status stays unchanged) - reset completed when this is 0 */ 192/* light reset (port status stays unchanged) - reset completed when this is 0 */
193#define CMD_LRESET (1 << 7) 193#define CMD_LRESET (1 << 7)
194/* FIXME: ignoring host controller save/restore state for now. */ 194/* host controller save/restore state. */
195#define CMD_CSS (1 << 8) 195#define CMD_CSS (1 << 8)
196#define CMD_CRS (1 << 9) 196#define CMD_CRS (1 << 9)
197/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */ 197/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
@@ -269,6 +269,10 @@ struct xhci_op_regs {
269 * A read gives the current link PM state of the port, 269 * A read gives the current link PM state of the port,
270 * a write with Link State Write Strobe set sets the link state. 270 * a write with Link State Write Strobe set sets the link state.
271 */ 271 */
272#define PORT_PLS_MASK (0xf << 5)
273#define XDEV_U0 (0x0 << 5)
274#define XDEV_U3 (0x3 << 5)
275#define XDEV_RESUME (0xf << 5)
272/* true: port has power (see HCC_PPC) */ 276/* true: port has power (see HCC_PPC) */
273#define PORT_POWER (1 << 9) 277#define PORT_POWER (1 << 9)
274/* bits 10:13 indicate device speed: 278/* bits 10:13 indicate device speed:
@@ -353,6 +357,8 @@ struct xhci_op_regs {
353#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8) 357#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
354/* Bits 24:31 for port testing */ 358/* Bits 24:31 for port testing */
355 359
360/* USB2 Protocol PORTSPMSC */
361#define PORT_RWE (1 << 0x3)
356 362
357/** 363/**
358 * struct xhci_intr_reg - Interrupt Register Set 364 * struct xhci_intr_reg - Interrupt Register Set
@@ -510,6 +516,7 @@ struct xhci_slot_ctx {
510#define MAX_EXIT (0xffff) 516#define MAX_EXIT (0xffff)
511/* Root hub port number that is needed to access the USB device */ 517/* Root hub port number that is needed to access the USB device */
512#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) 518#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
519#define DEVINFO_TO_ROOT_HUB_PORT(p) (((p) >> 16) & 0xff)
513/* Maximum number of ports under a hub device */ 520/* Maximum number of ports under a hub device */
514#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24) 521#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
515 522
@@ -731,6 +738,7 @@ struct xhci_virt_ep {
731}; 738};
732 739
733struct xhci_virt_device { 740struct xhci_virt_device {
741 struct usb_device *udev;
734 /* 742 /*
735 * Commands to the hardware are passed an "input context" that 743 * Commands to the hardware are passed an "input context" that
736 * tells the hardware what to change in its data structures. 744 * tells the hardware what to change in its data structures.
@@ -745,12 +753,15 @@ struct xhci_virt_device {
745 /* Rings saved to ensure old alt settings can be re-instated */ 753 /* Rings saved to ensure old alt settings can be re-instated */
746 struct xhci_ring **ring_cache; 754 struct xhci_ring **ring_cache;
747 int num_rings_cached; 755 int num_rings_cached;
756 /* Store xHC assigned device address */
757 int address;
748#define XHCI_MAX_RINGS_CACHED 31 758#define XHCI_MAX_RINGS_CACHED 31
749 struct xhci_virt_ep eps[31]; 759 struct xhci_virt_ep eps[31];
750 struct completion cmd_completion; 760 struct completion cmd_completion;
751 /* Status of the last command issued for this device */ 761 /* Status of the last command issued for this device */
752 u32 cmd_status; 762 u32 cmd_status;
753 struct list_head cmd_list; 763 struct list_head cmd_list;
764 u8 port;
754}; 765};
755 766
756 767
@@ -881,6 +892,10 @@ struct xhci_event_cmd {
881#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) 892#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
882#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) 893#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
883 894
895#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
896#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
897#define LAST_EP_INDEX 30
898
884/* Set TR Dequeue Pointer command TRB fields */ 899/* Set TR Dequeue Pointer command TRB fields */
885#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) 900#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
886#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) 901#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
@@ -1115,6 +1130,17 @@ struct urb_priv {
1115#define XHCI_STOP_EP_CMD_TIMEOUT 5 1130#define XHCI_STOP_EP_CMD_TIMEOUT 5
1116/* XXX: Make these module parameters */ 1131/* XXX: Make these module parameters */
1117 1132
1133struct s3_save {
1134 u32 command;
1135 u32 dev_nt;
1136 u64 dcbaa_ptr;
1137 u32 config_reg;
1138 u32 irq_pending;
1139 u32 irq_control;
1140 u32 erst_size;
1141 u64 erst_base;
1142 u64 erst_dequeue;
1143};
1118 1144
1119/* There is one ehci_hci structure per controller */ 1145/* There is one ehci_hci structure per controller */
1120struct xhci_hcd { 1146struct xhci_hcd {
@@ -1178,6 +1204,12 @@ struct xhci_hcd {
1178#endif 1204#endif
1179 /* Host controller watchdog timer structures */ 1205 /* Host controller watchdog timer structures */
1180 unsigned int xhc_state; 1206 unsigned int xhc_state;
1207
1208 unsigned long bus_suspended;
1209 unsigned long next_statechange;
1210
1211 u32 command;
1212 struct s3_save s3;
1181/* Host controller is dying - not responding to commands. "I'm not dead yet!" 1213/* Host controller is dying - not responding to commands. "I'm not dead yet!"
1182 * 1214 *
1183 * xHC interrupts have been disabled and a watchdog timer will (or has already) 1215 * xHC interrupts have been disabled and a watchdog timer will (or has already)
@@ -1199,6 +1231,10 @@ struct xhci_hcd {
1199#define XHCI_LINK_TRB_QUIRK (1 << 0) 1231#define XHCI_LINK_TRB_QUIRK (1 << 0)
1200#define XHCI_RESET_EP_QUIRK (1 << 1) 1232#define XHCI_RESET_EP_QUIRK (1 << 1)
1201#define XHCI_NEC_HOST (1 << 2) 1233#define XHCI_NEC_HOST (1 << 2)
1234 u32 port_c_suspend[8]; /* port suspend change*/
1235 u32 suspended_ports[8]; /* which ports are
1236 suspended */
1237 unsigned long resume_done[MAX_HC_PORTS];
1202}; 1238};
1203 1239
1204/* For testing purposes */ 1240/* For testing purposes */
@@ -1369,6 +1405,15 @@ int xhci_init(struct usb_hcd *hcd);
1369int xhci_run(struct usb_hcd *hcd); 1405int xhci_run(struct usb_hcd *hcd);
1370void xhci_stop(struct usb_hcd *hcd); 1406void xhci_stop(struct usb_hcd *hcd);
1371void xhci_shutdown(struct usb_hcd *hcd); 1407void xhci_shutdown(struct usb_hcd *hcd);
1408
1409#ifdef CONFIG_PM
1410int xhci_suspend(struct xhci_hcd *xhci);
1411int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
1412#else
1413#define xhci_suspend NULL
1414#define xhci_resume NULL
1415#endif
1416
1372int xhci_get_frame(struct usb_hcd *hcd); 1417int xhci_get_frame(struct usb_hcd *hcd);
1373irqreturn_t xhci_irq(struct usb_hcd *hcd); 1418irqreturn_t xhci_irq(struct usb_hcd *hcd);
1374irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd); 1419irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
@@ -1388,7 +1433,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1388int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1433int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1389int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1434int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1390void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); 1435void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
1391int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev); 1436int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
1392int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1437int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1393void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1438void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1394 1439
@@ -1406,7 +1451,7 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1406int xhci_queue_vendor_command(struct xhci_hcd *xhci, 1451int xhci_queue_vendor_command(struct xhci_hcd *xhci,
1407 u32 field1, u32 field2, u32 field3, u32 field4); 1452 u32 field1, u32 field2, u32 field3, u32 field4);
1408int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, 1453int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1409 unsigned int ep_index); 1454 unsigned int ep_index, int suspend);
1410int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, 1455int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1411 int slot_id, unsigned int ep_index); 1456 int slot_id, unsigned int ep_index);
1412int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, 1457int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
@@ -1436,12 +1481,26 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1436 unsigned int slot_id, unsigned int ep_index, 1481 unsigned int slot_id, unsigned int ep_index,
1437 struct xhci_dequeue_state *deq_state); 1482 struct xhci_dequeue_state *deq_state);
1438void xhci_stop_endpoint_command_watchdog(unsigned long arg); 1483void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1484void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1485 unsigned int ep_index, unsigned int stream_id);
1439 1486
1440/* xHCI roothub code */ 1487/* xHCI roothub code */
1441int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1488int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1442 char *buf, u16 wLength); 1489 char *buf, u16 wLength);
1443int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); 1490int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1444 1491
1492#ifdef CONFIG_PM
1493int xhci_bus_suspend(struct usb_hcd *hcd);
1494int xhci_bus_resume(struct usb_hcd *hcd);
1495#else
1496#define xhci_bus_suspend NULL
1497#define xhci_bus_resume NULL
1498#endif /* CONFIG_PM */
1499
1500u32 xhci_port_state_to_neutral(u32 state);
1501int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port);
1502void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
1503
1445/* xHCI contexts */ 1504/* xHCI contexts */
1446struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); 1505struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1447struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); 1506struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 55660eaf947c..1bfcd02ebeb5 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -231,3 +231,16 @@ config USB_ISIGHTFW
231 driver beforehand. Tools for doing so are available at 231 driver beforehand. Tools for doing so are available at
232 http://bersace03.free.fr 232 http://bersace03.free.fr
233 233
234config USB_YUREX
235 tristate "USB YUREX driver support"
236 depends on USB
237 help
238 Say Y here if you want to connect a YUREX to your computer's
239 USB port. The YUREX is a leg-shakes sensor. See
240 <http://bbu.kayac.com/en/> for further information.
241 This driver supports read/write of leg-shakes counter and
242 fasync for the counter update via a device file /dev/yurex*.
243
244 To compile this driver as a module, choose M here: the
245 module will be called yurex.
246
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 717703e81425..796ce7ebccc8 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -3,28 +3,27 @@
3# (the ones that don't fit into any other categories) 3# (the ones that don't fit into any other categories)
4# 4#
5 5
6obj-$(CONFIG_USB_ADUTUX) += adutux.o 6ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
7obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
8obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
9obj-$(CONFIG_USB_CYTHERM) += cytherm.o
10obj-$(CONFIG_USB_EMI26) += emi26.o
11obj-$(CONFIG_USB_EMI62) += emi62.o
12obj-$(CONFIG_USB_FTDI_ELAN) += ftdi-elan.o
13obj-$(CONFIG_USB_IDMOUSE) += idmouse.o
14obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
15obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
16obj-$(CONFIG_USB_LCD) += usblcd.o
17obj-$(CONFIG_USB_LD) += ldusb.o
18obj-$(CONFIG_USB_LED) += usbled.o
19obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
20obj-$(CONFIG_USB_RIO500) += rio500.o
21obj-$(CONFIG_USB_TEST) += usbtest.o
22obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
23obj-$(CONFIG_USB_USS720) += uss720.o
24obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
25 7
26obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/ 8obj-$(CONFIG_USB_ADUTUX) += adutux.o
9obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
10obj-$(CONFIG_USB_CYPRESS_CY7C63) += cypress_cy7c63.o
11obj-$(CONFIG_USB_CYTHERM) += cytherm.o
12obj-$(CONFIG_USB_EMI26) += emi26.o
13obj-$(CONFIG_USB_EMI62) += emi62.o
14obj-$(CONFIG_USB_FTDI_ELAN) += ftdi-elan.o
15obj-$(CONFIG_USB_IDMOUSE) += idmouse.o
16obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
17obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
18obj-$(CONFIG_USB_LCD) += usblcd.o
19obj-$(CONFIG_USB_LD) += ldusb.o
20obj-$(CONFIG_USB_LED) += usbled.o
21obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
22obj-$(CONFIG_USB_RIO500) += rio500.o
23obj-$(CONFIG_USB_TEST) += usbtest.o
24obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
25obj-$(CONFIG_USB_USS720) += uss720.o
26obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
27obj-$(CONFIG_USB_YUREX) += yurex.o
27 28
28ifeq ($(CONFIG_USB_DEBUG),y) 29obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
29EXTRA_CFLAGS += -DDEBUG
30endif
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index aecf380f6ecc..c8eec9c2d89e 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2769,7 +2769,7 @@ static int ftdi_elan_probe(struct usb_interface *interface,
2769 ftdi->sequence_num = ++ftdi_instances; 2769 ftdi->sequence_num = ++ftdi_instances;
2770 mutex_unlock(&ftdi_module_lock); 2770 mutex_unlock(&ftdi_module_lock);
2771 ftdi_elan_init_kref(ftdi); 2771 ftdi_elan_init_kref(ftdi);
2772 init_MUTEX(&ftdi->sw_lock); 2772 sema_init(&ftdi->sw_lock, 1);
2773 ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); 2773 ftdi->udev = usb_get_dev(interface_to_usbdev(interface));
2774 ftdi->interface = interface; 2774 ftdi->interface = interface;
2775 mutex_init(&ftdi->u132_lock); 2775 mutex_init(&ftdi->u132_lock);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 9b50db257019..375664198776 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -374,7 +374,7 @@ static ssize_t iowarrior_write(struct file *file,
374 case USB_DEVICE_ID_CODEMERCS_IOWPV2: 374 case USB_DEVICE_ID_CODEMERCS_IOWPV2:
375 case USB_DEVICE_ID_CODEMERCS_IOW40: 375 case USB_DEVICE_ID_CODEMERCS_IOW40:
376 /* IOW24 and IOW40 use a synchronous call */ 376 /* IOW24 and IOW40 use a synchronous call */
377 buf = kmalloc(8, GFP_KERNEL); /* 8 bytes are enough for both products */ 377 buf = kmalloc(count, GFP_KERNEL);
378 if (!buf) { 378 if (!buf) {
379 retval = -ENOMEM; 379 retval = -ENOMEM;
380 goto exit; 380 goto exit;
diff --git a/drivers/usb/misc/sisusbvga/Makefile b/drivers/usb/misc/sisusbvga/Makefile
index 7f934cfc906c..3142476ccc8e 100644
--- a/drivers/usb/misc/sisusbvga/Makefile
+++ b/drivers/usb/misc/sisusbvga/Makefile
@@ -4,5 +4,4 @@
4 4
5obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga.o 5obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga.o
6 6
7sisusbvga-objs := sisusb.o sisusb_init.o sisusb_con.o 7sisusbvga-y := sisusb.o sisusb_init.o sisusb_con.o
8
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eef370eb7a54..a35b427c0bac 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -13,17 +13,16 @@
13 13
14/*-------------------------------------------------------------------------*/ 14/*-------------------------------------------------------------------------*/
15 15
16// FIXME make these public somewhere; usbdevfs.h? 16/* FIXME make these public somewhere; usbdevfs.h? */
17//
18struct usbtest_param { 17struct usbtest_param {
19 // inputs 18 /* inputs */
20 unsigned test_num; /* 0..(TEST_CASES-1) */ 19 unsigned test_num; /* 0..(TEST_CASES-1) */
21 unsigned iterations; 20 unsigned iterations;
22 unsigned length; 21 unsigned length;
23 unsigned vary; 22 unsigned vary;
24 unsigned sglen; 23 unsigned sglen;
25 24
26 // outputs 25 /* outputs */
27 struct timeval duration; 26 struct timeval duration;
28}; 27};
29#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param) 28#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
@@ -45,9 +44,9 @@ struct usbtest_info {
45 const char *name; 44 const char *name;
46 u8 ep_in; /* bulk/intr source */ 45 u8 ep_in; /* bulk/intr source */
47 u8 ep_out; /* bulk/intr sink */ 46 u8 ep_out; /* bulk/intr sink */
48 unsigned autoconf : 1; 47 unsigned autoconf:1;
49 unsigned ctrl_out : 1; 48 unsigned ctrl_out:1;
50 unsigned iso : 1; /* try iso in/out */ 49 unsigned iso:1; /* try iso in/out */
51 int alt; 50 int alt;
52}; 51};
53 52
@@ -71,9 +70,9 @@ struct usbtest_dev {
71 u8 *buf; 70 u8 *buf;
72}; 71};
73 72
74static struct usb_device *testdev_to_usbdev (struct usbtest_dev *test) 73static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
75{ 74{
76 return interface_to_usbdev (test->intf); 75 return interface_to_usbdev(test->intf);
77} 76}
78 77
79/* set up all urbs so they can be used with either bulk or interrupt */ 78/* set up all urbs so they can be used with either bulk or interrupt */
@@ -87,7 +86,7 @@ static struct usb_device *testdev_to_usbdev (struct usbtest_dev *test)
87/*-------------------------------------------------------------------------*/ 86/*-------------------------------------------------------------------------*/
88 87
89static int 88static int
90get_endpoints (struct usbtest_dev *dev, struct usb_interface *intf) 89get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
91{ 90{
92 int tmp; 91 int tmp;
93 struct usb_host_interface *alt; 92 struct usb_host_interface *alt;
@@ -115,7 +114,7 @@ get_endpoints (struct usbtest_dev *dev, struct usb_interface *intf)
115 case USB_ENDPOINT_XFER_ISOC: 114 case USB_ENDPOINT_XFER_ISOC:
116 if (dev->info->iso) 115 if (dev->info->iso)
117 goto try_iso; 116 goto try_iso;
118 // FALLTHROUGH 117 /* FALLTHROUGH */
119 default: 118 default:
120 continue; 119 continue;
121 } 120 }
@@ -142,9 +141,9 @@ try_iso:
142 return -EINVAL; 141 return -EINVAL;
143 142
144found: 143found:
145 udev = testdev_to_usbdev (dev); 144 udev = testdev_to_usbdev(dev);
146 if (alt->desc.bAlternateSetting != 0) { 145 if (alt->desc.bAlternateSetting != 0) {
147 tmp = usb_set_interface (udev, 146 tmp = usb_set_interface(udev,
148 alt->desc.bInterfaceNumber, 147 alt->desc.bInterfaceNumber,
149 alt->desc.bAlternateSetting); 148 alt->desc.bAlternateSetting);
150 if (tmp < 0) 149 if (tmp < 0)
@@ -152,21 +151,21 @@ found:
152 } 151 }
153 152
154 if (in) { 153 if (in) {
155 dev->in_pipe = usb_rcvbulkpipe (udev, 154 dev->in_pipe = usb_rcvbulkpipe(udev,
156 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 155 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
157 dev->out_pipe = usb_sndbulkpipe (udev, 156 dev->out_pipe = usb_sndbulkpipe(udev,
158 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 157 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
159 } 158 }
160 if (iso_in) { 159 if (iso_in) {
161 dev->iso_in = &iso_in->desc; 160 dev->iso_in = &iso_in->desc;
162 dev->in_iso_pipe = usb_rcvisocpipe (udev, 161 dev->in_iso_pipe = usb_rcvisocpipe(udev,
163 iso_in->desc.bEndpointAddress 162 iso_in->desc.bEndpointAddress
164 & USB_ENDPOINT_NUMBER_MASK); 163 & USB_ENDPOINT_NUMBER_MASK);
165 } 164 }
166 165
167 if (iso_out) { 166 if (iso_out) {
168 dev->iso_out = &iso_out->desc; 167 dev->iso_out = &iso_out->desc;
169 dev->out_iso_pipe = usb_sndisocpipe (udev, 168 dev->out_iso_pipe = usb_sndisocpipe(udev,
170 iso_out->desc.bEndpointAddress 169 iso_out->desc.bEndpointAddress
171 & USB_ENDPOINT_NUMBER_MASK); 170 & USB_ENDPOINT_NUMBER_MASK);
172 } 171 }
@@ -182,12 +181,12 @@ found:
182 * them with non-zero test data (or test for it) when appropriate. 181 * them with non-zero test data (or test for it) when appropriate.
183 */ 182 */
184 183
185static void simple_callback (struct urb *urb) 184static void simple_callback(struct urb *urb)
186{ 185{
187 complete(urb->context); 186 complete(urb->context);
188} 187}
189 188
190static struct urb *simple_alloc_urb ( 189static struct urb *simple_alloc_urb(
191 struct usb_device *udev, 190 struct usb_device *udev,
192 int pipe, 191 int pipe,
193 unsigned long bytes 192 unsigned long bytes
@@ -195,32 +194,32 @@ static struct urb *simple_alloc_urb (
195{ 194{
196 struct urb *urb; 195 struct urb *urb;
197 196
198 urb = usb_alloc_urb (0, GFP_KERNEL); 197 urb = usb_alloc_urb(0, GFP_KERNEL);
199 if (!urb) 198 if (!urb)
200 return urb; 199 return urb;
201 usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL); 200 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
202 urb->interval = (udev->speed == USB_SPEED_HIGH) 201 urb->interval = (udev->speed == USB_SPEED_HIGH)
203 ? (INTERRUPT_RATE << 3) 202 ? (INTERRUPT_RATE << 3)
204 : INTERRUPT_RATE; 203 : INTERRUPT_RATE;
205 urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; 204 urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
206 if (usb_pipein (pipe)) 205 if (usb_pipein(pipe))
207 urb->transfer_flags |= URB_SHORT_NOT_OK; 206 urb->transfer_flags |= URB_SHORT_NOT_OK;
208 urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL, 207 urb->transfer_buffer = usb_alloc_coherent(udev, bytes, GFP_KERNEL,
209 &urb->transfer_dma); 208 &urb->transfer_dma);
210 if (!urb->transfer_buffer) { 209 if (!urb->transfer_buffer) {
211 usb_free_urb (urb); 210 usb_free_urb(urb);
212 urb = NULL; 211 urb = NULL;
213 } else 212 } else
214 memset (urb->transfer_buffer, 0, bytes); 213 memset(urb->transfer_buffer, 0, bytes);
215 return urb; 214 return urb;
216} 215}
217 216
218static unsigned pattern = 0; 217static unsigned pattern;
219static unsigned mod_pattern; 218static unsigned mod_pattern;
220module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR); 219module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
221MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)"); 220MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
222 221
223static inline void simple_fill_buf (struct urb *urb) 222static inline void simple_fill_buf(struct urb *urb)
224{ 223{
225 unsigned i; 224 unsigned i;
226 u8 *buf = urb->transfer_buffer; 225 u8 *buf = urb->transfer_buffer;
@@ -228,9 +227,9 @@ static inline void simple_fill_buf (struct urb *urb)
228 227
229 switch (pattern) { 228 switch (pattern) {
230 default: 229 default:
231 // FALLTHROUGH 230 /* FALLTHROUGH */
232 case 0: 231 case 0:
233 memset (buf, 0, len); 232 memset(buf, 0, len);
234 break; 233 break;
235 case 1: /* mod63 */ 234 case 1: /* mod63 */
236 for (i = 0; i < len; i++) 235 for (i = 0; i < len; i++)
@@ -273,14 +272,14 @@ static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
273 return 0; 272 return 0;
274} 273}
275 274
276static void simple_free_urb (struct urb *urb) 275static void simple_free_urb(struct urb *urb)
277{ 276{
278 usb_free_coherent(urb->dev, urb->transfer_buffer_length, 277 usb_free_coherent(urb->dev, urb->transfer_buffer_length,
279 urb->transfer_buffer, urb->transfer_dma); 278 urb->transfer_buffer, urb->transfer_dma);
280 usb_free_urb (urb); 279 usb_free_urb(urb);
281} 280}
282 281
283static int simple_io ( 282static int simple_io(
284 struct usbtest_dev *tdev, 283 struct usbtest_dev *tdev,
285 struct urb *urb, 284 struct urb *urb,
286 int iterations, 285 int iterations,
@@ -296,17 +295,18 @@ static int simple_io (
296 295
297 urb->context = &completion; 296 urb->context = &completion;
298 while (retval == 0 && iterations-- > 0) { 297 while (retval == 0 && iterations-- > 0) {
299 init_completion (&completion); 298 init_completion(&completion);
300 if (usb_pipeout (urb->pipe)) 299 if (usb_pipeout(urb->pipe))
301 simple_fill_buf (urb); 300 simple_fill_buf(urb);
302 if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) 301 retval = usb_submit_urb(urb, GFP_KERNEL);
302 if (retval != 0)
303 break; 303 break;
304 304
305 /* NOTE: no timeouts; can't be broken out of by interrupt */ 305 /* NOTE: no timeouts; can't be broken out of by interrupt */
306 wait_for_completion (&completion); 306 wait_for_completion(&completion);
307 retval = urb->status; 307 retval = urb->status;
308 urb->dev = udev; 308 urb->dev = udev;
309 if (retval == 0 && usb_pipein (urb->pipe)) 309 if (retval == 0 && usb_pipein(urb->pipe))
310 retval = simple_check_buf(tdev, urb); 310 retval = simple_check_buf(tdev, urb);
311 311
312 if (vary) { 312 if (vary) {
@@ -337,7 +337,7 @@ static int simple_io (
337 * Yes, this also tests the scatterlist primitives. 337 * Yes, this also tests the scatterlist primitives.
338 */ 338 */
339 339
340static void free_sglist (struct scatterlist *sg, int nents) 340static void free_sglist(struct scatterlist *sg, int nents)
341{ 341{
342 unsigned i; 342 unsigned i;
343 343
@@ -346,19 +346,19 @@ static void free_sglist (struct scatterlist *sg, int nents)
346 for (i = 0; i < nents; i++) { 346 for (i = 0; i < nents; i++) {
347 if (!sg_page(&sg[i])) 347 if (!sg_page(&sg[i]))
348 continue; 348 continue;
349 kfree (sg_virt(&sg[i])); 349 kfree(sg_virt(&sg[i]));
350 } 350 }
351 kfree (sg); 351 kfree(sg);
352} 352}
353 353
354static struct scatterlist * 354static struct scatterlist *
355alloc_sglist (int nents, int max, int vary) 355alloc_sglist(int nents, int max, int vary)
356{ 356{
357 struct scatterlist *sg; 357 struct scatterlist *sg;
358 unsigned i; 358 unsigned i;
359 unsigned size = max; 359 unsigned size = max;
360 360
361 sg = kmalloc (nents * sizeof *sg, GFP_KERNEL); 361 sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
362 if (!sg) 362 if (!sg)
363 return NULL; 363 return NULL;
364 sg_init_table(sg, nents); 364 sg_init_table(sg, nents);
@@ -367,9 +367,9 @@ alloc_sglist (int nents, int max, int vary)
367 char *buf; 367 char *buf;
368 unsigned j; 368 unsigned j;
369 369
370 buf = kzalloc (size, GFP_KERNEL); 370 buf = kzalloc(size, GFP_KERNEL);
371 if (!buf) { 371 if (!buf) {
372 free_sglist (sg, i); 372 free_sglist(sg, i);
373 return NULL; 373 return NULL;
374 } 374 }
375 375
@@ -397,7 +397,7 @@ alloc_sglist (int nents, int max, int vary)
397 return sg; 397 return sg;
398} 398}
399 399
400static int perform_sglist ( 400static int perform_sglist(
401 struct usbtest_dev *tdev, 401 struct usbtest_dev *tdev,
402 unsigned iterations, 402 unsigned iterations,
403 int pipe, 403 int pipe,
@@ -410,7 +410,7 @@ static int perform_sglist (
410 int retval = 0; 410 int retval = 0;
411 411
412 while (retval == 0 && iterations-- > 0) { 412 while (retval == 0 && iterations-- > 0) {
413 retval = usb_sg_init (req, udev, pipe, 413 retval = usb_sg_init(req, udev, pipe,
414 (udev->speed == USB_SPEED_HIGH) 414 (udev->speed == USB_SPEED_HIGH)
415 ? (INTERRUPT_RATE << 3) 415 ? (INTERRUPT_RATE << 3)
416 : INTERRUPT_RATE, 416 : INTERRUPT_RATE,
@@ -418,7 +418,7 @@ static int perform_sglist (
418 418
419 if (retval) 419 if (retval)
420 break; 420 break;
421 usb_sg_wait (req); 421 usb_sg_wait(req);
422 retval = req->status; 422 retval = req->status;
423 423
424 /* FIXME check resulting data pattern */ 424 /* FIXME check resulting data pattern */
@@ -426,9 +426,9 @@ static int perform_sglist (
426 /* FIXME if endpoint halted, clear halt (and log) */ 426 /* FIXME if endpoint halted, clear halt (and log) */
427 } 427 }
428 428
429 // FIXME for unlink or fault handling tests, don't report 429 /* FIXME for unlink or fault handling tests, don't report
430 // failure if retval is as we expected ... 430 * failure if retval is as we expected ...
431 431 */
432 if (retval) 432 if (retval)
433 ERROR(tdev, "perform_sglist failed, " 433 ERROR(tdev, "perform_sglist failed, "
434 "iterations left %d, status %d\n", 434 "iterations left %d, status %d\n",
@@ -452,31 +452,31 @@ static int perform_sglist (
452 */ 452 */
453 453
454static unsigned realworld = 1; 454static unsigned realworld = 1;
455module_param (realworld, uint, 0); 455module_param(realworld, uint, 0);
456MODULE_PARM_DESC (realworld, "clear to demand stricter spec compliance"); 456MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
457 457
458static int get_altsetting (struct usbtest_dev *dev) 458static int get_altsetting(struct usbtest_dev *dev)
459{ 459{
460 struct usb_interface *iface = dev->intf; 460 struct usb_interface *iface = dev->intf;
461 struct usb_device *udev = interface_to_usbdev (iface); 461 struct usb_device *udev = interface_to_usbdev(iface);
462 int retval; 462 int retval;
463 463
464 retval = usb_control_msg (udev, usb_rcvctrlpipe (udev, 0), 464 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
465 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE, 465 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
466 0, iface->altsetting [0].desc.bInterfaceNumber, 466 0, iface->altsetting[0].desc.bInterfaceNumber,
467 dev->buf, 1, USB_CTRL_GET_TIMEOUT); 467 dev->buf, 1, USB_CTRL_GET_TIMEOUT);
468 switch (retval) { 468 switch (retval) {
469 case 1: 469 case 1:
470 return dev->buf [0]; 470 return dev->buf[0];
471 case 0: 471 case 0:
472 retval = -ERANGE; 472 retval = -ERANGE;
473 // FALLTHROUGH 473 /* FALLTHROUGH */
474 default: 474 default:
475 return retval; 475 return retval;
476 } 476 }
477} 477}
478 478
479static int set_altsetting (struct usbtest_dev *dev, int alternate) 479static int set_altsetting(struct usbtest_dev *dev, int alternate)
480{ 480{
481 struct usb_interface *iface = dev->intf; 481 struct usb_interface *iface = dev->intf;
482 struct usb_device *udev; 482 struct usb_device *udev;
@@ -484,9 +484,9 @@ static int set_altsetting (struct usbtest_dev *dev, int alternate)
484 if (alternate < 0 || alternate >= 256) 484 if (alternate < 0 || alternate >= 256)
485 return -EINVAL; 485 return -EINVAL;
486 486
487 udev = interface_to_usbdev (iface); 487 udev = interface_to_usbdev(iface);
488 return usb_set_interface (udev, 488 return usb_set_interface(udev,
489 iface->altsetting [0].desc.bInterfaceNumber, 489 iface->altsetting[0].desc.bInterfaceNumber,
490 alternate); 490 alternate);
491} 491}
492 492
@@ -519,9 +519,9 @@ static int is_good_config(struct usbtest_dev *tdev, int len)
519 return 0; 519 return 0;
520 } 520 }
521 521
522 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */ 522 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
523 return 1; 523 return 1;
524 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */ 524 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
525 return 1; 525 return 1;
526 ERROR(tdev, "bogus config descriptor read size\n"); 526 ERROR(tdev, "bogus config descriptor read size\n");
527 return 0; 527 return 0;
@@ -542,10 +542,10 @@ static int is_good_config(struct usbtest_dev *tdev, int len)
542 * to see if usbcore, hcd, and device all behave right. such testing would 542 * to see if usbcore, hcd, and device all behave right. such testing would
543 * involve varied read sizes and other operation sequences. 543 * involve varied read sizes and other operation sequences.
544 */ 544 */
545static int ch9_postconfig (struct usbtest_dev *dev) 545static int ch9_postconfig(struct usbtest_dev *dev)
546{ 546{
547 struct usb_interface *iface = dev->intf; 547 struct usb_interface *iface = dev->intf;
548 struct usb_device *udev = interface_to_usbdev (iface); 548 struct usb_device *udev = interface_to_usbdev(iface);
549 int i, alt, retval; 549 int i, alt, retval;
550 550
551 /* [9.2.3] if there's more than one altsetting, we need to be able to 551 /* [9.2.3] if there's more than one altsetting, we need to be able to
@@ -554,7 +554,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
554 for (i = 0; i < iface->num_altsetting; i++) { 554 for (i = 0; i < iface->num_altsetting; i++) {
555 555
556 /* 9.2.3 constrains the range here */ 556 /* 9.2.3 constrains the range here */
557 alt = iface->altsetting [i].desc.bAlternateSetting; 557 alt = iface->altsetting[i].desc.bAlternateSetting;
558 if (alt < 0 || alt >= iface->num_altsetting) { 558 if (alt < 0 || alt >= iface->num_altsetting) {
559 dev_err(&iface->dev, 559 dev_err(&iface->dev,
560 "invalid alt [%d].bAltSetting = %d\n", 560 "invalid alt [%d].bAltSetting = %d\n",
@@ -566,7 +566,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
566 continue; 566 continue;
567 567
568 /* [9.4.10] set_interface */ 568 /* [9.4.10] set_interface */
569 retval = set_altsetting (dev, alt); 569 retval = set_altsetting(dev, alt);
570 if (retval) { 570 if (retval) {
571 dev_err(&iface->dev, "can't set_interface = %d, %d\n", 571 dev_err(&iface->dev, "can't set_interface = %d, %d\n",
572 alt, retval); 572 alt, retval);
@@ -574,7 +574,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
574 } 574 }
575 575
576 /* [9.4.4] get_interface always works */ 576 /* [9.4.4] get_interface always works */
577 retval = get_altsetting (dev); 577 retval = get_altsetting(dev);
578 if (retval != alt) { 578 if (retval != alt) {
579 dev_err(&iface->dev, "get alt should be %d, was %d\n", 579 dev_err(&iface->dev, "get alt should be %d, was %d\n",
580 alt, retval); 580 alt, retval);
@@ -591,11 +591,11 @@ static int ch9_postconfig (struct usbtest_dev *dev)
591 * ... although some cheap devices (like one TI Hub I've got) 591 * ... although some cheap devices (like one TI Hub I've got)
592 * won't return config descriptors except before set_config. 592 * won't return config descriptors except before set_config.
593 */ 593 */
594 retval = usb_control_msg (udev, usb_rcvctrlpipe (udev, 0), 594 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
595 USB_REQ_GET_CONFIGURATION, 595 USB_REQ_GET_CONFIGURATION,
596 USB_DIR_IN | USB_RECIP_DEVICE, 596 USB_DIR_IN | USB_RECIP_DEVICE,
597 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT); 597 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
598 if (retval != 1 || dev->buf [0] != expected) { 598 if (retval != 1 || dev->buf[0] != expected) {
599 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n", 599 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
600 retval, dev->buf[0], expected); 600 retval, dev->buf[0], expected);
601 return (retval < 0) ? retval : -EDOM; 601 return (retval < 0) ? retval : -EDOM;
@@ -603,7 +603,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
603 } 603 }
604 604
605 /* there's always [9.4.3] a device descriptor [9.6.1] */ 605 /* there's always [9.4.3] a device descriptor [9.6.1] */
606 retval = usb_get_descriptor (udev, USB_DT_DEVICE, 0, 606 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
607 dev->buf, sizeof udev->descriptor); 607 dev->buf, sizeof udev->descriptor);
608 if (retval != sizeof udev->descriptor) { 608 if (retval != sizeof udev->descriptor) {
609 dev_err(&iface->dev, "dev descriptor --> %d\n", retval); 609 dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
@@ -612,7 +612,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
612 612
613 /* there's always [9.4.3] at least one config descriptor [9.6.3] */ 613 /* there's always [9.4.3] at least one config descriptor [9.6.3] */
614 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) { 614 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
615 retval = usb_get_descriptor (udev, USB_DT_CONFIG, i, 615 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
616 dev->buf, TBUF_SIZE); 616 dev->buf, TBUF_SIZE);
617 if (!is_good_config(dev, retval)) { 617 if (!is_good_config(dev, retval)) {
618 dev_err(&iface->dev, 618 dev_err(&iface->dev,
@@ -621,18 +621,19 @@ static int ch9_postconfig (struct usbtest_dev *dev)
621 return (retval < 0) ? retval : -EDOM; 621 return (retval < 0) ? retval : -EDOM;
622 } 622 }
623 623
624 // FIXME cross-checking udev->config[i] to make sure usbcore 624 /* FIXME cross-checking udev->config[i] to make sure usbcore
625 // parsed it right (etc) would be good testing paranoia 625 * parsed it right (etc) would be good testing paranoia
626 */
626 } 627 }
627 628
628 /* and sometimes [9.2.6.6] speed dependent descriptors */ 629 /* and sometimes [9.2.6.6] speed dependent descriptors */
629 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) { 630 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
630 struct usb_qualifier_descriptor *d = NULL; 631 struct usb_qualifier_descriptor *d = NULL;
631 632
632 /* device qualifier [9.6.2] */ 633 /* device qualifier [9.6.2] */
633 retval = usb_get_descriptor (udev, 634 retval = usb_get_descriptor(udev,
634 USB_DT_DEVICE_QUALIFIER, 0, dev->buf, 635 USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
635 sizeof (struct usb_qualifier_descriptor)); 636 sizeof(struct usb_qualifier_descriptor));
636 if (retval == -EPIPE) { 637 if (retval == -EPIPE) {
637 if (udev->speed == USB_SPEED_HIGH) { 638 if (udev->speed == USB_SPEED_HIGH) {
638 dev_err(&iface->dev, 639 dev_err(&iface->dev,
@@ -641,7 +642,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
641 return (retval < 0) ? retval : -EDOM; 642 return (retval < 0) ? retval : -EDOM;
642 } 643 }
643 /* usb2.0 but not high-speed capable; fine */ 644 /* usb2.0 but not high-speed capable; fine */
644 } else if (retval != sizeof (struct usb_qualifier_descriptor)) { 645 } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
645 dev_err(&iface->dev, "dev qualifier --> %d\n", retval); 646 dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
646 return (retval < 0) ? retval : -EDOM; 647 return (retval < 0) ? retval : -EDOM;
647 } else 648 } else
@@ -651,7 +652,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
651 if (d) { 652 if (d) {
652 unsigned max = d->bNumConfigurations; 653 unsigned max = d->bNumConfigurations;
653 for (i = 0; i < max; i++) { 654 for (i = 0; i < max; i++) {
654 retval = usb_get_descriptor (udev, 655 retval = usb_get_descriptor(udev,
655 USB_DT_OTHER_SPEED_CONFIG, i, 656 USB_DT_OTHER_SPEED_CONFIG, i,
656 dev->buf, TBUF_SIZE); 657 dev->buf, TBUF_SIZE);
657 if (!is_good_config(dev, retval)) { 658 if (!is_good_config(dev, retval)) {
@@ -663,25 +664,26 @@ static int ch9_postconfig (struct usbtest_dev *dev)
663 } 664 }
664 } 665 }
665 } 666 }
666 // FIXME fetch strings from at least the device descriptor 667 /* FIXME fetch strings from at least the device descriptor */
667 668
668 /* [9.4.5] get_status always works */ 669 /* [9.4.5] get_status always works */
669 retval = usb_get_status (udev, USB_RECIP_DEVICE, 0, dev->buf); 670 retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
670 if (retval != 2) { 671 if (retval != 2) {
671 dev_err(&iface->dev, "get dev status --> %d\n", retval); 672 dev_err(&iface->dev, "get dev status --> %d\n", retval);
672 return (retval < 0) ? retval : -EDOM; 673 return (retval < 0) ? retval : -EDOM;
673 } 674 }
674 675
675 // FIXME configuration.bmAttributes says if we could try to set/clear 676 /* FIXME configuration.bmAttributes says if we could try to set/clear
676 // the device's remote wakeup feature ... if we can, test that here 677 * the device's remote wakeup feature ... if we can, test that here
678 */
677 679
678 retval = usb_get_status (udev, USB_RECIP_INTERFACE, 680 retval = usb_get_status(udev, USB_RECIP_INTERFACE,
679 iface->altsetting [0].desc.bInterfaceNumber, dev->buf); 681 iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
680 if (retval != 2) { 682 if (retval != 2) {
681 dev_err(&iface->dev, "get interface status --> %d\n", retval); 683 dev_err(&iface->dev, "get interface status --> %d\n", retval);
682 return (retval < 0) ? retval : -EDOM; 684 return (retval < 0) ? retval : -EDOM;
683 } 685 }
684 // FIXME get status for each endpoint in the interface 686 /* FIXME get status for each endpoint in the interface */
685 687
686 return 0; 688 return 0;
687} 689}
@@ -717,7 +719,7 @@ struct subcase {
717 int expected; 719 int expected;
718}; 720};
719 721
720static void ctrl_complete (struct urb *urb) 722static void ctrl_complete(struct urb *urb)
721{ 723{
722 struct ctrl_ctx *ctx = urb->context; 724 struct ctrl_ctx *ctx = urb->context;
723 struct usb_ctrlrequest *reqp; 725 struct usb_ctrlrequest *reqp;
@@ -725,9 +727,9 @@ static void ctrl_complete (struct urb *urb)
725 int status = urb->status; 727 int status = urb->status;
726 728
727 reqp = (struct usb_ctrlrequest *)urb->setup_packet; 729 reqp = (struct usb_ctrlrequest *)urb->setup_packet;
728 subcase = container_of (reqp, struct subcase, setup); 730 subcase = container_of(reqp, struct subcase, setup);
729 731
730 spin_lock (&ctx->lock); 732 spin_lock(&ctx->lock);
731 ctx->count--; 733 ctx->count--;
732 ctx->pending--; 734 ctx->pending--;
733 735
@@ -787,14 +789,14 @@ error:
787 789
788 /* unlink whatever's still pending */ 790 /* unlink whatever's still pending */
789 for (i = 1; i < ctx->param->sglen; i++) { 791 for (i = 1; i < ctx->param->sglen; i++) {
790 struct urb *u = ctx->urb [ 792 struct urb *u = ctx->urb[
791 (i + subcase->number) 793 (i + subcase->number)
792 % ctx->param->sglen]; 794 % ctx->param->sglen];
793 795
794 if (u == urb || !u->dev) 796 if (u == urb || !u->dev)
795 continue; 797 continue;
796 spin_unlock(&ctx->lock); 798 spin_unlock(&ctx->lock);
797 status = usb_unlink_urb (u); 799 status = usb_unlink_urb(u);
798 spin_lock(&ctx->lock); 800 spin_lock(&ctx->lock);
799 switch (status) { 801 switch (status) {
800 case -EINPROGRESS: 802 case -EINPROGRESS:
@@ -812,7 +814,8 @@ error:
812 814
813 /* resubmit if we need to, else mark this as done */ 815 /* resubmit if we need to, else mark this as done */
814 if ((status == 0) && (ctx->pending < ctx->count)) { 816 if ((status == 0) && (ctx->pending < ctx->count)) {
815 if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) { 817 status = usb_submit_urb(urb, GFP_ATOMIC);
818 if (status != 0) {
816 ERROR(ctx->dev, 819 ERROR(ctx->dev,
817 "can't resubmit ctrl %02x.%02x, err %d\n", 820 "can't resubmit ctrl %02x.%02x, err %d\n",
818 reqp->bRequestType, reqp->bRequest, status); 821 reqp->bRequestType, reqp->bRequest, status);
@@ -824,21 +827,21 @@ error:
824 827
825 /* signal completion when nothing's queued */ 828 /* signal completion when nothing's queued */
826 if (ctx->pending == 0) 829 if (ctx->pending == 0)
827 complete (&ctx->complete); 830 complete(&ctx->complete);
828 spin_unlock (&ctx->lock); 831 spin_unlock(&ctx->lock);
829} 832}
830 833
831static int 834static int
832test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param) 835test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
833{ 836{
834 struct usb_device *udev = testdev_to_usbdev (dev); 837 struct usb_device *udev = testdev_to_usbdev(dev);
835 struct urb **urb; 838 struct urb **urb;
836 struct ctrl_ctx context; 839 struct ctrl_ctx context;
837 int i; 840 int i;
838 841
839 spin_lock_init (&context.lock); 842 spin_lock_init(&context.lock);
840 context.dev = dev; 843 context.dev = dev;
841 init_completion (&context.complete); 844 init_completion(&context.complete);
842 context.count = param->sglen * param->iterations; 845 context.count = param->sglen * param->iterations;
843 context.pending = 0; 846 context.pending = 0;
844 context.status = -ENOMEM; 847 context.status = -ENOMEM;
@@ -853,7 +856,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
853 if (!urb) 856 if (!urb)
854 return -ENOMEM; 857 return -ENOMEM;
855 for (i = 0; i < param->sglen; i++) { 858 for (i = 0; i < param->sglen; i++) {
856 int pipe = usb_rcvctrlpipe (udev, 0); 859 int pipe = usb_rcvctrlpipe(udev, 0);
857 unsigned len; 860 unsigned len;
858 struct urb *u; 861 struct urb *u;
859 struct usb_ctrlrequest req; 862 struct usb_ctrlrequest req;
@@ -869,104 +872,108 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
869 * device, but some are chosen to trigger protocol stalls 872 * device, but some are chosen to trigger protocol stalls
870 * or short reads. 873 * or short reads.
871 */ 874 */
872 memset (&req, 0, sizeof req); 875 memset(&req, 0, sizeof req);
873 req.bRequest = USB_REQ_GET_DESCRIPTOR; 876 req.bRequest = USB_REQ_GET_DESCRIPTOR;
874 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 877 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
875 878
876 switch (i % NUM_SUBCASES) { 879 switch (i % NUM_SUBCASES) {
877 case 0: // get device descriptor 880 case 0: /* get device descriptor */
878 req.wValue = cpu_to_le16 (USB_DT_DEVICE << 8); 881 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
879 len = sizeof (struct usb_device_descriptor); 882 len = sizeof(struct usb_device_descriptor);
880 break; 883 break;
881 case 1: // get first config descriptor (only) 884 case 1: /* get first config descriptor (only) */
882 req.wValue = cpu_to_le16 ((USB_DT_CONFIG << 8) | 0); 885 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
883 len = sizeof (struct usb_config_descriptor); 886 len = sizeof(struct usb_config_descriptor);
884 break; 887 break;
885 case 2: // get altsetting (OFTEN STALLS) 888 case 2: /* get altsetting (OFTEN STALLS) */
886 req.bRequest = USB_REQ_GET_INTERFACE; 889 req.bRequest = USB_REQ_GET_INTERFACE;
887 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 890 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
888 // index = 0 means first interface 891 /* index = 0 means first interface */
889 len = 1; 892 len = 1;
890 expected = EPIPE; 893 expected = EPIPE;
891 break; 894 break;
892 case 3: // get interface status 895 case 3: /* get interface status */
893 req.bRequest = USB_REQ_GET_STATUS; 896 req.bRequest = USB_REQ_GET_STATUS;
894 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 897 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
895 // interface 0 898 /* interface 0 */
896 len = 2; 899 len = 2;
897 break; 900 break;
898 case 4: // get device status 901 case 4: /* get device status */
899 req.bRequest = USB_REQ_GET_STATUS; 902 req.bRequest = USB_REQ_GET_STATUS;
900 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 903 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
901 len = 2; 904 len = 2;
902 break; 905 break;
903 case 5: // get device qualifier (MAY STALL) 906 case 5: /* get device qualifier (MAY STALL) */
904 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8); 907 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
905 len = sizeof (struct usb_qualifier_descriptor); 908 len = sizeof(struct usb_qualifier_descriptor);
906 if (udev->speed != USB_SPEED_HIGH) 909 if (udev->speed != USB_SPEED_HIGH)
907 expected = EPIPE; 910 expected = EPIPE;
908 break; 911 break;
909 case 6: // get first config descriptor, plus interface 912 case 6: /* get first config descriptor, plus interface */
910 req.wValue = cpu_to_le16 ((USB_DT_CONFIG << 8) | 0); 913 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
911 len = sizeof (struct usb_config_descriptor); 914 len = sizeof(struct usb_config_descriptor);
912 len += sizeof (struct usb_interface_descriptor); 915 len += sizeof(struct usb_interface_descriptor);
913 break; 916 break;
914 case 7: // get interface descriptor (ALWAYS STALLS) 917 case 7: /* get interface descriptor (ALWAYS STALLS) */
915 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8); 918 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
916 // interface == 0 919 /* interface == 0 */
917 len = sizeof (struct usb_interface_descriptor); 920 len = sizeof(struct usb_interface_descriptor);
918 expected = -EPIPE; 921 expected = -EPIPE;
919 break; 922 break;
920 // NOTE: two consecutive stalls in the queue here. 923 /* NOTE: two consecutive stalls in the queue here.
921 // that tests fault recovery a bit more aggressively. 924 * that tests fault recovery a bit more aggressively. */
922 case 8: // clear endpoint halt (MAY STALL) 925 case 8: /* clear endpoint halt (MAY STALL) */
923 req.bRequest = USB_REQ_CLEAR_FEATURE; 926 req.bRequest = USB_REQ_CLEAR_FEATURE;
924 req.bRequestType = USB_RECIP_ENDPOINT; 927 req.bRequestType = USB_RECIP_ENDPOINT;
925 // wValue 0 == ep halt 928 /* wValue 0 == ep halt */
926 // wIndex 0 == ep0 (shouldn't halt!) 929 /* wIndex 0 == ep0 (shouldn't halt!) */
927 len = 0; 930 len = 0;
928 pipe = usb_sndctrlpipe (udev, 0); 931 pipe = usb_sndctrlpipe(udev, 0);
929 expected = EPIPE; 932 expected = EPIPE;
930 break; 933 break;
931 case 9: // get endpoint status 934 case 9: /* get endpoint status */
932 req.bRequest = USB_REQ_GET_STATUS; 935 req.bRequest = USB_REQ_GET_STATUS;
933 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT; 936 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
934 // endpoint 0 937 /* endpoint 0 */
935 len = 2; 938 len = 2;
936 break; 939 break;
937 case 10: // trigger short read (EREMOTEIO) 940 case 10: /* trigger short read (EREMOTEIO) */
938 req.wValue = cpu_to_le16 ((USB_DT_CONFIG << 8) | 0); 941 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
939 len = 1024; 942 len = 1024;
940 expected = -EREMOTEIO; 943 expected = -EREMOTEIO;
941 break; 944 break;
942 // NOTE: two consecutive _different_ faults in the queue. 945 /* NOTE: two consecutive _different_ faults in the queue. */
943 case 11: // get endpoint descriptor (ALWAYS STALLS) 946 case 11: /* get endpoint descriptor (ALWAYS STALLS) */
944 req.wValue = cpu_to_le16 (USB_DT_ENDPOINT << 8); 947 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
945 // endpoint == 0 948 /* endpoint == 0 */
946 len = sizeof (struct usb_interface_descriptor); 949 len = sizeof(struct usb_interface_descriptor);
947 expected = EPIPE; 950 expected = EPIPE;
948 break; 951 break;
949 // NOTE: sometimes even a third fault in the queue! 952 /* NOTE: sometimes even a third fault in the queue! */
950 case 12: // get string 0 descriptor (MAY STALL) 953 case 12: /* get string 0 descriptor (MAY STALL) */
951 req.wValue = cpu_to_le16 (USB_DT_STRING << 8); 954 req.wValue = cpu_to_le16(USB_DT_STRING << 8);
952 // string == 0, for language IDs 955 /* string == 0, for language IDs */
953 len = sizeof (struct usb_interface_descriptor); 956 len = sizeof(struct usb_interface_descriptor);
954 // may succeed when > 4 languages 957 /* may succeed when > 4 languages */
955 expected = EREMOTEIO; // or EPIPE, if no strings 958 expected = EREMOTEIO; /* or EPIPE, if no strings */
956 break; 959 break;
957 case 13: // short read, resembling case 10 960 case 13: /* short read, resembling case 10 */
958 req.wValue = cpu_to_le16 ((USB_DT_CONFIG << 8) | 0); 961 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
959 // last data packet "should" be DATA1, not DATA0 962 /* last data packet "should" be DATA1, not DATA0 */
960 len = 1024 - udev->descriptor.bMaxPacketSize0; 963 len = 1024 - udev->descriptor.bMaxPacketSize0;
961 expected = -EREMOTEIO; 964 expected = -EREMOTEIO;
962 break; 965 break;
963 case 14: // short read; try to fill the last packet 966 case 14: /* short read; try to fill the last packet */
964 req.wValue = cpu_to_le16 ((USB_DT_DEVICE << 8) | 0); 967 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
965 /* device descriptor size == 18 bytes */ 968 /* device descriptor size == 18 bytes */
966 len = udev->descriptor.bMaxPacketSize0; 969 len = udev->descriptor.bMaxPacketSize0;
967 switch (len) { 970 switch (len) {
968 case 8: len = 24; break; 971 case 8:
969 case 16: len = 32; break; 972 len = 24;
973 break;
974 case 16:
975 len = 32;
976 break;
970 } 977 }
971 expected = -EREMOTEIO; 978 expected = -EREMOTEIO;
972 break; 979 break;
@@ -975,8 +982,8 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
975 context.status = -EINVAL; 982 context.status = -EINVAL;
976 goto cleanup; 983 goto cleanup;
977 } 984 }
978 req.wLength = cpu_to_le16 (len); 985 req.wLength = cpu_to_le16(len);
979 urb [i] = u = simple_alloc_urb (udev, pipe, len); 986 urb[i] = u = simple_alloc_urb(udev, pipe, len);
980 if (!u) 987 if (!u)
981 goto cleanup; 988 goto cleanup;
982 989
@@ -994,9 +1001,9 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
994 1001
995 /* queue the urbs */ 1002 /* queue the urbs */
996 context.urb = urb; 1003 context.urb = urb;
997 spin_lock_irq (&context.lock); 1004 spin_lock_irq(&context.lock);
998 for (i = 0; i < param->sglen; i++) { 1005 for (i = 0; i < param->sglen; i++) {
999 context.status = usb_submit_urb (urb [i], GFP_ATOMIC); 1006 context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1000 if (context.status != 0) { 1007 if (context.status != 0) {
1001 ERROR(dev, "can't submit urb[%d], status %d\n", 1008 ERROR(dev, "can't submit urb[%d], status %d\n",
1002 i, context.status); 1009 i, context.status);
@@ -1005,23 +1012,23 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
1005 } 1012 }
1006 context.pending++; 1013 context.pending++;
1007 } 1014 }
1008 spin_unlock_irq (&context.lock); 1015 spin_unlock_irq(&context.lock);
1009 1016
1010 /* FIXME set timer and time out; provide a disconnect hook */ 1017 /* FIXME set timer and time out; provide a disconnect hook */
1011 1018
1012 /* wait for the last one to complete */ 1019 /* wait for the last one to complete */
1013 if (context.pending > 0) 1020 if (context.pending > 0)
1014 wait_for_completion (&context.complete); 1021 wait_for_completion(&context.complete);
1015 1022
1016cleanup: 1023cleanup:
1017 for (i = 0; i < param->sglen; i++) { 1024 for (i = 0; i < param->sglen; i++) {
1018 if (!urb [i]) 1025 if (!urb[i])
1019 continue; 1026 continue;
1020 urb [i]->dev = udev; 1027 urb[i]->dev = udev;
1021 kfree(urb[i]->setup_packet); 1028 kfree(urb[i]->setup_packet);
1022 simple_free_urb (urb [i]); 1029 simple_free_urb(urb[i]);
1023 } 1030 }
1024 kfree (urb); 1031 kfree(urb);
1025 return context.status; 1032 return context.status;
1026} 1033}
1027#undef NUM_SUBCASES 1034#undef NUM_SUBCASES
@@ -1029,27 +1036,27 @@ cleanup:
1029 1036
1030/*-------------------------------------------------------------------------*/ 1037/*-------------------------------------------------------------------------*/
1031 1038
1032static void unlink1_callback (struct urb *urb) 1039static void unlink1_callback(struct urb *urb)
1033{ 1040{
1034 int status = urb->status; 1041 int status = urb->status;
1035 1042
1036 // we "know" -EPIPE (stall) never happens 1043 /* we "know" -EPIPE (stall) never happens */
1037 if (!status) 1044 if (!status)
1038 status = usb_submit_urb (urb, GFP_ATOMIC); 1045 status = usb_submit_urb(urb, GFP_ATOMIC);
1039 if (status) { 1046 if (status) {
1040 urb->status = status; 1047 urb->status = status;
1041 complete(urb->context); 1048 complete(urb->context);
1042 } 1049 }
1043} 1050}
1044 1051
1045static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async) 1052static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1046{ 1053{
1047 struct urb *urb; 1054 struct urb *urb;
1048 struct completion completion; 1055 struct completion completion;
1049 int retval = 0; 1056 int retval = 0;
1050 1057
1051 init_completion (&completion); 1058 init_completion(&completion);
1052 urb = simple_alloc_urb (testdev_to_usbdev (dev), pipe, size); 1059 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1053 if (!urb) 1060 if (!urb)
1054 return -ENOMEM; 1061 return -ENOMEM;
1055 urb->context = &completion; 1062 urb->context = &completion;
@@ -1061,7 +1068,8 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1061 * FIXME want additional tests for when endpoint is STALLing 1068 * FIXME want additional tests for when endpoint is STALLing
1062 * due to errors, or is just NAKing requests. 1069 * due to errors, or is just NAKing requests.
1063 */ 1070 */
1064 if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) { 1071 retval = usb_submit_urb(urb, GFP_KERNEL);
1072 if (retval != 0) {
1065 dev_err(&dev->intf->dev, "submit fail %d\n", retval); 1073 dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1066 return retval; 1074 return retval;
1067 } 1075 }
@@ -1069,7 +1077,7 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1069 /* unlinking that should always work. variable delay tests more 1077 /* unlinking that should always work. variable delay tests more
1070 * hcd states and code paths, even with little other system load. 1078 * hcd states and code paths, even with little other system load.
1071 */ 1079 */
1072 msleep (jiffies % (2 * INTERRUPT_RATE)); 1080 msleep(jiffies % (2 * INTERRUPT_RATE));
1073 if (async) { 1081 if (async) {
1074 while (!completion_done(&completion)) { 1082 while (!completion_done(&completion)) {
1075 retval = usb_unlink_urb(urb); 1083 retval = usb_unlink_urb(urb);
@@ -1098,11 +1106,11 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1098 break; 1106 break;
1099 } 1107 }
1100 } else 1108 } else
1101 usb_kill_urb (urb); 1109 usb_kill_urb(urb);
1102 1110
1103 wait_for_completion (&completion); 1111 wait_for_completion(&completion);
1104 retval = urb->status; 1112 retval = urb->status;
1105 simple_free_urb (urb); 1113 simple_free_urb(urb);
1106 1114
1107 if (async) 1115 if (async)
1108 return (retval == -ECONNRESET) ? 0 : retval - 1000; 1116 return (retval == -ECONNRESET) ? 0 : retval - 1000;
@@ -1111,14 +1119,14 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1111 0 : retval - 2000; 1119 0 : retval - 2000;
1112} 1120}
1113 1121
1114static int unlink_simple (struct usbtest_dev *dev, int pipe, int len) 1122static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1115{ 1123{
1116 int retval = 0; 1124 int retval = 0;
1117 1125
1118 /* test sync and async paths */ 1126 /* test sync and async paths */
1119 retval = unlink1 (dev, pipe, len, 1); 1127 retval = unlink1(dev, pipe, len, 1);
1120 if (!retval) 1128 if (!retval)
1121 retval = unlink1 (dev, pipe, len, 0); 1129 retval = unlink1(dev, pipe, len, 0);
1122 return retval; 1130 return retval;
1123} 1131}
1124 1132
@@ -1130,7 +1138,7 @@ static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1130 u16 status; 1138 u16 status;
1131 1139
1132 /* shouldn't look or act halted */ 1140 /* shouldn't look or act halted */
1133 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1141 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1134 if (retval < 0) { 1142 if (retval < 0) {
1135 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n", 1143 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1136 ep, retval); 1144 ep, retval);
@@ -1152,7 +1160,7 @@ static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1152 u16 status; 1160 u16 status;
1153 1161
1154 /* should look and act halted */ 1162 /* should look and act halted */
1155 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1163 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1156 if (retval < 0) { 1164 if (retval < 0) {
1157 ERROR(tdev, "ep %02x couldn't get halt status, %d\n", 1165 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1158 ep, retval); 1166 ep, retval);
@@ -1182,7 +1190,7 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1182 return retval; 1190 return retval;
1183 1191
1184 /* set halt (protocol test only), verify it worked */ 1192 /* set halt (protocol test only), verify it worked */
1185 retval = usb_control_msg (urb->dev, usb_sndctrlpipe (urb->dev, 0), 1193 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1186 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT, 1194 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1187 USB_ENDPOINT_HALT, ep, 1195 USB_ENDPOINT_HALT, ep,
1188 NULL, 0, USB_CTRL_SET_TIMEOUT); 1196 NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1195,7 +1203,7 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1195 return retval; 1203 return retval;
1196 1204
1197 /* clear halt (tests API + protocol), verify it worked */ 1205 /* clear halt (tests API + protocol), verify it worked */
1198 retval = usb_clear_halt (urb->dev, urb->pipe); 1206 retval = usb_clear_halt(urb->dev, urb->pipe);
1199 if (retval < 0) { 1207 if (retval < 0) {
1200 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); 1208 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1201 return retval; 1209 return retval;
@@ -1209,18 +1217,18 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1209 return 0; 1217 return 0;
1210} 1218}
1211 1219
1212static int halt_simple (struct usbtest_dev *dev) 1220static int halt_simple(struct usbtest_dev *dev)
1213{ 1221{
1214 int ep; 1222 int ep;
1215 int retval = 0; 1223 int retval = 0;
1216 struct urb *urb; 1224 struct urb *urb;
1217 1225
1218 urb = simple_alloc_urb (testdev_to_usbdev (dev), 0, 512); 1226 urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
1219 if (urb == NULL) 1227 if (urb == NULL)
1220 return -ENOMEM; 1228 return -ENOMEM;
1221 1229
1222 if (dev->in_pipe) { 1230 if (dev->in_pipe) {
1223 ep = usb_pipeendpoint (dev->in_pipe) | USB_DIR_IN; 1231 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1224 urb->pipe = dev->in_pipe; 1232 urb->pipe = dev->in_pipe;
1225 retval = test_halt(dev, ep, urb); 1233 retval = test_halt(dev, ep, urb);
1226 if (retval < 0) 1234 if (retval < 0)
@@ -1228,12 +1236,12 @@ static int halt_simple (struct usbtest_dev *dev)
1228 } 1236 }
1229 1237
1230 if (dev->out_pipe) { 1238 if (dev->out_pipe) {
1231 ep = usb_pipeendpoint (dev->out_pipe); 1239 ep = usb_pipeendpoint(dev->out_pipe);
1232 urb->pipe = dev->out_pipe; 1240 urb->pipe = dev->out_pipe;
1233 retval = test_halt(dev, ep, urb); 1241 retval = test_halt(dev, ep, urb);
1234 } 1242 }
1235done: 1243done:
1236 simple_free_urb (urb); 1244 simple_free_urb(urb);
1237 return retval; 1245 return retval;
1238} 1246}
1239 1247
@@ -1247,7 +1255,7 @@ done:
1247 * need to be able to handle more than one OUT data packet. We'll 1255 * need to be able to handle more than one OUT data packet. We'll
1248 * try whatever we're told to try. 1256 * try whatever we're told to try.
1249 */ 1257 */
1250static int ctrl_out (struct usbtest_dev *dev, 1258static int ctrl_out(struct usbtest_dev *dev,
1251 unsigned count, unsigned length, unsigned vary) 1259 unsigned count, unsigned length, unsigned vary)
1252{ 1260{
1253 unsigned i, j, len; 1261 unsigned i, j, len;
@@ -1263,7 +1271,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1263 if (!buf) 1271 if (!buf)
1264 return -ENOMEM; 1272 return -ENOMEM;
1265 1273
1266 udev = testdev_to_usbdev (dev); 1274 udev = testdev_to_usbdev(dev);
1267 len = length; 1275 len = length;
1268 retval = 0; 1276 retval = 0;
1269 1277
@@ -1273,8 +1281,8 @@ static int ctrl_out (struct usbtest_dev *dev,
1273 for (i = 0; i < count; i++) { 1281 for (i = 0; i < count; i++) {
1274 /* write patterned data */ 1282 /* write patterned data */
1275 for (j = 0; j < len; j++) 1283 for (j = 0; j < len; j++)
1276 buf [j] = i + j; 1284 buf[j] = i + j;
1277 retval = usb_control_msg (udev, usb_sndctrlpipe (udev,0), 1285 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1278 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR, 1286 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1279 0, 0, buf, len, USB_CTRL_SET_TIMEOUT); 1287 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1280 if (retval != len) { 1288 if (retval != len) {
@@ -1288,7 +1296,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1288 } 1296 }
1289 1297
1290 /* read it back -- assuming nothing intervened!! */ 1298 /* read it back -- assuming nothing intervened!! */
1291 retval = usb_control_msg (udev, usb_rcvctrlpipe (udev,0), 1299 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1292 0x5c, USB_DIR_IN|USB_TYPE_VENDOR, 1300 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1293 0, 0, buf, len, USB_CTRL_GET_TIMEOUT); 1301 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1294 if (retval != len) { 1302 if (retval != len) {
@@ -1303,9 +1311,9 @@ static int ctrl_out (struct usbtest_dev *dev,
1303 1311
1304 /* fail if we can't verify */ 1312 /* fail if we can't verify */
1305 for (j = 0; j < len; j++) { 1313 for (j = 0; j < len; j++) {
1306 if (buf [j] != (u8) (i + j)) { 1314 if (buf[j] != (u8) (i + j)) {
1307 ERROR(dev, "ctrl_out, byte %d is %d not %d\n", 1315 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1308 j, buf [j], (u8) i + j); 1316 j, buf[j], (u8) i + j);
1309 retval = -EBADMSG; 1317 retval = -EBADMSG;
1310 break; 1318 break;
1311 } 1319 }
@@ -1326,10 +1334,10 @@ static int ctrl_out (struct usbtest_dev *dev,
1326 } 1334 }
1327 1335
1328 if (retval < 0) 1336 if (retval < 0)
1329 ERROR (dev, "ctrl_out %s failed, code %d, count %d\n", 1337 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1330 what, retval, i); 1338 what, retval, i);
1331 1339
1332 kfree (buf); 1340 kfree(buf);
1333 return retval; 1341 return retval;
1334} 1342}
1335 1343
@@ -1351,7 +1359,7 @@ struct iso_context {
1351 struct usbtest_dev *dev; 1359 struct usbtest_dev *dev;
1352}; 1360};
1353 1361
1354static void iso_callback (struct urb *urb) 1362static void iso_callback(struct urb *urb)
1355{ 1363{
1356 struct iso_context *ctx = urb->context; 1364 struct iso_context *ctx = urb->context;
1357 1365
@@ -1363,10 +1371,12 @@ static void iso_callback (struct urb *urb)
1363 ctx->errors += urb->error_count; 1371 ctx->errors += urb->error_count;
1364 else if (urb->status != 0) 1372 else if (urb->status != 0)
1365 ctx->errors += urb->number_of_packets; 1373 ctx->errors += urb->number_of_packets;
1374 else if (urb->actual_length != urb->transfer_buffer_length)
1375 ctx->errors++;
1366 1376
1367 if (urb->status == 0 && ctx->count > (ctx->pending - 1) 1377 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1368 && !ctx->submit_error) { 1378 && !ctx->submit_error) {
1369 int status = usb_submit_urb (urb, GFP_ATOMIC); 1379 int status = usb_submit_urb(urb, GFP_ATOMIC);
1370 switch (status) { 1380 switch (status) {
1371 case 0: 1381 case 0:
1372 goto done; 1382 goto done;
@@ -1388,13 +1398,13 @@ static void iso_callback (struct urb *urb)
1388 dev_err(&ctx->dev->intf->dev, 1398 dev_err(&ctx->dev->intf->dev,
1389 "iso test, %lu errors out of %lu\n", 1399 "iso test, %lu errors out of %lu\n",
1390 ctx->errors, ctx->packet_count); 1400 ctx->errors, ctx->packet_count);
1391 complete (&ctx->done); 1401 complete(&ctx->done);
1392 } 1402 }
1393done: 1403done:
1394 spin_unlock(&ctx->lock); 1404 spin_unlock(&ctx->lock);
1395} 1405}
1396 1406
1397static struct urb *iso_alloc_urb ( 1407static struct urb *iso_alloc_urb(
1398 struct usb_device *udev, 1408 struct usb_device *udev,
1399 int pipe, 1409 int pipe,
1400 struct usb_endpoint_descriptor *desc, 1410 struct usb_endpoint_descriptor *desc,
@@ -1410,7 +1420,7 @@ static struct urb *iso_alloc_urb (
1410 maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11)); 1420 maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
1411 packets = DIV_ROUND_UP(bytes, maxp); 1421 packets = DIV_ROUND_UP(bytes, maxp);
1412 1422
1413 urb = usb_alloc_urb (packets, GFP_KERNEL); 1423 urb = usb_alloc_urb(packets, GFP_KERNEL);
1414 if (!urb) 1424 if (!urb)
1415 return urb; 1425 return urb;
1416 urb->dev = udev; 1426 urb->dev = udev;
@@ -1418,30 +1428,30 @@ static struct urb *iso_alloc_urb (
1418 1428
1419 urb->number_of_packets = packets; 1429 urb->number_of_packets = packets;
1420 urb->transfer_buffer_length = bytes; 1430 urb->transfer_buffer_length = bytes;
1421 urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL, 1431 urb->transfer_buffer = usb_alloc_coherent(udev, bytes, GFP_KERNEL,
1422 &urb->transfer_dma); 1432 &urb->transfer_dma);
1423 if (!urb->transfer_buffer) { 1433 if (!urb->transfer_buffer) {
1424 usb_free_urb (urb); 1434 usb_free_urb(urb);
1425 return NULL; 1435 return NULL;
1426 } 1436 }
1427 memset (urb->transfer_buffer, 0, bytes); 1437 memset(urb->transfer_buffer, 0, bytes);
1428 for (i = 0; i < packets; i++) { 1438 for (i = 0; i < packets; i++) {
1429 /* here, only the last packet will be short */ 1439 /* here, only the last packet will be short */
1430 urb->iso_frame_desc[i].length = min ((unsigned) bytes, maxp); 1440 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1431 bytes -= urb->iso_frame_desc[i].length; 1441 bytes -= urb->iso_frame_desc[i].length;
1432 1442
1433 urb->iso_frame_desc[i].offset = maxp * i; 1443 urb->iso_frame_desc[i].offset = maxp * i;
1434 } 1444 }
1435 1445
1436 urb->complete = iso_callback; 1446 urb->complete = iso_callback;
1437 // urb->context = SET BY CALLER 1447 /* urb->context = SET BY CALLER */
1438 urb->interval = 1 << (desc->bInterval - 1); 1448 urb->interval = 1 << (desc->bInterval - 1);
1439 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; 1449 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1440 return urb; 1450 return urb;
1441} 1451}
1442 1452
1443static int 1453static int
1444test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param, 1454test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1445 int pipe, struct usb_endpoint_descriptor *desc) 1455 int pipe, struct usb_endpoint_descriptor *desc)
1446{ 1456{
1447 struct iso_context context; 1457 struct iso_context context;
@@ -1457,11 +1467,11 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1457 memset(&context, 0, sizeof context); 1467 memset(&context, 0, sizeof context);
1458 context.count = param->iterations * param->sglen; 1468 context.count = param->iterations * param->sglen;
1459 context.dev = dev; 1469 context.dev = dev;
1460 init_completion (&context.done); 1470 init_completion(&context.done);
1461 spin_lock_init (&context.lock); 1471 spin_lock_init(&context.lock);
1462 1472
1463 memset (urbs, 0, sizeof urbs); 1473 memset(urbs, 0, sizeof urbs);
1464 udev = testdev_to_usbdev (dev); 1474 udev = testdev_to_usbdev(dev);
1465 dev_info(&dev->intf->dev, 1475 dev_info(&dev->intf->dev,
1466 "... iso period %d %sframes, wMaxPacket %04x\n", 1476 "... iso period %d %sframes, wMaxPacket %04x\n",
1467 1 << (desc->bInterval - 1), 1477 1 << (desc->bInterval - 1),
@@ -1469,14 +1479,14 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1469 le16_to_cpu(desc->wMaxPacketSize)); 1479 le16_to_cpu(desc->wMaxPacketSize));
1470 1480
1471 for (i = 0; i < param->sglen; i++) { 1481 for (i = 0; i < param->sglen; i++) {
1472 urbs [i] = iso_alloc_urb (udev, pipe, desc, 1482 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1473 param->length); 1483 param->length);
1474 if (!urbs [i]) { 1484 if (!urbs[i]) {
1475 status = -ENOMEM; 1485 status = -ENOMEM;
1476 goto fail; 1486 goto fail;
1477 } 1487 }
1478 packets += urbs[i]->number_of_packets; 1488 packets += urbs[i]->number_of_packets;
1479 urbs [i]->context = &context; 1489 urbs[i]->context = &context;
1480 } 1490 }
1481 packets *= param->iterations; 1491 packets *= param->iterations;
1482 dev_info(&dev->intf->dev, 1492 dev_info(&dev->intf->dev,
@@ -1485,27 +1495,27 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1485 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1), 1495 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1486 packets); 1496 packets);
1487 1497
1488 spin_lock_irq (&context.lock); 1498 spin_lock_irq(&context.lock);
1489 for (i = 0; i < param->sglen; i++) { 1499 for (i = 0; i < param->sglen; i++) {
1490 ++context.pending; 1500 ++context.pending;
1491 status = usb_submit_urb (urbs [i], GFP_ATOMIC); 1501 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1492 if (status < 0) { 1502 if (status < 0) {
1493 ERROR (dev, "submit iso[%d], error %d\n", i, status); 1503 ERROR(dev, "submit iso[%d], error %d\n", i, status);
1494 if (i == 0) { 1504 if (i == 0) {
1495 spin_unlock_irq (&context.lock); 1505 spin_unlock_irq(&context.lock);
1496 goto fail; 1506 goto fail;
1497 } 1507 }
1498 1508
1499 simple_free_urb (urbs [i]); 1509 simple_free_urb(urbs[i]);
1500 urbs[i] = NULL; 1510 urbs[i] = NULL;
1501 context.pending--; 1511 context.pending--;
1502 context.submit_error = 1; 1512 context.submit_error = 1;
1503 break; 1513 break;
1504 } 1514 }
1505 } 1515 }
1506 spin_unlock_irq (&context.lock); 1516 spin_unlock_irq(&context.lock);
1507 1517
1508 wait_for_completion (&context.done); 1518 wait_for_completion(&context.done);
1509 1519
1510 for (i = 0; i < param->sglen; i++) { 1520 for (i = 0; i < param->sglen; i++) {
1511 if (urbs[i]) 1521 if (urbs[i])
@@ -1526,8 +1536,8 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1526 1536
1527fail: 1537fail:
1528 for (i = 0; i < param->sglen; i++) { 1538 for (i = 0; i < param->sglen; i++) {
1529 if (urbs [i]) 1539 if (urbs[i])
1530 simple_free_urb (urbs [i]); 1540 simple_free_urb(urbs[i]);
1531 } 1541 }
1532 return status; 1542 return status;
1533} 1543}
@@ -1557,10 +1567,10 @@ fail:
1557 1567
1558/* No BKL needed */ 1568/* No BKL needed */
1559static int 1569static int
1560usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf) 1570usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1561{ 1571{
1562 struct usbtest_dev *dev = usb_get_intfdata (intf); 1572 struct usbtest_dev *dev = usb_get_intfdata(intf);
1563 struct usb_device *udev = testdev_to_usbdev (dev); 1573 struct usb_device *udev = testdev_to_usbdev(dev);
1564 struct usbtest_param *param = buf; 1574 struct usbtest_param *param = buf;
1565 int retval = -EOPNOTSUPP; 1575 int retval = -EOPNOTSUPP;
1566 struct urb *urb; 1576 struct urb *urb;
@@ -1569,7 +1579,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1569 struct timeval start; 1579 struct timeval start;
1570 unsigned i; 1580 unsigned i;
1571 1581
1572 // FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. 1582 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1573 1583
1574 pattern = mod_pattern; 1584 pattern = mod_pattern;
1575 1585
@@ -1595,9 +1605,9 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1595 mutex_unlock(&dev->lock); 1605 mutex_unlock(&dev->lock);
1596 return -ENODEV; 1606 return -ENODEV;
1597 } 1607 }
1598 res = set_altsetting (dev, dev->info->alt); 1608 res = set_altsetting(dev, dev->info->alt);
1599 if (res) { 1609 if (res) {
1600 dev_err (&intf->dev, 1610 dev_err(&intf->dev,
1601 "set altsetting to %d failed, %d\n", 1611 "set altsetting to %d failed, %d\n",
1602 dev->info->alt, res); 1612 dev->info->alt, res);
1603 mutex_unlock(&dev->lock); 1613 mutex_unlock(&dev->lock);
@@ -1614,7 +1624,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1614 * FIXME add more tests! cancel requests, verify the data, control 1624 * FIXME add more tests! cancel requests, verify the data, control
1615 * queueing, concurrent read+write threads, and so on. 1625 * queueing, concurrent read+write threads, and so on.
1616 */ 1626 */
1617 do_gettimeofday (&start); 1627 do_gettimeofday(&start);
1618 switch (param->test_num) { 1628 switch (param->test_num) {
1619 1629
1620 case 0: 1630 case 0:
@@ -1629,14 +1639,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1629 dev_info(&intf->dev, 1639 dev_info(&intf->dev,
1630 "TEST 1: write %d bytes %u times\n", 1640 "TEST 1: write %d bytes %u times\n",
1631 param->length, param->iterations); 1641 param->length, param->iterations);
1632 urb = simple_alloc_urb (udev, dev->out_pipe, param->length); 1642 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1633 if (!urb) { 1643 if (!urb) {
1634 retval = -ENOMEM; 1644 retval = -ENOMEM;
1635 break; 1645 break;
1636 } 1646 }
1637 // FIRMWARE: bulk sink (maybe accepts short writes) 1647 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1638 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1"); 1648 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1639 simple_free_urb (urb); 1649 simple_free_urb(urb);
1640 break; 1650 break;
1641 case 2: 1651 case 2:
1642 if (dev->in_pipe == 0) 1652 if (dev->in_pipe == 0)
@@ -1644,14 +1654,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1644 dev_info(&intf->dev, 1654 dev_info(&intf->dev,
1645 "TEST 2: read %d bytes %u times\n", 1655 "TEST 2: read %d bytes %u times\n",
1646 param->length, param->iterations); 1656 param->length, param->iterations);
1647 urb = simple_alloc_urb (udev, dev->in_pipe, param->length); 1657 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1648 if (!urb) { 1658 if (!urb) {
1649 retval = -ENOMEM; 1659 retval = -ENOMEM;
1650 break; 1660 break;
1651 } 1661 }
1652 // FIRMWARE: bulk source (maybe generates short writes) 1662 /* FIRMWARE: bulk source (maybe generates short writes) */
1653 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2"); 1663 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1654 simple_free_urb (urb); 1664 simple_free_urb(urb);
1655 break; 1665 break;
1656 case 3: 1666 case 3:
1657 if (dev->out_pipe == 0 || param->vary == 0) 1667 if (dev->out_pipe == 0 || param->vary == 0)
@@ -1659,15 +1669,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1659 dev_info(&intf->dev, 1669 dev_info(&intf->dev,
1660 "TEST 3: write/%d 0..%d bytes %u times\n", 1670 "TEST 3: write/%d 0..%d bytes %u times\n",
1661 param->vary, param->length, param->iterations); 1671 param->vary, param->length, param->iterations);
1662 urb = simple_alloc_urb (udev, dev->out_pipe, param->length); 1672 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1663 if (!urb) { 1673 if (!urb) {
1664 retval = -ENOMEM; 1674 retval = -ENOMEM;
1665 break; 1675 break;
1666 } 1676 }
1667 // FIRMWARE: bulk sink (maybe accepts short writes) 1677 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1668 retval = simple_io(dev, urb, param->iterations, param->vary, 1678 retval = simple_io(dev, urb, param->iterations, param->vary,
1669 0, "test3"); 1679 0, "test3");
1670 simple_free_urb (urb); 1680 simple_free_urb(urb);
1671 break; 1681 break;
1672 case 4: 1682 case 4:
1673 if (dev->in_pipe == 0 || param->vary == 0) 1683 if (dev->in_pipe == 0 || param->vary == 0)
@@ -1675,15 +1685,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1675 dev_info(&intf->dev, 1685 dev_info(&intf->dev,
1676 "TEST 4: read/%d 0..%d bytes %u times\n", 1686 "TEST 4: read/%d 0..%d bytes %u times\n",
1677 param->vary, param->length, param->iterations); 1687 param->vary, param->length, param->iterations);
1678 urb = simple_alloc_urb (udev, dev->in_pipe, param->length); 1688 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1679 if (!urb) { 1689 if (!urb) {
1680 retval = -ENOMEM; 1690 retval = -ENOMEM;
1681 break; 1691 break;
1682 } 1692 }
1683 // FIRMWARE: bulk source (maybe generates short writes) 1693 /* FIRMWARE: bulk source (maybe generates short writes) */
1684 retval = simple_io(dev, urb, param->iterations, param->vary, 1694 retval = simple_io(dev, urb, param->iterations, param->vary,
1685 0, "test4"); 1695 0, "test4");
1686 simple_free_urb (urb); 1696 simple_free_urb(urb);
1687 break; 1697 break;
1688 1698
1689 /* Queued bulk I/O tests */ 1699 /* Queued bulk I/O tests */
@@ -1694,15 +1704,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1694 "TEST 5: write %d sglists %d entries of %d bytes\n", 1704 "TEST 5: write %d sglists %d entries of %d bytes\n",
1695 param->iterations, 1705 param->iterations,
1696 param->sglen, param->length); 1706 param->sglen, param->length);
1697 sg = alloc_sglist (param->sglen, param->length, 0); 1707 sg = alloc_sglist(param->sglen, param->length, 0);
1698 if (!sg) { 1708 if (!sg) {
1699 retval = -ENOMEM; 1709 retval = -ENOMEM;
1700 break; 1710 break;
1701 } 1711 }
1702 // FIRMWARE: bulk sink (maybe accepts short writes) 1712 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1703 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 1713 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1704 &req, sg, param->sglen); 1714 &req, sg, param->sglen);
1705 free_sglist (sg, param->sglen); 1715 free_sglist(sg, param->sglen);
1706 break; 1716 break;
1707 1717
1708 case 6: 1718 case 6:
@@ -1712,15 +1722,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1712 "TEST 6: read %d sglists %d entries of %d bytes\n", 1722 "TEST 6: read %d sglists %d entries of %d bytes\n",
1713 param->iterations, 1723 param->iterations,
1714 param->sglen, param->length); 1724 param->sglen, param->length);
1715 sg = alloc_sglist (param->sglen, param->length, 0); 1725 sg = alloc_sglist(param->sglen, param->length, 0);
1716 if (!sg) { 1726 if (!sg) {
1717 retval = -ENOMEM; 1727 retval = -ENOMEM;
1718 break; 1728 break;
1719 } 1729 }
1720 // FIRMWARE: bulk source (maybe generates short writes) 1730 /* FIRMWARE: bulk source (maybe generates short writes) */
1721 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 1731 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1722 &req, sg, param->sglen); 1732 &req, sg, param->sglen);
1723 free_sglist (sg, param->sglen); 1733 free_sglist(sg, param->sglen);
1724 break; 1734 break;
1725 case 7: 1735 case 7:
1726 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0) 1736 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
@@ -1729,15 +1739,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1729 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n", 1739 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
1730 param->vary, param->iterations, 1740 param->vary, param->iterations,
1731 param->sglen, param->length); 1741 param->sglen, param->length);
1732 sg = alloc_sglist (param->sglen, param->length, param->vary); 1742 sg = alloc_sglist(param->sglen, param->length, param->vary);
1733 if (!sg) { 1743 if (!sg) {
1734 retval = -ENOMEM; 1744 retval = -ENOMEM;
1735 break; 1745 break;
1736 } 1746 }
1737 // FIRMWARE: bulk sink (maybe accepts short writes) 1747 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1738 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 1748 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1739 &req, sg, param->sglen); 1749 &req, sg, param->sglen);
1740 free_sglist (sg, param->sglen); 1750 free_sglist(sg, param->sglen);
1741 break; 1751 break;
1742 case 8: 1752 case 8:
1743 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0) 1753 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
@@ -1746,15 +1756,15 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1746 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n", 1756 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
1747 param->vary, param->iterations, 1757 param->vary, param->iterations,
1748 param->sglen, param->length); 1758 param->sglen, param->length);
1749 sg = alloc_sglist (param->sglen, param->length, param->vary); 1759 sg = alloc_sglist(param->sglen, param->length, param->vary);
1750 if (!sg) { 1760 if (!sg) {
1751 retval = -ENOMEM; 1761 retval = -ENOMEM;
1752 break; 1762 break;
1753 } 1763 }
1754 // FIRMWARE: bulk source (maybe generates short writes) 1764 /* FIRMWARE: bulk source (maybe generates short writes) */
1755 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 1765 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1756 &req, sg, param->sglen); 1766 &req, sg, param->sglen);
1757 free_sglist (sg, param->sglen); 1767 free_sglist(sg, param->sglen);
1758 break; 1768 break;
1759 1769
1760 /* non-queued sanity tests for control (chapter 9 subset) */ 1770 /* non-queued sanity tests for control (chapter 9 subset) */
@@ -1764,7 +1774,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1764 "TEST 9: ch9 (subset) control tests, %d times\n", 1774 "TEST 9: ch9 (subset) control tests, %d times\n",
1765 param->iterations); 1775 param->iterations);
1766 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1776 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1767 retval = ch9_postconfig (dev); 1777 retval = ch9_postconfig(dev);
1768 if (retval) 1778 if (retval)
1769 dev_err(&intf->dev, "ch9 subset failed, " 1779 dev_err(&intf->dev, "ch9 subset failed, "
1770 "iterations left %d\n", i); 1780 "iterations left %d\n", i);
@@ -1779,7 +1789,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1779 "TEST 10: queue %d control calls, %d times\n", 1789 "TEST 10: queue %d control calls, %d times\n",
1780 param->sglen, 1790 param->sglen,
1781 param->iterations); 1791 param->iterations);
1782 retval = test_ctrl_queue (dev, param); 1792 retval = test_ctrl_queue(dev, param);
1783 break; 1793 break;
1784 1794
1785 /* simple non-queued unlinks (ring with one urb) */ 1795 /* simple non-queued unlinks (ring with one urb) */
@@ -1790,7 +1800,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1790 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n", 1800 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
1791 param->iterations, param->length); 1801 param->iterations, param->length);
1792 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1802 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1793 retval = unlink_simple (dev, dev->in_pipe, 1803 retval = unlink_simple(dev, dev->in_pipe,
1794 param->length); 1804 param->length);
1795 if (retval) 1805 if (retval)
1796 dev_err(&intf->dev, "unlink reads failed %d, " 1806 dev_err(&intf->dev, "unlink reads failed %d, "
@@ -1803,7 +1813,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1803 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n", 1813 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
1804 param->iterations, param->length); 1814 param->iterations, param->length);
1805 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1815 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1806 retval = unlink_simple (dev, dev->out_pipe, 1816 retval = unlink_simple(dev, dev->out_pipe,
1807 param->length); 1817 param->length);
1808 if (retval) 1818 if (retval)
1809 dev_err(&intf->dev, "unlink writes failed %d, " 1819 dev_err(&intf->dev, "unlink writes failed %d, "
@@ -1818,7 +1828,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1818 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n", 1828 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
1819 param->iterations); 1829 param->iterations);
1820 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1830 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1821 retval = halt_simple (dev); 1831 retval = halt_simple(dev);
1822 1832
1823 if (retval) 1833 if (retval)
1824 ERROR(dev, "halts failed, iterations left %d\n", i); 1834 ERROR(dev, "halts failed, iterations left %d\n", i);
@@ -1844,8 +1854,8 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1844 "TEST 15: write %d iso, %d entries of %d bytes\n", 1854 "TEST 15: write %d iso, %d entries of %d bytes\n",
1845 param->iterations, 1855 param->iterations,
1846 param->sglen, param->length); 1856 param->sglen, param->length);
1847 // FIRMWARE: iso sink 1857 /* FIRMWARE: iso sink */
1848 retval = test_iso_queue (dev, param, 1858 retval = test_iso_queue(dev, param,
1849 dev->out_iso_pipe, dev->iso_out); 1859 dev->out_iso_pipe, dev->iso_out);
1850 break; 1860 break;
1851 1861
@@ -1857,17 +1867,17 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1857 "TEST 16: read %d iso, %d entries of %d bytes\n", 1867 "TEST 16: read %d iso, %d entries of %d bytes\n",
1858 param->iterations, 1868 param->iterations,
1859 param->sglen, param->length); 1869 param->sglen, param->length);
1860 // FIRMWARE: iso source 1870 /* FIRMWARE: iso source */
1861 retval = test_iso_queue (dev, param, 1871 retval = test_iso_queue(dev, param,
1862 dev->in_iso_pipe, dev->iso_in); 1872 dev->in_iso_pipe, dev->iso_in);
1863 break; 1873 break;
1864 1874
1865 // FIXME unlink from queue (ring with N urbs) 1875 /* FIXME unlink from queue (ring with N urbs) */
1866 1876
1867 // FIXME scatterlist cancel (needs helper thread) 1877 /* FIXME scatterlist cancel (needs helper thread) */
1868 1878
1869 } 1879 }
1870 do_gettimeofday (&param->duration); 1880 do_gettimeofday(&param->duration);
1871 param->duration.tv_sec -= start.tv_sec; 1881 param->duration.tv_sec -= start.tv_sec;
1872 param->duration.tv_usec -= start.tv_usec; 1882 param->duration.tv_usec -= start.tv_usec;
1873 if (param->duration.tv_usec < 0) { 1883 if (param->duration.tv_usec < 0) {
@@ -1880,22 +1890,22 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1880 1890
1881/*-------------------------------------------------------------------------*/ 1891/*-------------------------------------------------------------------------*/
1882 1892
1883static unsigned force_interrupt = 0; 1893static unsigned force_interrupt;
1884module_param (force_interrupt, uint, 0); 1894module_param(force_interrupt, uint, 0);
1885MODULE_PARM_DESC (force_interrupt, "0 = test default; else interrupt"); 1895MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
1886 1896
1887#ifdef GENERIC 1897#ifdef GENERIC
1888static unsigned short vendor; 1898static unsigned short vendor;
1889module_param(vendor, ushort, 0); 1899module_param(vendor, ushort, 0);
1890MODULE_PARM_DESC (vendor, "vendor code (from usb-if)"); 1900MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
1891 1901
1892static unsigned short product; 1902static unsigned short product;
1893module_param(product, ushort, 0); 1903module_param(product, ushort, 0);
1894MODULE_PARM_DESC (product, "product code (from vendor)"); 1904MODULE_PARM_DESC(product, "product code (from vendor)");
1895#endif 1905#endif
1896 1906
1897static int 1907static int
1898usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id) 1908usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
1899{ 1909{
1900 struct usb_device *udev; 1910 struct usb_device *udev;
1901 struct usbtest_dev *dev; 1911 struct usbtest_dev *dev;
@@ -1903,7 +1913,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1903 char *rtest, *wtest; 1913 char *rtest, *wtest;
1904 char *irtest, *iwtest; 1914 char *irtest, *iwtest;
1905 1915
1906 udev = interface_to_usbdev (intf); 1916 udev = interface_to_usbdev(intf);
1907 1917
1908#ifdef GENERIC 1918#ifdef GENERIC
1909 /* specify devices by module parameters? */ 1919 /* specify devices by module parameters? */
@@ -1930,8 +1940,9 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1930 dev->intf = intf; 1940 dev->intf = intf;
1931 1941
1932 /* cacheline-aligned scratch for i/o */ 1942 /* cacheline-aligned scratch for i/o */
1933 if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) { 1943 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
1934 kfree (dev); 1944 if (dev->buf == NULL) {
1945 kfree(dev);
1935 return -ENOMEM; 1946 return -ENOMEM;
1936 } 1947 }
1937 1948
@@ -1943,18 +1954,18 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1943 irtest = iwtest = ""; 1954 irtest = iwtest = "";
1944 if (force_interrupt || udev->speed == USB_SPEED_LOW) { 1955 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
1945 if (info->ep_in) { 1956 if (info->ep_in) {
1946 dev->in_pipe = usb_rcvintpipe (udev, info->ep_in); 1957 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
1947 rtest = " intr-in"; 1958 rtest = " intr-in";
1948 } 1959 }
1949 if (info->ep_out) { 1960 if (info->ep_out) {
1950 dev->out_pipe = usb_sndintpipe (udev, info->ep_out); 1961 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
1951 wtest = " intr-out"; 1962 wtest = " intr-out";
1952 } 1963 }
1953 } else { 1964 } else {
1954 if (info->autoconf) { 1965 if (info->autoconf) {
1955 int status; 1966 int status;
1956 1967
1957 status = get_endpoints (dev, intf); 1968 status = get_endpoints(dev, intf);
1958 if (status < 0) { 1969 if (status < 0) {
1959 WARNING(dev, "couldn't get endpoints, %d\n", 1970 WARNING(dev, "couldn't get endpoints, %d\n",
1960 status); 1971 status);
@@ -1963,10 +1974,10 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1963 /* may find bulk or ISO pipes */ 1974 /* may find bulk or ISO pipes */
1964 } else { 1975 } else {
1965 if (info->ep_in) 1976 if (info->ep_in)
1966 dev->in_pipe = usb_rcvbulkpipe (udev, 1977 dev->in_pipe = usb_rcvbulkpipe(udev,
1967 info->ep_in); 1978 info->ep_in);
1968 if (info->ep_out) 1979 if (info->ep_out)
1969 dev->out_pipe = usb_sndbulkpipe (udev, 1980 dev->out_pipe = usb_sndbulkpipe(udev,
1970 info->ep_out); 1981 info->ep_out);
1971 } 1982 }
1972 if (dev->in_pipe) 1983 if (dev->in_pipe)
@@ -1979,15 +1990,23 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1979 iwtest = " iso-out"; 1990 iwtest = " iso-out";
1980 } 1991 }
1981 1992
1982 usb_set_intfdata (intf, dev); 1993 usb_set_intfdata(intf, dev);
1983 dev_info (&intf->dev, "%s\n", info->name); 1994 dev_info(&intf->dev, "%s\n", info->name);
1984 dev_info (&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n", 1995 dev_info(&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n",
1985 ({ char *tmp; 1996 ({ char *tmp;
1986 switch (udev->speed) { 1997 switch (udev->speed) {
1987 case USB_SPEED_LOW: tmp = "low"; break; 1998 case USB_SPEED_LOW:
1988 case USB_SPEED_FULL: tmp = "full"; break; 1999 tmp = "low";
1989 case USB_SPEED_HIGH: tmp = "high"; break; 2000 break;
1990 default: tmp = "unknown"; break; 2001 case USB_SPEED_FULL:
2002 tmp = "full";
2003 break;
2004 case USB_SPEED_HIGH:
2005 tmp = "high";
2006 break;
2007 default:
2008 tmp = "unknown";
2009 break;
1991 }; tmp; }), 2010 }; tmp; }),
1992 info->ctrl_out ? " in/out" : "", 2011 info->ctrl_out ? " in/out" : "",
1993 rtest, wtest, 2012 rtest, wtest,
@@ -1996,24 +2015,24 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1996 return 0; 2015 return 0;
1997} 2016}
1998 2017
1999static int usbtest_suspend (struct usb_interface *intf, pm_message_t message) 2018static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2000{ 2019{
2001 return 0; 2020 return 0;
2002} 2021}
2003 2022
2004static int usbtest_resume (struct usb_interface *intf) 2023static int usbtest_resume(struct usb_interface *intf)
2005{ 2024{
2006 return 0; 2025 return 0;
2007} 2026}
2008 2027
2009 2028
2010static void usbtest_disconnect (struct usb_interface *intf) 2029static void usbtest_disconnect(struct usb_interface *intf)
2011{ 2030{
2012 struct usbtest_dev *dev = usb_get_intfdata (intf); 2031 struct usbtest_dev *dev = usb_get_intfdata(intf);
2013 2032
2014 usb_set_intfdata (intf, NULL); 2033 usb_set_intfdata(intf, NULL);
2015 dev_dbg (&intf->dev, "disconnect\n"); 2034 dev_dbg(&intf->dev, "disconnect\n");
2016 kfree (dev); 2035 kfree(dev);
2017} 2036}
2018 2037
2019/* Basic testing only needs a device that can source or sink bulk traffic. 2038/* Basic testing only needs a device that can source or sink bulk traffic.
@@ -2050,9 +2069,9 @@ static struct usbtest_info fw_info = {
2050 .ep_in = 2, 2069 .ep_in = 2,
2051 .ep_out = 2, 2070 .ep_out = 2,
2052 .alt = 1, 2071 .alt = 1,
2053 .autoconf = 1, // iso and ctrl_out need autoconf 2072 .autoconf = 1, /* iso and ctrl_out need autoconf */
2054 .ctrl_out = 1, 2073 .ctrl_out = 1,
2055 .iso = 1, // iso_ep's are #8 in/out 2074 .iso = 1, /* iso_ep's are #8 in/out */
2056}; 2075};
2057 2076
2058/* peripheral running Linux and 'zero.c' test firmware, or 2077/* peripheral running Linux and 'zero.c' test firmware, or
@@ -2109,56 +2128,56 @@ static const struct usb_device_id id_table[] = {
2109 */ 2128 */
2110 2129
2111 /* generic EZ-USB FX controller */ 2130 /* generic EZ-USB FX controller */
2112 { USB_DEVICE (0x0547, 0x2235), 2131 { USB_DEVICE(0x0547, 0x2235),
2113 .driver_info = (unsigned long) &ez1_info, 2132 .driver_info = (unsigned long) &ez1_info,
2114 }, 2133 },
2115 2134
2116 /* CY3671 development board with EZ-USB FX */ 2135 /* CY3671 development board with EZ-USB FX */
2117 { USB_DEVICE (0x0547, 0x0080), 2136 { USB_DEVICE(0x0547, 0x0080),
2118 .driver_info = (unsigned long) &ez1_info, 2137 .driver_info = (unsigned long) &ez1_info,
2119 }, 2138 },
2120 2139
2121 /* generic EZ-USB FX2 controller (or development board) */ 2140 /* generic EZ-USB FX2 controller (or development board) */
2122 { USB_DEVICE (0x04b4, 0x8613), 2141 { USB_DEVICE(0x04b4, 0x8613),
2123 .driver_info = (unsigned long) &ez2_info, 2142 .driver_info = (unsigned long) &ez2_info,
2124 }, 2143 },
2125 2144
2126 /* re-enumerated usb test device firmware */ 2145 /* re-enumerated usb test device firmware */
2127 { USB_DEVICE (0xfff0, 0xfff0), 2146 { USB_DEVICE(0xfff0, 0xfff0),
2128 .driver_info = (unsigned long) &fw_info, 2147 .driver_info = (unsigned long) &fw_info,
2129 }, 2148 },
2130 2149
2131 /* "Gadget Zero" firmware runs under Linux */ 2150 /* "Gadget Zero" firmware runs under Linux */
2132 { USB_DEVICE (0x0525, 0xa4a0), 2151 { USB_DEVICE(0x0525, 0xa4a0),
2133 .driver_info = (unsigned long) &gz_info, 2152 .driver_info = (unsigned long) &gz_info,
2134 }, 2153 },
2135 2154
2136 /* so does a user-mode variant */ 2155 /* so does a user-mode variant */
2137 { USB_DEVICE (0x0525, 0xa4a4), 2156 { USB_DEVICE(0x0525, 0xa4a4),
2138 .driver_info = (unsigned long) &um_info, 2157 .driver_info = (unsigned long) &um_info,
2139 }, 2158 },
2140 2159
2141 /* ... and a user-mode variant that talks iso */ 2160 /* ... and a user-mode variant that talks iso */
2142 { USB_DEVICE (0x0525, 0xa4a3), 2161 { USB_DEVICE(0x0525, 0xa4a3),
2143 .driver_info = (unsigned long) &um2_info, 2162 .driver_info = (unsigned long) &um2_info,
2144 }, 2163 },
2145 2164
2146#ifdef KEYSPAN_19Qi 2165#ifdef KEYSPAN_19Qi
2147 /* Keyspan 19qi uses an21xx (original EZ-USB) */ 2166 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2148 // this does not coexist with the real Keyspan 19qi driver! 2167 /* this does not coexist with the real Keyspan 19qi driver! */
2149 { USB_DEVICE (0x06cd, 0x010b), 2168 { USB_DEVICE(0x06cd, 0x010b),
2150 .driver_info = (unsigned long) &ez1_info, 2169 .driver_info = (unsigned long) &ez1_info,
2151 }, 2170 },
2152#endif 2171#endif
2153 2172
2154 /*-------------------------------------------------------------*/ 2173 /*-------------------------------------------------------------*/
2155 2174
2156#ifdef IBOT2 2175#ifdef IBOT2
2157 /* iBOT2 makes a nice source of high speed bulk-in data */ 2176 /* iBOT2 makes a nice source of high speed bulk-in data */
2158 // this does not coexist with a real iBOT2 driver! 2177 /* this does not coexist with a real iBOT2 driver! */
2159 { USB_DEVICE (0x0b62, 0x0059), 2178 { USB_DEVICE(0x0b62, 0x0059),
2160 .driver_info = (unsigned long) &ibot2_info, 2179 .driver_info = (unsigned long) &ibot2_info,
2161 }, 2180 },
2162#endif 2181#endif
2163 2182
2164 /*-------------------------------------------------------------*/ 2183 /*-------------------------------------------------------------*/
@@ -2172,7 +2191,7 @@ static const struct usb_device_id id_table[] = {
2172 2191
2173 { } 2192 { }
2174}; 2193};
2175MODULE_DEVICE_TABLE (usb, id_table); 2194MODULE_DEVICE_TABLE(usb, id_table);
2176 2195
2177static struct usb_driver usbtest_driver = { 2196static struct usb_driver usbtest_driver = {
2178 .name = "usbtest", 2197 .name = "usbtest",
@@ -2186,22 +2205,22 @@ static struct usb_driver usbtest_driver = {
2186 2205
2187/*-------------------------------------------------------------------------*/ 2206/*-------------------------------------------------------------------------*/
2188 2207
2189static int __init usbtest_init (void) 2208static int __init usbtest_init(void)
2190{ 2209{
2191#ifdef GENERIC 2210#ifdef GENERIC
2192 if (vendor) 2211 if (vendor)
2193 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product); 2212 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2194#endif 2213#endif
2195 return usb_register (&usbtest_driver); 2214 return usb_register(&usbtest_driver);
2196} 2215}
2197module_init (usbtest_init); 2216module_init(usbtest_init);
2198 2217
2199static void __exit usbtest_exit (void) 2218static void __exit usbtest_exit(void)
2200{ 2219{
2201 usb_deregister (&usbtest_driver); 2220 usb_deregister(&usbtest_driver);
2202} 2221}
2203module_exit (usbtest_exit); 2222module_exit(usbtest_exit);
2204 2223
2205MODULE_DESCRIPTION ("USB Core/HCD Testing Driver"); 2224MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2206MODULE_LICENSE ("GPL"); 2225MODULE_LICENSE("GPL");
2207 2226
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
new file mode 100644
index 000000000000..719c6180b31f
--- /dev/null
+++ b/drivers/usb/misc/yurex.c
@@ -0,0 +1,563 @@
1/*
2 * Driver for Meywa-Denki & KAYAC YUREX
3 *
4 * Copyright (C) 2010 Tomoki Sekiyama (tomoki.sekiyama@gmail.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/kref.h>
18#include <linux/mutex.h>
19#include <linux/uaccess.h>
20#include <linux/usb.h>
21#include <linux/hid.h>
22
23#define DRIVER_AUTHOR "Tomoki Sekiyama"
24#define DRIVER_DESC "Driver for Meywa-Denki & KAYAC YUREX"
25
26#define YUREX_VENDOR_ID 0x0c45
27#define YUREX_PRODUCT_ID 0x1010
28
29#define CMD_ACK '!'
30#define CMD_ANIMATE 'A'
31#define CMD_COUNT 'C'
32#define CMD_LED 'L'
33#define CMD_READ 'R'
34#define CMD_SET 'S'
35#define CMD_VERSION 'V'
36#define CMD_EOF 0x0d
37#define CMD_PADDING 0xff
38
39#define YUREX_BUF_SIZE 8
40#define YUREX_WRITE_TIMEOUT (HZ*2)
41
42/* table of devices that work with this driver */
43static struct usb_device_id yurex_table[] = {
44 { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) },
45 { } /* Terminating entry */
46};
47MODULE_DEVICE_TABLE(usb, yurex_table);
48
49#ifdef CONFIG_USB_DYNAMIC_MINORS
50#define YUREX_MINOR_BASE 0
51#else
52#define YUREX_MINOR_BASE 192
53#endif
54
55/* Structure to hold all of our device specific stuff */
56struct usb_yurex {
57 struct usb_device *udev;
58 struct usb_interface *interface;
59 __u8 int_in_endpointAddr;
60 struct urb *urb; /* URB for interrupt in */
 61 unsigned char *int_buffer; /* buffer for interrupt in */
62 struct urb *cntl_urb; /* URB for control msg */
63 struct usb_ctrlrequest *cntl_req; /* req for control msg */
64 unsigned char *cntl_buffer; /* buffer for control msg */
65
66 struct kref kref;
67 struct mutex io_mutex;
68 struct fasync_struct *async_queue;
69 wait_queue_head_t waitq;
70
71 spinlock_t lock;
72 __s64 bbu; /* BBU from device */
73};
74#define to_yurex_dev(d) container_of(d, struct usb_yurex, kref)
75
76static struct usb_driver yurex_driver;
77static const struct file_operations yurex_fops;
78
79
80static void yurex_control_callback(struct urb *urb)
81{
82 struct usb_yurex *dev = urb->context;
83 int status = urb->status;
84
85 if (status) {
86 err("%s - control failed: %d\n", __func__, status);
87 wake_up_interruptible(&dev->waitq);
88 return;
89 }
90 /* on success, sender woken up by CMD_ACK int in, or timeout */
91}
92
93static void yurex_delete(struct kref *kref)
94{
95 struct usb_yurex *dev = to_yurex_dev(kref);
96
97 dbg("yurex_delete");
98
99 usb_put_dev(dev->udev);
100 if (dev->cntl_urb) {
101 usb_kill_urb(dev->cntl_urb);
102 if (dev->cntl_req)
103 usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
104 dev->cntl_req, dev->cntl_urb->setup_dma);
105 if (dev->cntl_buffer)
106 usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
107 dev->cntl_buffer, dev->cntl_urb->transfer_dma);
108 usb_free_urb(dev->cntl_urb);
109 }
110 if (dev->urb) {
111 usb_kill_urb(dev->urb);
112 if (dev->int_buffer)
113 usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
114 dev->int_buffer, dev->urb->transfer_dma);
115 usb_free_urb(dev->urb);
116 }
117 kfree(dev);
118}
119
120/*
121 * usb class driver info in order to get a minor number from the usb core,
122 * and to have the device registered with the driver core
123 */
124static struct usb_class_driver yurex_class = {
125 .name = "yurex%d",
126 .fops = &yurex_fops,
127 .minor_base = YUREX_MINOR_BASE,
128};
129
130static void yurex_interrupt(struct urb *urb)
131{
132 struct usb_yurex *dev = urb->context;
133 unsigned char *buf = dev->int_buffer;
134 int status = urb->status;
135 unsigned long flags;
136 int retval, i;
137
138 switch (status) {
139 case 0: /*success*/
140 break;
141 case -EOVERFLOW:
142 err("%s - overflow with length %d, actual length is %d",
143 __func__, YUREX_BUF_SIZE, dev->urb->actual_length);
144 case -ECONNRESET:
145 case -ENOENT:
146 case -ESHUTDOWN:
147 case -EILSEQ:
148 /* The device is terminated, clean up */
149 return;
150 default:
151 err("%s - unknown status received: %d", __func__, status);
152 goto exit;
153 }
154
155 /* handle received message */
156 switch (buf[0]) {
157 case CMD_COUNT:
158 case CMD_READ:
159 if (buf[6] == CMD_EOF) {
160 spin_lock_irqsave(&dev->lock, flags);
161 dev->bbu = 0;
162 for (i = 1; i < 6; i++) {
163 dev->bbu += buf[i];
164 if (i != 5)
165 dev->bbu <<= 8;
166 }
167 dbg("%s count: %lld", __func__, dev->bbu);
168 spin_unlock_irqrestore(&dev->lock, flags);
169
170 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
171 }
172 else
173 dbg("data format error - no EOF");
174 break;
175 case CMD_ACK:
176 dbg("%s ack: %c", __func__, buf[1]);
177 wake_up_interruptible(&dev->waitq);
178 break;
179 }
180
181exit:
182 retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
183 if (retval) {
184 err("%s - usb_submit_urb failed: %d",
185 __func__, retval);
186 }
187}
188
189static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id)
190{
191 struct usb_yurex *dev;
192 struct usb_host_interface *iface_desc;
193 struct usb_endpoint_descriptor *endpoint;
194 int retval = -ENOMEM;
195 int i;
196 DEFINE_WAIT(wait);
197
198 /* allocate memory for our device state and initialize it */
199 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
200 if (!dev) {
201 err("Out of memory");
202 goto error;
203 }
204 kref_init(&dev->kref);
205 mutex_init(&dev->io_mutex);
206 spin_lock_init(&dev->lock);
207 init_waitqueue_head(&dev->waitq);
208
209 dev->udev = usb_get_dev(interface_to_usbdev(interface));
210 dev->interface = interface;
211
212 /* set up the endpoint information */
213 iface_desc = interface->cur_altsetting;
214 for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
215 endpoint = &iface_desc->endpoint[i].desc;
216
217 if (usb_endpoint_is_int_in(endpoint)) {
218 dev->int_in_endpointAddr = endpoint->bEndpointAddress;
219 break;
220 }
221 }
222 if (!dev->int_in_endpointAddr) {
223 retval = -ENODEV;
224 err("Could not find endpoints");
225 goto error;
226 }
227
228
229 /* allocate control URB */
230 dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
231 if (!dev->cntl_urb) {
232 err("Could not allocate control URB");
233 goto error;
234 }
235
236 /* allocate buffer for control req */
237 dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
238 GFP_KERNEL,
239 &dev->cntl_urb->setup_dma);
240 if (!dev->cntl_req) {
241 err("Could not allocate cntl_req");
242 goto error;
243 }
244
245 /* allocate buffer for control msg */
246 dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
247 GFP_KERNEL,
248 &dev->cntl_urb->transfer_dma);
249 if (!dev->cntl_buffer) {
250 err("Could not allocate cntl_buffer");
251 goto error;
252 }
253
254 /* configure control URB */
255 dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS |
256 USB_RECIP_INTERFACE;
257 dev->cntl_req->bRequest = HID_REQ_SET_REPORT;
258 dev->cntl_req->wValue = cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8);
259 dev->cntl_req->wIndex = cpu_to_le16(iface_desc->desc.bInterfaceNumber);
260 dev->cntl_req->wLength = cpu_to_le16(YUREX_BUF_SIZE);
261
262 usb_fill_control_urb(dev->cntl_urb, dev->udev,
263 usb_sndctrlpipe(dev->udev, 0),
264 (void *)dev->cntl_req, dev->cntl_buffer,
265 YUREX_BUF_SIZE, yurex_control_callback, dev);
266 dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
267
268
269 /* allocate interrupt URB */
270 dev->urb = usb_alloc_urb(0, GFP_KERNEL);
271 if (!dev->urb) {
272 err("Could not allocate URB");
273 goto error;
274 }
275
276 /* allocate buffer for interrupt in */
277 dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
278 GFP_KERNEL, &dev->urb->transfer_dma);
279 if (!dev->int_buffer) {
280 err("Could not allocate int_buffer");
281 goto error;
282 }
283
284 /* configure interrupt URB */
285 usb_fill_int_urb(dev->urb, dev->udev,
286 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
287 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
288 dev, 1);
289 dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
290 if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
291 retval = -EIO;
292 err("Could not submitting URB");
293 goto error;
294 }
295
296 /* save our data pointer in this interface device */
297 usb_set_intfdata(interface, dev);
298
299 /* we can register the device now, as it is ready */
300 retval = usb_register_dev(interface, &yurex_class);
301 if (retval) {
302 err("Not able to get a minor for this device.");
303 usb_set_intfdata(interface, NULL);
304 goto error;
305 }
306
307 dev->bbu = -1;
308
309 dev_info(&interface->dev,
310 "USB YUREX device now attached to Yurex #%d\n",
311 interface->minor);
312
313 return 0;
314
315error:
316 if (dev)
317 /* this frees allocated memory */
318 kref_put(&dev->kref, yurex_delete);
319 return retval;
320}
321
322static void yurex_disconnect(struct usb_interface *interface)
323{
324 struct usb_yurex *dev;
325 int minor = interface->minor;
326
327 dev = usb_get_intfdata(interface);
328 usb_set_intfdata(interface, NULL);
329
330 /* give back our minor */
331 usb_deregister_dev(interface, &yurex_class);
332
333 /* prevent more I/O from starting */
334 mutex_lock(&dev->io_mutex);
335 dev->interface = NULL;
336 mutex_unlock(&dev->io_mutex);
337
338 /* wakeup waiters */
339 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
340 wake_up_interruptible(&dev->waitq);
341
342 /* decrement our usage count */
343 kref_put(&dev->kref, yurex_delete);
344
345 dev_info(&interface->dev, "USB YUREX #%d now disconnected\n", minor);
346}
347
348static struct usb_driver yurex_driver = {
349 .name = "yurex",
350 .probe = yurex_probe,
351 .disconnect = yurex_disconnect,
352 .id_table = yurex_table,
353};
354
355
356static int yurex_fasync(int fd, struct file *file, int on)
357{
358 struct usb_yurex *dev;
359
360 dev = (struct usb_yurex *)file->private_data;
361 return fasync_helper(fd, file, on, &dev->async_queue);
362}
363
364static int yurex_open(struct inode *inode, struct file *file)
365{
366 struct usb_yurex *dev;
367 struct usb_interface *interface;
368 int subminor;
369 int retval = 0;
370
371 subminor = iminor(inode);
372
373 interface = usb_find_interface(&yurex_driver, subminor);
374 if (!interface) {
375 err("%s - error, can't find device for minor %d",
376 __func__, subminor);
377 retval = -ENODEV;
378 goto exit;
379 }
380
381 dev = usb_get_intfdata(interface);
382 if (!dev) {
383 retval = -ENODEV;
384 goto exit;
385 }
386
387 /* increment our usage count for the device */
388 kref_get(&dev->kref);
389
390 /* save our object in the file's private structure */
391 mutex_lock(&dev->io_mutex);
392 file->private_data = dev;
393 mutex_unlock(&dev->io_mutex);
394
395exit:
396 return retval;
397}
398
399static int yurex_release(struct inode *inode, struct file *file)
400{
401 struct usb_yurex *dev;
402
403 dev = (struct usb_yurex *)file->private_data;
404 if (dev == NULL)
405 return -ENODEV;
406
407 yurex_fasync(-1, file, 0);
408
409 /* decrement the count on our device */
410 kref_put(&dev->kref, yurex_delete);
411 return 0;
412}
413
414static ssize_t yurex_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
415{
416 struct usb_yurex *dev;
417 int retval = 0;
418 int bytes_read = 0;
419 char in_buffer[20];
420 unsigned long flags;
421
422 dev = (struct usb_yurex *)file->private_data;
423
424 mutex_lock(&dev->io_mutex);
425 if (!dev->interface) { /* already disconnected */
426 retval = -ENODEV;
427 goto exit;
428 }
429
430 spin_lock_irqsave(&dev->lock, flags);
431 bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
432 spin_unlock_irqrestore(&dev->lock, flags);
433
434 if (*ppos < bytes_read) {
435 if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
436 retval = -EFAULT;
437 else {
438 retval = bytes_read - *ppos;
439 *ppos += bytes_read;
440 }
441 }
442
443exit:
444 mutex_unlock(&dev->io_mutex);
445 return retval;
446}
447
448static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t count, loff_t *ppos)
449{
450 struct usb_yurex *dev;
451 int i, set = 0, retval = 0;
452 char buffer[16];
453 char *data = buffer;
454 unsigned long long c, c2 = 0;
455 signed long timeout = 0;
456 DEFINE_WAIT(wait);
457
458 count = min(sizeof(buffer), count);
459 dev = (struct usb_yurex *)file->private_data;
460
461 /* verify that we actually have some data to write */
462 if (count == 0)
463 goto error;
464
465 mutex_lock(&dev->io_mutex);
 466 if (!dev->interface) { /* already disconnected */
467 mutex_unlock(&dev->io_mutex);
468 retval = -ENODEV;
469 goto error;
470 }
471
472 if (copy_from_user(buffer, user_buffer, count)) {
473 mutex_unlock(&dev->io_mutex);
474 retval = -EFAULT;
475 goto error;
476 }
477 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
478
479 switch (buffer[0]) {
480 case CMD_ANIMATE:
481 case CMD_LED:
482 dev->cntl_buffer[0] = buffer[0];
483 dev->cntl_buffer[1] = buffer[1];
484 dev->cntl_buffer[2] = CMD_EOF;
485 break;
486 case CMD_READ:
487 case CMD_VERSION:
488 dev->cntl_buffer[0] = buffer[0];
489 dev->cntl_buffer[1] = 0x00;
490 dev->cntl_buffer[2] = CMD_EOF;
491 break;
492 case CMD_SET:
493 data++;
494 /* FALL THROUGH */
495 case '0' ... '9':
496 set = 1;
497 c = c2 = simple_strtoull(data, NULL, 0);
498 dev->cntl_buffer[0] = CMD_SET;
499 for (i = 1; i < 6; i++) {
500 dev->cntl_buffer[i] = (c>>32) & 0xff;
501 c <<= 8;
502 }
503 buffer[6] = CMD_EOF;
504 break;
505 default:
506 mutex_unlock(&dev->io_mutex);
507 return -EINVAL;
508 }
509
510 /* send the data as the control msg */
511 prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
512 dbg("%s - submit %c", __func__, dev->cntl_buffer[0]);
513 retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
514 if (retval >= 0)
515 timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
516 finish_wait(&dev->waitq, &wait);
517
518 mutex_unlock(&dev->io_mutex);
519
520 if (retval < 0) {
521 err("%s - failed to send bulk msg, error %d", __func__, retval);
522 goto error;
523 }
524 if (set && timeout)
525 dev->bbu = c2;
526 return timeout ? count : -EIO;
527
528error:
529 return retval;
530}
531
532static const struct file_operations yurex_fops = {
533 .owner = THIS_MODULE,
534 .read = yurex_read,
535 .write = yurex_write,
536 .open = yurex_open,
537 .release = yurex_release,
538 .fasync = yurex_fasync,
539};
540
541
542static int __init usb_yurex_init(void)
543{
544 int result;
545
546 /* register this driver with the USB subsystem */
547 result = usb_register(&yurex_driver);
548 if (result)
549 err("usb_register failed. Error number %d", result);
550
551 return result;
552}
553
554static void __exit usb_yurex_exit(void)
555{
556 /* deregister this driver with the USB subsystem */
557 usb_deregister(&yurex_driver);
558}
559
560module_init(usb_yurex_init);
561module_exit(usb_yurex_exit);
562
563MODULE_LICENSE("GPL");
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 384b198faa7c..8ed24ab08698 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -2,6 +2,6 @@
2# Makefile for USB monitor 2# Makefile for USB monitor
3# 3#
4 4
5usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o 5usbmon-y := mon_main.o mon_stat.o mon_text.o mon_bin.o
6 6
7obj-$(CONFIG_USB_MON) += usbmon.o 7obj-$(CONFIG_USB_MON) += usbmon.o
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index cfd38edfcf9e..341a37a469bd 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -45,6 +45,9 @@ config USB_MUSB_SOC
45comment "DaVinci 35x and 644x USB support" 45comment "DaVinci 35x and 644x USB support"
46 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx 46 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx
47 47
48comment "DA8xx/OMAP-L1x USB support"
49 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DA8XX
50
48comment "OMAP 243x high speed USB support" 51comment "OMAP 243x high speed USB support"
49 depends on USB_MUSB_HDRC && ARCH_OMAP2430 52 depends on USB_MUSB_HDRC && ARCH_OMAP2430
50 53
@@ -57,6 +60,17 @@ comment "OMAP 44xx high speed USB support"
57comment "Blackfin high speed USB Support" 60comment "Blackfin high speed USB Support"
58 depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523)) 61 depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
59 62
63config USB_MUSB_AM35X
64 bool
65 depends on USB_MUSB_HDRC && !ARCH_OMAP2430 && !ARCH_OMAP4
66 select NOP_USB_XCEIV
67 default MACH_OMAP3517EVM
68 help
69 Select this option if your platform is based on AM35x. As
70 AM35x has an updated MUSB with CPPI4.1 DMA so this config
71 is introduced to differentiate musb ip between OMAP3x and
72 AM35x platforms.
73
60config USB_TUSB6010 74config USB_TUSB6010
61 boolean "TUSB 6010 support" 75 boolean "TUSB 6010 support"
62 depends on USB_MUSB_HDRC && !USB_MUSB_SOC 76 depends on USB_MUSB_HDRC && !USB_MUSB_SOC
@@ -144,7 +158,7 @@ config USB_MUSB_HDRC_HCD
144config MUSB_PIO_ONLY 158config MUSB_PIO_ONLY
145 bool 'Disable DMA (always use PIO)' 159 bool 'Disable DMA (always use PIO)'
146 depends on USB_MUSB_HDRC 160 depends on USB_MUSB_HDRC
147 default y if USB_TUSB6010 161 default USB_TUSB6010 || ARCH_DAVINCI_DA8XX || USB_MUSB_AM35X
148 help 162 help
149 All data is copied between memory and FIFO by the CPU. 163 All data is copied between memory and FIFO by the CPU.
150 DMA controllers are ignored. 164 DMA controllers are ignored.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 9705f716386e..ce164e8998d8 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -2,49 +2,27 @@
2# for USB OTG silicon based on Mentor Graphics INVENTRA designs 2# for USB OTG silicon based on Mentor Graphics INVENTRA designs
3# 3#
4 4
5musb_hdrc-objs := musb_core.o 5ccflags-$(CONFIG_USB_MUSB_DEBUG) := -DDEBUG
6 6
7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o 7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
8 8
9ifeq ($(CONFIG_ARCH_DAVINCI_DMx),y) 9musb_hdrc-y := musb_core.o
10 musb_hdrc-objs += davinci.o
11endif
12
13ifeq ($(CONFIG_USB_TUSB6010),y)
14 musb_hdrc-objs += tusb6010.o
15endif
16
17ifeq ($(CONFIG_ARCH_OMAP2430),y)
18 musb_hdrc-objs += omap2430.o
19endif
20
21ifeq ($(CONFIG_ARCH_OMAP3430),y)
22 musb_hdrc-objs += omap2430.o
23endif
24
25ifeq ($(CONFIG_ARCH_OMAP4),y)
26 musb_hdrc-objs += omap2430.o
27endif
28
29ifeq ($(CONFIG_BF54x),y)
30 musb_hdrc-objs += blackfin.o
31endif
32 10
33ifeq ($(CONFIG_BF52x),y) 11musb_hdrc-$(CONFIG_ARCH_DAVINCI_DMx) += davinci.o
34 musb_hdrc-objs += blackfin.o 12musb_hdrc-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx.o
35endif 13musb_hdrc-$(CONFIG_USB_TUSB6010) += tusb6010.o
36 14musb_hdrc-$(CONFIG_ARCH_OMAP2430) += omap2430.o
37ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) 15ifeq ($(CONFIG_USB_MUSB_AM35X),y)
38 musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o 16 musb_hdrc-$(CONFIG_ARCH_OMAP3430) += am35x.o
39endif 17else
40 18 musb_hdrc-$(CONFIG_ARCH_OMAP3430) += omap2430.o
41ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
42 musb_hdrc-objs += musb_virthub.o musb_host.o
43endif
44
45ifeq ($(CONFIG_DEBUG_FS),y)
46 musb_hdrc-objs += musb_debugfs.o
47endif 19endif
20musb_hdrc-$(CONFIG_ARCH_OMAP4) += omap2430.o
21musb_hdrc-$(CONFIG_BF54x) += blackfin.o
22musb_hdrc-$(CONFIG_BF52x) += blackfin.o
23musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC) += musb_gadget_ep0.o musb_gadget.o
24musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD) += musb_virthub.o musb_host.o
25musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
48 26
49# the kconfig must guarantee that only one of the 27# the kconfig must guarantee that only one of the
50# possible I/O schemes will be enabled at a time ... 28# possible I/O schemes will be enabled at a time ...
@@ -54,26 +32,17 @@ endif
54ifneq ($(CONFIG_MUSB_PIO_ONLY),y) 32ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
55 33
56 ifeq ($(CONFIG_USB_INVENTRA_DMA),y) 34 ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
57 musb_hdrc-objs += musbhsdma.o 35 musb_hdrc-y += musbhsdma.o
58 36
59 else 37 else
60 ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) 38 ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
61 musb_hdrc-objs += cppi_dma.o 39 musb_hdrc-y += cppi_dma.o
62 40
63 else 41 else
64 ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) 42 ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
65 musb_hdrc-objs += tusb6010_omap.o 43 musb_hdrc-y += tusb6010_omap.o
66 44
67 endif 45 endif
68 endif 46 endif
69 endif 47 endif
70endif 48endif
71
72
73################################################################################
74
75# Debugging
76
77ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
78 EXTRA_CFLAGS += -DDEBUG
79endif
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
new file mode 100644
index 000000000000..b0aabf3a606f
--- /dev/null
+++ b/drivers/usb/musb/am35x.c
@@ -0,0 +1,524 @@
1/*
2 * Texas Instruments AM35x "glue layer"
3 *
4 * Copyright (c) 2010, by Texas Instruments
5 *
6 * Based on the DA8xx "glue layer" code.
7 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This file is part of the Inventra Controller Driver for Linux.
10 *
11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software
14 * Foundation.
15 *
16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28
29#include <linux/init.h>
30#include <linux/clk.h>
31#include <linux/io.h>
32
33#include <plat/control.h>
34#include <plat/usb.h>
35
36#include "musb_core.h"
37
38/*
39 * AM35x specific definitions
40 */
41/* USB 2.0 OTG module registers */
42#define USB_REVISION_REG 0x00
43#define USB_CTRL_REG 0x04
44#define USB_STAT_REG 0x08
45#define USB_EMULATION_REG 0x0c
46/* 0x10 Reserved */
47#define USB_AUTOREQ_REG 0x14
48#define USB_SRP_FIX_TIME_REG 0x18
49#define USB_TEARDOWN_REG 0x1c
50#define EP_INTR_SRC_REG 0x20
51#define EP_INTR_SRC_SET_REG 0x24
52#define EP_INTR_SRC_CLEAR_REG 0x28
53#define EP_INTR_MASK_REG 0x2c
54#define EP_INTR_MASK_SET_REG 0x30
55#define EP_INTR_MASK_CLEAR_REG 0x34
56#define EP_INTR_SRC_MASKED_REG 0x38
57#define CORE_INTR_SRC_REG 0x40
58#define CORE_INTR_SRC_SET_REG 0x44
59#define CORE_INTR_SRC_CLEAR_REG 0x48
60#define CORE_INTR_MASK_REG 0x4c
61#define CORE_INTR_MASK_SET_REG 0x50
62#define CORE_INTR_MASK_CLEAR_REG 0x54
63#define CORE_INTR_SRC_MASKED_REG 0x58
64/* 0x5c Reserved */
65#define USB_END_OF_INTR_REG 0x60
66
67/* Control register bits */
68#define AM35X_SOFT_RESET_MASK 1
69
70/* USB interrupt register bits */
71#define AM35X_INTR_USB_SHIFT 16
72#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
73#define AM35X_INTR_DRVVBUS 0x100
74#define AM35X_INTR_RX_SHIFT 16
75#define AM35X_INTR_TX_SHIFT 0
76#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
77#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
78#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
79#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
80
81#define USB_MENTOR_CORE_OFFSET 0x400
82
83static inline void phy_on(void)
84{
85 unsigned long timeout = jiffies + msecs_to_jiffies(100);
86 u32 devconf2;
87
88 /*
89 * Start the on-chip PHY and its PLL.
90 */
91 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
92
93 devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
94 devconf2 |= CONF2_PHY_PLLON;
95
96 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
97
98 DBG(1, "Waiting for PHY clock good...\n");
99 while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
100 & CONF2_PHYCLKGD)) {
101 cpu_relax();
102
103 if (time_after(jiffies, timeout)) {
104 DBG(1, "musb PHY clock good timed out\n");
105 break;
106 }
107 }
108}
109
110static inline void phy_off(void)
111{
112 u32 devconf2;
113
114 /*
115 * Power down the on-chip PHY.
116 */
117 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
118
119 devconf2 &= ~CONF2_PHY_PLLON;
120 devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
121 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
122}
123
124/*
125 * musb_platform_enable - enable interrupts
126 */
127void musb_platform_enable(struct musb *musb)
128{
129 void __iomem *reg_base = musb->ctrl_base;
130 u32 epmask;
131
132 /* Workaround: setup IRQs through both register sets. */
133 epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
134 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);
135
136 musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
137 musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);
138
139 /* Force the DRVVBUS IRQ so we can start polling for ID change. */
140 if (is_otg_enabled(musb))
141 musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
142 AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
143}
144
145/*
146 * musb_platform_disable - disable HDRC and flush interrupts
147 */
148void musb_platform_disable(struct musb *musb)
149{
150 void __iomem *reg_base = musb->ctrl_base;
151
152 musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
153 musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
154 AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
155 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
156 musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
157}
158
159#ifdef CONFIG_USB_MUSB_HDRC_HCD
160#define portstate(stmt) stmt
161#else
162#define portstate(stmt)
163#endif
164
165static void am35x_set_vbus(struct musb *musb, int is_on)
166{
167 WARN_ON(is_on && is_peripheral_active(musb));
168}
169
170#define POLL_SECONDS 2
171
172static struct timer_list otg_workaround;
173
174static void otg_timer(unsigned long _musb)
175{
176 struct musb *musb = (void *)_musb;
177 void __iomem *mregs = musb->mregs;
178 u8 devctl;
179 unsigned long flags;
180
181 /*
182 * We poll because AM35x's won't expose several OTG-critical
183 * status change events (from the transceiver) otherwise.
184 */
185 devctl = musb_readb(mregs, MUSB_DEVCTL);
186 DBG(7, "Poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
187
188 spin_lock_irqsave(&musb->lock, flags);
189 switch (musb->xceiv->state) {
190 case OTG_STATE_A_WAIT_BCON:
191 devctl &= ~MUSB_DEVCTL_SESSION;
192 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
193
194 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
195 if (devctl & MUSB_DEVCTL_BDEVICE) {
196 musb->xceiv->state = OTG_STATE_B_IDLE;
197 MUSB_DEV_MODE(musb);
198 } else {
199 musb->xceiv->state = OTG_STATE_A_IDLE;
200 MUSB_HST_MODE(musb);
201 }
202 break;
203 case OTG_STATE_A_WAIT_VFALL:
204 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
205 musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
206 MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
207 break;
208 case OTG_STATE_B_IDLE:
209 if (!is_peripheral_enabled(musb))
210 break;
211
212 devctl = musb_readb(mregs, MUSB_DEVCTL);
213 if (devctl & MUSB_DEVCTL_BDEVICE)
214 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
215 else
216 musb->xceiv->state = OTG_STATE_A_IDLE;
217 break;
218 default:
219 break;
220 }
221 spin_unlock_irqrestore(&musb->lock, flags);
222}
223
224void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
225{
226 static unsigned long last_timer;
227
228 if (!is_otg_enabled(musb))
229 return;
230
231 if (timeout == 0)
232 timeout = jiffies + msecs_to_jiffies(3);
233
234 /* Never idle if active, or when VBUS timeout is not set as host */
235 if (musb->is_active || (musb->a_wait_bcon == 0 &&
236 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
237 DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
238 del_timer(&otg_workaround);
239 last_timer = jiffies;
240 return;
241 }
242
243 if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
244 DBG(4, "Longer idle timer already pending, ignoring...\n");
245 return;
246 }
247 last_timer = timeout;
248
249 DBG(4, "%s inactive, starting idle timer for %u ms\n",
250 otg_state_string(musb), jiffies_to_msecs(timeout - jiffies));
251 mod_timer(&otg_workaround, timeout);
252}
253
254static irqreturn_t am35x_interrupt(int irq, void *hci)
255{
256 struct musb *musb = hci;
257 void __iomem *reg_base = musb->ctrl_base;
258 unsigned long flags;
259 irqreturn_t ret = IRQ_NONE;
260 u32 epintr, usbintr, lvl_intr;
261
262 spin_lock_irqsave(&musb->lock, flags);
263
264 /* Get endpoint interrupts */
265 epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
266
267 if (epintr) {
268 musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);
269
270 musb->int_rx =
271 (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
272 musb->int_tx =
273 (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
274 }
275
276 /* Get usb core interrupts */
277 usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
278 if (!usbintr && !epintr)
279 goto eoi;
280
281 if (usbintr) {
282 musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);
283
284 musb->int_usb =
285 (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
286 }
287 /*
288 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
289 * AM35x's missing ID change IRQ. We need an ID change IRQ to
290 * switch appropriately between halves of the OTG state machine.
291 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
292 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
293 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
294 */
295 if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
296 int drvvbus = musb_readl(reg_base, USB_STAT_REG);
297 void __iomem *mregs = musb->mregs;
298 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
299 int err;
300
301 err = is_host_enabled(musb) && (musb->int_usb &
302 MUSB_INTR_VBUSERROR);
303 if (err) {
304 /*
305 * The Mentor core doesn't debounce VBUS as needed
306 * to cope with device connect current spikes. This
307 * means it's not uncommon for bus-powered devices
308 * to get VBUS errors during enumeration.
309 *
310 * This is a workaround, but newer RTL from Mentor
311 * seems to allow a better one: "re"-starting sessions
312 * without waiting for VBUS to stop registering in
313 * devctl.
314 */
315 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
316 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
317 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
318 WARNING("VBUS error workaround (delay coming)\n");
319 } else if (is_host_enabled(musb) && drvvbus) {
320 MUSB_HST_MODE(musb);
321 musb->xceiv->default_a = 1;
322 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
323 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
324 del_timer(&otg_workaround);
325 } else {
326 musb->is_active = 0;
327 MUSB_DEV_MODE(musb);
328 musb->xceiv->default_a = 0;
329 musb->xceiv->state = OTG_STATE_B_IDLE;
330 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
331 }
332
333 /* NOTE: this must complete power-on within 100 ms. */
334 DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
335 drvvbus ? "on" : "off",
336 otg_state_string(musb),
337 err ? " ERROR" : "",
338 devctl);
339 ret = IRQ_HANDLED;
340 }
341
342 if (musb->int_tx || musb->int_rx || musb->int_usb)
343 ret |= musb_interrupt(musb);
344
345eoi:
346 /* EOI needs to be written for the IRQ to be re-asserted. */
347 if (ret == IRQ_HANDLED || epintr || usbintr) {
348 /* clear level interrupt */
349 lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
350 lvl_intr |= AM35XX_USBOTGSS_INT_CLR;
351 omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR);
352 /* write EOI */
353 musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
354 }
355
356 /* Poll for ID change */
357 if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
358 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
359
360 spin_unlock_irqrestore(&musb->lock, flags);
361
362 return ret;
363}
364
/*
 * musb_platform_set_mode() - select the OTG mode override for AM35x.
 *
 * Clears the OTGMODE override field in the DEVCONF2 system-control
 * register, then forces host mode (ID = 0), device mode (ID = 1), or
 * leaves the VBUS/ID comparators un-overridden, per @musb_mode.
 * Always returns 0; an unsupported mode just leaves the override
 * cleared (and is logged at debug level).
 */
365int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
366{
367	u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
368
369	devconf2 &= ~CONF2_OTGMODE;
370	switch (musb_mode) {
371#ifdef CONFIG_USB_MUSB_HDRC_HCD
372	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
373		devconf2 |= CONF2_FORCE_HOST;
374		break;
375#endif
376#ifdef CONFIG_USB_GADGET_MUSB_HDRC
377	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
378		devconf2 |= CONF2_FORCE_DEVICE;
379		break;
380#endif
381#ifdef CONFIG_USB_MUSB_OTG
382	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
383		devconf2 |= CONF2_NO_OVERRIDE;
384		break;
385#endif
386	default:
387		DBG(2, "Trying to set unsupported mode %u\n", musb_mode);
388	}
389
390	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
391	return 0;
392}
393
/*
 * musb_platform_init() - one-time setup of the AM35x MUSB glue layer.
 *
 * Enables the interface and PHY clocks, verifies the controller is
 * clocked (revision register must read non-zero), registers and
 * acquires the NOP transceiver, soft-resets the OTG subsystem and the
 * controller, powers up the on-chip PHY, and installs the AM35x
 * interrupt handler.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released in reverse order.
 */
394int __init musb_platform_init(struct musb *musb, void *board_data)
395{
396	void __iomem *reg_base = musb->ctrl_base;
397	u32 rev, lvl_intr, sw_reset;
398	int status;
399
400	musb->mregs += USB_MENTOR_CORE_OFFSET;
401
402	clk_enable(musb->clock);
	/*
	 * Was "%lud", which printed the rate followed by a literal 'd';
	 * clk_get_rate() returns unsigned long, so "%lu" is correct.
	 */
403	DBG(2, "musb->clock=%lu\n", clk_get_rate(musb->clock));
404
405	musb->phy_clock = clk_get(musb->controller, "fck");
406	if (IS_ERR(musb->phy_clock)) {
407		status = PTR_ERR(musb->phy_clock);
408		goto exit0;
409	}
410	clk_enable(musb->phy_clock);
411	DBG(2, "musb->phy_clock=%lu\n", clk_get_rate(musb->phy_clock));
412
413	/* Returns zero if e.g. not clocked */
414	rev = musb_readl(reg_base, USB_REVISION_REG);
415	if (!rev) {
416		status = -ENODEV;
417		goto exit1;
418	}
419
420	usb_nop_xceiv_register();
421	musb->xceiv = otg_get_transceiver();
422	if (!musb->xceiv) {
423		status = -ENODEV;
		/* Undo the NOP-transceiver registration done just above. */
424		goto exit2;
425	}
426
427	if (is_host_enabled(musb))
428		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
429
430	musb->board_set_vbus = am35x_set_vbus;
431
432	/* Global reset: pulse the OTG subsystem soft-reset bit. */
433	sw_reset = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
434
435	sw_reset |= AM35XX_USBOTGSS_SW_RST;
436	omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
437
438	sw_reset &= ~AM35XX_USBOTGSS_SW_RST;
439	omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
440
441	/* Reset the controller */
442	musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
443
444	/* Start the on-chip PHY and its PLL. */
445	phy_on();
446
447	msleep(5);
448
449	musb->isr = am35x_interrupt;
450
451	/* clear level interrupt */
452	lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
453	lvl_intr |= AM35XX_USBOTGSS_INT_CLR;
454	omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR);
455	return 0;
exit2:
	usb_nop_xceiv_unregister();
456exit1:
457	clk_disable(musb->phy_clock);
458	clk_put(musb->phy_clock);
459exit0:
460	clk_disable(musb->clock);
461	return status;
462}
463
/*
 * musb_platform_exit() - tear down the AM35x MUSB glue layer.
 *
 * Reverses musb_platform_init(): stops the host-side OTG workaround
 * timer, powers the PHY down, releases the transceiver, and disables
 * both the interface and PHY clocks.  Always returns 0.
 */
464int musb_platform_exit(struct musb *musb)
465{
466	if (is_host_enabled(musb))
467		del_timer_sync(&otg_workaround);
468
469	phy_off();
470
471	otg_put_transceiver(musb->xceiv);
472	usb_nop_xceiv_unregister();
473
474	clk_disable(musb->clock);
475
476	clk_disable(musb->phy_clock);
477	clk_put(musb->phy_clock);
478
479	return 0;
480}
481
482#ifdef CONFIG_PM
/*
 * PM hooks: the AM35x glue's only suspend/resume state is the PHY
 * power; register contents are handled by the generic musb context
 * save/restore in musb_core.c (musb_context is unused here).
 */
483void musb_platform_save_context(struct musb *musb,
484		struct musb_context_registers *musb_context)
485{
486	phy_off();
487}
488
489void musb_platform_restore_context(struct musb *musb,
490		struct musb_context_registers *musb_context)
491{
492	phy_on();
493}
494#endif
495
496/* AM35x supports only 32bit read operation */
497void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
498{
499	void __iomem *fifo = hw_ep->fifo;
500	u32 val;
501	int i;
502
	/* Fast path: bulk 32-bit reads when the destination is aligned. */
503	if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
504		readsl(fifo, dst, len >> 2);
505		dst += len & ~0x03;
506		len &= 0x03;
507	}
	/*
	 * Unaligned destination: still read whole words from the FIFO,
	 * but store them with memcpy().  The previous "*(u32 *)dst"
	 * store was an unaligned 32-bit access -- undefined behaviour
	 * in C and a fault on CPUs without unaligned-access support.
	 */
513	if (len > 4) {
514		for (i = 0; i < (len >> 2); i++) {
			val = musb_readl(fifo, 0);
			memcpy(dst, &val, 4);
517			dst += 4;
518		}
519		len &= 0x03;
520	}
	/* Remaining 1-3 bytes: one last word read, copy only what's left. */
521	if (len > 0) {
522		val = musb_readl(fifo, 0);
523		memcpy(dst, &val, len);
524	}
525}
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index b611420a8050..611a9d274363 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -342,8 +342,10 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
342 342
343 usb_nop_xceiv_register(); 343 usb_nop_xceiv_register();
344 musb->xceiv = otg_get_transceiver(); 344 musb->xceiv = otg_get_transceiver();
345 if (!musb->xceiv) 345 if (!musb->xceiv) {
346 gpio_free(musb->config->gpio_vrsel);
346 return -ENODEV; 347 return -ENODEV;
348 }
347 349
348 if (ANOMALY_05000346) { 350 if (ANOMALY_05000346) {
349 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 351 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
@@ -394,8 +396,9 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
394 396
395int musb_platform_exit(struct musb *musb) 397int musb_platform_exit(struct musb *musb)
396{ 398{
397
398 gpio_free(musb->config->gpio_vrsel); 399 gpio_free(musb->config->gpio_vrsel);
399 400
401 otg_put_transceiver(musb->xceiv);
402 usb_nop_xceiv_unregister();
400 return 0; 403 return 0;
401} 404}
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 5ab5bb89bae3..f5a65ff0ac2b 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1156,7 +1156,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1156 struct musb_hw_ep *hw_ep = NULL; 1156 struct musb_hw_ep *hw_ep = NULL;
1157 u32 rx, tx; 1157 u32 rx, tx;
1158 int i, index; 1158 int i, index;
1159 unsigned long flags; 1159 unsigned long uninitialized_var(flags);
1160 1160
1161 cppi = container_of(musb->dma_controller, struct cppi, controller); 1161 cppi = container_of(musb->dma_controller, struct cppi, controller);
1162 if (cppi->irq) 1162 if (cppi->irq)
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
new file mode 100644
index 000000000000..84427bebbf62
--- /dev/null
+++ b/drivers/usb/musb/da8xx.c
@@ -0,0 +1,469 @@
1/*
2 * Texas Instruments DA8xx/OMAP-L1x "glue layer"
3 *
4 * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
5 *
6 * Based on the DaVinci "glue layer" code.
7 * Copyright (C) 2005-2006 by Texas Instruments
8 *
9 * This file is part of the Inventra Controller Driver for Linux.
10 *
11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software
14 * Foundation.
15 *
16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28
29#include <linux/init.h>
30#include <linux/clk.h>
31#include <linux/io.h>
32
33#include <mach/da8xx.h>
34#include <mach/usb.h>
35
36#include "musb_core.h"
37
38/*
39 * DA8XX specific definitions
40 */
41
42/* USB 2.0 OTG module registers */
43#define DA8XX_USB_REVISION_REG 0x00
44#define DA8XX_USB_CTRL_REG 0x04
45#define DA8XX_USB_STAT_REG 0x08
46#define DA8XX_USB_EMULATION_REG 0x0c
47#define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */
48#define DA8XX_USB_AUTOREQ_REG 0x14
49#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
50#define DA8XX_USB_TEARDOWN_REG 0x1c
51#define DA8XX_USB_INTR_SRC_REG 0x20
52#define DA8XX_USB_INTR_SRC_SET_REG 0x24
53#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
54#define DA8XX_USB_INTR_MASK_REG 0x2c
55#define DA8XX_USB_INTR_MASK_SET_REG 0x30
56#define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34
57#define DA8XX_USB_INTR_SRC_MASKED_REG 0x38
58#define DA8XX_USB_END_OF_INTR_REG 0x3c
59#define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2))
60
61/* Control register bits */
62#define DA8XX_SOFT_RESET_MASK 1
63
64#define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */
65#define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */
66
67/* USB interrupt register bits */
68#define DA8XX_INTR_USB_SHIFT 16
69#define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */
70 /* interrupts and DRVVBUS interrupt */
71#define DA8XX_INTR_DRVVBUS 0x100
72#define DA8XX_INTR_RX_SHIFT 8
73#define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT)
74#define DA8XX_INTR_TX_SHIFT 0
75#define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT)
76
77#define DA8XX_MENTOR_CORE_OFFSET 0x400
78
79#define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
80
81/*
82 * REVISIT (PM): we should be able to keep the PHY in low power mode most
83 * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
84 * and, when in host mode, autosuspending idle root ports... PHY_PLLON
85 * (overriding SUSPENDM?) then likely needs to stay off.
86 */
87
/*
 * Power up the DA8xx on-chip USB 2.0 PHY and its PLL via CFGCHIP2,
 * then spin until the PHY reports a good clock.
 *
 * NOTE(review): the wait loop has no timeout -- if PHYCLKGD never
 * asserts (e.g. missing reference clock) this spins forever; confirm
 * this is acceptable at the single init-time call site.
 */
88static inline void phy_on(void)
89{
90	u32 cfgchip2 = __raw_readl(CFGCHIP2);
91
92	/*
93	 * Start the on-chip PHY and its PLL.
94	 */
95	cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN);
96	cfgchip2 |= CFGCHIP2_PHY_PLLON;
97	__raw_writel(cfgchip2, CFGCHIP2);
98
99	pr_info("Waiting for USB PHY clock good...\n");
100	while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
101		cpu_relax();
102}
103
/*
 * Power down the DA8xx on-chip USB 2.0 PHY via CFGCHIP2 -- unless the
 * USB 1.1 block is currently sourcing its reference clock from this
 * PHY, in which case powering it down would break USB 1.1 and we bail
 * out with a warning instead.
 */
104static inline void phy_off(void)
105{
106	u32 cfgchip2 = __raw_readl(CFGCHIP2);
107
108	/*
109	 * Ensure that USB 1.1 reference clock is not being sourced from
110	 * USB 2.0 PHY.  Otherwise do not power down the PHY.
111	 */
112	if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) &&
113	    (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) {
114		pr_warning("USB 1.1 clocked from USB 2.0 PHY -- "
115			   "can't power it down\n");
116		return;
117	}
118
119	/*
120	 * Power down the on-chip PHY.
121	 */
122	cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN;
123	__raw_writel(cfgchip2, CFGCHIP2);
124}
125
126/*
127 * Because we don't set CTRL.UINT, it's "important" to:
128 * - not read/write INTRUSB/INTRUSBE (except during
129 * initial setup, as a workaround);
130 * - use INTSET/INTCLR instead.
131 */
132
133/**
134 * musb_platform_enable - enable interrupts
 * @musb: the MUSB controller instance
 *
 * Unmasks the TI-side EP and core interrupt sources.  The endpoint
 * bits are derived from musb->epmask, limited to the 4+1 Tx and 4 Rx
 * endpoints the DA8xx wires up.  In OTG mode a DRVVBUS interrupt is
 * forced so da8xx_interrupt() starts the ID-change polling timer.
135 */
136void musb_platform_enable(struct musb *musb)
137{
138	void __iomem *reg_base = musb->ctrl_base;
139	u32 mask;
140
141	/* Workaround: setup IRQs through both register sets. */
142	mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) |
143	       ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) |
144	       DA8XX_INTR_USB_MASK;
145	musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask);
146
147	/* Force the DRVVBUS IRQ so we can start polling for ID change. */
148	if (is_otg_enabled(musb))
149		musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG,
150			    DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
151}
152
153/**
154 * musb_platform_disable - disable HDRC and flush interrupts
 * @musb: the MUSB controller instance
 *
 * Masks all TI-side interrupt sources, clears DEVCTL (dropping any
 * session), and writes EOI so a level interrupt can re-assert later.
155 */
156void musb_platform_disable(struct musb *musb)
157{
158	void __iomem *reg_base = musb->ctrl_base;
159
160	musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG,
161		    DA8XX_INTR_USB_MASK |
162		    DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK);
163	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
164	musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
165}
166
/* portstate() compiles to nothing when host support is configured out. */
167#ifdef CONFIG_USB_MUSB_HDRC_HCD
168#define portstate(stmt)		stmt
169#else
170#define portstate(stmt)
171#endif
172
/*
 * VBUS is not software-controllable on this glue: the only action is
 * to warn if someone asks to drive VBUS while acting as a peripheral.
 */
173static void da8xx_set_vbus(struct musb *musb, int is_on)
174{
175	WARN_ON(is_on && is_peripheral_active(musb));
176}
177
178#define POLL_SECONDS	2
179
/* One shared workaround timer; the glue supports a single controller. */
180static struct timer_list otg_workaround;
181
/*
 * otg_timer() - periodic OTG state poller.
 *
 * Compensates for the DA8xx's missing transceiver status-change IRQs
 * by sampling DEVCTL every POLL_SECONDS and advancing the OTG state
 * machine by hand.  Runs in timer (softirq) context under musb->lock.
 */
182static void otg_timer(unsigned long _musb)
183{
184	struct musb		*musb = (void *)_musb;
185	void __iomem		*mregs = musb->mregs;
186	u8			devctl;
187	unsigned long		flags;
188
189	/*
190	 * We poll because DaVinci's won't expose several OTG-critical
191	 * status change events (from the transceiver) otherwise.
192	 */
193	devctl = musb_readb(mregs, MUSB_DEVCTL);
194	DBG(7, "Poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
195
196	spin_lock_irqsave(&musb->lock, flags);
197	switch (musb->xceiv->state) {
198	case OTG_STATE_A_WAIT_BCON:
		/*
		 * Drop the session, then re-read DEVCTL: with SESSION
		 * clear the BDEVICE bit is valid and tells us which half
		 * of the state machine to fall back into.
		 */
199		devctl &= ~MUSB_DEVCTL_SESSION;
200		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
201
202		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
203		if (devctl & MUSB_DEVCTL_BDEVICE) {
204			musb->xceiv->state = OTG_STATE_B_IDLE;
205			MUSB_DEV_MODE(musb);
206		} else {
207			musb->xceiv->state = OTG_STATE_A_IDLE;
208			MUSB_HST_MODE(musb);
209		}
210		break;
211	case OTG_STATE_A_WAIT_VFALL:
212		/*
213		 * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3
214		 * RTL seems to mis-handle session "start" otherwise (or in
215		 * our case "recover"), in routine "VBUS was valid by the time
216		 * VBUSERR got reported during enumeration" cases.
217		 */
218		if (devctl & MUSB_DEVCTL_VBUS) {
219			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
220			break;
221		}
222		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
223		musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
224			    MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
225		break;
226	case OTG_STATE_B_IDLE:
227		if (!is_peripheral_enabled(musb))
228			break;
229
230		/*
231		 * There's no ID-changed IRQ, so we have no good way to tell
232		 * when to switch to the A-Default state machine (by setting
233		 * the DEVCTL.Session bit).
234		 *
235		 * Workaround: whenever we're in B_IDLE, try setting the
236		 * session flag every few seconds.  If it works, ID was
237		 * grounded and we're now in the A-Default state machine.
238		 *
239		 * NOTE: setting the session flag is _supposed_ to trigger
240		 * SRP but clearly it doesn't.
241		 */
242		musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
243		devctl = musb_readb(mregs, MUSB_DEVCTL);
244		if (devctl & MUSB_DEVCTL_BDEVICE)
245			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
246		else
247			musb->xceiv->state = OTG_STATE_A_IDLE;
248		break;
249	default:
250		break;
251	}
252	spin_unlock_irqrestore(&musb->lock, flags);
253}
254
/*
 * musb_platform_try_idle() - (re)schedule or cancel the OTG poll timer.
 *
 * Called by the core when the controller may be able to idle.  In OTG
 * mode only: cancels the workaround timer while the controller is
 * active (or is host with no VBUS timeout configured), and otherwise
 * arms it for @timeout (defaulting to ~3 ms from now when 0 is
 * passed).  A longer already-pending timer is left alone.
 */
255void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
256{
	/* Remembers the deadline of the last timer we armed. */
257	static unsigned long last_timer;
258
259	if (!is_otg_enabled(musb))
260		return;
261
262	if (timeout == 0)
263		timeout = jiffies + msecs_to_jiffies(3);
264
265	/* Never idle if active, or when VBUS timeout is not set as host */
266	if (musb->is_active || (musb->a_wait_bcon == 0 &&
267				musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
268		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
269		del_timer(&otg_workaround);
270		last_timer = jiffies;
271		return;
272	}
273
274	if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
275		DBG(4, "Longer idle timer already pending, ignoring...\n");
276		return;
277	}
278	last_timer = timeout;
279
280	DBG(4, "%s inactive, starting idle timer for %u ms\n",
281	    otg_state_string(musb), jiffies_to_msecs(timeout - jiffies));
282	mod_timer(&otg_workaround, timeout);
283}
284
/*
 * da8xx_interrupt() - top-level IRQ handler for the DA8xx glue.
 *
 * Reads and acknowledges the TI-side masked interrupt sources, splits
 * them into musb->int_rx/int_tx/int_usb for the generic handler, runs
 * the DRVVBUS-based OTG workaround, and finally writes EOI so the
 * level interrupt can be re-asserted.
 */
285static irqreturn_t da8xx_interrupt(int irq, void *hci)
286{
287	struct musb		*musb = hci;
288	void __iomem		*reg_base = musb->ctrl_base;
289	unsigned long		flags;
290	irqreturn_t		ret = IRQ_NONE;
291	u32			status;
292
293	spin_lock_irqsave(&musb->lock, flags);
294
295	/*
296	 * NOTE: DA8XX shadows the Mentor IRQs.  Don't manage them through
297	 * the Mentor registers (except for setup), use the TI ones and EOI.
298	 */
299
300	/* Acknowledge and handle non-CPPI interrupts */
301	status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG);
302	if (!status)
303		goto eoi;
304
305	musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status);
306	DBG(4, "USB IRQ %08x\n", status);
307
308	musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
309	musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
310	musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
311
312	/*
313	 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
314	 * DA8xx's missing ID change IRQ.  We need an ID change IRQ to
315	 * switch appropriately between halves of the OTG state machine.
316	 * Managing DEVCTL.Session per Mentor docs requires that we know its
317	 * value but DEVCTL.BDevice is invalid without DEVCTL.Session set.
318	 * Also, DRVVBUS pulses for SRP (but not at 5 V)...
319	 */
320	if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) {
321		int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG);
322		void __iomem *mregs = musb->mregs;
323		u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
324		int err;
325
326		err = is_host_enabled(musb) && (musb->int_usb &
327						MUSB_INTR_VBUSERROR);
328		if (err) {
329			/*
330			 * The Mentor core doesn't debounce VBUS as needed
331			 * to cope with device connect current spikes.  This
332			 * means it's not uncommon for bus-powered devices
333			 * to get VBUS errors during enumeration.
334			 *
335			 * This is a workaround, but newer RTL from Mentor
336			 * seems to allow a better one: "re"-starting sessions
337			 * without waiting for VBUS to stop registering in
338			 * devctl.
339			 */
340			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
341			musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
342			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
343			WARNING("VBUS error workaround (delay coming)\n");
344		} else if (is_host_enabled(musb) && drvvbus) {
			/* VBUS present: take the A (host) half. */
345			MUSB_HST_MODE(musb);
346			musb->xceiv->default_a = 1;
347			musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
348			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
349			del_timer(&otg_workaround);
350		} else {
			/* VBUS gone: fall back to the B (device) half. */
351			musb->is_active = 0;
352			MUSB_DEV_MODE(musb);
353			musb->xceiv->default_a = 0;
354			musb->xceiv->state = OTG_STATE_B_IDLE;
355			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
356		}
357
358		DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
359		    drvvbus ? "on" : "off",
360		    otg_state_string(musb),
361		    err ? " ERROR" : "",
362		    devctl);
363		ret = IRQ_HANDLED;
364	}
365
366	if (musb->int_tx || musb->int_rx || musb->int_usb)
367		ret |= musb_interrupt(musb);
368
369 eoi:
370	/* EOI needs to be written for the IRQ to be re-asserted. */
371	if (ret == IRQ_HANDLED || status)
372		musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
373
374	/* Poll for ID change */
375	if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
376		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
377
378	spin_unlock_irqrestore(&musb->lock, flags);
379
380	return ret;
381}
382
/*
 * musb_platform_set_mode() - select the OTG mode override for DA8xx.
 *
 * Clears the OTGMODE override field in CFGCHIP2 and then forces host
 * mode (ID = 0), device mode (ID = 1), or leaves the VBUS/ID
 * comparators un-overridden, per @musb_mode.  Always returns 0; an
 * unsupported mode just leaves the override cleared.
 */
383int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
384{
385	u32 cfgchip2 = __raw_readl(CFGCHIP2);
386
387	cfgchip2 &= ~CFGCHIP2_OTGMODE;
388	switch (musb_mode) {
389#ifdef CONFIG_USB_MUSB_HDRC_HCD
390	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
391		cfgchip2 |= CFGCHIP2_FORCE_HOST;
392		break;
393#endif
394#ifdef CONFIG_USB_GADGET_MUSB_HDRC
395	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
396		cfgchip2 |= CFGCHIP2_FORCE_DEVICE;
397		break;
398#endif
399#ifdef CONFIG_USB_MUSB_OTG
400	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
401		cfgchip2 |= CFGCHIP2_NO_OVERRIDE;
402		break;
403#endif
404	default:
405		DBG(2, "Trying to set unsupported mode %u\n", musb_mode);
406	}
407
408	__raw_writel(cfgchip2, CFGCHIP2);
409	return 0;
410}
411
/*
 * musb_platform_init() - one-time setup of the DA8xx MUSB glue layer.
 *
 * Enables the module clock, verifies the controller is clocked
 * (revision register must read non-zero), registers and acquires the
 * NOP transceiver, soft-resets the controller, powers up the on-chip
 * PHY, and installs the DA8xx interrupt handler.
 *
 * Returns 0 on success or -ENODEV; on failure every resource acquired
 * so far is released in reverse order.
 */
412int __init musb_platform_init(struct musb *musb, void *board_data)
413{
414	void __iomem *reg_base = musb->ctrl_base;
415	u32 rev;
416
417	musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
418
419	clk_enable(musb->clock);
420
421	/* Returns zero if e.g. not clocked */
422	rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
423	if (!rev)
424		goto fail;
425
426	usb_nop_xceiv_register();
427	musb->xceiv = otg_get_transceiver();
	/*
	 * Undo the registration done just above on failure (same fix the
	 * DaVinci glue applies to its own failure path).
	 */
428	if (!musb->xceiv)
429		goto fail1;
430
431	if (is_host_enabled(musb))
432		setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
433
434	musb->board_set_vbus = da8xx_set_vbus;
435
436	/* Reset the controller */
437	musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
438
439	/* Start the on-chip PHY and its PLL. */
440	phy_on();
441
442	msleep(5);
443
444	/* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
445	pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n",
446		 rev, __raw_readl(CFGCHIP2),
447		 musb_readb(reg_base, DA8XX_USB_CTRL_REG));
448
449	musb->isr = da8xx_interrupt;
450	return 0;
fail1:
	usb_nop_xceiv_unregister();
451fail:
452	clk_disable(musb->clock);
453	return -ENODEV;
454}
455
/*
 * musb_platform_exit() - tear down the DA8xx MUSB glue layer.
 *
 * Reverses musb_platform_init(): stops the host-side OTG workaround
 * timer, powers the PHY down, releases the transceiver, and disables
 * the module clock.  Always returns 0.
 */
456int musb_platform_exit(struct musb *musb)
457{
458	if (is_host_enabled(musb))
459		del_timer_sync(&otg_workaround);
460
461	phy_off();
462
463	otg_put_transceiver(musb->xceiv);
464	usb_nop_xceiv_unregister();
465
466	clk_disable(musb->clock);
467
468	return 0;
469}
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 57624361c1de..6e67629f50cc 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -446,6 +446,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
446fail: 446fail:
447 clk_disable(musb->clock); 447 clk_disable(musb->clock);
448 448
449 otg_put_transceiver(musb->xceiv);
449 usb_nop_xceiv_unregister(); 450 usb_nop_xceiv_unregister();
450 return -ENODEV; 451 return -ENODEV;
451} 452}
@@ -496,6 +497,7 @@ int musb_platform_exit(struct musb *musb)
496 497
497 clk_disable(musb->clock); 498 clk_disable(musb->clock);
498 499
500 otg_put_transceiver(musb->xceiv);
499 usb_nop_xceiv_unregister(); 501 usb_nop_xceiv_unregister();
500 502
501 return 0; 503 return 0;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 540c766c4f86..c9f9024c5515 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -272,6 +272,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
272 } 272 }
273} 273}
274 274
275#if !defined(CONFIG_USB_MUSB_AM35X)
275/* 276/*
276 * Unload an endpoint's FIFO 277 * Unload an endpoint's FIFO
277 */ 278 */
@@ -309,6 +310,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
309 readsb(fifo, dst, len); 310 readsb(fifo, dst, len);
310 } 311 }
311} 312}
313#endif
312 314
313#endif /* normal PIO */ 315#endif /* normal PIO */
314 316
@@ -550,6 +552,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
550 if (int_usb & MUSB_INTR_SESSREQ) { 552 if (int_usb & MUSB_INTR_SESSREQ) {
551 void __iomem *mbase = musb->mregs; 553 void __iomem *mbase = musb->mregs;
552 554
555 if (devctl & MUSB_DEVCTL_BDEVICE) {
556 DBG(3, "SessReq while on B state\n");
557 return IRQ_HANDLED;
558 }
559
553 DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); 560 DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
554 561
555 /* IRQ arrives from ID pin sense or (later, if VBUS power 562 /* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -1921,10 +1928,6 @@ static void musb_free(struct musb *musb)
1921 dma_controller_destroy(c); 1928 dma_controller_destroy(c);
1922 } 1929 }
1923 1930
1924#ifdef CONFIG_USB_MUSB_OTG
1925 put_device(musb->xceiv->dev);
1926#endif
1927
1928#ifdef CONFIG_USB_MUSB_HDRC_HCD 1931#ifdef CONFIG_USB_MUSB_HDRC_HCD
1929 usb_put_hcd(musb_to_hcd(musb)); 1932 usb_put_hcd(musb_to_hcd(musb));
1930#else 1933#else
@@ -2266,6 +2269,7 @@ void musb_save_context(struct musb *musb)
2266{ 2269{
2267 int i; 2270 int i;
2268 void __iomem *musb_base = musb->mregs; 2271 void __iomem *musb_base = musb->mregs;
2272 void __iomem *epio;
2269 2273
2270 if (is_host_enabled(musb)) { 2274 if (is_host_enabled(musb)) {
2271 musb_context.frame = musb_readw(musb_base, MUSB_FRAME); 2275 musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
@@ -2279,16 +2283,16 @@ void musb_save_context(struct musb *musb)
2279 musb_context.index = musb_readb(musb_base, MUSB_INDEX); 2283 musb_context.index = musb_readb(musb_base, MUSB_INDEX);
2280 musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL); 2284 musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2281 2285
2282 for (i = 0; i < MUSB_C_NUM_EPS; ++i) { 2286 for (i = 0; i < musb->config->num_eps; ++i) {
2283 musb_writeb(musb_base, MUSB_INDEX, i); 2287 epio = musb->endpoints[i].regs;
2284 musb_context.index_regs[i].txmaxp = 2288 musb_context.index_regs[i].txmaxp =
2285 musb_readw(musb_base, 0x10 + MUSB_TXMAXP); 2289 musb_readw(epio, MUSB_TXMAXP);
2286 musb_context.index_regs[i].txcsr = 2290 musb_context.index_regs[i].txcsr =
2287 musb_readw(musb_base, 0x10 + MUSB_TXCSR); 2291 musb_readw(epio, MUSB_TXCSR);
2288 musb_context.index_regs[i].rxmaxp = 2292 musb_context.index_regs[i].rxmaxp =
2289 musb_readw(musb_base, 0x10 + MUSB_RXMAXP); 2293 musb_readw(epio, MUSB_RXMAXP);
2290 musb_context.index_regs[i].rxcsr = 2294 musb_context.index_regs[i].rxcsr =
2291 musb_readw(musb_base, 0x10 + MUSB_RXCSR); 2295 musb_readw(epio, MUSB_RXCSR);
2292 2296
2293 if (musb->dyn_fifo) { 2297 if (musb->dyn_fifo) {
2294 musb_context.index_regs[i].txfifoadd = 2298 musb_context.index_regs[i].txfifoadd =
@@ -2302,13 +2306,13 @@ void musb_save_context(struct musb *musb)
2302 } 2306 }
2303 if (is_host_enabled(musb)) { 2307 if (is_host_enabled(musb)) {
2304 musb_context.index_regs[i].txtype = 2308 musb_context.index_regs[i].txtype =
2305 musb_readb(musb_base, 0x10 + MUSB_TXTYPE); 2309 musb_readb(epio, MUSB_TXTYPE);
2306 musb_context.index_regs[i].txinterval = 2310 musb_context.index_regs[i].txinterval =
2307 musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL); 2311 musb_readb(epio, MUSB_TXINTERVAL);
2308 musb_context.index_regs[i].rxtype = 2312 musb_context.index_regs[i].rxtype =
2309 musb_readb(musb_base, 0x10 + MUSB_RXTYPE); 2313 musb_readb(epio, MUSB_RXTYPE);
2310 musb_context.index_regs[i].rxinterval = 2314 musb_context.index_regs[i].rxinterval =
2311 musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL); 2315 musb_readb(epio, MUSB_RXINTERVAL);
2312 2316
2313 musb_context.index_regs[i].txfunaddr = 2317 musb_context.index_regs[i].txfunaddr =
2314 musb_read_txfunaddr(musb_base, i); 2318 musb_read_txfunaddr(musb_base, i);
@@ -2326,8 +2330,6 @@ void musb_save_context(struct musb *musb)
2326 } 2330 }
2327 } 2331 }
2328 2332
2329 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2330
2331 musb_platform_save_context(musb, &musb_context); 2333 musb_platform_save_context(musb, &musb_context);
2332} 2334}
2333 2335
@@ -2336,6 +2338,7 @@ void musb_restore_context(struct musb *musb)
2336 int i; 2338 int i;
2337 void __iomem *musb_base = musb->mregs; 2339 void __iomem *musb_base = musb->mregs;
2338 void __iomem *ep_target_regs; 2340 void __iomem *ep_target_regs;
2341 void __iomem *epio;
2339 2342
2340 musb_platform_restore_context(musb, &musb_context); 2343 musb_platform_restore_context(musb, &musb_context);
2341 2344
@@ -2350,15 +2353,15 @@ void musb_restore_context(struct musb *musb)
2350 musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe); 2353 musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
2351 musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl); 2354 musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
2352 2355
2353 for (i = 0; i < MUSB_C_NUM_EPS; ++i) { 2356 for (i = 0; i < musb->config->num_eps; ++i) {
2354 musb_writeb(musb_base, MUSB_INDEX, i); 2357 epio = musb->endpoints[i].regs;
2355 musb_writew(musb_base, 0x10 + MUSB_TXMAXP, 2358 musb_writew(epio, MUSB_TXMAXP,
2356 musb_context.index_regs[i].txmaxp); 2359 musb_context.index_regs[i].txmaxp);
2357 musb_writew(musb_base, 0x10 + MUSB_TXCSR, 2360 musb_writew(epio, MUSB_TXCSR,
2358 musb_context.index_regs[i].txcsr); 2361 musb_context.index_regs[i].txcsr);
2359 musb_writew(musb_base, 0x10 + MUSB_RXMAXP, 2362 musb_writew(epio, MUSB_RXMAXP,
2360 musb_context.index_regs[i].rxmaxp); 2363 musb_context.index_regs[i].rxmaxp);
2361 musb_writew(musb_base, 0x10 + MUSB_RXCSR, 2364 musb_writew(epio, MUSB_RXCSR,
2362 musb_context.index_regs[i].rxcsr); 2365 musb_context.index_regs[i].rxcsr);
2363 2366
2364 if (musb->dyn_fifo) { 2367 if (musb->dyn_fifo) {
@@ -2373,13 +2376,13 @@ void musb_restore_context(struct musb *musb)
2373 } 2376 }
2374 2377
2375 if (is_host_enabled(musb)) { 2378 if (is_host_enabled(musb)) {
2376 musb_writeb(musb_base, 0x10 + MUSB_TXTYPE, 2379 musb_writeb(epio, MUSB_TXTYPE,
2377 musb_context.index_regs[i].txtype); 2380 musb_context.index_regs[i].txtype);
2378 musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL, 2381 musb_writeb(epio, MUSB_TXINTERVAL,
2379 musb_context.index_regs[i].txinterval); 2382 musb_context.index_regs[i].txinterval);
2380 musb_writeb(musb_base, 0x10 + MUSB_RXTYPE, 2383 musb_writeb(epio, MUSB_RXTYPE,
2381 musb_context.index_regs[i].rxtype); 2384 musb_context.index_regs[i].rxtype);
2382 musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL, 2385 musb_writeb(epio, MUSB_RXINTERVAL,
2383 2386
2384 musb_context.index_regs[i].rxinterval); 2387 musb_context.index_regs[i].rxinterval);
2385 musb_write_txfunaddr(musb_base, i, 2388 musb_write_txfunaddr(musb_base, i,
@@ -2400,8 +2403,6 @@ void musb_restore_context(struct musb *musb)
2400 musb_context.index_regs[i].rxhubport); 2403 musb_context.index_regs[i].rxhubport);
2401 } 2404 }
2402 } 2405 }
2403
2404 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2405} 2406}
2406 2407
2407static int musb_suspend(struct device *dev) 2408static int musb_suspend(struct device *dev)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 91d67794e350..69797e5b46a7 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -330,6 +330,7 @@ struct musb {
330 /* device lock */ 330 /* device lock */
331 spinlock_t lock; 331 spinlock_t lock;
332 struct clk *clock; 332 struct clk *clock;
333 struct clk *phy_clock;
333 irqreturn_t (*isr)(int, void *); 334 irqreturn_t (*isr)(int, void *);
334 struct work_struct irq_work; 335 struct work_struct irq_work;
335 u16 hwvers; 336 u16 hwvers;
@@ -599,6 +600,7 @@ extern void musb_hnp_stop(struct musb *musb);
599extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode); 600extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
600 601
601#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \ 602#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
603 defined(CONFIG_ARCH_DAVINCI_DA8XX) || \
602 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 604 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
603 defined(CONFIG_ARCH_OMAP4) 605 defined(CONFIG_ARCH_OMAP4)
604extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); 606extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index d73afdbde3ee..94f6973cf8f7 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -42,11 +42,10 @@
42#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) 42#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
43#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) 43#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
44 44
45#define xprintk(level, facility, format, args...) do { \ 45#define DBG(level, format, args...) do { \
46 if (_dbg_level(level)) { \ 46 if (_dbg_level(level)) \
47 printk(facility "%s %d: " format , \ 47 pr_debug("%s %d: " format, __func__, __LINE__, ## args); \
48 __func__, __LINE__ , ## args); \ 48 } while (0)
49 } } while (0)
50 49
51extern unsigned musb_debug; 50extern unsigned musb_debug;
52 51
@@ -55,8 +54,6 @@ static inline int _dbg_level(unsigned l)
55 return musb_debug >= l; 54 return musb_debug >= l;
56} 55}
57 56
58#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
59
60extern const char *otg_state_string(struct musb *); 57extern const char *otg_state_string(struct musb *);
61 58
62#ifdef CONFIG_DEBUG_FS 59#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d065e23f123e..5d815049cbaa 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -337,13 +337,15 @@ static void txstate(struct musb *musb, struct musb_request *req)
337 csr |= (MUSB_TXCSR_DMAENAB | 337 csr |= (MUSB_TXCSR_DMAENAB |
338 MUSB_TXCSR_MODE); 338 MUSB_TXCSR_MODE);
339 /* against programming guide */ 339 /* against programming guide */
340 } else 340 } else {
341 csr |= (MUSB_TXCSR_AUTOSET 341 csr |= (MUSB_TXCSR_DMAENAB
342 | MUSB_TXCSR_DMAENAB
343 | MUSB_TXCSR_DMAMODE 342 | MUSB_TXCSR_DMAMODE
344 | MUSB_TXCSR_MODE); 343 | MUSB_TXCSR_MODE);
345 344 if (!musb_ep->hb_mult)
345 csr |= MUSB_TXCSR_AUTOSET;
346 }
346 csr &= ~MUSB_TXCSR_P_UNDERRUN; 347 csr &= ~MUSB_TXCSR_P_UNDERRUN;
348
347 musb_writew(epio, MUSB_TXCSR, csr); 349 musb_writew(epio, MUSB_TXCSR, csr);
348 } 350 }
349 } 351 }
@@ -475,40 +477,39 @@ void musb_g_tx(struct musb *musb, u8 epnum)
475 epnum, csr, musb_ep->dma->actual_len, request); 477 epnum, csr, musb_ep->dma->actual_len, request);
476 } 478 }
477 479
478 if (is_dma || request->actual == request->length) { 480 /*
479 /* 481 * First, maybe a terminating short packet. Some DMA
480 * First, maybe a terminating short packet. Some DMA 482 * engines might handle this by themselves.
481 * engines might handle this by themselves. 483 */
482 */ 484 if ((request->zero && request->length
483 if ((request->zero && request->length 485 && (request->length % musb_ep->packet_sz == 0)
484 && request->length % musb_ep->packet_sz == 0) 486 && (request->actual == request->length))
485#ifdef CONFIG_USB_INVENTRA_DMA 487#ifdef CONFIG_USB_INVENTRA_DMA
486 || (is_dma && (!dma->desired_mode || 488 || (is_dma && (!dma->desired_mode ||
487 (request->actual & 489 (request->actual &
488 (musb_ep->packet_sz - 1)))) 490 (musb_ep->packet_sz - 1))))
489#endif 491#endif
490 ) { 492 ) {
491 /* 493 /*
492 * On DMA completion, FIFO may not be 494 * On DMA completion, FIFO may not be
493 * available yet... 495 * available yet...
494 */ 496 */
495 if (csr & MUSB_TXCSR_TXPKTRDY) 497 if (csr & MUSB_TXCSR_TXPKTRDY)
496 return; 498 return;
497 499
498 DBG(4, "sending zero pkt\n"); 500 DBG(4, "sending zero pkt\n");
499 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE 501 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
500 | MUSB_TXCSR_TXPKTRDY); 502 | MUSB_TXCSR_TXPKTRDY);
501 request->zero = 0; 503 request->zero = 0;
502 } 504 }
503 505
504 if (request->actual == request->length) { 506 if (request->actual == request->length) {
505 musb_g_giveback(musb_ep, request, 0); 507 musb_g_giveback(musb_ep, request, 0);
506 request = musb_ep->desc ? next_request(musb_ep) : NULL; 508 request = musb_ep->desc ? next_request(musb_ep) : NULL;
507 if (!request) { 509 if (!request) {
508 DBG(4, "%s idle now\n", 510 DBG(4, "%s idle now\n",
509 musb_ep->end_point.name); 511 musb_ep->end_point.name);
510 return; 512 return;
511 }
512 } 513 }
513 } 514 }
514 515
@@ -643,7 +644,9 @@ static void rxstate(struct musb *musb, struct musb_request *req)
643 */ 644 */
644 645
645 csr |= MUSB_RXCSR_DMAENAB; 646 csr |= MUSB_RXCSR_DMAENAB;
646 csr |= MUSB_RXCSR_AUTOCLEAR; 647 if (!musb_ep->hb_mult &&
648 musb_ep->hw_ep->rx_double_buffered)
649 csr |= MUSB_RXCSR_AUTOCLEAR;
647#ifdef USE_MODE1 650#ifdef USE_MODE1
648 /* csr |= MUSB_RXCSR_DMAMODE; */ 651 /* csr |= MUSB_RXCSR_DMAMODE; */
649 652
@@ -772,7 +775,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
772 musb_writew(epio, MUSB_RXCSR, csr); 775 musb_writew(epio, MUSB_RXCSR, csr);
773 776
774 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); 777 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
775 if (request && request->status == -EINPROGRESS) 778 if (request->status == -EINPROGRESS)
776 request->status = -EOVERFLOW; 779 request->status = -EOVERFLOW;
777 } 780 }
778 if (csr & MUSB_RXCSR_INCOMPRX) { 781 if (csr & MUSB_RXCSR_INCOMPRX) {
@@ -825,14 +828,8 @@ void musb_g_rx(struct musb *musb, u8 epnum)
825 return; 828 return;
826 } 829 }
827 830
828 /* analyze request if the ep is hot */ 831 /* Analyze request */
829 if (request) 832 rxstate(musb, to_musb_request(request));
830 rxstate(musb, to_musb_request(request));
831 else
832 DBG(3, "packet waiting for %s%s request\n",
833 musb_ep->desc ? "" : "inactive ",
834 musb_ep->end_point.name);
835 return;
836} 833}
837 834
838/* ------------------------------------------------------------ */ 835/* ------------------------------------------------------------ */
@@ -875,9 +872,25 @@ static int musb_gadget_enable(struct usb_ep *ep,
875 872
876 /* REVISIT this rules out high bandwidth periodic transfers */ 873 /* REVISIT this rules out high bandwidth periodic transfers */
877 tmp = le16_to_cpu(desc->wMaxPacketSize); 874 tmp = le16_to_cpu(desc->wMaxPacketSize);
878 if (tmp & ~0x07ff) 875 if (tmp & ~0x07ff) {
879 goto fail; 876 int ok;
880 musb_ep->packet_sz = tmp; 877
878 if (usb_endpoint_dir_in(desc))
879 ok = musb->hb_iso_tx;
880 else
881 ok = musb->hb_iso_rx;
882
883 if (!ok) {
884 DBG(4, "%s: not support ISO high bandwidth\n", __func__);
885 goto fail;
886 }
887 musb_ep->hb_mult = (tmp >> 11) & 3;
888 } else {
889 musb_ep->hb_mult = 0;
890 }
891
892 musb_ep->packet_sz = tmp & 0x7ff;
893 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
881 894
882 /* enable the interrupts for the endpoint, set the endpoint 895 /* enable the interrupts for the endpoint, set the endpoint
883 * packet size (or fail), set the mode, clear the fifo 896 * packet size (or fail), set the mode, clear the fifo
@@ -890,8 +903,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
890 musb_ep->is_in = 1; 903 musb_ep->is_in = 1;
891 if (!musb_ep->is_in) 904 if (!musb_ep->is_in)
892 goto fail; 905 goto fail;
893 if (tmp > hw_ep->max_packet_sz_tx) 906
907 if (tmp > hw_ep->max_packet_sz_tx) {
908 DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
894 goto fail; 909 goto fail;
910 }
895 911
896 int_txe |= (1 << epnum); 912 int_txe |= (1 << epnum);
897 musb_writew(mbase, MUSB_INTRTXE, int_txe); 913 musb_writew(mbase, MUSB_INTRTXE, int_txe);
@@ -906,7 +922,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
906 if (musb->hwvers < MUSB_HWVERS_2000) 922 if (musb->hwvers < MUSB_HWVERS_2000)
907 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); 923 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
908 else 924 else
909 musb_writew(regs, MUSB_TXMAXP, tmp); 925 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
910 926
911 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 927 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
912 if (musb_readw(regs, MUSB_TXCSR) 928 if (musb_readw(regs, MUSB_TXCSR)
@@ -927,8 +943,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
927 musb_ep->is_in = 0; 943 musb_ep->is_in = 0;
928 if (musb_ep->is_in) 944 if (musb_ep->is_in)
929 goto fail; 945 goto fail;
930 if (tmp > hw_ep->max_packet_sz_rx) 946
947 if (tmp > hw_ep->max_packet_sz_rx) {
948 DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
931 goto fail; 949 goto fail;
950 }
932 951
933 int_rxe |= (1 << epnum); 952 int_rxe |= (1 << epnum);
934 musb_writew(mbase, MUSB_INTRRXE, int_rxe); 953 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
@@ -942,7 +961,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
942 if (musb->hwvers < MUSB_HWVERS_2000) 961 if (musb->hwvers < MUSB_HWVERS_2000)
943 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); 962 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
944 else 963 else
945 musb_writew(regs, MUSB_RXMAXP, tmp); 964 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
946 965
947 /* force shared fifo to OUT-only mode */ 966 /* force shared fifo to OUT-only mode */
948 if (hw_ep->is_shared_fifo) { 967 if (hw_ep->is_shared_fifo) {
@@ -1699,9 +1718,11 @@ void musb_gadget_cleanup(struct musb *musb)
1699 * -ENOMEM no memeory to perform the operation 1718 * -ENOMEM no memeory to perform the operation
1700 * 1719 *
1701 * @param driver the gadget driver 1720 * @param driver the gadget driver
1721 * @param bind the driver's bind function
1702 * @return <0 if error, 0 if everything is fine 1722 * @return <0 if error, 0 if everything is fine
1703 */ 1723 */
1704int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1724int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1725 int (*bind)(struct usb_gadget *))
1705{ 1726{
1706 int retval; 1727 int retval;
1707 unsigned long flags; 1728 unsigned long flags;
@@ -1709,8 +1730,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1709 1730
1710 if (!driver 1731 if (!driver
1711 || driver->speed != USB_SPEED_HIGH 1732 || driver->speed != USB_SPEED_HIGH
1712 || !driver->bind 1733 || !bind || !driver->setup)
1713 || !driver->setup)
1714 return -EINVAL; 1734 return -EINVAL;
1715 1735
1716 /* driver must be initialized to support peripheral mode */ 1736 /* driver must be initialized to support peripheral mode */
@@ -1738,7 +1758,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1738 spin_unlock_irqrestore(&musb->lock, flags); 1758 spin_unlock_irqrestore(&musb->lock, flags);
1739 1759
1740 if (retval == 0) { 1760 if (retval == 0) {
1741 retval = driver->bind(&musb->g); 1761 retval = bind(&musb->g);
1742 if (retval != 0) { 1762 if (retval != 0) {
1743 DBG(3, "bind to driver %s failed --> %d\n", 1763 DBG(3, "bind to driver %s failed --> %d\n",
1744 driver->driver.name, retval); 1764 driver->driver.name, retval);
@@ -1786,7 +1806,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1786 1806
1787 return retval; 1807 return retval;
1788} 1808}
1789EXPORT_SYMBOL(usb_gadget_register_driver); 1809EXPORT_SYMBOL(usb_gadget_probe_driver);
1790 1810
1791static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) 1811static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1792{ 1812{
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 572b1da7f2dc..dec8dc008191 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -79,6 +79,8 @@ struct musb_ep {
79 79
80 /* true if lock must be dropped but req_list may not be advanced */ 80 /* true if lock must be dropped but req_list may not be advanced */
81 u8 busy; 81 u8 busy;
82
83 u8 hb_mult;
82}; 84};
83 85
84static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) 86static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 9e65c47cc98b..4d5bcb4e14d2 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -41,6 +41,7 @@
41#include <linux/errno.h> 41#include <linux/errno.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/list.h> 43#include <linux/list.h>
44#include <linux/dma-mapping.h>
44 45
45#include "musb_core.h" 46#include "musb_core.h"
46#include "musb_host.h" 47#include "musb_host.h"
@@ -1119,6 +1120,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1119 u32 status = 0; 1120 u32 status = 0;
1120 void __iomem *mbase = musb->mregs; 1121 void __iomem *mbase = musb->mregs;
1121 struct dma_channel *dma; 1122 struct dma_channel *dma;
1123 bool transfer_pending = false;
1122 1124
1123 musb_ep_select(mbase, epnum); 1125 musb_ep_select(mbase, epnum);
1124 tx_csr = musb_readw(epio, MUSB_TXCSR); 1126 tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1279,7 +1281,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1279 offset = d->offset; 1281 offset = d->offset;
1280 length = d->length; 1282 length = d->length;
1281 } 1283 }
1282 } else if (dma) { 1284 } else if (dma && urb->transfer_buffer_length == qh->offset) {
1283 done = true; 1285 done = true;
1284 } else { 1286 } else {
1285 /* see if we need to send more data, or ZLP */ 1287 /* see if we need to send more data, or ZLP */
@@ -1292,6 +1294,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1292 if (!done) { 1294 if (!done) {
1293 offset = qh->offset; 1295 offset = qh->offset;
1294 length = urb->transfer_buffer_length - offset; 1296 length = urb->transfer_buffer_length - offset;
1297 transfer_pending = true;
1295 } 1298 }
1296 } 1299 }
1297 } 1300 }
@@ -1311,7 +1314,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1311 urb->actual_length = qh->offset; 1314 urb->actual_length = qh->offset;
1312 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); 1315 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1313 return; 1316 return;
1314 } else if (usb_pipeisoc(pipe) && dma) { 1317 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1315 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, 1318 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1316 offset, length)) { 1319 offset, length)) {
1317 if (is_cppi_enabled() || tusb_dma_omap()) 1320 if (is_cppi_enabled() || tusb_dma_omap())
@@ -1332,6 +1335,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1332 */ 1335 */
1333 if (length > qh->maxpacket) 1336 if (length > qh->maxpacket)
1334 length = qh->maxpacket; 1337 length = qh->maxpacket;
1338 /* Unmap the buffer so that CPU can use it */
1339 unmap_urb_for_dma(musb_to_hcd(musb), urb);
1335 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); 1340 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1336 qh->segsize = length; 1341 qh->segsize = length;
1337 1342
@@ -1752,6 +1757,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1752#endif /* Mentor DMA */ 1757#endif /* Mentor DMA */
1753 1758
1754 if (!dma) { 1759 if (!dma) {
1760 /* Unmap the buffer so that CPU can use it */
1761 unmap_urb_for_dma(musb_to_hcd(musb), urb);
1755 done = musb_host_packet_rx(musb, urb, 1762 done = musb_host_packet_rx(musb, urb,
1756 epnum, iso_err); 1763 epnum, iso_err);
1757 DBG(6, "read %spacket\n", done ? "last " : ""); 1764 DBG(6, "read %spacket\n", done ? "last " : "");
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6dc107f25245..6f771af5cbdb 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -91,7 +91,7 @@ static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
91 channel = &(musb_channel->channel); 91 channel = &(musb_channel->channel);
92 channel->private_data = musb_channel; 92 channel->private_data = musb_channel;
93 channel->status = MUSB_DMA_STATUS_FREE; 93 channel->status = MUSB_DMA_STATUS_FREE;
94 channel->max_len = 0x10000; 94 channel->max_len = 0x100000;
95 /* Tx => mode 1; Rx => mode 0 */ 95 /* Tx => mode 1; Rx => mode 0 */
96 channel->desired_mode = transmit; 96 channel->desired_mode = transmit;
97 channel->actual_len = 0; 97 channel->actual_len = 0;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2111a241dd03..ed618bde1eec 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -320,5 +320,6 @@ int musb_platform_exit(struct musb *musb)
320 320
321 musb_platform_suspend(musb); 321 musb_platform_suspend(musb);
322 322
323 otg_put_transceiver(musb->xceiv);
323 return 0; 324 return 0;
324} 325}
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 3c48e77a0aa2..bde40efc7046 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1152,6 +1152,8 @@ done:
1152 if (ret < 0) { 1152 if (ret < 0) {
1153 if (sync) 1153 if (sync)
1154 iounmap(sync); 1154 iounmap(sync);
1155
1156 otg_put_transceiver(musb->xceiv);
1155 usb_nop_xceiv_unregister(); 1157 usb_nop_xceiv_unregister();
1156 } 1158 }
1157 return ret; 1159 return ret;
@@ -1166,6 +1168,8 @@ int musb_platform_exit(struct musb *musb)
1166 musb->board_set_power(0); 1168 musb->board_set_power(0);
1167 1169
1168 iounmap(musb->sync_va); 1170 iounmap(musb->sync_va);
1171
1172 otg_put_transceiver(musb->xceiv);
1169 usb_nop_xceiv_unregister(); 1173 usb_nop_xceiv_unregister();
1170 return 0; 1174 return 0;
1171} 1175}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 3b1289572d72..5ce07528cd0c 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -67,4 +67,18 @@ config NOP_USB_XCEIV
67 built-in with usb ip or which are autonomous and doesn't require any 67 built-in with usb ip or which are autonomous and doesn't require any
68 phy programming such as ISP1x04 etc. 68 phy programming such as ISP1x04 etc.
69 69
70config USB_LANGWELL_OTG
71 tristate "Intel Langwell USB OTG dual-role support"
72 depends on USB && PCI && INTEL_SCU_IPC
73 select USB_OTG
74 select USB_OTG_UTILS
75 help
76 Say Y here if you want to build Intel Langwell USB OTG
77 transciever driver in kernel. This driver implements role
78 switch between EHCI host driver and Langwell USB OTG
79 client driver.
80
81 To compile this driver as a module, choose M here: the
82 module will be called langwell_otg.
83
70endif # USB || OTG 84endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index aeb49a8ec412..66f1b83e4fa7 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -2,6 +2,9 @@
2# OTG infrastructure and transceiver drivers 2# OTG infrastructure and transceiver drivers
3# 3#
4 4
5ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
6ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
7
5# infrastructure 8# infrastructure
6obj-$(CONFIG_USB_OTG_UTILS) += otg.o 9obj-$(CONFIG_USB_OTG_UTILS) += otg.o
7 10
@@ -9,9 +12,6 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
9obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o 12obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 13obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o 14obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
15obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
12obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o 16obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
13obj-$(CONFIG_USB_ULPI) += ulpi.o 17obj-$(CONFIG_USB_ULPI) += ulpi.o
14
15ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
16ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
17
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
new file mode 100644
index 000000000000..bdc3ea66be69
--- /dev/null
+++ b/drivers/usb/otg/langwell_otg.c
@@ -0,0 +1,2408 @@
1/*
2 * Intel Langwell USB OTG transceiver driver
3 * Copyright (C) 2008 - 2010, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19/* This driver helps to switch Langwell OTG controller function between host
20 * and peripheral. It works with EHCI driver and Langwell client controller
21 * driver together.
22 */
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/pci.h>
26#include <linux/errno.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/device.h>
30#include <linux/moduleparam.h>
31#include <linux/usb/ch9.h>
32#include <linux/usb/gadget.h>
33#include <linux/usb.h>
34#include <linux/usb/otg.h>
35#include <linux/usb/hcd.h>
36#include <linux/notifier.h>
37#include <linux/delay.h>
38#include <asm/intel_scu_ipc.h>
39
40#include <linux/usb/langwell_otg.h>
41
42#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
43#define DRIVER_VERSION "July 10, 2010"
44
45MODULE_DESCRIPTION(DRIVER_DESC);
46MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
47MODULE_VERSION(DRIVER_VERSION);
48MODULE_LICENSE("GPL");
49
50static const char driver_name[] = "langwell_otg";
51
52static int langwell_otg_probe(struct pci_dev *pdev,
53 const struct pci_device_id *id);
54static void langwell_otg_remove(struct pci_dev *pdev);
55static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
56static int langwell_otg_resume(struct pci_dev *pdev);
57
58static int langwell_otg_set_host(struct otg_transceiver *otg,
59 struct usb_bus *host);
60static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
61 struct usb_gadget *gadget);
62static int langwell_otg_start_srp(struct otg_transceiver *otg);
63
64static const struct pci_device_id pci_ids[] = {{
65 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
66 .class_mask = ~0,
67 .vendor = 0x8086,
68 .device = 0x0811,
69 .subvendor = PCI_ANY_ID,
70 .subdevice = PCI_ANY_ID,
71}, { /* end: all zeroes */ }
72};
73
74static struct pci_driver otg_pci_driver = {
75 .name = (char *) driver_name,
76 .id_table = pci_ids,
77
78 .probe = langwell_otg_probe,
79 .remove = langwell_otg_remove,
80
81 .suspend = langwell_otg_suspend,
82 .resume = langwell_otg_resume,
83};
84
85static const char *state_string(enum usb_otg_state state)
86{
87 switch (state) {
88 case OTG_STATE_A_IDLE:
89 return "a_idle";
90 case OTG_STATE_A_WAIT_VRISE:
91 return "a_wait_vrise";
92 case OTG_STATE_A_WAIT_BCON:
93 return "a_wait_bcon";
94 case OTG_STATE_A_HOST:
95 return "a_host";
96 case OTG_STATE_A_SUSPEND:
97 return "a_suspend";
98 case OTG_STATE_A_PERIPHERAL:
99 return "a_peripheral";
100 case OTG_STATE_A_WAIT_VFALL:
101 return "a_wait_vfall";
102 case OTG_STATE_A_VBUS_ERR:
103 return "a_vbus_err";
104 case OTG_STATE_B_IDLE:
105 return "b_idle";
106 case OTG_STATE_B_SRP_INIT:
107 return "b_srp_init";
108 case OTG_STATE_B_PERIPHERAL:
109 return "b_peripheral";
110 case OTG_STATE_B_WAIT_ACON:
111 return "b_wait_acon";
112 case OTG_STATE_B_HOST:
113 return "b_host";
114 default:
115 return "UNDEFINED";
116 }
117}
118
119/* HSM timers */
120static inline struct langwell_otg_timer *otg_timer_initializer
121(void (*function)(unsigned long), unsigned long expires, unsigned long data)
122{
123 struct langwell_otg_timer *timer;
124 timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
125 if (timer == NULL)
126 return timer;
127
128 timer->function = function;
129 timer->expires = expires;
130 timer->data = data;
131 return timer;
132}
133
134static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr,
135 *b_se0_srp_tmr, *b_srp_init_tmr;
136
137static struct list_head active_timers;
138
139static struct langwell_otg *the_transceiver;
140
141/* host/client notify transceiver when event affects HNP state */
142void langwell_update_transceiver(void)
143{
144 struct langwell_otg *lnw = the_transceiver;
145
146 dev_dbg(lnw->dev, "transceiver is updated\n");
147
148 if (!lnw->qwork)
149 return ;
150
151 queue_work(lnw->qwork, &lnw->work);
152}
153EXPORT_SYMBOL(langwell_update_transceiver);
154
155static int langwell_otg_set_host(struct otg_transceiver *otg,
156 struct usb_bus *host)
157{
158 otg->host = host;
159
160 return 0;
161}
162
163static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
164 struct usb_gadget *gadget)
165{
166 otg->gadget = gadget;
167
168 return 0;
169}
170
171static int langwell_otg_set_power(struct otg_transceiver *otg,
172 unsigned mA)
173{
174 return 0;
175}
176
177/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
178static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
179{
180 struct langwell_otg *lnw = the_transceiver;
181 u8 r;
182
183 dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? "on" : "off");
184
185 /* FIXME: surely we should cache this on the first read. If not use
186 readv to avoid two transactions */
187 if (intel_scu_ipc_ioread8(0x00, &r) < 0) {
188 dev_dbg(lnw->dev, "Failed to read PMIC register 0xD2");
189 return -EBUSY;
190 }
191 if ((r & 0x03) != 0x02) {
192 dev_dbg(lnw->dev, "not NEC PMIC attached\n");
193 return -EBUSY;
194 }
195
196 if (intel_scu_ipc_ioread8(0x20, &r) < 0) {
197 dev_dbg(lnw->dev, "Failed to read PMIC register 0xD2");
198 return -EBUSY;
199 }
200
201 if ((r & 0x20) == 0) {
202 dev_dbg(lnw->dev, "no battery attached\n");
203 return -EBUSY;
204 }
205
206 /* Workaround for battery attachment issue */
207 if (r == 0x34) {
208 dev_dbg(lnw->dev, "no battery attached on SH\n");
209 return -EBUSY;
210 }
211
212 dev_dbg(lnw->dev, "battery attached. 2 reg = %x\n", r);
213
214 /* workaround: FW detect writing 0x20/0xc0 to d4 event.
215 * this is only for NEC PMIC.
216 */
217
218 if (intel_scu_ipc_iowrite8(0xD4, enabled ? 0x20 : 0xC0))
219 dev_dbg(lnw->dev, "Failed to write PMIC.\n");
220
221 dev_dbg(lnw->dev, "%s --->\n", __func__);
222
223 return 0;
224}
225
226/* charge vbus or discharge vbus through a resistor to ground */
227static void langwell_otg_chrg_vbus(int on)
228{
229 struct langwell_otg *lnw = the_transceiver;
230 u32 val;
231
232 val = readl(lnw->iotg.base + CI_OTGSC);
233
234 if (on)
235 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
236 lnw->iotg.base + CI_OTGSC);
237 else
238 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
239 lnw->iotg.base + CI_OTGSC);
240}
241
242/* Start SRP */
243static int langwell_otg_start_srp(struct otg_transceiver *otg)
244{
245 struct langwell_otg *lnw = the_transceiver;
246 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
247 u32 val;
248
249 dev_dbg(lnw->dev, "%s --->\n", __func__);
250
251 val = readl(iotg->base + CI_OTGSC);
252
253 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
254 iotg->base + CI_OTGSC);
255
256 /* Check if the data plus is finished or not */
257 msleep(8);
258 val = readl(iotg->base + CI_OTGSC);
259 if (val & (OTGSC_HADP | OTGSC_DP))
260 dev_dbg(lnw->dev, "DataLine SRP Error\n");
261
262 /* Disable interrupt - b_sess_vld */
263 val = readl(iotg->base + CI_OTGSC);
264 val &= (~(OTGSC_BSVIE | OTGSC_BSEIE));
265 writel(val, iotg->base + CI_OTGSC);
266
267 /* Start VBus SRP, drive vbus to generate VBus pulse */
268 iotg->otg.set_vbus(&iotg->otg, true);
269 msleep(15);
270 iotg->otg.set_vbus(&iotg->otg, false);
271
272 /* Enable interrupt - b_sess_vld*/
273 val = readl(iotg->base + CI_OTGSC);
274 dev_dbg(lnw->dev, "after VBUS pulse otgsc = %x\n", val);
275
276 val |= (OTGSC_BSVIE | OTGSC_BSEIE);
277 writel(val, iotg->base + CI_OTGSC);
278
279 /* If Vbus is valid, then update the hsm */
280 if (val & OTGSC_BSV) {
281 dev_dbg(lnw->dev, "no b_sess_vld interrupt\n");
282
283 lnw->iotg.hsm.b_sess_vld = 1;
284 langwell_update_transceiver();
285 }
286
287 dev_dbg(lnw->dev, "%s <---\n", __func__);
288 return 0;
289}
290
291/* stop SOF via bus_suspend */
292static void langwell_otg_loc_sof(int on)
293{
294 struct langwell_otg *lnw = the_transceiver;
295 struct usb_hcd *hcd;
296 int err;
297
298 dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "suspend" : "resume");
299
300 hcd = bus_to_hcd(lnw->iotg.otg.host);
301 if (on)
302 err = hcd->driver->bus_resume(hcd);
303 else
304 err = hcd->driver->bus_suspend(hcd);
305
306 if (err)
307 dev_dbg(lnw->dev, "Fail to resume/suspend USB bus - %d\n", err);
308
309 dev_dbg(lnw->dev, "%s <---\n", __func__);
310}
311
312static int langwell_otg_check_otgsc(void)
313{
314 struct langwell_otg *lnw = the_transceiver;
315 u32 otgsc, usbcfg;
316
317 dev_dbg(lnw->dev, "check sync OTGSC and USBCFG registers\n");
318
319 otgsc = readl(lnw->iotg.base + CI_OTGSC);
320 usbcfg = readl(lnw->usbcfg);
321
322 dev_dbg(lnw->dev, "OTGSC = %08x, USBCFG = %08x\n",
323 otgsc, usbcfg);
324 dev_dbg(lnw->dev, "OTGSC_AVV = %d\n", !!(otgsc & OTGSC_AVV));
325 dev_dbg(lnw->dev, "USBCFG.VBUSVAL = %d\n",
326 !!(usbcfg & USBCFG_VBUSVAL));
327 dev_dbg(lnw->dev, "OTGSC_ASV = %d\n", !!(otgsc & OTGSC_ASV));
328 dev_dbg(lnw->dev, "USBCFG.AVALID = %d\n",
329 !!(usbcfg & USBCFG_AVALID));
330 dev_dbg(lnw->dev, "OTGSC_BSV = %d\n", !!(otgsc & OTGSC_BSV));
331 dev_dbg(lnw->dev, "USBCFG.BVALID = %d\n",
332 !!(usbcfg & USBCFG_BVALID));
333 dev_dbg(lnw->dev, "OTGSC_BSE = %d\n", !!(otgsc & OTGSC_BSE));
334 dev_dbg(lnw->dev, "USBCFG.SESEND = %d\n",
335 !!(usbcfg & USBCFG_SESEND));
336
337 /* Check USBCFG VBusValid/AValid/BValid/SessEnd */
338 if (!!(otgsc & OTGSC_AVV) ^ !!(usbcfg & USBCFG_VBUSVAL)) {
339 dev_dbg(lnw->dev, "OTGSC.AVV != USBCFG.VBUSVAL\n");
340 goto err;
341 }
342 if (!!(otgsc & OTGSC_ASV) ^ !!(usbcfg & USBCFG_AVALID)) {
343 dev_dbg(lnw->dev, "OTGSC.ASV != USBCFG.AVALID\n");
344 goto err;
345 }
346 if (!!(otgsc & OTGSC_BSV) ^ !!(usbcfg & USBCFG_BVALID)) {
347 dev_dbg(lnw->dev, "OTGSC.BSV != USBCFG.BVALID\n");
348 goto err;
349 }
350 if (!!(otgsc & OTGSC_BSE) ^ !!(usbcfg & USBCFG_SESEND)) {
351 dev_dbg(lnw->dev, "OTGSC.BSE != USBCFG.SESSEN\n");
352 goto err;
353 }
354
355 dev_dbg(lnw->dev, "OTGSC and USBCFG are synced\n");
356
357 return 0;
358
359err:
360 dev_warn(lnw->dev, "OTGSC isn't equal to USBCFG\n");
361 return -EPIPE;
362}
363
364
365static void langwell_otg_phy_low_power(int on)
366{
367 struct langwell_otg *lnw = the_transceiver;
368 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
369 u8 val, phcd;
370 int retval;
371
372 dev_dbg(lnw->dev, "%s ---> %s mode\n",
373 __func__, on ? "Low power" : "Normal");
374
375 phcd = 0x40;
376
377 val = readb(iotg->base + CI_HOSTPC1 + 2);
378
379 if (on) {
380 /* Due to hardware issue, after set PHCD, sync will failed
381 * between USBCFG and OTGSC, so before set PHCD, check if
382 * sync is in process now. If the answer is "yes", then do
383 * not touch PHCD bit */
384 retval = langwell_otg_check_otgsc();
385 if (retval) {
386 dev_dbg(lnw->dev, "Skip PHCD programming..\n");
387 return ;
388 }
389
390 writeb(val | phcd, iotg->base + CI_HOSTPC1 + 2);
391 } else
392 writeb(val & ~phcd, iotg->base + CI_HOSTPC1 + 2);
393
394 dev_dbg(lnw->dev, "%s <--- done\n", __func__);
395}
396
397/* After drv vbus, add 2 ms delay to set PHCD */
398static void langwell_otg_phy_low_power_wait(int on)
399{
400 struct langwell_otg *lnw = the_transceiver;
401
402 dev_dbg(lnw->dev, "add 2ms delay before programing PHCD\n");
403
404 mdelay(2);
405 langwell_otg_phy_low_power(on);
406}
407
408/* Enable/Disable OTG interrupt */
409static void langwell_otg_intr(int on)
410{
411 struct langwell_otg *lnw = the_transceiver;
412 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
413 u32 val;
414
415 dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
416
417 val = readl(iotg->base + CI_OTGSC);
418
419 /* OTGSC_INT_MASK doesn't contains 1msInt */
420 if (on) {
421 val = val | (OTGSC_INT_MASK);
422 writel(val, iotg->base + CI_OTGSC);
423 } else {
424 val = val & ~(OTGSC_INT_MASK);
425 writel(val, iotg->base + CI_OTGSC);
426 }
427
428 dev_dbg(lnw->dev, "%s <---\n", __func__);
429}
430
431/* set HAAR: Hardware Assist Auto-Reset */
432static void langwell_otg_HAAR(int on)
433{
434 struct langwell_otg *lnw = the_transceiver;
435 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
436 u32 val;
437
438 dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
439
440 val = readl(iotg->base + CI_OTGSC);
441 if (on)
442 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
443 iotg->base + CI_OTGSC);
444 else
445 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
446 iotg->base + CI_OTGSC);
447
448 dev_dbg(lnw->dev, "%s <---\n", __func__);
449}
450
451/* set HABA: Hardware Assist B-Disconnect to A-Connect */
452static void langwell_otg_HABA(int on)
453{
454 struct langwell_otg *lnw = the_transceiver;
455 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
456 u32 val;
457
458 dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
459
460 val = readl(iotg->base + CI_OTGSC);
461 if (on)
462 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
463 iotg->base + CI_OTGSC);
464 else
465 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
466 iotg->base + CI_OTGSC);
467
468 dev_dbg(lnw->dev, "%s <---\n", __func__);
469}
470
471static int langwell_otg_check_se0_srp(int on)
472{
473 struct langwell_otg *lnw = the_transceiver;
474 int delay_time = TB_SE0_SRP * 10;
475 u32 val;
476
477 dev_dbg(lnw->dev, "%s --->\n", __func__);
478
479 do {
480 udelay(100);
481 if (!delay_time--)
482 break;
483 val = readl(lnw->iotg.base + CI_PORTSC1);
484 val &= PORTSC_LS;
485 } while (!val);
486
487 dev_dbg(lnw->dev, "%s <---\n", __func__);
488 return val;
489}
490
/* Timer callback: set the timeout flag whose address is packed into
 * @indicator (an int* smuggled through the timer's unsigned long data).
 */
static void set_tmout(unsigned long indicator)
{
	int *tmout_flag = (int *)indicator;

	*tmout_flag = 1;
}
496
497void langwell_otg_nsf_msg(unsigned long indicator)
498{
499 struct langwell_otg *lnw = the_transceiver;
500
501 switch (indicator) {
502 case 2:
503 case 4:
504 case 6:
505 case 7:
506 dev_warn(lnw->dev,
507 "OTG:NSF-%lu - deivce not responding\n", indicator);
508 break;
509 case 3:
510 dev_warn(lnw->dev,
511 "OTG:NSF-%lu - deivce not supported\n", indicator);
512 break;
513 default:
514 dev_warn(lnw->dev, "Do not have this kind of NSF\n");
515 break;
516 }
517}
518
519/* Initialize timers */
520static int langwell_otg_init_timers(struct otg_hsm *hsm)
521{
522 /* HSM used timers */
523 a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
524 (unsigned long)&hsm->a_wait_vrise_tmout);
525 if (a_wait_vrise_tmr == NULL)
526 return -ENOMEM;
527 a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
528 (unsigned long)&hsm->a_aidl_bdis_tmout);
529 if (a_aidl_bdis_tmr == NULL)
530 return -ENOMEM;
531 b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
532 (unsigned long)&hsm->b_se0_srp);
533 if (b_se0_srp_tmr == NULL)
534 return -ENOMEM;
535 b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT,
536 (unsigned long)&hsm->b_srp_init_tmout);
537 if (b_srp_init_tmr == NULL)
538 return -ENOMEM;
539
540 return 0;
541}
542
543/* Free timers */
544static void langwell_otg_free_timers(void)
545{
546 kfree(a_wait_vrise_tmr);
547 kfree(a_aidl_bdis_tmr);
548 kfree(b_se0_srp_tmr);
549 kfree(b_srp_init_tmr);
550}
551
552/* The timeout callback function to set time out bit */
553static void langwell_otg_timer_fn(unsigned long indicator)
554{
555 struct langwell_otg *lnw = the_transceiver;
556
557 *(int *)indicator = 1;
558
559 dev_dbg(lnw->dev, "kernel timer - timeout\n");
560
561 langwell_update_transceiver();
562}
563
564/* kernel timer used instead of HW based interrupt */
565static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers)
566{
567 struct langwell_otg *lnw = the_transceiver;
568 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
569 unsigned long j = jiffies;
570 unsigned long data, time;
571
572 switch (timers) {
573 case TA_WAIT_VRISE_TMR:
574 iotg->hsm.a_wait_vrise_tmout = 0;
575 data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
576 time = TA_WAIT_VRISE;
577 break;
578 case TA_WAIT_BCON_TMR:
579 iotg->hsm.a_wait_bcon_tmout = 0;
580 data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
581 time = TA_WAIT_BCON;
582 break;
583 case TA_AIDL_BDIS_TMR:
584 iotg->hsm.a_aidl_bdis_tmout = 0;
585 data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
586 time = TA_AIDL_BDIS;
587 break;
588 case TB_ASE0_BRST_TMR:
589 iotg->hsm.b_ase0_brst_tmout = 0;
590 data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
591 time = TB_ASE0_BRST;
592 break;
593 case TB_SRP_INIT_TMR:
594 iotg->hsm.b_srp_init_tmout = 0;
595 data = (unsigned long)&iotg->hsm.b_srp_init_tmout;
596 time = TB_SRP_INIT;
597 break;
598 case TB_SRP_FAIL_TMR:
599 iotg->hsm.b_srp_fail_tmout = 0;
600 data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
601 time = TB_SRP_FAIL;
602 break;
603 case TB_BUS_SUSPEND_TMR:
604 iotg->hsm.b_bus_suspend_tmout = 0;
605 data = (unsigned long)&iotg->hsm.b_bus_suspend_tmout;
606 time = TB_BUS_SUSPEND;
607 break;
608 default:
609 dev_dbg(lnw->dev, "unkown timer, cannot enable it\n");
610 return;
611 }
612
613 lnw->hsm_timer.data = data;
614 lnw->hsm_timer.function = langwell_otg_timer_fn;
615 lnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
616
617 add_timer(&lnw->hsm_timer);
618
619 dev_dbg(lnw->dev, "add timer successfully\n");
620}
621
622/* Add timer to timer list */
623static void langwell_otg_add_timer(void *gtimer)
624{
625 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
626 struct langwell_otg_timer *tmp_timer;
627 struct intel_mid_otg_xceiv *iotg = &the_transceiver->iotg;
628 u32 val32;
629
630 /* Check if the timer is already in the active list,
631 * if so update timer count
632 */
633 list_for_each_entry(tmp_timer, &active_timers, list)
634 if (tmp_timer == timer) {
635 timer->count = timer->expires;
636 return;
637 }
638 timer->count = timer->expires;
639
640 if (list_empty(&active_timers)) {
641 val32 = readl(iotg->base + CI_OTGSC);
642 writel(val32 | OTGSC_1MSE, iotg->base + CI_OTGSC);
643 }
644
645 list_add_tail(&timer->list, &active_timers);
646}
647
648/* Remove timer from the timer list; clear timeout status */
649static void langwell_otg_del_timer(void *gtimer)
650{
651 struct langwell_otg *lnw = the_transceiver;
652 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
653 struct langwell_otg_timer *tmp_timer, *del_tmp;
654 u32 val32;
655
656 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
657 if (tmp_timer == timer)
658 list_del(&timer->list);
659
660 if (list_empty(&active_timers)) {
661 val32 = readl(lnw->iotg.base + CI_OTGSC);
662 writel(val32 & ~OTGSC_1MSE, lnw->iotg.base + CI_OTGSC);
663 }
664}
665
666/* Reduce timer count by 1, and find timeout conditions.*/
667static int langwell_otg_tick_timer(u32 *int_sts)
668{
669 struct langwell_otg *lnw = the_transceiver;
670 struct langwell_otg_timer *tmp_timer, *del_tmp;
671 int expired = 0;
672
673 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
674 tmp_timer->count--;
675 /* check if timer expires */
676 if (!tmp_timer->count) {
677 list_del(&tmp_timer->list);
678 tmp_timer->function(tmp_timer->data);
679 expired = 1;
680 }
681 }
682
683 if (list_empty(&active_timers)) {
684 dev_dbg(lnw->dev, "tick timer: disable 1ms int\n");
685 *int_sts = *int_sts & ~OTGSC_1MSE;
686 }
687 return expired;
688}
689
690static void reset_otg(void)
691{
692 struct langwell_otg *lnw = the_transceiver;
693 int delay_time = 1000;
694 u32 val;
695
696 dev_dbg(lnw->dev, "reseting OTG controller ...\n");
697 val = readl(lnw->iotg.base + CI_USBCMD);
698 writel(val | USBCMD_RST, lnw->iotg.base + CI_USBCMD);
699 do {
700 udelay(100);
701 if (!delay_time--)
702 dev_dbg(lnw->dev, "reset timeout\n");
703 val = readl(lnw->iotg.base + CI_USBCMD);
704 val &= USBCMD_RST;
705 } while (val != 0);
706 dev_dbg(lnw->dev, "reset done.\n");
707}
708
709static void set_host_mode(void)
710{
711 struct langwell_otg *lnw = the_transceiver;
712 u32 val;
713
714 reset_otg();
715 val = readl(lnw->iotg.base + CI_USBMODE);
716 val = (val & (~USBMODE_CM)) | USBMODE_HOST;
717 writel(val, lnw->iotg.base + CI_USBMODE);
718}
719
720static void set_client_mode(void)
721{
722 struct langwell_otg *lnw = the_transceiver;
723 u32 val;
724
725 reset_otg();
726 val = readl(lnw->iotg.base + CI_USBMODE);
727 val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
728 writel(val, lnw->iotg.base + CI_USBMODE);
729}
730
731static void init_hsm(void)
732{
733 struct langwell_otg *lnw = the_transceiver;
734 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
735 u32 val32;
736
737 /* read OTGSC after reset */
738 val32 = readl(lnw->iotg.base + CI_OTGSC);
739 dev_dbg(lnw->dev, "%s: OTGSC init value = 0x%x\n", __func__, val32);
740
741 /* set init state */
742 if (val32 & OTGSC_ID) {
743 iotg->hsm.id = 1;
744 iotg->otg.default_a = 0;
745 set_client_mode();
746 iotg->otg.state = OTG_STATE_B_IDLE;
747 } else {
748 iotg->hsm.id = 0;
749 iotg->otg.default_a = 1;
750 set_host_mode();
751 iotg->otg.state = OTG_STATE_A_IDLE;
752 }
753
754 /* set session indicator */
755 if (val32 & OTGSC_BSE)
756 iotg->hsm.b_sess_end = 1;
757 if (val32 & OTGSC_BSV)
758 iotg->hsm.b_sess_vld = 1;
759 if (val32 & OTGSC_ASV)
760 iotg->hsm.a_sess_vld = 1;
761 if (val32 & OTGSC_AVV)
762 iotg->hsm.a_vbus_vld = 1;
763
764 /* defautly power the bus */
765 iotg->hsm.a_bus_req = 1;
766 iotg->hsm.a_bus_drop = 0;
767 /* defautly don't request bus as B device */
768 iotg->hsm.b_bus_req = 0;
769 /* no system error */
770 iotg->hsm.a_clr_err = 0;
771
772 langwell_otg_phy_low_power_wait(1);
773}
774
775static void update_hsm(void)
776{
777 struct langwell_otg *lnw = the_transceiver;
778 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
779 u32 val32;
780
781 /* read OTGSC */
782 val32 = readl(lnw->iotg.base + CI_OTGSC);
783 dev_dbg(lnw->dev, "%s: OTGSC value = 0x%x\n", __func__, val32);
784
785 iotg->hsm.id = !!(val32 & OTGSC_ID);
786 iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
787 iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
788 iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
789 iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
790}
791
/* Dummy IRQ handler registered while the HABA hardware assist is armed
 * (see the A_HOST/A_WAIT_BCON paths in langwell_otg_work).
 *
 * Only claims the interrupt when the controller is in device mode and a
 * bit from INTR_DUMMY_MASK is pending; it then drops the cached b_conn
 * flag and kicks the state-machine work, and acks the handled USBSTS
 * bits by writing them back.
 */
static irqreturn_t otg_dummy_irq(int irq, void *_dev)
{
	struct langwell_otg *lnw = the_transceiver;
	void __iomem *reg_base = _dev;	/* iotg->base, passed at request_irq */
	u32 val;
	u32 int_mask = 0;

	/* not ours unless the controller is in device mode */
	val = readl(reg_base + CI_USBMODE);
	if ((val & USBMODE_CM) != USBMODE_DEVICE)
		return IRQ_NONE;

	val = readl(reg_base + CI_USBSTS);
	int_mask = val & INTR_DUMMY_MASK;

	if (int_mask == 0)
		return IRQ_NONE;

	/* clear hsm.b_conn here since host driver can't detect it
	 * otg_dummy_irq called means B-disconnect happened.
	 */
	if (lnw->iotg.hsm.b_conn) {
		lnw->iotg.hsm.b_conn = 0;
		/* trylock: we are in hard-IRQ context, so do not spin
		 * if the workqueue lock is contended */
		if (spin_trylock(&lnw->wq_lock)) {
			langwell_update_transceiver();
			spin_unlock(&lnw->wq_lock);
		}
	}

	/* Clear interrupts */
	writel(int_mask, reg_base + CI_USBSTS);
	return IRQ_HANDLED;
}
824
825static irqreturn_t otg_irq(int irq, void *_dev)
826{
827 struct langwell_otg *lnw = _dev;
828 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
829 u32 int_sts, int_en;
830 u32 int_mask = 0;
831 int flag = 0;
832
833 int_sts = readl(lnw->iotg.base + CI_OTGSC);
834 int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
835 int_mask = int_sts & int_en;
836 if (int_mask == 0)
837 return IRQ_NONE;
838
839 if (int_mask & OTGSC_IDIS) {
840 dev_dbg(lnw->dev, "%s: id change int\n", __func__);
841 iotg->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
842 dev_dbg(lnw->dev, "id = %d\n", iotg->hsm.id);
843 flag = 1;
844 }
845 if (int_mask & OTGSC_DPIS) {
846 dev_dbg(lnw->dev, "%s: data pulse int\n", __func__);
847 iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
848 dev_dbg(lnw->dev, "data pulse = %d\n", iotg->hsm.a_srp_det);
849 flag = 1;
850 }
851 if (int_mask & OTGSC_BSEIS) {
852 dev_dbg(lnw->dev, "%s: b session end int\n", __func__);
853 iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
854 dev_dbg(lnw->dev, "b_sess_end = %d\n", iotg->hsm.b_sess_end);
855 flag = 1;
856 }
857 if (int_mask & OTGSC_BSVIS) {
858 dev_dbg(lnw->dev, "%s: b session valid int\n", __func__);
859 iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
860 dev_dbg(lnw->dev, "b_sess_vld = %d\n", iotg->hsm.b_sess_end);
861 flag = 1;
862 }
863 if (int_mask & OTGSC_ASVIS) {
864 dev_dbg(lnw->dev, "%s: a session valid int\n", __func__);
865 iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
866 dev_dbg(lnw->dev, "a_sess_vld = %d\n", iotg->hsm.a_sess_vld);
867 flag = 1;
868 }
869 if (int_mask & OTGSC_AVVIS) {
870 dev_dbg(lnw->dev, "%s: a vbus valid int\n", __func__);
871 iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
872 dev_dbg(lnw->dev, "a_vbus_vld = %d\n", iotg->hsm.a_vbus_vld);
873 flag = 1;
874 }
875
876 if (int_mask & OTGSC_1MSS) {
877 /* need to schedule otg_work if any timer is expired */
878 if (langwell_otg_tick_timer(&int_sts))
879 flag = 1;
880 }
881
882 writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
883 lnw->iotg.base + CI_OTGSC);
884 if (flag)
885 langwell_update_transceiver();
886
887 return IRQ_HANDLED;
888}
889
890static int langwell_otg_iotg_notify(struct notifier_block *nb,
891 unsigned long action, void *data)
892{
893 struct langwell_otg *lnw = the_transceiver;
894 struct intel_mid_otg_xceiv *iotg = data;
895 int flag = 0;
896
897 if (iotg == NULL)
898 return NOTIFY_BAD;
899
900 if (lnw == NULL)
901 return NOTIFY_BAD;
902
903 switch (action) {
904 case MID_OTG_NOTIFY_CONNECT:
905 dev_dbg(lnw->dev, "Lnw OTG Notify Connect Event\n");
906 if (iotg->otg.default_a == 1)
907 iotg->hsm.b_conn = 1;
908 else
909 iotg->hsm.a_conn = 1;
910 flag = 1;
911 break;
912 case MID_OTG_NOTIFY_DISCONN:
913 dev_dbg(lnw->dev, "Lnw OTG Notify Disconnect Event\n");
914 if (iotg->otg.default_a == 1)
915 iotg->hsm.b_conn = 0;
916 else
917 iotg->hsm.a_conn = 0;
918 flag = 1;
919 break;
920 case MID_OTG_NOTIFY_HSUSPEND:
921 dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus suspend Event\n");
922 if (iotg->otg.default_a == 1)
923 iotg->hsm.a_suspend_req = 1;
924 else
925 iotg->hsm.b_bus_req = 0;
926 flag = 1;
927 break;
928 case MID_OTG_NOTIFY_HRESUME:
929 dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus resume Event\n");
930 if (iotg->otg.default_a == 1)
931 iotg->hsm.b_bus_resume = 1;
932 flag = 1;
933 break;
934 case MID_OTG_NOTIFY_CSUSPEND:
935 dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus suspend Event\n");
936 if (iotg->otg.default_a == 1) {
937 if (iotg->hsm.b_bus_suspend_vld == 2) {
938 iotg->hsm.b_bus_suspend = 1;
939 iotg->hsm.b_bus_suspend_vld = 0;
940 flag = 1;
941 } else {
942 iotg->hsm.b_bus_suspend_vld++;
943 flag = 0;
944 }
945 } else {
946 if (iotg->hsm.a_bus_suspend == 0) {
947 iotg->hsm.a_bus_suspend = 1;
948 flag = 1;
949 }
950 }
951 break;
952 case MID_OTG_NOTIFY_CRESUME:
953 dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus resume Event\n");
954 if (iotg->otg.default_a == 0)
955 iotg->hsm.a_bus_suspend = 0;
956 flag = 0;
957 break;
958 case MID_OTG_NOTIFY_HOSTADD:
959 dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver Add\n");
960 flag = 1;
961 break;
962 case MID_OTG_NOTIFY_HOSTREMOVE:
963 dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver remove\n");
964 flag = 1;
965 break;
966 case MID_OTG_NOTIFY_CLIENTADD:
967 dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver Add\n");
968 flag = 1;
969 break;
970 case MID_OTG_NOTIFY_CLIENTREMOVE:
971 dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver remove\n");
972 flag = 1;
973 break;
974 default:
975 dev_dbg(lnw->dev, "Lnw OTG Nofity unknown notify message\n");
976 return NOTIFY_DONE;
977 }
978
979 if (flag)
980 langwell_update_transceiver();
981
982 return NOTIFY_OK;
983}
984
985static void langwell_otg_work(struct work_struct *work)
986{
987 struct langwell_otg *lnw;
988 struct intel_mid_otg_xceiv *iotg;
989 int retval;
990 struct pci_dev *pdev;
991
992 lnw = container_of(work, struct langwell_otg, work);
993 iotg = &lnw->iotg;
994 pdev = to_pci_dev(lnw->dev);
995
996 dev_dbg(lnw->dev, "%s: old state = %s\n", __func__,
997 state_string(iotg->otg.state));
998
999 switch (iotg->otg.state) {
1000 case OTG_STATE_UNDEFINED:
1001 case OTG_STATE_B_IDLE:
1002 if (!iotg->hsm.id) {
1003 langwell_otg_del_timer(b_srp_init_tmr);
1004 del_timer_sync(&lnw->hsm_timer);
1005
1006 iotg->otg.default_a = 1;
1007 iotg->hsm.a_srp_det = 0;
1008
1009 langwell_otg_chrg_vbus(0);
1010 set_host_mode();
1011 langwell_otg_phy_low_power(1);
1012
1013 iotg->otg.state = OTG_STATE_A_IDLE;
1014 langwell_update_transceiver();
1015 } else if (iotg->hsm.b_sess_vld) {
1016 langwell_otg_del_timer(b_srp_init_tmr);
1017 del_timer_sync(&lnw->hsm_timer);
1018 iotg->hsm.b_sess_end = 0;
1019 iotg->hsm.a_bus_suspend = 0;
1020 langwell_otg_chrg_vbus(0);
1021
1022 if (lnw->iotg.start_peripheral) {
1023 lnw->iotg.start_peripheral(&lnw->iotg);
1024 iotg->otg.state = OTG_STATE_B_PERIPHERAL;
1025 } else
1026 dev_dbg(lnw->dev, "client driver not loaded\n");
1027
1028 } else if (iotg->hsm.b_srp_init_tmout) {
1029 iotg->hsm.b_srp_init_tmout = 0;
1030 dev_warn(lnw->dev, "SRP init timeout\n");
1031 } else if (iotg->hsm.b_srp_fail_tmout) {
1032 iotg->hsm.b_srp_fail_tmout = 0;
1033 iotg->hsm.b_bus_req = 0;
1034
1035 /* No silence failure */
1036 langwell_otg_nsf_msg(6);
1037 } else if (iotg->hsm.b_bus_req && iotg->hsm.b_sess_end) {
1038 del_timer_sync(&lnw->hsm_timer);
1039 /* workaround for b_se0_srp detection */
1040 retval = langwell_otg_check_se0_srp(0);
1041 if (retval) {
1042 iotg->hsm.b_bus_req = 0;
1043 dev_dbg(lnw->dev, "LS isn't SE0, try later\n");
1044 } else {
1045 /* clear the PHCD before start srp */
1046 langwell_otg_phy_low_power(0);
1047
1048 /* Start SRP */
1049 langwell_otg_add_timer(b_srp_init_tmr);
1050 iotg->otg.start_srp(&iotg->otg);
1051 langwell_otg_del_timer(b_srp_init_tmr);
1052 langwell_otg_add_ktimer(TB_SRP_FAIL_TMR);
1053
1054 /* reset PHY low power mode here */
1055 langwell_otg_phy_low_power_wait(1);
1056 }
1057 }
1058 break;
1059 case OTG_STATE_B_SRP_INIT:
1060 if (!iotg->hsm.id) {
1061 iotg->otg.default_a = 1;
1062 iotg->hsm.a_srp_det = 0;
1063
1064 /* Turn off VBus */
1065 iotg->otg.set_vbus(&iotg->otg, false);
1066 langwell_otg_chrg_vbus(0);
1067 set_host_mode();
1068 langwell_otg_phy_low_power(1);
1069 iotg->otg.state = OTG_STATE_A_IDLE;
1070 langwell_update_transceiver();
1071 } else if (iotg->hsm.b_sess_vld) {
1072 langwell_otg_chrg_vbus(0);
1073 if (lnw->iotg.start_peripheral) {
1074 lnw->iotg.start_peripheral(&lnw->iotg);
1075 iotg->otg.state = OTG_STATE_B_PERIPHERAL;
1076 } else
1077 dev_dbg(lnw->dev, "client driver not loaded\n");
1078 }
1079 break;
1080 case OTG_STATE_B_PERIPHERAL:
1081 if (!iotg->hsm.id) {
1082 iotg->otg.default_a = 1;
1083 iotg->hsm.a_srp_det = 0;
1084
1085 langwell_otg_chrg_vbus(0);
1086
1087 if (lnw->iotg.stop_peripheral)
1088 lnw->iotg.stop_peripheral(&lnw->iotg);
1089 else
1090 dev_dbg(lnw->dev,
1091 "client driver has been removed.\n");
1092
1093 set_host_mode();
1094 langwell_otg_phy_low_power(1);
1095 iotg->otg.state = OTG_STATE_A_IDLE;
1096 langwell_update_transceiver();
1097 } else if (!iotg->hsm.b_sess_vld) {
1098 iotg->hsm.b_hnp_enable = 0;
1099
1100 if (lnw->iotg.stop_peripheral)
1101 lnw->iotg.stop_peripheral(&lnw->iotg);
1102 else
1103 dev_dbg(lnw->dev,
1104 "client driver has been removed.\n");
1105
1106 iotg->otg.state = OTG_STATE_B_IDLE;
1107 } else if (iotg->hsm.b_bus_req && iotg->otg.gadget &&
1108 iotg->otg.gadget->b_hnp_enable &&
1109 iotg->hsm.a_bus_suspend) {
1110
1111 if (lnw->iotg.stop_peripheral)
1112 lnw->iotg.stop_peripheral(&lnw->iotg);
1113 else
1114 dev_dbg(lnw->dev,
1115 "client driver has been removed.\n");
1116
1117 langwell_otg_HAAR(1);
1118 iotg->hsm.a_conn = 0;
1119
1120 if (lnw->iotg.start_host) {
1121 lnw->iotg.start_host(&lnw->iotg);
1122 iotg->otg.state = OTG_STATE_B_WAIT_ACON;
1123 } else
1124 dev_dbg(lnw->dev,
1125 "host driver not loaded.\n");
1126
1127 iotg->hsm.a_bus_resume = 0;
1128 langwell_otg_add_ktimer(TB_ASE0_BRST_TMR);
1129 }
1130 break;
1131
1132 case OTG_STATE_B_WAIT_ACON:
1133 if (!iotg->hsm.id) {
1134 /* delete hsm timer for b_ase0_brst_tmr */
1135 del_timer_sync(&lnw->hsm_timer);
1136
1137 iotg->otg.default_a = 1;
1138 iotg->hsm.a_srp_det = 0;
1139
1140 langwell_otg_chrg_vbus(0);
1141
1142 langwell_otg_HAAR(0);
1143 if (lnw->iotg.stop_host)
1144 lnw->iotg.stop_host(&lnw->iotg);
1145 else
1146 dev_dbg(lnw->dev,
1147 "host driver has been removed.\n");
1148
1149 set_host_mode();
1150 langwell_otg_phy_low_power(1);
1151 iotg->otg.state = OTG_STATE_A_IDLE;
1152 langwell_update_transceiver();
1153 } else if (!iotg->hsm.b_sess_vld) {
1154 /* delete hsm timer for b_ase0_brst_tmr */
1155 del_timer_sync(&lnw->hsm_timer);
1156
1157 iotg->hsm.b_hnp_enable = 0;
1158 iotg->hsm.b_bus_req = 0;
1159
1160 langwell_otg_chrg_vbus(0);
1161 langwell_otg_HAAR(0);
1162
1163 if (lnw->iotg.stop_host)
1164 lnw->iotg.stop_host(&lnw->iotg);
1165 else
1166 dev_dbg(lnw->dev,
1167 "host driver has been removed.\n");
1168
1169 set_client_mode();
1170 langwell_otg_phy_low_power(1);
1171 iotg->otg.state = OTG_STATE_B_IDLE;
1172 } else if (iotg->hsm.a_conn) {
1173 /* delete hsm timer for b_ase0_brst_tmr */
1174 del_timer_sync(&lnw->hsm_timer);
1175
1176 langwell_otg_HAAR(0);
1177 iotg->otg.state = OTG_STATE_B_HOST;
1178 langwell_update_transceiver();
1179 } else if (iotg->hsm.a_bus_resume ||
1180 iotg->hsm.b_ase0_brst_tmout) {
1181 /* delete hsm timer for b_ase0_brst_tmr */
1182 del_timer_sync(&lnw->hsm_timer);
1183
1184 langwell_otg_HAAR(0);
1185 langwell_otg_nsf_msg(7);
1186
1187 if (lnw->iotg.stop_host)
1188 lnw->iotg.stop_host(&lnw->iotg);
1189 else
1190 dev_dbg(lnw->dev,
1191 "host driver has been removed.\n");
1192
1193 iotg->hsm.a_bus_suspend = 0;
1194 iotg->hsm.b_bus_req = 0;
1195
1196 if (lnw->iotg.start_peripheral)
1197 lnw->iotg.start_peripheral(&lnw->iotg);
1198 else
1199 dev_dbg(lnw->dev,
1200 "client driver not loaded.\n");
1201
1202 iotg->otg.state = OTG_STATE_B_PERIPHERAL;
1203 }
1204 break;
1205
1206 case OTG_STATE_B_HOST:
1207 if (!iotg->hsm.id) {
1208 iotg->otg.default_a = 1;
1209 iotg->hsm.a_srp_det = 0;
1210
1211 langwell_otg_chrg_vbus(0);
1212
1213 if (lnw->iotg.stop_host)
1214 lnw->iotg.stop_host(&lnw->iotg);
1215 else
1216 dev_dbg(lnw->dev,
1217 "host driver has been removed.\n");
1218
1219 set_host_mode();
1220 langwell_otg_phy_low_power(1);
1221 iotg->otg.state = OTG_STATE_A_IDLE;
1222 langwell_update_transceiver();
1223 } else if (!iotg->hsm.b_sess_vld) {
1224 iotg->hsm.b_hnp_enable = 0;
1225 iotg->hsm.b_bus_req = 0;
1226
1227 langwell_otg_chrg_vbus(0);
1228 if (lnw->iotg.stop_host)
1229 lnw->iotg.stop_host(&lnw->iotg);
1230 else
1231 dev_dbg(lnw->dev,
1232 "host driver has been removed.\n");
1233
1234 set_client_mode();
1235 langwell_otg_phy_low_power(1);
1236 iotg->otg.state = OTG_STATE_B_IDLE;
1237 } else if ((!iotg->hsm.b_bus_req) ||
1238 (!iotg->hsm.a_conn)) {
1239 iotg->hsm.b_bus_req = 0;
1240 langwell_otg_loc_sof(0);
1241
1242 if (lnw->iotg.stop_host)
1243 lnw->iotg.stop_host(&lnw->iotg);
1244 else
1245 dev_dbg(lnw->dev,
1246 "host driver has been removed.\n");
1247
1248 iotg->hsm.a_bus_suspend = 0;
1249
1250 if (lnw->iotg.start_peripheral)
1251 lnw->iotg.start_peripheral(&lnw->iotg);
1252 else
1253 dev_dbg(lnw->dev,
1254 "client driver not loaded.\n");
1255
1256 iotg->otg.state = OTG_STATE_B_PERIPHERAL;
1257 }
1258 break;
1259
1260 case OTG_STATE_A_IDLE:
1261 iotg->otg.default_a = 1;
1262 if (iotg->hsm.id) {
1263 iotg->otg.default_a = 0;
1264 iotg->hsm.b_bus_req = 0;
1265 iotg->hsm.vbus_srp_up = 0;
1266
1267 langwell_otg_chrg_vbus(0);
1268 set_client_mode();
1269 langwell_otg_phy_low_power(1);
1270 iotg->otg.state = OTG_STATE_B_IDLE;
1271 langwell_update_transceiver();
1272 } else if (!iotg->hsm.a_bus_drop &&
1273 (iotg->hsm.a_srp_det || iotg->hsm.a_bus_req)) {
1274 langwell_otg_phy_low_power(0);
1275
1276 /* Turn on VBus */
1277 iotg->otg.set_vbus(&iotg->otg, true);
1278
1279 iotg->hsm.vbus_srp_up = 0;
1280 iotg->hsm.a_wait_vrise_tmout = 0;
1281 langwell_otg_add_timer(a_wait_vrise_tmr);
1282 iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
1283 langwell_update_transceiver();
1284 } else if (!iotg->hsm.a_bus_drop && iotg->hsm.a_sess_vld) {
1285 iotg->hsm.vbus_srp_up = 1;
1286 } else if (!iotg->hsm.a_sess_vld && iotg->hsm.vbus_srp_up) {
1287 msleep(10);
1288 langwell_otg_phy_low_power(0);
1289
1290 /* Turn on VBus */
1291 iotg->otg.set_vbus(&iotg->otg, true);
1292 iotg->hsm.a_srp_det = 1;
1293 iotg->hsm.vbus_srp_up = 0;
1294 iotg->hsm.a_wait_vrise_tmout = 0;
1295 langwell_otg_add_timer(a_wait_vrise_tmr);
1296 iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
1297 langwell_update_transceiver();
1298 } else if (!iotg->hsm.a_sess_vld &&
1299 !iotg->hsm.vbus_srp_up) {
1300 langwell_otg_phy_low_power(1);
1301 }
1302 break;
1303 case OTG_STATE_A_WAIT_VRISE:
1304 if (iotg->hsm.id) {
1305 langwell_otg_del_timer(a_wait_vrise_tmr);
1306 iotg->hsm.b_bus_req = 0;
1307 iotg->otg.default_a = 0;
1308
1309 /* Turn off VBus */
1310 iotg->otg.set_vbus(&iotg->otg, false);
1311 set_client_mode();
1312 langwell_otg_phy_low_power_wait(1);
1313 iotg->otg.state = OTG_STATE_B_IDLE;
1314 } else if (iotg->hsm.a_vbus_vld) {
1315 langwell_otg_del_timer(a_wait_vrise_tmr);
1316 iotg->hsm.b_conn = 0;
1317 if (lnw->iotg.start_host)
1318 lnw->iotg.start_host(&lnw->iotg);
1319 else {
1320 dev_dbg(lnw->dev, "host driver not loaded.\n");
1321 break;
1322 }
1323
1324 langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
1325 iotg->otg.state = OTG_STATE_A_WAIT_BCON;
1326 } else if (iotg->hsm.a_wait_vrise_tmout) {
1327 iotg->hsm.b_conn = 0;
1328 if (iotg->hsm.a_vbus_vld) {
1329 if (lnw->iotg.start_host)
1330 lnw->iotg.start_host(&lnw->iotg);
1331 else {
1332 dev_dbg(lnw->dev,
1333 "host driver not loaded.\n");
1334 break;
1335 }
1336 langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
1337 iotg->otg.state = OTG_STATE_A_WAIT_BCON;
1338 } else {
1339
1340 /* Turn off VBus */
1341 iotg->otg.set_vbus(&iotg->otg, false);
1342 langwell_otg_phy_low_power_wait(1);
1343 iotg->otg.state = OTG_STATE_A_VBUS_ERR;
1344 }
1345 }
1346 break;
1347 case OTG_STATE_A_WAIT_BCON:
1348 if (iotg->hsm.id) {
1349 /* delete hsm timer for a_wait_bcon_tmr */
1350 del_timer_sync(&lnw->hsm_timer);
1351
1352 iotg->otg.default_a = 0;
1353 iotg->hsm.b_bus_req = 0;
1354
1355 if (lnw->iotg.stop_host)
1356 lnw->iotg.stop_host(&lnw->iotg);
1357 else
1358 dev_dbg(lnw->dev,
1359 "host driver has been removed.\n");
1360
1361 /* Turn off VBus */
1362 iotg->otg.set_vbus(&iotg->otg, false);
1363 set_client_mode();
1364 langwell_otg_phy_low_power_wait(1);
1365 iotg->otg.state = OTG_STATE_B_IDLE;
1366 langwell_update_transceiver();
1367 } else if (!iotg->hsm.a_vbus_vld) {
1368 /* delete hsm timer for a_wait_bcon_tmr */
1369 del_timer_sync(&lnw->hsm_timer);
1370
1371 if (lnw->iotg.stop_host)
1372 lnw->iotg.stop_host(&lnw->iotg);
1373 else
1374 dev_dbg(lnw->dev,
1375 "host driver has been removed.\n");
1376
1377 /* Turn off VBus */
1378 iotg->otg.set_vbus(&iotg->otg, false);
1379 langwell_otg_phy_low_power_wait(1);
1380 iotg->otg.state = OTG_STATE_A_VBUS_ERR;
1381 } else if (iotg->hsm.a_bus_drop ||
1382 (iotg->hsm.a_wait_bcon_tmout &&
1383 !iotg->hsm.a_bus_req)) {
1384 /* delete hsm timer for a_wait_bcon_tmr */
1385 del_timer_sync(&lnw->hsm_timer);
1386
1387 if (lnw->iotg.stop_host)
1388 lnw->iotg.stop_host(&lnw->iotg);
1389 else
1390 dev_dbg(lnw->dev,
1391 "host driver has been removed.\n");
1392
1393 /* Turn off VBus */
1394 iotg->otg.set_vbus(&iotg->otg, false);
1395 iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
1396 } else if (iotg->hsm.b_conn) {
1397 /* delete hsm timer for a_wait_bcon_tmr */
1398 del_timer_sync(&lnw->hsm_timer);
1399
1400 iotg->hsm.a_suspend_req = 0;
1401 iotg->otg.state = OTG_STATE_A_HOST;
1402 if (iotg->hsm.a_srp_det && iotg->otg.host &&
1403 !iotg->otg.host->b_hnp_enable) {
1404 /* SRP capable peripheral-only device */
1405 iotg->hsm.a_bus_req = 1;
1406 iotg->hsm.a_srp_det = 0;
1407 } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
1408 iotg->otg.host->b_hnp_enable) {
1409 /* It is not safe enough to do a fast
1410 * transistion from A_WAIT_BCON to
1411 * A_SUSPEND */
1412 msleep(10000);
1413 if (iotg->hsm.a_bus_req)
1414 break;
1415
1416 if (request_irq(pdev->irq,
1417 otg_dummy_irq, IRQF_SHARED,
1418 driver_name, iotg->base) != 0) {
1419 dev_dbg(lnw->dev,
1420 "request interrupt %d fail\n",
1421 pdev->irq);
1422 }
1423
1424 langwell_otg_HABA(1);
1425 iotg->hsm.b_bus_resume = 0;
1426 iotg->hsm.a_aidl_bdis_tmout = 0;
1427
1428 langwell_otg_loc_sof(0);
1429 /* clear PHCD to enable HW timer */
1430 langwell_otg_phy_low_power(0);
1431 langwell_otg_add_timer(a_aidl_bdis_tmr);
1432 iotg->otg.state = OTG_STATE_A_SUSPEND;
1433 } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
1434 !iotg->otg.host->b_hnp_enable) {
1435 if (lnw->iotg.stop_host)
1436 lnw->iotg.stop_host(&lnw->iotg);
1437 else
1438 dev_dbg(lnw->dev,
1439 "host driver removed.\n");
1440
1441 /* Turn off VBus */
1442 iotg->otg.set_vbus(&iotg->otg, false);
1443 iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
1444 }
1445 }
1446 break;
1447 case OTG_STATE_A_HOST:
1448 if (iotg->hsm.id) {
1449 iotg->otg.default_a = 0;
1450 iotg->hsm.b_bus_req = 0;
1451
1452 if (lnw->iotg.stop_host)
1453 lnw->iotg.stop_host(&lnw->iotg);
1454 else
1455 dev_dbg(lnw->dev,
1456 "host driver has been removed.\n");
1457
1458 /* Turn off VBus */
1459 iotg->otg.set_vbus(&iotg->otg, false);
1460 set_client_mode();
1461 langwell_otg_phy_low_power_wait(1);
1462 iotg->otg.state = OTG_STATE_B_IDLE;
1463 langwell_update_transceiver();
1464 } else if (iotg->hsm.a_bus_drop ||
1465 (iotg->otg.host &&
1466 !iotg->otg.host->b_hnp_enable &&
1467 !iotg->hsm.a_bus_req)) {
1468 if (lnw->iotg.stop_host)
1469 lnw->iotg.stop_host(&lnw->iotg);
1470 else
1471 dev_dbg(lnw->dev,
1472 "host driver has been removed.\n");
1473
1474 /* Turn off VBus */
1475 iotg->otg.set_vbus(&iotg->otg, false);
1476 iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
1477 } else if (!iotg->hsm.a_vbus_vld) {
1478 if (lnw->iotg.stop_host)
1479 lnw->iotg.stop_host(&lnw->iotg);
1480 else
1481 dev_dbg(lnw->dev,
1482 "host driver has been removed.\n");
1483
1484 /* Turn off VBus */
1485 iotg->otg.set_vbus(&iotg->otg, false);
1486 langwell_otg_phy_low_power_wait(1);
1487 iotg->otg.state = OTG_STATE_A_VBUS_ERR;
1488 } else if (iotg->otg.host &&
1489 iotg->otg.host->b_hnp_enable &&
1490 !iotg->hsm.a_bus_req) {
1491 /* Set HABA to enable hardware assistance to signal
1492 * A-connect after receiver B-disconnect. Hardware
1493 * will then set client mode and enable URE, SLE and
1494 * PCE after the assistance. otg_dummy_irq is used to
1495 * clean these ints when client driver is not resumed.
1496 */
1497 if (request_irq(pdev->irq, otg_dummy_irq, IRQF_SHARED,
1498 driver_name, iotg->base) != 0) {
1499 dev_dbg(lnw->dev,
1500 "request interrupt %d failed\n",
1501 pdev->irq);
1502 }
1503
1504 /* set HABA */
1505 langwell_otg_HABA(1);
1506 iotg->hsm.b_bus_resume = 0;
1507 iotg->hsm.a_aidl_bdis_tmout = 0;
1508 langwell_otg_loc_sof(0);
1509 /* clear PHCD to enable HW timer */
1510 langwell_otg_phy_low_power(0);
1511 langwell_otg_add_timer(a_aidl_bdis_tmr);
1512 iotg->otg.state = OTG_STATE_A_SUSPEND;
1513 } else if (!iotg->hsm.b_conn || !iotg->hsm.a_bus_req) {
1514 langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
1515 iotg->otg.state = OTG_STATE_A_WAIT_BCON;
1516 }
1517 break;
1518 case OTG_STATE_A_SUSPEND:
1519 if (iotg->hsm.id) {
1520 langwell_otg_del_timer(a_aidl_bdis_tmr);
1521 langwell_otg_HABA(0);
1522 free_irq(pdev->irq, iotg->base);
1523 iotg->otg.default_a = 0;
1524 iotg->hsm.b_bus_req = 0;
1525
1526 if (lnw->iotg.stop_host)
1527 lnw->iotg.stop_host(&lnw->iotg);
1528 else
1529 dev_dbg(lnw->dev,
1530 "host driver has been removed.\n");
1531
1532 /* Turn off VBus */
1533 iotg->otg.set_vbus(&iotg->otg, false);
1534 set_client_mode();
1535 langwell_otg_phy_low_power(1);
1536 iotg->otg.state = OTG_STATE_B_IDLE;
1537 langwell_update_transceiver();
1538 } else if (iotg->hsm.a_bus_req ||
1539 iotg->hsm.b_bus_resume) {
1540 langwell_otg_del_timer(a_aidl_bdis_tmr);
1541 langwell_otg_HABA(0);
1542 free_irq(pdev->irq, iotg->base);
1543 iotg->hsm.a_suspend_req = 0;
1544 langwell_otg_loc_sof(1);
1545 iotg->otg.state = OTG_STATE_A_HOST;
1546 } else if (iotg->hsm.a_aidl_bdis_tmout ||
1547 iotg->hsm.a_bus_drop) {
1548 langwell_otg_del_timer(a_aidl_bdis_tmr);
1549 langwell_otg_HABA(0);
1550 free_irq(pdev->irq, iotg->base);
1551 if (lnw->iotg.stop_host)
1552 lnw->iotg.stop_host(&lnw->iotg);
1553 else
1554 dev_dbg(lnw->dev,
1555 "host driver has been removed.\n");
1556
1557 /* Turn off VBus */
1558 iotg->otg.set_vbus(&iotg->otg, false);
1559 iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
1560 } else if (!iotg->hsm.b_conn && iotg->otg.host &&
1561 iotg->otg.host->b_hnp_enable) {
1562 langwell_otg_del_timer(a_aidl_bdis_tmr);
1563 langwell_otg_HABA(0);
1564 free_irq(pdev->irq, iotg->base);
1565
1566 if (lnw->iotg.stop_host)
1567 lnw->iotg.stop_host(&lnw->iotg);
1568 else
1569 dev_dbg(lnw->dev,
1570 "host driver has been removed.\n");
1571
1572 iotg->hsm.b_bus_suspend = 0;
1573 iotg->hsm.b_bus_suspend_vld = 0;
1574
1575 /* msleep(200); */
1576 if (lnw->iotg.start_peripheral)
1577 lnw->iotg.start_peripheral(&lnw->iotg);
1578 else
1579 dev_dbg(lnw->dev,
1580 "client driver not loaded.\n");
1581
1582 langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR);
1583 iotg->otg.state = OTG_STATE_A_PERIPHERAL;
1584 break;
1585 } else if (!iotg->hsm.a_vbus_vld) {
1586 langwell_otg_del_timer(a_aidl_bdis_tmr);
1587 langwell_otg_HABA(0);
1588 free_irq(pdev->irq, iotg->base);
1589 if (lnw->iotg.stop_host)
1590 lnw->iotg.stop_host(&lnw->iotg);
1591 else
1592 dev_dbg(lnw->dev,
1593 "host driver has been removed.\n");
1594
1595 /* Turn off VBus */
1596 iotg->otg.set_vbus(&iotg->otg, false);
1597 langwell_otg_phy_low_power_wait(1);
1598 iotg->otg.state = OTG_STATE_A_VBUS_ERR;
1599 }
1600 break;
1601 case OTG_STATE_A_PERIPHERAL:
1602 if (iotg->hsm.id) {
1603 /* delete hsm timer for b_bus_suspend_tmr */
1604 del_timer_sync(&lnw->hsm_timer);
1605 iotg->otg.default_a = 0;
1606 iotg->hsm.b_bus_req = 0;
1607 if (lnw->iotg.stop_peripheral)
1608 lnw->iotg.stop_peripheral(&lnw->iotg);
1609 else
1610 dev_dbg(lnw->dev,
1611 "client driver has been removed.\n");
1612
1613 /* Turn off VBus */
1614 iotg->otg.set_vbus(&iotg->otg, false);
1615 set_client_mode();
1616 langwell_otg_phy_low_power_wait(1);
1617 iotg->otg.state = OTG_STATE_B_IDLE;
1618 langwell_update_transceiver();
1619 } else if (!iotg->hsm.a_vbus_vld) {
1620 /* delete hsm timer for b_bus_suspend_tmr */
1621 del_timer_sync(&lnw->hsm_timer);
1622
1623 if (lnw->iotg.stop_peripheral)
1624 lnw->iotg.stop_peripheral(&lnw->iotg);
1625 else
1626 dev_dbg(lnw->dev,
1627 "client driver has been removed.\n");
1628
1629 /* Turn off VBus */
1630 iotg->otg.set_vbus(&iotg->otg, false);
1631 langwell_otg_phy_low_power_wait(1);
1632 iotg->otg.state = OTG_STATE_A_VBUS_ERR;
1633 } else if (iotg->hsm.a_bus_drop) {
1634 /* delete hsm timer for b_bus_suspend_tmr */
1635 del_timer_sync(&lnw->hsm_timer);
1636
1637 if (lnw->iotg.stop_peripheral)
1638 lnw->iotg.stop_peripheral(&lnw->iotg);
1639 else
1640 dev_dbg(lnw->dev,
1641 "client driver has been removed.\n");
1642
1643 /* Turn off VBus */
1644 iotg->otg.set_vbus(&iotg->otg, false);
1645 iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
1646 } else if (iotg->hsm.b_bus_suspend) {
1647 /* delete hsm timer for b_bus_suspend_tmr */
1648 del_timer_sync(&lnw->hsm_timer);
1649
1650 if (lnw->iotg.stop_peripheral)
1651 lnw->iotg.stop_peripheral(&lnw->iotg);
1652 else
1653 dev_dbg(lnw->dev,
1654 "client driver has been removed.\n");
1655
1656 if (lnw->iotg.start_host)
1657 lnw->iotg.start_host(&lnw->iotg);
1658 else
1659 dev_dbg(lnw->dev,
1660 "host driver not loaded.\n");
1661 langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
1662 iotg->otg.state = OTG_STATE_A_WAIT_BCON;
1663 } else if (iotg->hsm.b_bus_suspend_tmout) {
1664 u32 val;
1665 val = readl(lnw->iotg.base + CI_PORTSC1);
1666 if (!(val & PORTSC_SUSP))
1667 break;
1668
1669 if (lnw->iotg.stop_peripheral)
1670 lnw->iotg.stop_peripheral(&lnw->iotg);
1671 else
1672 dev_dbg(lnw->dev,
1673 "client driver has been removed.\n");
1674
1675 if (lnw->iotg.start_host)
1676 lnw->iotg.start_host(&lnw->iotg);
1677 else
1678 dev_dbg(lnw->dev,
1679 "host driver not loaded.\n");
1680 langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
1681 iotg->otg.state = OTG_STATE_A_WAIT_BCON;
1682 }
1683 break;
1684 case OTG_STATE_A_VBUS_ERR:
1685 if (iotg->hsm.id) {
1686 iotg->otg.default_a = 0;
1687 iotg->hsm.a_clr_err = 0;
1688 iotg->hsm.a_srp_det = 0;
1689 set_client_mode();
1690 langwell_otg_phy_low_power(1);
1691 iotg->otg.state = OTG_STATE_B_IDLE;
1692 langwell_update_transceiver();
1693 } else if (iotg->hsm.a_clr_err) {
1694 iotg->hsm.a_clr_err = 0;
1695 iotg->hsm.a_srp_det = 0;
1696 reset_otg();
1697 init_hsm();
1698 if (iotg->otg.state == OTG_STATE_A_IDLE)
1699 langwell_update_transceiver();
1700 } else {
1701 /* FW will clear PHCD bit when any VBus
1702 * event detected. Reset PHCD to 1 again */
1703 langwell_otg_phy_low_power(1);
1704 }
1705 break;
1706 case OTG_STATE_A_WAIT_VFALL:
1707 if (iotg->hsm.id) {
1708 iotg->otg.default_a = 0;
1709 set_client_mode();
1710 langwell_otg_phy_low_power(1);
1711 iotg->otg.state = OTG_STATE_B_IDLE;
1712 langwell_update_transceiver();
1713 } else if (iotg->hsm.a_bus_req) {
1714
1715 /* Turn on VBus */
1716 iotg->otg.set_vbus(&iotg->otg, true);
1717 iotg->hsm.a_wait_vrise_tmout = 0;
1718 langwell_otg_add_timer(a_wait_vrise_tmr);
1719 iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
1720 } else if (!iotg->hsm.a_sess_vld) {
1721 iotg->hsm.a_srp_det = 0;
1722 set_host_mode();
1723 langwell_otg_phy_low_power(1);
1724 iotg->otg.state = OTG_STATE_A_IDLE;
1725 }
1726 break;
1727 default:
1728 ;
1729 }
1730
1731 dev_dbg(lnw->dev, "%s: new state = %s\n", __func__,
1732 state_string(iotg->otg.state));
1733}
1734
1735static ssize_t
1736show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1737{
1738 struct langwell_otg *lnw = the_transceiver;
1739 char *next;
1740 unsigned size, t;
1741
1742 next = buf;
1743 size = PAGE_SIZE;
1744
1745 t = scnprintf(next, size,
1746 "\n"
1747 "USBCMD = 0x%08x\n"
1748 "USBSTS = 0x%08x\n"
1749 "USBINTR = 0x%08x\n"
1750 "ASYNCLISTADDR = 0x%08x\n"
1751 "PORTSC1 = 0x%08x\n"
1752 "HOSTPC1 = 0x%08x\n"
1753 "OTGSC = 0x%08x\n"
1754 "USBMODE = 0x%08x\n",
1755 readl(lnw->iotg.base + 0x30),
1756 readl(lnw->iotg.base + 0x34),
1757 readl(lnw->iotg.base + 0x38),
1758 readl(lnw->iotg.base + 0x48),
1759 readl(lnw->iotg.base + 0x74),
1760 readl(lnw->iotg.base + 0xb4),
1761 readl(lnw->iotg.base + 0xf4),
1762 readl(lnw->iotg.base + 0xf8)
1763 );
1764 size -= t;
1765 next += t;
1766
1767 return PAGE_SIZE - size;
1768}
1769static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
1770
/*
 * sysfs "hsm" attribute (read-only): print the current OTG state name
 * plus every host-state-machine input and timeout flag, one per line.
 */
static ssize_t
show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct langwell_otg *lnw = the_transceiver;
	struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
	char *next;
	unsigned size, t;

	next = buf;
	size = PAGE_SIZE;

	/* Refresh the HNP-enable flags from host/gadget before reporting. */
	if (iotg->otg.host)
		iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable;

	if (iotg->otg.gadget)
		iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable;

	t = scnprintf(next, size,
		"\n"
		"current state = %s\n"
		"a_bus_resume = \t%d\n"
		"a_bus_suspend = \t%d\n"
		"a_conn = \t%d\n"
		"a_sess_vld = \t%d\n"
		"a_srp_det = \t%d\n"
		"a_vbus_vld = \t%d\n"
		"b_bus_resume = \t%d\n"
		"b_bus_suspend = \t%d\n"
		"b_conn = \t%d\n"
		"b_se0_srp = \t%d\n"
		"b_sess_end = \t%d\n"
		"b_sess_vld = \t%d\n"
		"id = \t%d\n"
		"a_set_b_hnp_en = \t%d\n"
		"b_srp_done = \t%d\n"
		"b_hnp_enable = \t%d\n"
		"a_wait_vrise_tmout = \t%d\n"
		"a_wait_bcon_tmout = \t%d\n"
		"a_aidl_bdis_tmout = \t%d\n"
		"b_ase0_brst_tmout = \t%d\n"
		"a_bus_drop = \t%d\n"
		"a_bus_req = \t%d\n"
		"a_clr_err = \t%d\n"
		"a_suspend_req = \t%d\n"
		"b_bus_req = \t%d\n"
		"b_bus_suspend_tmout = \t%d\n"
		"b_bus_suspend_vld = \t%d\n",
		state_string(iotg->otg.state),
		iotg->hsm.a_bus_resume,
		iotg->hsm.a_bus_suspend,
		iotg->hsm.a_conn,
		iotg->hsm.a_sess_vld,
		iotg->hsm.a_srp_det,
		iotg->hsm.a_vbus_vld,
		iotg->hsm.b_bus_resume,
		iotg->hsm.b_bus_suspend,
		iotg->hsm.b_conn,
		iotg->hsm.b_se0_srp,
		iotg->hsm.b_sess_end,
		iotg->hsm.b_sess_vld,
		iotg->hsm.id,
		iotg->hsm.a_set_b_hnp_en,
		iotg->hsm.b_srp_done,
		iotg->hsm.b_hnp_enable,
		iotg->hsm.a_wait_vrise_tmout,
		iotg->hsm.a_wait_bcon_tmout,
		iotg->hsm.a_aidl_bdis_tmout,
		iotg->hsm.b_ase0_brst_tmout,
		iotg->hsm.a_bus_drop,
		iotg->hsm.a_bus_req,
		iotg->hsm.a_clr_err,
		iotg->hsm.a_suspend_req,
		iotg->hsm.b_bus_req,
		iotg->hsm.b_bus_suspend_tmout,
		iotg->hsm.b_bus_suspend_vld
		);
	size -= t;
	next += t;

	/* Bytes actually written (== t). */
	return PAGE_SIZE - size;
}
static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
1853
1854static ssize_t
1855get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1856{
1857 struct langwell_otg *lnw = the_transceiver;
1858 char *next;
1859 unsigned size, t;
1860
1861 next = buf;
1862 size = PAGE_SIZE;
1863
1864 t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_req);
1865 size -= t;
1866 next += t;
1867
1868 return PAGE_SIZE - size;
1869}
1870
1871static ssize_t
1872set_a_bus_req(struct device *dev, struct device_attribute *attr,
1873 const char *buf, size_t count)
1874{
1875 struct langwell_otg *lnw = the_transceiver;
1876 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
1877
1878 if (!iotg->otg.default_a)
1879 return -1;
1880 if (count > 2)
1881 return -1;
1882
1883 if (buf[0] == '0') {
1884 iotg->hsm.a_bus_req = 0;
1885 dev_dbg(lnw->dev, "User request: a_bus_req = 0\n");
1886 } else if (buf[0] == '1') {
1887 /* If a_bus_drop is TRUE, a_bus_req can't be set */
1888 if (iotg->hsm.a_bus_drop)
1889 return -1;
1890 iotg->hsm.a_bus_req = 1;
1891 dev_dbg(lnw->dev, "User request: a_bus_req = 1\n");
1892 }
1893 if (spin_trylock(&lnw->wq_lock)) {
1894 langwell_update_transceiver();
1895 spin_unlock(&lnw->wq_lock);
1896 }
1897 return count;
1898}
1899static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
1900
1901static ssize_t
1902get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
1903{
1904 struct langwell_otg *lnw = the_transceiver;
1905 char *next;
1906 unsigned size, t;
1907
1908 next = buf;
1909 size = PAGE_SIZE;
1910
1911 t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_drop);
1912 size -= t;
1913 next += t;
1914
1915 return PAGE_SIZE - size;
1916}
1917
1918static ssize_t
1919set_a_bus_drop(struct device *dev, struct device_attribute *attr,
1920 const char *buf, size_t count)
1921{
1922 struct langwell_otg *lnw = the_transceiver;
1923 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
1924
1925 if (!iotg->otg.default_a)
1926 return -1;
1927 if (count > 2)
1928 return -1;
1929
1930 if (buf[0] == '0') {
1931 iotg->hsm.a_bus_drop = 0;
1932 dev_dbg(lnw->dev, "User request: a_bus_drop = 0\n");
1933 } else if (buf[0] == '1') {
1934 iotg->hsm.a_bus_drop = 1;
1935 iotg->hsm.a_bus_req = 0;
1936 dev_dbg(lnw->dev, "User request: a_bus_drop = 1\n");
1937 dev_dbg(lnw->dev, "User request: and a_bus_req = 0\n");
1938 }
1939 if (spin_trylock(&lnw->wq_lock)) {
1940 langwell_update_transceiver();
1941 spin_unlock(&lnw->wq_lock);
1942 }
1943 return count;
1944}
1945static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
1946 get_a_bus_drop, set_a_bus_drop);
1947
1948static ssize_t
1949get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1950{
1951 struct langwell_otg *lnw = the_transceiver;
1952 char *next;
1953 unsigned size, t;
1954
1955 next = buf;
1956 size = PAGE_SIZE;
1957
1958 t = scnprintf(next, size, "%d", lnw->iotg.hsm.b_bus_req);
1959 size -= t;
1960 next += t;
1961
1962 return PAGE_SIZE - size;
1963}
1964
1965static ssize_t
1966set_b_bus_req(struct device *dev, struct device_attribute *attr,
1967 const char *buf, size_t count)
1968{
1969 struct langwell_otg *lnw = the_transceiver;
1970 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
1971
1972 if (iotg->otg.default_a)
1973 return -1;
1974
1975 if (count > 2)
1976 return -1;
1977
1978 if (buf[0] == '0') {
1979 iotg->hsm.b_bus_req = 0;
1980 dev_dbg(lnw->dev, "User request: b_bus_req = 0\n");
1981 } else if (buf[0] == '1') {
1982 iotg->hsm.b_bus_req = 1;
1983 dev_dbg(lnw->dev, "User request: b_bus_req = 1\n");
1984 }
1985 if (spin_trylock(&lnw->wq_lock)) {
1986 langwell_update_transceiver();
1987 spin_unlock(&lnw->wq_lock);
1988 }
1989 return count;
1990}
1991static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
1992
1993static ssize_t
1994set_a_clr_err(struct device *dev, struct device_attribute *attr,
1995 const char *buf, size_t count)
1996{
1997 struct langwell_otg *lnw = the_transceiver;
1998 struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
1999
2000 if (!iotg->otg.default_a)
2001 return -1;
2002 if (count > 2)
2003 return -1;
2004
2005 if (buf[0] == '1') {
2006 iotg->hsm.a_clr_err = 1;
2007 dev_dbg(lnw->dev, "User request: a_clr_err = 1\n");
2008 }
2009 if (spin_trylock(&lnw->wq_lock)) {
2010 langwell_update_transceiver();
2011 spin_unlock(&lnw->wq_lock);
2012 }
2013 return count;
2014}
2015static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
2016
/* User-tunable HSM inputs, exposed as a sysfs group under .../inputs/ */
static struct attribute *inputs_attrs[] = {
	&dev_attr_a_bus_req.attr,
	&dev_attr_a_bus_drop.attr,
	&dev_attr_b_bus_req.attr,
	&dev_attr_a_clr_err.attr,
	NULL,
};

static struct attribute_group debug_dev_attr_group = {
	.name = "inputs",
	.attrs = inputs_attrs,
};
2029
/*
 * PCI probe: map the OTG controller (BAR 0) and the SCCB.USBCFG window,
 * register the transceiver, initialise HSM timers and the notifier, hook
 * the shared IRQ, enable OTGSC interrupts and create the debug sysfs
 * entries.
 *
 * Any failure after the private structure is allocated unwinds through
 * langwell_otg_remove() via the "err" label, so remove() must tolerate
 * partially-initialised state.
 */
static int langwell_otg_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	unsigned long resource, len;
	void __iomem *base = NULL;
	int retval;
	u32 val32;
	struct langwell_otg *lnw;
	char qname[] = "langwell_otg_queue";

	retval = 0;
	dev_dbg(&pdev->dev, "\notg controller is detected.\n");
	if (pci_enable_device(pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}

	lnw = kzalloc(sizeof *lnw, GFP_KERNEL);
	if (lnw == NULL) {
		retval = -ENOMEM;
		goto done;
	}
	/* Published early so the err path can hand off to remove(). */
	the_transceiver = lnw;

	/* control register: BAR 0 */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		retval = -EBUSY;
		goto err;
	}
	lnw->region = 1;

	base = ioremap_nocache(resource, len);
	if (base == NULL) {
		retval = -EFAULT;
		goto err;
	}
	lnw->iotg.base = base;

	if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
		retval = -EBUSY;
		goto err;
	}
	lnw->cfg_region = 1;

	/* For the SCCB.USBCFG register */
	base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN);
	if (base == NULL) {
		retval = -EFAULT;
		goto err;
	}
	lnw->usbcfg = base;

	if (!pdev->irq) {
		dev_dbg(&pdev->dev, "No IRQ.\n");
		retval = -ENODEV;
		goto err;
	}

	/* Single-threaded queue serialises langwell_otg_work() runs. */
	lnw->qwork = create_singlethread_workqueue(qname);
	if (!lnw->qwork) {
		dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
		retval = -ENOMEM;
		goto err;
	}
	INIT_WORK(&lnw->work, langwell_otg_work);

	/* OTG common part */
	lnw->dev = &pdev->dev;
	lnw->iotg.otg.dev = lnw->dev;
	lnw->iotg.otg.label = driver_name;
	lnw->iotg.otg.set_host = langwell_otg_set_host;
	lnw->iotg.otg.set_peripheral = langwell_otg_set_peripheral;
	lnw->iotg.otg.set_power = langwell_otg_set_power;
	lnw->iotg.otg.set_vbus = langwell_otg_set_vbus;
	lnw->iotg.otg.start_srp = langwell_otg_start_srp;
	lnw->iotg.otg.state = OTG_STATE_UNDEFINED;

	if (otg_set_transceiver(&lnw->iotg.otg)) {
		dev_dbg(lnw->dev, "can't set transceiver\n");
		retval = -EBUSY;
		goto err;
	}

	/* Hardware reset, then derive the initial HSM state from it. */
	reset_otg();
	init_hsm();

	spin_lock_init(&lnw->lock);
	spin_lock_init(&lnw->wq_lock);
	INIT_LIST_HEAD(&active_timers);
	retval = langwell_otg_init_timers(&lnw->iotg.hsm);
	if (retval) {
		dev_dbg(&pdev->dev, "Failed to init timers\n");
		goto err;
	}

	init_timer(&lnw->hsm_timer);
	ATOMIC_INIT_NOTIFIER_HEAD(&lnw->iotg.iotg_notifier);

	lnw->iotg_notifier.notifier_call = langwell_otg_iotg_notify;

	retval = intel_mid_otg_register_notifier(&lnw->iotg,
						&lnw->iotg_notifier);
	if (retval) {
		dev_dbg(lnw->dev, "Failed to register notifier\n");
		goto err;
	}

	if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
				driver_name, lnw) != 0) {
		dev_dbg(lnw->dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto err;
	}

	/* enable OTGSC int */
	val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
		OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
	writel(val32, lnw->iotg.base + CI_OTGSC);

	retval = device_create_file(&pdev->dev, &dev_attr_registers);
	if (retval < 0) {
		dev_dbg(lnw->dev,
			"Can't register sysfs attribute: %d\n", retval);
		goto err;
	}

	retval = device_create_file(&pdev->dev, &dev_attr_hsm);
	if (retval < 0) {
		dev_dbg(lnw->dev, "Can't hsm sysfs attribute: %d\n", retval);
		goto err;
	}

	retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
	if (retval < 0) {
		dev_dbg(lnw->dev,
			"Can't register sysfs attr group: %d\n", retval);
		goto err;
	}

	/* Already idle as A-device: evaluate the state machine once. */
	if (lnw->iotg.otg.state == OTG_STATE_A_IDLE)
		langwell_update_transceiver();

	return 0;

err:
	if (the_transceiver)
		langwell_otg_remove(pdev);
done:
	return retval;
}
2182
2183static void langwell_otg_remove(struct pci_dev *pdev)
2184{
2185 struct langwell_otg *lnw = the_transceiver;
2186
2187 if (lnw->qwork) {
2188 flush_workqueue(lnw->qwork);
2189 destroy_workqueue(lnw->qwork);
2190 }
2191 intel_mid_otg_unregister_notifier(&lnw->iotg, &lnw->iotg_notifier);
2192 langwell_otg_free_timers();
2193
2194 /* disable OTGSC interrupt as OTGSC doesn't change in reset */
2195 writel(0, lnw->iotg.base + CI_OTGSC);
2196
2197 if (pdev->irq)
2198 free_irq(pdev->irq, lnw);
2199 if (lnw->usbcfg)
2200 iounmap(lnw->usbcfg);
2201 if (lnw->cfg_region)
2202 release_mem_region(USBCFG_ADDR, USBCFG_LEN);
2203 if (lnw->iotg.base)
2204 iounmap(lnw->iotg.base);
2205 if (lnw->region)
2206 release_mem_region(pci_resource_start(pdev, 0),
2207 pci_resource_len(pdev, 0));
2208
2209 otg_set_transceiver(NULL);
2210 pci_disable_device(pdev);
2211 sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
2212 device_remove_file(&pdev->dev, &dev_attr_hsm);
2213 device_remove_file(&pdev->dev, &dev_attr_registers);
2214 kfree(lnw);
2215 lnw = NULL;
2216}
2217
/* Put the PCI function into D3hot and drop the PHY into low power. */
static void transceiver_suspend(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	langwell_otg_phy_low_power(1);
}
2224
/*
 * PCI suspend: quiesce interrupts and the work queue, then wind the
 * state machine down to an idle state appropriate for the current OTG
 * state (stopping host/peripheral roles, cancelling timers and dropping
 * VBus as needed) before entering low power.  @message is unused.
 */
static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
{
	struct langwell_otg *lnw = the_transceiver;
	struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
	int ret = 0;

	/* Disable OTG interrupts */
	langwell_otg_intr(0);

	if (pdev->irq)
		free_irq(pdev->irq, lnw);

	/* Prevent more otg_work */
	flush_workqueue(lnw->qwork);
	destroy_workqueue(lnw->qwork);
	lnw->qwork = NULL;

	/* start actions */
	switch (iotg->otg.state) {
	case OTG_STATE_A_WAIT_VFALL:
		iotg->otg.state = OTG_STATE_A_IDLE;
		/* fall through - already-idle states just power down */
	case OTG_STATE_A_IDLE:
	case OTG_STATE_B_IDLE:
	case OTG_STATE_A_VBUS_ERR:
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		langwell_otg_del_timer(a_wait_vrise_tmr);
		iotg->hsm.a_srp_det = 0;

		/* Turn off VBus */
		iotg->otg.set_vbus(&iotg->otg, false);
		iotg->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_WAIT_BCON:
		del_timer_sync(&lnw->hsm_timer);
		if (lnw->iotg.stop_host)
			lnw->iotg.stop_host(&lnw->iotg);
		else
			dev_dbg(&pdev->dev, "host driver has been removed.\n");

		iotg->hsm.a_srp_det = 0;

		/* Turn off VBus */
		iotg->otg.set_vbus(&iotg->otg, false);
		iotg->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_HOST:
		if (lnw->iotg.stop_host)
			lnw->iotg.stop_host(&lnw->iotg);
		else
			dev_dbg(&pdev->dev, "host driver has been removed.\n");

		iotg->hsm.a_srp_det = 0;

		/* Turn off VBus */
		iotg->otg.set_vbus(&iotg->otg, false);

		iotg->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_SUSPEND:
		langwell_otg_del_timer(a_aidl_bdis_tmr);
		langwell_otg_HABA(0);
		if (lnw->iotg.stop_host)
			lnw->iotg.stop_host(&lnw->iotg);
		else
			dev_dbg(lnw->dev, "host driver has been removed.\n");
		iotg->hsm.a_srp_det = 0;

		/* Turn off VBus */
		iotg->otg.set_vbus(&iotg->otg, false);
		iotg->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_PERIPHERAL:
		/* delete hsm timer for b_bus_suspend_tmr */
		del_timer_sync(&lnw->hsm_timer);

		if (lnw->iotg.stop_peripheral)
			lnw->iotg.stop_peripheral(&lnw->iotg);
		else
			dev_dbg(&pdev->dev,
				"client driver has been removed.\n");
		iotg->hsm.a_srp_det = 0;

		/* Turn off VBus */
		iotg->otg.set_vbus(&iotg->otg, false);
		iotg->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_B_HOST:
		if (lnw->iotg.stop_host)
			lnw->iotg.stop_host(&lnw->iotg);
		else
			dev_dbg(&pdev->dev, "host driver has been removed.\n");
		iotg->hsm.b_bus_req = 0;
		iotg->otg.state = OTG_STATE_B_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (lnw->iotg.stop_peripheral)
			lnw->iotg.stop_peripheral(&lnw->iotg);
		else
			dev_dbg(&pdev->dev,
				"client driver has been removed.\n");
		iotg->otg.state = OTG_STATE_B_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_B_WAIT_ACON:
		/* delete hsm timer for b_ase0_brst_tmr */
		del_timer_sync(&lnw->hsm_timer);

		langwell_otg_HAAR(0);

		if (lnw->iotg.stop_host)
			lnw->iotg.stop_host(&lnw->iotg);
		else
			dev_dbg(&pdev->dev, "host driver has been removed.\n");
		iotg->hsm.b_bus_req = 0;
		iotg->otg.state = OTG_STATE_B_IDLE;
		transceiver_suspend(pdev);
		break;
	default:
		dev_dbg(lnw->dev, "error state before suspend\n");
		break;
	}

	return ret;
}
2356
/* Restore PCI config state and bring the function back to D0. */
static void transceiver_resume(struct pci_dev *pdev)
{
	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);
}
2362
2363static int langwell_otg_resume(struct pci_dev *pdev)
2364{
2365 struct langwell_otg *lnw = the_transceiver;
2366 int ret = 0;
2367
2368 transceiver_resume(pdev);
2369
2370 lnw->qwork = create_singlethread_workqueue("langwell_otg_queue");
2371 if (!lnw->qwork) {
2372 dev_dbg(&pdev->dev, "cannot create langwell otg workqueuen");
2373 ret = -ENOMEM;
2374 goto error;
2375 }
2376
2377 if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
2378 driver_name, lnw) != 0) {
2379 dev_dbg(&pdev->dev, "request interrupt %d failed\n", pdev->irq);
2380 ret = -EBUSY;
2381 goto error;
2382 }
2383
2384 /* enable OTG interrupts */
2385 langwell_otg_intr(1);
2386
2387 update_hsm();
2388
2389 langwell_update_transceiver();
2390
2391 return ret;
2392error:
2393 langwell_otg_intr(0);
2394 transceiver_suspend(pdev);
2395 return ret;
2396}
2397
/* Module entry: register the PCI driver; probe runs on device match. */
static int __init langwell_otg_init(void)
{
	return pci_register_driver(&otg_pci_driver);
}
module_init(langwell_otg_init);
2403
/* Module exit: unregister the PCI driver; remove() runs per device. */
static void __exit langwell_otg_cleanup(void)
{
	pci_unregister_driver(&otg_pci_driver);
}
module_exit(langwell_otg_cleanup);
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index ccc81950822b..059d9ac0ab5b 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -29,12 +29,23 @@
29#include <linux/usb/otg.h> 29#include <linux/usb/otg.h>
30#include <linux/usb/ulpi.h> 30#include <linux/usb/ulpi.h>
31 31
32
33struct ulpi_info {
34 unsigned int id;
35 char *name;
36};
37
32#define ULPI_ID(vendor, product) (((vendor) << 16) | (product)) 38#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
39#define ULPI_INFO(_id, _name) \
40 { \
41 .id = (_id), \
42 .name = (_name), \
43 }
33 44
34/* ULPI hardcoded IDs, used for probing */ 45/* ULPI hardcoded IDs, used for probing */
35static unsigned int ulpi_ids[] = { 46static struct ulpi_info ulpi_ids[] = {
36 ULPI_ID(0x04cc, 0x1504), /* NXP ISP1504 */ 47 ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
37 ULPI_ID(0x0424, 0x0006), /* SMSC USB3319 */ 48 ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"),
38}; 49};
39 50
40static int ulpi_set_otg_flags(struct otg_transceiver *otg) 51static int ulpi_set_otg_flags(struct otg_transceiver *otg)
@@ -137,6 +148,32 @@ static int ulpi_set_flags(struct otg_transceiver *otg)
137 return ulpi_set_fc_flags(otg); 148 return ulpi_set_fc_flags(otg);
138} 149}
139 150
151static int ulpi_check_integrity(struct otg_transceiver *otg)
152{
153 int ret, i;
154 unsigned int val = 0x55;
155
156 for (i = 0; i < 2; i++) {
157 ret = otg_io_write(otg, val, ULPI_SCRATCH);
158 if (ret < 0)
159 return ret;
160
161 ret = otg_io_read(otg, ULPI_SCRATCH);
162 if (ret < 0)
163 return ret;
164
165 if (ret != val) {
166 pr_err("ULPI integrity check: failed!");
167 return -ENODEV;
168 }
169 val = val << 1;
170 }
171
172 pr_info("ULPI integrity check: passed.\n");
173
174 return 0;
175}
176
140static int ulpi_init(struct otg_transceiver *otg) 177static int ulpi_init(struct otg_transceiver *otg)
141{ 178{
142 int i, vid, pid, ret; 179 int i, vid, pid, ret;
@@ -153,12 +190,19 @@ static int ulpi_init(struct otg_transceiver *otg)
153 190
154 pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); 191 pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
155 192
156 for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) 193 for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) {
157 if (ulpi_ids[i] == ULPI_ID(vid, pid)) 194 if (ulpi_ids[i].id == ULPI_ID(vid, pid)) {
158 return ulpi_set_flags(otg); 195 pr_info("Found %s ULPI transceiver.\n",
196 ulpi_ids[i].name);
197 break;
198 }
199 }
200
201 ret = ulpi_check_integrity(otg);
202 if (ret)
203 return ret;
159 204
160 pr_err("ULPI ID does not match any known transceiver.\n"); 205 return ulpi_set_flags(otg);
161 return -ENODEV;
162} 206}
163 207
164static int ulpi_set_host(struct otg_transceiver *otg, struct usb_bus *host) 208static int ulpi_set_host(struct otg_transceiver *otg, struct usb_bus *host)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 916b2b6d765f..95058109f9fa 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -527,6 +527,15 @@ config USB_SERIAL_SAFE_PADDED
527 bool "USB Secure Encapsulated Driver - Padded" 527 bool "USB Secure Encapsulated Driver - Padded"
528 depends on USB_SERIAL_SAFE 528 depends on USB_SERIAL_SAFE
529 529
530config USB_SERIAL_SAMBA
531 tristate "USB Atmel SAM Boot Assistant (SAM-BA) driver"
532 help
533 Say Y here if you want to access the SAM-BA boot application of an
534 Atmel AT91SAM device.
535
536 To compile this driver as a module, choose M here: the
537 module will be called sam-ba.
538
530config USB_SERIAL_SIEMENS_MPI 539config USB_SERIAL_SIEMENS_MPI
531 tristate "USB Siemens MPI driver" 540 tristate "USB Siemens MPI driver"
532 help 541 help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 40ebe17b6ea8..9a2117f2b06e 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -6,10 +6,10 @@
6 6
7obj-$(CONFIG_USB_SERIAL) += usbserial.o 7obj-$(CONFIG_USB_SERIAL) += usbserial.o
8 8
9usbserial-obj-$(CONFIG_USB_SERIAL_CONSOLE) += console.o 9usbserial-y := usb-serial.o generic.o bus.o
10usbserial-obj-$(CONFIG_USB_EZUSB) += ezusb.o
11 10
12usbserial-objs := usb-serial.o generic.o bus.o $(usbserial-obj-y) 11usbserial-$(CONFIG_USB_SERIAL_CONSOLE) += console.o
12usbserial-$(CONFIG_USB_EZUSB) += ezusb.o
13 13
14obj-$(CONFIG_USB_SERIAL_AIRCABLE) += aircable.o 14obj-$(CONFIG_USB_SERIAL_AIRCABLE) += aircable.o
15obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o 15obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
48obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o 48obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
49obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o 49obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
50obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o 50obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
51obj-$(CONFIG_USB_SERIAL_SAMBA) += sam-ba.o
51obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o 52obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
52obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o 53obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
53obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o 54obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
@@ -58,6 +59,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
58obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o 59obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
59obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o 60obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
60obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o 61obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
61obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o 62obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
62obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o 63obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o
63
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 4e41a2a39422..8f1d4fb19d24 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -411,6 +411,26 @@ err_out:
411 return result; 411 return result;
412} 412}
413 413
414static int ark3116_get_icount(struct tty_struct *tty,
415 struct serial_icounter_struct *icount)
416{
417 struct usb_serial_port *port = tty->driver_data;
418 struct ark3116_private *priv = usb_get_serial_port_data(port);
419 struct async_icount cnow = priv->icount;
420 icount->cts = cnow.cts;
421 icount->dsr = cnow.dsr;
422 icount->rng = cnow.rng;
423 icount->dcd = cnow.dcd;
424 icount->rx = cnow.rx;
425 icount->tx = cnow.tx;
426 icount->frame = cnow.frame;
427 icount->overrun = cnow.overrun;
428 icount->parity = cnow.parity;
429 icount->brk = cnow.brk;
430 icount->buf_overrun = cnow.buf_overrun;
431 return 0;
432}
433
414static int ark3116_ioctl(struct tty_struct *tty, struct file *file, 434static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
415 unsigned int cmd, unsigned long arg) 435 unsigned int cmd, unsigned long arg)
416{ 436{
@@ -460,25 +480,6 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
460 return 0; 480 return 0;
461 } 481 }
462 break; 482 break;
463 case TIOCGICOUNT: {
464 struct serial_icounter_struct icount;
465 struct async_icount cnow = priv->icount;
466 memset(&icount, 0, sizeof(icount));
467 icount.cts = cnow.cts;
468 icount.dsr = cnow.dsr;
469 icount.rng = cnow.rng;
470 icount.dcd = cnow.dcd;
471 icount.rx = cnow.rx;
472 icount.tx = cnow.tx;
473 icount.frame = cnow.frame;
474 icount.overrun = cnow.overrun;
475 icount.parity = cnow.parity;
476 icount.brk = cnow.brk;
477 icount.buf_overrun = cnow.buf_overrun;
478 if (copy_to_user(user_arg, &icount, sizeof(icount)))
479 return -EFAULT;
480 return 0;
481 }
482 } 483 }
483 484
484 return -ENOIOCTLCMD; 485 return -ENOIOCTLCMD;
@@ -736,6 +737,7 @@ static struct usb_serial_driver ark3116_device = {
736 .ioctl = ark3116_ioctl, 737 .ioctl = ark3116_ioctl,
737 .tiocmget = ark3116_tiocmget, 738 .tiocmget = ark3116_tiocmget,
738 .tiocmset = ark3116_tiocmset, 739 .tiocmset = ark3116_tiocmset,
740 .get_icount = ark3116_get_icount,
739 .open = ark3116_open, 741 .open = ark3116_open,
740 .close = ark3116_close, 742 .close = ark3116_close,
741 .break_ctl = ark3116_break_ctl, 743 .break_ctl = ark3116_break_ctl,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 4f1744c5871f..8d7731dbf478 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -54,6 +54,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
54static int debug; 54static int debug;
55 55
56static const struct usb_device_id id_table[] = { 56static const struct usb_device_id id_table[] = {
57 { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
57 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 58 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
58 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 59 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
59 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 60 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
@@ -132,6 +133,7 @@ static const struct usb_device_id id_table[] = {
132 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ 133 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
133 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 134 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
134 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 135 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
136 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
135 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ 137 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
136 { } /* Terminating Entry */ 138 { } /* Terminating Entry */
137}; 139};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 97cc87d654ce..37b57c785cc7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -177,6 +177,7 @@ static struct usb_device_id id_table_combined [] = {
177 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) }, 177 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
178 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, 178 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
179 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, 179 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
180 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, 181 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
181 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, 182 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, 183 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -674,7 +675,6 @@ static struct usb_device_id id_table_combined [] = {
674 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, 675 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
675 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, 676 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
676 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, 677 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
677 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
678 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, 678 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
679 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, 679 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
680 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, 680 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -715,8 +715,37 @@ static struct usb_device_id id_table_combined [] = {
715 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 715 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
716 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 716 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
717 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 717 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
718
719 /* Papouch devices based on FTDI chip */
720 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
721 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
722 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
723 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
724 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
725 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
726 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
727 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
728 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
729 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
730 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
731 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
732 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
733 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
718 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 734 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
735 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
736 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
737 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
738 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
739 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
740 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
741 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
742 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
743 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
744 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
719 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, 745 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
746 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
747 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
748
720 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 749 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
721 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 750 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
722 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 751 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
@@ -751,6 +780,7 @@ static struct usb_device_id id_table_combined [] = {
751 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), 780 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
752 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 781 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
753 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, 782 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
783 { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
754 { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), 784 { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
755 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 785 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
756 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, 786 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
@@ -761,6 +791,9 @@ static struct usb_device_id id_table_combined [] = {
761 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, 791 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
762 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, 792 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
763 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, 793 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
764 { }, /* Optional parameter entry */ 797 { }, /* Optional parameter entry */
765 { } /* Terminating entry */ 798 { } /* Terminating entry */
766}; 799};
@@ -1559,6 +1592,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1559 ftdi_set_max_packet_size(port); 1592 ftdi_set_max_packet_size(port);
1560 if (read_latency_timer(port) < 0) 1593 if (read_latency_timer(port) < 0)
1561 priv->latency = 16; 1594 priv->latency = 16;
1595 write_latency_timer(port);
1562 create_sysfs_attrs(port); 1596 create_sysfs_attrs(port);
1563 return 0; 1597 return 0;
1564} 1598}
@@ -1687,8 +1721,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1687 1721
1688 dbg("%s", __func__); 1722 dbg("%s", __func__);
1689 1723
1690 write_latency_timer(port);
1691
1692 /* No error checking for this (will get errors later anyway) */ 1724 /* No error checking for this (will get errors later anyway) */
1693 /* See ftdi_sio.h for description of what is reset */ 1725 /* See ftdi_sio.h for description of what is reset */
1694 usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1726 usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2028,8 +2060,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
2028 "urb failed to set to rts/cts flow control\n"); 2060 "urb failed to set to rts/cts flow control\n");
2029 } 2061 }
2030 2062
2031 /* raise DTR/RTS */
2032 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
2033 } else { 2063 } else {
2034 /* 2064 /*
2035 * Xon/Xoff code 2065 * Xon/Xoff code
@@ -2077,8 +2107,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
2077 } 2107 }
2078 } 2108 }
2079 2109
2080 /* lower DTR/RTS */
2081 clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
2082 } 2110 }
2083 return; 2111 return;
2084} 2112}
@@ -2168,6 +2196,7 @@ static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
2168 * - mask passed in arg for lines of interest 2196 * - mask passed in arg for lines of interest
2169 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) 2197 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
2170 * Caller should use TIOCGICOUNT to see which one it was. 2198 * Caller should use TIOCGICOUNT to see which one it was.
2199 * (except that the driver doesn't support it !)
2171 * 2200 *
2172 * This code is borrowed from linux/drivers/char/serial.c 2201 * This code is borrowed from linux/drivers/char/serial.c
2173 */ 2202 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 15a4583775ad..cf1aea1b9ee7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -61,6 +61,7 @@
61#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9 61#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
62#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA 62#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
63#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB 63#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
64#define FTDI_OPENDCC_GBM_PID 0xBFDC
64 65
65/* 66/*
66 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) 67 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
@@ -1022,9 +1023,34 @@
1022 */ 1023 */
1023 1024
1024#define PAPOUCH_VID 0x5050 /* Vendor ID */ 1025#define PAPOUCH_VID 0x5050 /* Vendor ID */
1026#define PAPOUCH_SB485_PID 0x0100 /* Papouch SB485 USB-485/422 Converter */
1027#define PAPOUCH_AP485_PID 0x0101 /* AP485 USB-RS485 Converter */
1028#define PAPOUCH_SB422_PID 0x0102 /* Papouch SB422 USB-RS422 Converter */
1029#define PAPOUCH_SB485_2_PID 0x0103 /* Papouch SB485 USB-485/422 Converter */
1030#define PAPOUCH_AP485_2_PID 0x0104 /* AP485 USB-RS485 Converter */
1031#define PAPOUCH_SB422_2_PID 0x0105 /* Papouch SB422 USB-RS422 Converter */
1032#define PAPOUCH_SB485S_PID 0x0106 /* Papouch SB485S USB-485/422 Converter */
1033#define PAPOUCH_SB485C_PID 0x0107 /* Papouch SB485C USB-485/422 Converter */
1034#define PAPOUCH_LEC_PID 0x0300 /* LEC USB Converter */
1035#define PAPOUCH_SB232_PID 0x0301 /* Papouch SB232 USB-RS232 Converter */
1025#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 1036#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
1026#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ 1037#define PAPOUCH_IRAMP_PID 0x0500 /* Papouch IRAmp Duplex */
1038#define PAPOUCH_DRAK5_PID 0x0700 /* Papouch DRAK5 */
1039#define PAPOUCH_QUIDO8x8_PID 0x0800 /* Papouch Quido 8/8 Module */
1040#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Papouch Quido 4/4 Module */
1041#define PAPOUCH_QUIDO2x2_PID 0x0a00 /* Papouch Quido 2/2 Module */
1042#define PAPOUCH_QUIDO10x1_PID 0x0b00 /* Papouch Quido 10/1 Module */
1043#define PAPOUCH_QUIDO30x3_PID 0x0c00 /* Papouch Quido 30/3 Module */
1044#define PAPOUCH_QUIDO60x3_PID 0x0d00 /* Papouch Quido 60(100)/3 Module */
1045#define PAPOUCH_QUIDO2x16_PID 0x0e00 /* Papouch Quido 2/16 Module */
1046#define PAPOUCH_QUIDO3x32_PID 0x0f00 /* Papouch Quido 3/32 Module */
1047#define PAPOUCH_DRAK6_PID 0x1000 /* Papouch DRAK6 */
1048#define PAPOUCH_UPSUSB_PID 0x8000 /* Papouch UPS-USB adapter */
1049#define PAPOUCH_MU_PID 0x8001 /* MU controller */
1050#define PAPOUCH_SIMUKEY_PID 0x8002 /* Papouch SimuKey */
1027#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ 1051#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
1052#define PAPOUCH_GMUX_PID 0x8004 /* Papouch GOLIATH MUX */
1053#define PAPOUCH_GMSR_PID 0x8005 /* Papouch GOLIATH MSR */
1028 1054
1029/* 1055/*
1030 * Marvell SheevaPlug 1056 * Marvell SheevaPlug
@@ -1063,3 +1089,14 @@
1063 * Submitted by John G. Rogers 1089 * Submitted by John G. Rogers
1064 */ 1090 */
1065#define SEGWAY_RMP200_PID 0xe729 1091#define SEGWAY_RMP200_PID 0xe729
1092
1093
1094/*
1095 * Accesio USB Data Acquisition products (http://www.accesio.com/)
1096 */
1097#define ACCESIO_COM4SM_PID 0xD578
1098
1099/* www.sciencescope.co.uk educational dataloggers */
1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 76e6fb3aab7a..a0ab78ada25e 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -222,6 +222,8 @@ static void edge_break(struct tty_struct *tty, int break_state);
222static int edge_tiocmget(struct tty_struct *tty, struct file *file); 222static int edge_tiocmget(struct tty_struct *tty, struct file *file);
223static int edge_tiocmset(struct tty_struct *tty, struct file *file, 223static int edge_tiocmset(struct tty_struct *tty, struct file *file,
224 unsigned int set, unsigned int clear); 224 unsigned int set, unsigned int clear);
225static int edge_get_icount(struct tty_struct *tty,
226 struct serial_icounter_struct *icount);
225static int edge_startup(struct usb_serial *serial); 227static int edge_startup(struct usb_serial *serial);
226static void edge_disconnect(struct usb_serial *serial); 228static void edge_disconnect(struct usb_serial *serial);
227static void edge_release(struct usb_serial *serial); 229static void edge_release(struct usb_serial *serial);
@@ -1624,6 +1626,31 @@ static int edge_tiocmget(struct tty_struct *tty, struct file *file)
1624 return result; 1626 return result;
1625} 1627}
1626 1628
1629static int edge_get_icount(struct tty_struct *tty,
1630 struct serial_icounter_struct *icount)
1631{
1632 struct usb_serial_port *port = tty->driver_data;
1633 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
1634 struct async_icount cnow;
1635 cnow = edge_port->icount;
1636
1637 icount->cts = cnow.cts;
1638 icount->dsr = cnow.dsr;
1639 icount->rng = cnow.rng;
1640 icount->dcd = cnow.dcd;
1641 icount->rx = cnow.rx;
1642 icount->tx = cnow.tx;
1643 icount->frame = cnow.frame;
1644 icount->overrun = cnow.overrun;
1645 icount->parity = cnow.parity;
1646 icount->brk = cnow.brk;
1647 icount->buf_overrun = cnow.buf_overrun;
1648
1649 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d",
1650 __func__, port->number, icount->rx, icount->tx);
1651 return 0;
1652}
1653
1627static int get_serial_info(struct edgeport_port *edge_port, 1654static int get_serial_info(struct edgeport_port *edge_port,
1628 struct serial_struct __user *retinfo) 1655 struct serial_struct __user *retinfo)
1629{ 1656{
@@ -1650,7 +1677,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
1650} 1677}
1651 1678
1652 1679
1653
1654/***************************************************************************** 1680/*****************************************************************************
1655 * SerialIoctl 1681 * SerialIoctl
1656 * this function handles any ioctl calls to the driver 1682 * this function handles any ioctl calls to the driver
@@ -1663,7 +1689,6 @@ static int edge_ioctl(struct tty_struct *tty, struct file *file,
1663 struct edgeport_port *edge_port = usb_get_serial_port_data(port); 1689 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
1664 struct async_icount cnow; 1690 struct async_icount cnow;
1665 struct async_icount cprev; 1691 struct async_icount cprev;
1666 struct serial_icounter_struct icount;
1667 1692
1668 dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); 1693 dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);
1669 1694
@@ -1702,26 +1727,6 @@ static int edge_ioctl(struct tty_struct *tty, struct file *file,
1702 /* NOTREACHED */ 1727 /* NOTREACHED */
1703 break; 1728 break;
1704 1729
1705 case TIOCGICOUNT:
1706 cnow = edge_port->icount;
1707 memset(&icount, 0, sizeof(icount));
1708 icount.cts = cnow.cts;
1709 icount.dsr = cnow.dsr;
1710 icount.rng = cnow.rng;
1711 icount.dcd = cnow.dcd;
1712 icount.rx = cnow.rx;
1713 icount.tx = cnow.tx;
1714 icount.frame = cnow.frame;
1715 icount.overrun = cnow.overrun;
1716 icount.parity = cnow.parity;
1717 icount.brk = cnow.brk;
1718 icount.buf_overrun = cnow.buf_overrun;
1719
1720 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d",
1721 __func__, port->number, icount.rx, icount.tx);
1722 if (copy_to_user((void __user *)arg, &icount, sizeof(icount)))
1723 return -EFAULT;
1724 return 0;
1725 } 1730 }
1726 return -ENOIOCTLCMD; 1731 return -ENOIOCTLCMD;
1727} 1732}
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index feb56a4ca799..6ab2a3f97fe8 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -123,6 +123,7 @@ static struct usb_serial_driver edgeport_2port_device = {
123 .set_termios = edge_set_termios, 123 .set_termios = edge_set_termios,
124 .tiocmget = edge_tiocmget, 124 .tiocmget = edge_tiocmget,
125 .tiocmset = edge_tiocmset, 125 .tiocmset = edge_tiocmset,
126 .get_icount = edge_get_icount,
126 .write = edge_write, 127 .write = edge_write,
127 .write_room = edge_write_room, 128 .write_room = edge_write_room,
128 .chars_in_buffer = edge_chars_in_buffer, 129 .chars_in_buffer = edge_chars_in_buffer,
@@ -152,6 +153,7 @@ static struct usb_serial_driver edgeport_4port_device = {
152 .set_termios = edge_set_termios, 153 .set_termios = edge_set_termios,
153 .tiocmget = edge_tiocmget, 154 .tiocmget = edge_tiocmget,
154 .tiocmset = edge_tiocmset, 155 .tiocmset = edge_tiocmset,
156 .get_icount = edge_get_icount,
155 .write = edge_write, 157 .write = edge_write,
156 .write_room = edge_write_room, 158 .write_room = edge_write_room,
157 .chars_in_buffer = edge_chars_in_buffer, 159 .chars_in_buffer = edge_chars_in_buffer,
@@ -181,6 +183,7 @@ static struct usb_serial_driver edgeport_8port_device = {
181 .set_termios = edge_set_termios, 183 .set_termios = edge_set_termios,
182 .tiocmget = edge_tiocmget, 184 .tiocmget = edge_tiocmget,
183 .tiocmset = edge_tiocmset, 185 .tiocmset = edge_tiocmset,
186 .get_icount = edge_get_icount,
184 .write = edge_write, 187 .write = edge_write,
185 .write_room = edge_write_room, 188 .write_room = edge_write_room,
186 .chars_in_buffer = edge_chars_in_buffer, 189 .chars_in_buffer = edge_chars_in_buffer,
@@ -209,6 +212,7 @@ static struct usb_serial_driver epic_device = {
209 .set_termios = edge_set_termios, 212 .set_termios = edge_set_termios,
210 .tiocmget = edge_tiocmget, 213 .tiocmget = edge_tiocmget,
211 .tiocmset = edge_tiocmset, 214 .tiocmset = edge_tiocmset,
215 .get_icount = edge_get_icount,
212 .write = edge_write, 216 .write = edge_write,
213 .write_room = edge_write_room, 217 .write_room = edge_write_room,
214 .chars_in_buffer = edge_chars_in_buffer, 218 .chars_in_buffer = edge_chars_in_buffer,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a7cfc5952937..4dad27a0f22a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2510,6 +2510,27 @@ static int edge_tiocmget(struct tty_struct *tty, struct file *file)
2510 return result; 2510 return result;
2511} 2511}
2512 2512
2513static int edge_get_icount(struct tty_struct *tty,
2514 struct serial_icounter_struct *icount)
2515{
2516 struct usb_serial_port *port = tty->driver_data;
2517 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
2518 struct async_icount *ic = &edge_port->icount;
2519
2520 icount->cts = ic->cts;
2521 icount->dsr = ic->dsr;
2522 icount->rng = ic->rng;
2523 icount->dcd = ic->dcd;
2524 icount->tx = ic->tx;
2525 icount->rx = ic->rx;
2526 icount->frame = ic->frame;
2527 icount->parity = ic->parity;
2528 icount->overrun = ic->overrun;
2529 icount->brk = ic->brk;
2530 icount->buf_overrun = ic->buf_overrun;
2531 return 0;
2532}
2533
2513static int get_serial_info(struct edgeport_port *edge_port, 2534static int get_serial_info(struct edgeport_port *edge_port,
2514 struct serial_struct __user *retinfo) 2535 struct serial_struct __user *retinfo)
2515{ 2536{
@@ -2572,13 +2593,6 @@ static int edge_ioctl(struct tty_struct *tty, struct file *file,
2572 } 2593 }
2573 /* not reached */ 2594 /* not reached */
2574 break; 2595 break;
2575 case TIOCGICOUNT:
2576 dbg("%s - (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
2577 port->number, edge_port->icount.rx, edge_port->icount.tx);
2578 if (copy_to_user((void __user *)arg, &edge_port->icount,
2579 sizeof(edge_port->icount)))
2580 return -EFAULT;
2581 return 0;
2582 } 2596 }
2583 return -ENOIOCTLCMD; 2597 return -ENOIOCTLCMD;
2584} 2598}
@@ -2758,6 +2772,7 @@ static struct usb_serial_driver edgeport_1port_device = {
2758 .set_termios = edge_set_termios, 2772 .set_termios = edge_set_termios,
2759 .tiocmget = edge_tiocmget, 2773 .tiocmget = edge_tiocmget,
2760 .tiocmset = edge_tiocmset, 2774 .tiocmset = edge_tiocmset,
2775 .get_icount = edge_get_icount,
2761 .write = edge_write, 2776 .write = edge_write,
2762 .write_room = edge_write_room, 2777 .write_room = edge_write_room,
2763 .chars_in_buffer = edge_chars_in_buffer, 2778 .chars_in_buffer = edge_chars_in_buffer,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 7aa01b95b1d4..2849f8c32015 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -549,9 +549,12 @@ static void mct_u232_close(struct usb_serial_port *port)
549{ 549{
550 dbg("%s port %d", __func__, port->number); 550 dbg("%s port %d", __func__, port->number);
551 551
552 usb_serial_generic_close(port); 552 if (port->serial->dev) {
553 if (port->serial->dev) 553 /* shutdown our urbs */
554 usb_kill_urb(port->write_urb);
555 usb_kill_urb(port->read_urb);
554 usb_kill_urb(port->interrupt_in_urb); 556 usb_kill_urb(port->interrupt_in_urb);
557 }
555} /* mct_u232_close */ 558} /* mct_u232_close */
556 559
557 560
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index aa665817a272..fd0b6414f459 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1896,10 +1896,37 @@ static int mos7720_tiocmset(struct tty_struct *tty, struct file *file,
1896 return 0; 1896 return 0;
1897} 1897}
1898 1898
1899static int mos7720_get_icount(struct tty_struct *tty,
1900 struct serial_icounter_struct *icount)
1901{
1902 struct usb_serial_port *port = tty->driver_data;
1903 struct moschip_port *mos7720_port;
1904 struct async_icount cnow;
1905
1906 mos7720_port = usb_get_serial_port_data(port);
1907 cnow = mos7720_port->icount;
1908
1909 icount->cts = cnow.cts;
1910 icount->dsr = cnow.dsr;
1911 icount->rng = cnow.rng;
1912 icount->dcd = cnow.dcd;
1913 icount->rx = cnow.rx;
1914 icount->tx = cnow.tx;
1915 icount->frame = cnow.frame;
1916 icount->overrun = cnow.overrun;
1917 icount->parity = cnow.parity;
1918 icount->brk = cnow.brk;
1919 icount->buf_overrun = cnow.buf_overrun;
1920
1921 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
1922 port->number, icount->rx, icount->tx);
1923 return 0;
1924}
1925
1899static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd, 1926static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
1900 unsigned int __user *value) 1927 unsigned int __user *value)
1901{ 1928{
1902 unsigned int mcr ; 1929 unsigned int mcr;
1903 unsigned int arg; 1930 unsigned int arg;
1904 1931
1905 struct usb_serial_port *port; 1932 struct usb_serial_port *port;
@@ -1973,7 +2000,6 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
1973 struct moschip_port *mos7720_port; 2000 struct moschip_port *mos7720_port;
1974 struct async_icount cnow; 2001 struct async_icount cnow;
1975 struct async_icount cprev; 2002 struct async_icount cprev;
1976 struct serial_icounter_struct icount;
1977 2003
1978 mos7720_port = usb_get_serial_port_data(port); 2004 mos7720_port = usb_get_serial_port_data(port);
1979 if (mos7720_port == NULL) 2005 if (mos7720_port == NULL)
@@ -2021,29 +2047,6 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
2021 } 2047 }
2022 /* NOTREACHED */ 2048 /* NOTREACHED */
2023 break; 2049 break;
2024
2025 case TIOCGICOUNT:
2026 cnow = mos7720_port->icount;
2027
2028 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2029
2030 icount.cts = cnow.cts;
2031 icount.dsr = cnow.dsr;
2032 icount.rng = cnow.rng;
2033 icount.dcd = cnow.dcd;
2034 icount.rx = cnow.rx;
2035 icount.tx = cnow.tx;
2036 icount.frame = cnow.frame;
2037 icount.overrun = cnow.overrun;
2038 icount.parity = cnow.parity;
2039 icount.brk = cnow.brk;
2040 icount.buf_overrun = cnow.buf_overrun;
2041
2042 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
2043 port->number, icount.rx, icount.tx);
2044 if (copy_to_user((void __user *)arg, &icount, sizeof(icount)))
2045 return -EFAULT;
2046 return 0;
2047 } 2050 }
2048 2051
2049 return -ENOIOCTLCMD; 2052 return -ENOIOCTLCMD;
@@ -2212,6 +2215,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
2212 .ioctl = mos7720_ioctl, 2215 .ioctl = mos7720_ioctl,
2213 .tiocmget = mos7720_tiocmget, 2216 .tiocmget = mos7720_tiocmget,
2214 .tiocmset = mos7720_tiocmset, 2217 .tiocmset = mos7720_tiocmset,
2218 .get_icount = mos7720_get_icount,
2215 .set_termios = mos7720_set_termios, 2219 .set_termios = mos7720_set_termios,
2216 .write = mos7720_write, 2220 .write = mos7720_write,
2217 .write_room = mos7720_write_room, 2221 .write_room = mos7720_write_room,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 1a42bc213799..93dad5853cd5 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2209,6 +2209,34 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
2209 return 0; 2209 return 0;
2210} 2210}
2211 2211
2212static int mos7840_get_icount(struct tty_struct *tty,
2213 struct serial_icounter_struct *icount)
2214{
2215 struct usb_serial_port *port = tty->driver_data;
2216 struct moschip_port *mos7840_port;
2217 struct async_icount cnow;
2218
2219 mos7840_port = mos7840_get_port_private(port);
2220 cnow = mos7840_port->icount;
2221
2222 smp_rmb();
2223 icount->cts = cnow.cts;
2224 icount->dsr = cnow.dsr;
2225 icount->rng = cnow.rng;
2226 icount->dcd = cnow.dcd;
2227 icount->rx = cnow.rx;
2228 icount->tx = cnow.tx;
2229 icount->frame = cnow.frame;
2230 icount->overrun = cnow.overrun;
2231 icount->parity = cnow.parity;
2232 icount->brk = cnow.brk;
2233 icount->buf_overrun = cnow.buf_overrun;
2234
2235 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
2236 port->number, icount->rx, icount->tx);
2237 return 0;
2238}
2239
2212/***************************************************************************** 2240/*****************************************************************************
2213 * SerialIoctl 2241 * SerialIoctl
2214 * this function handles any ioctl calls to the driver 2242 * this function handles any ioctl calls to the driver
@@ -2223,7 +2251,6 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2223 2251
2224 struct async_icount cnow; 2252 struct async_icount cnow;
2225 struct async_icount cprev; 2253 struct async_icount cprev;
2226 struct serial_icounter_struct icount;
2227 2254
2228 if (mos7840_port_paranoia_check(port, __func__)) { 2255 if (mos7840_port_paranoia_check(port, __func__)) {
2229 dbg("%s", "Invalid port"); 2256 dbg("%s", "Invalid port");
@@ -2282,29 +2309,6 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2282 /* NOTREACHED */ 2309 /* NOTREACHED */
2283 break; 2310 break;
2284 2311
2285 case TIOCGICOUNT:
2286 cnow = mos7840_port->icount;
2287 smp_rmb();
2288
2289 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2290
2291 icount.cts = cnow.cts;
2292 icount.dsr = cnow.dsr;
2293 icount.rng = cnow.rng;
2294 icount.dcd = cnow.dcd;
2295 icount.rx = cnow.rx;
2296 icount.tx = cnow.tx;
2297 icount.frame = cnow.frame;
2298 icount.overrun = cnow.overrun;
2299 icount.parity = cnow.parity;
2300 icount.brk = cnow.brk;
2301 icount.buf_overrun = cnow.buf_overrun;
2302
2303 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
2304 port->number, icount.rx, icount.tx);
2305 if (copy_to_user(argp, &icount, sizeof(icount)))
2306 return -EFAULT;
2307 return 0;
2308 default: 2312 default:
2309 break; 2313 break;
2310 } 2314 }
@@ -2674,6 +2678,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
2674 .break_ctl = mos7840_break, 2678 .break_ctl = mos7840_break,
2675 .tiocmget = mos7840_tiocmget, 2679 .tiocmget = mos7840_tiocmget,
2676 .tiocmset = mos7840_tiocmset, 2680 .tiocmset = mos7840_tiocmset,
2681 .get_icount = mos7840_get_icount,
2677 .attach = mos7840_startup, 2682 .attach = mos7840_startup,
2678 .disconnect = mos7840_disconnect, 2683 .disconnect = mos7840_disconnect,
2679 .release = mos7840_release, 2684 .release = mos7840_release,
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index ed01f3b2de8c..eda1f9266c4e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -96,8 +96,8 @@ static void opticon_bulk_callback(struct urb *urb)
96 /* real data, send it to the tty layer */ 96 /* real data, send it to the tty layer */
97 tty = tty_port_tty_get(&port->port); 97 tty = tty_port_tty_get(&port->port);
98 if (tty) { 98 if (tty) {
99 tty_insert_flip_string(tty, data, 99 tty_insert_flip_string(tty, data + 2,
100 data_length); 100 data_length);
101 tty_flip_buffer_push(tty); 101 tty_flip_buffer_push(tty);
102 tty_kref_put(tty); 102 tty_kref_put(tty);
103 } 103 }
@@ -108,10 +108,10 @@ static void opticon_bulk_callback(struct urb *urb)
108 else 108 else
109 priv->rts = true; 109 priv->rts = true;
110 } else { 110 } else {
111 dev_dbg(&priv->udev->dev, 111 dev_dbg(&priv->udev->dev,
112 "Unknown data packet received from the device:" 112 "Unknown data packet received from the device:"
113 " %2x %2x\n", 113 " %2x %2x\n",
114 data[0], data[1]); 114 data[0], data[1]);
115 } 115 }
116 } 116 }
117 } else { 117 } else {
@@ -130,7 +130,7 @@ exit:
130 priv->bulk_address), 130 priv->bulk_address),
131 priv->bulk_in_buffer, priv->buffer_size, 131 priv->bulk_in_buffer, priv->buffer_size,
132 opticon_bulk_callback, priv); 132 opticon_bulk_callback, priv);
133 result = usb_submit_urb(port->read_urb, GFP_ATOMIC); 133 result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
134 if (result) 134 if (result)
135 dev_err(&port->dev, 135 dev_err(&port->dev,
136 "%s - failed resubmitting read urb, error %d\n", 136 "%s - failed resubmitting read urb, error %d\n",
@@ -187,6 +187,9 @@ static void opticon_write_bulk_callback(struct urb *urb)
187 /* free up the transfer buffer, as usb_free_urb() does not do this */ 187 /* free up the transfer buffer, as usb_free_urb() does not do this */
188 kfree(urb->transfer_buffer); 188 kfree(urb->transfer_buffer);
189 189
190 /* setup packet may be set if we're using it for writing */
191 kfree(urb->setup_packet);
192
190 if (status) 193 if (status)
191 dbg("%s - nonzero write bulk status received: %d", 194 dbg("%s - nonzero write bulk status received: %d",
192 __func__, status); 195 __func__, status);
@@ -237,10 +240,29 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
237 240
238 usb_serial_debug_data(debug, &port->dev, __func__, count, buffer); 241 usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
239 242
240 usb_fill_bulk_urb(urb, serial->dev, 243 if (port->bulk_out_endpointAddress) {
241 usb_sndbulkpipe(serial->dev, 244 usb_fill_bulk_urb(urb, serial->dev,
242 port->bulk_out_endpointAddress), 245 usb_sndbulkpipe(serial->dev,
243 buffer, count, opticon_write_bulk_callback, priv); 246 port->bulk_out_endpointAddress),
247 buffer, count, opticon_write_bulk_callback, priv);
248 } else {
249 struct usb_ctrlrequest *dr;
250
251 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
252 if (!dr)
253 return -ENOMEM;
254
255 dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
256 dr->bRequest = 0x01;
257 dr->wValue = 0;
258 dr->wIndex = 0;
259 dr->wLength = cpu_to_le16(count);
260
261 usb_fill_control_urb(urb, serial->dev,
262 usb_sndctrlpipe(serial->dev, 0),
263 (unsigned char *)dr, buffer, count,
264 opticon_write_bulk_callback, priv);
265 }
244 266
245 /* send it down the pipe */ 267 /* send it down the pipe */
246 status = usb_submit_urb(urb, GFP_ATOMIC); 268 status = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c46911af282f..2297fb1bcf65 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -392,6 +392,12 @@ static void option_instat_callback(struct urb *urb);
392#define CELOT_VENDOR_ID 0x211f 392#define CELOT_VENDOR_ID 0x211f
393#define CELOT_PRODUCT_CT680M 0x6801 393#define CELOT_PRODUCT_CT680M 0x6801
394 394
395/* ONDA Communication vendor id */
396#define ONDA_VENDOR_ID 0x1ee8
397
398/* ONDA MT825UP HSDPA 14.2 modem */
399#define ONDA_MT825UP 0x000b
400
395/* some devices interfaces need special handling due to a number of reasons */ 401/* some devices interfaces need special handling due to a number of reasons */
396enum option_blacklist_reason { 402enum option_blacklist_reason {
397 OPTION_BLACKLIST_NONE = 0, 403 OPTION_BLACKLIST_NONE = 0,
@@ -622,6 +628,7 @@ static const struct usb_device_id option_ids[] = {
622 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, 628 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
623 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, 629 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
624 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, 630 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
631 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
625 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, 632 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
626 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, 633 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
627 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, 634 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
@@ -633,38 +640,52 @@ static const struct usb_device_id option_ids[] = {
633 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, 640 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
634 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, 641 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
635 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, 642 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
636 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, 643 /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
637 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, 644 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
638 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, 645 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
639 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, 646 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
640 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, 647 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
641 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, 648 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
642 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, 649 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
650 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
643 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, 651 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
652 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
644 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, 653 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
654 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
645 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, 655 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
646 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, 656 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
657 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
647 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, 658 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
648 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, 659 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
660 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
649 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, 661 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
650 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, 662 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
663 /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
651 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, 664 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
652 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, 665 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
666 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
653 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, 667 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
654 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, 668 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
669 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
655 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, 670 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
656 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, 671 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
657 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, 672 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
658 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, 673 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
674 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
659 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, 675 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
676 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
660 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, 677 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
678 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
661 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, 679 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
680 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
662 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, 681 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
682 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
663 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, 683 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
684 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
664 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, 685 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
665 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, 686 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
666 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
667 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, 687 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
688 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
668 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, 689 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
669 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, 690 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
670 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, 691 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
@@ -880,6 +901,8 @@ static const struct usb_device_id option_ids[] = {
880 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 901 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
881 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, 902 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
882 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, 903 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
904 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
905 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
883 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 906 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
884 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 907 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
885 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 908 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -925,6 +948,7 @@ static const struct usb_device_id option_ids[] = {
925 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, 948 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
926 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 949 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
927 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 950 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
951 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
928 { } /* Terminating entry */ 952 { } /* Terminating entry */
929}; 953};
930MODULE_DEVICE_TABLE(usb, option_ids); 954MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cde67cacb2c3..8858201eb1d3 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -118,6 +118,8 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
118 118
119 spin_lock_init(&data->susp_lock); 119 spin_lock_init(&data->susp_lock);
120 120
121 usb_enable_autosuspend(serial->dev);
122
121 switch (nintf) { 123 switch (nintf) {
122 case 1: 124 case 1:
123 /* QDL mode */ 125 /* QDL mode */
@@ -150,7 +152,22 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
150 case 3: 152 case 3:
151 case 4: 153 case 4:
152 /* Composite mode */ 154 /* Composite mode */
153 if (ifnum == 2) { 155 /* ifnum == 0 is a broadband network adapter */
156 if (ifnum == 1) {
157 /*
158 * Diagnostics Monitor (serial line 9600 8N1)
159 * Qualcomm DM protocol
160 * use "libqcdm" (ModemManager) for communication
161 */
162 dbg("Diagnostics Monitor found");
163 retval = usb_set_interface(serial->dev, ifnum, 0);
164 if (retval < 0) {
165 dev_err(&serial->dev->dev,
166 "Could not set interface, error %d\n",
167 retval);
168 retval = -ENODEV;
169 }
170 } else if (ifnum == 2) {
154 dbg("Modem port found"); 171 dbg("Modem port found");
155 retval = usb_set_interface(serial->dev, ifnum, 0); 172 retval = usb_set_interface(serial->dev, ifnum, 0);
156 if (retval < 0) { 173 if (retval < 0) {
@@ -161,6 +178,20 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
161 kfree(data); 178 kfree(data);
162 } 179 }
163 return retval; 180 return retval;
181 } else if (ifnum==3) {
182 /*
183 * NMEA (serial line 9600 8N1)
184 * # echo "\$GPS_START" > /dev/ttyUSBx
185 * # echo "\$GPS_STOP" > /dev/ttyUSBx
186 */
187 dbg("NMEA GPS interface found");
188 retval = usb_set_interface(serial->dev, ifnum, 0);
189 if (retval < 0) {
190 dev_err(&serial->dev->dev,
191 "Could not set interface, error %d\n",
192 retval);
193 retval = -ENODEV;
194 }
164 } 195 }
165 break; 196 break;
166 197
diff --git a/drivers/usb/serial/sam-ba.c b/drivers/usb/serial/sam-ba.c
new file mode 100644
index 000000000000..e3bba64afc57
--- /dev/null
+++ b/drivers/usb/serial/sam-ba.c
@@ -0,0 +1,206 @@
1/*
2 * Atmel SAM Boot Assistant (SAM-BA) driver
3 *
4 * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/tty.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/usb.h>
16#include <linux/usb/serial.h>
17
18
19#define DRIVER_VERSION "v1.0"
20#define DRIVER_AUTHOR "Johan Hovold <jhovold@gmail.com>"
21#define DRIVER_DESC "Atmel SAM Boot Assistant (SAM-BA) driver"
22
23#define SAMBA_VENDOR_ID 0x3eb
24#define SAMBA_PRODUCT_ID 0x6124
25
26
27static int debug;
28
29static const struct usb_device_id id_table[] = {
30 /*
31 * NOTE: Only match the CDC Data interface.
32 */
33 { USB_DEVICE_AND_INTERFACE_INFO(SAMBA_VENDOR_ID, SAMBA_PRODUCT_ID,
34 USB_CLASS_CDC_DATA, 0, 0) },
35 { }
36};
37MODULE_DEVICE_TABLE(usb, id_table);
38
39static struct usb_driver samba_driver = {
40 .name = "sam-ba",
41 .probe = usb_serial_probe,
42 .disconnect = usb_serial_disconnect,
43 .id_table = id_table,
44 .no_dynamic_id = 1,
45};
46
47
48/*
49 * NOTE: The SAM-BA firmware cannot handle merged write requests so we cannot
50 * use the generic write implementation (which uses the port write fifo).
51 */
52static int samba_write(struct tty_struct *tty, struct usb_serial_port *port,
53 const unsigned char *buf, int count)
54{
55 struct urb *urb;
56 unsigned long flags;
57 int result;
58 int i;
59
60 if (!count)
61 return 0;
62
63 count = min_t(int, count, port->bulk_out_size);
64
65 spin_lock_irqsave(&port->lock, flags);
66 if (!port->write_urbs_free) {
67 spin_unlock_irqrestore(&port->lock, flags);
68 return 0;
69 }
70 i = find_first_bit(&port->write_urbs_free,
71 ARRAY_SIZE(port->write_urbs));
72 __clear_bit(i, &port->write_urbs_free);
73 port->tx_bytes += count;
74 spin_unlock_irqrestore(&port->lock, flags);
75
76 urb = port->write_urbs[i];
77 memcpy(urb->transfer_buffer, buf, count);
78 urb->transfer_buffer_length = count;
79 usb_serial_debug_data(debug, &port->dev, __func__, count,
80 urb->transfer_buffer);
81 result = usb_submit_urb(urb, GFP_ATOMIC);
82 if (result) {
83 dev_err(&port->dev, "%s - error submitting urb: %d\n",
84 __func__, result);
85 spin_lock_irqsave(&port->lock, flags);
86 __set_bit(i, &port->write_urbs_free);
87 port->tx_bytes -= count;
88 spin_unlock_irqrestore(&port->lock, flags);
89
90 return result;
91 }
92
93 return count;
94}
95
96static int samba_write_room(struct tty_struct *tty)
97{
98 struct usb_serial_port *port = tty->driver_data;
99 unsigned long flags;
100 unsigned long free;
101 int count;
102 int room;
103
104 spin_lock_irqsave(&port->lock, flags);
105 free = port->write_urbs_free;
106 spin_unlock_irqrestore(&port->lock, flags);
107
108 count = hweight_long(free);
109 room = count * port->bulk_out_size;
110
111 dbg("%s - returns %d", __func__, room);
112
113 return room;
114}
115
116static int samba_chars_in_buffer(struct tty_struct *tty)
117{
118 struct usb_serial_port *port = tty->driver_data;
119 unsigned long flags;
120 int chars;
121
122 spin_lock_irqsave(&port->lock, flags);
123 chars = port->tx_bytes;
124 spin_unlock_irqrestore(&port->lock, flags);
125
126 dbg("%s - returns %d", __func__, chars);
127
128 return chars;
129}
130
131static void samba_write_bulk_callback(struct urb *urb)
132{
133 struct usb_serial_port *port = urb->context;
134 unsigned long flags;
135 int i;
136
137 dbg("%s - port %d", __func__, port->number);
138
139 for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
140 if (port->write_urbs[i] == urb)
141 break;
142 }
143 spin_lock_irqsave(&port->lock, flags);
144 __set_bit(i, &port->write_urbs_free);
145 port->tx_bytes -= urb->transfer_buffer_length;
146 spin_unlock_irqrestore(&port->lock, flags);
147
148 if (urb->status)
149 dbg("%s - non-zero urb status: %d", __func__, urb->status);
150
151 usb_serial_port_softint(port);
152}
153
154static struct usb_serial_driver samba_device = {
155 .driver = {
156 .owner = THIS_MODULE,
157 .name = "sam-ba",
158 },
159 .usb_driver = &samba_driver,
160 .id_table = id_table,
161 .num_ports = 1,
162 .bulk_in_size = 512,
163 .bulk_out_size = 2048,
164 .write = samba_write,
165 .write_room = samba_write_room,
166 .chars_in_buffer = samba_chars_in_buffer,
167 .write_bulk_callback = samba_write_bulk_callback,
168 .throttle = usb_serial_generic_throttle,
169 .unthrottle = usb_serial_generic_unthrottle,
170};
171
172static int __init samba_init(void)
173{
174 int retval;
175
176 retval = usb_serial_register(&samba_device);
177 if (retval)
178 return retval;
179
180 retval = usb_register(&samba_driver);
181 if (retval) {
182 usb_serial_deregister(&samba_device);
183 return retval;
184 }
185
186 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ": "
187 DRIVER_DESC "\n");
188 return 0;
189}
190
191static void __exit samba_exit(void)
192{
193 usb_deregister(&samba_driver);
194 usb_serial_deregister(&samba_device);
195}
196
197module_init(samba_init);
198module_exit(samba_exit);
199
200MODULE_AUTHOR(DRIVER_AUTHOR);
201MODULE_DESCRIPTION(DRIVER_DESC);
202MODULE_VERSION(DRIVER_VERSION);
203MODULE_LICENSE("GPL");
204
205module_param(debug, bool, S_IRUGO | S_IWUSR);
206MODULE_PARM_DESC(debug, "Enable verbose debugging messages");
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index e986002b3844..f5312dd3331b 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -416,12 +416,34 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
416 return 0; 416 return 0;
417} 417}
418 418
419static int ssu100_get_icount(struct tty_struct *tty,
420 struct serial_icounter_struct *icount)
421{
422 struct usb_serial_port *port = tty->driver_data;
423 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
424 struct async_icount cnow = priv->icount;
425
426 icount->cts = cnow.cts;
427 icount->dsr = cnow.dsr;
428 icount->rng = cnow.rng;
429 icount->dcd = cnow.dcd;
430 icount->rx = cnow.rx;
431 icount->tx = cnow.tx;
432 icount->frame = cnow.frame;
433 icount->overrun = cnow.overrun;
434 icount->parity = cnow.parity;
435 icount->brk = cnow.brk;
436 icount->buf_overrun = cnow.buf_overrun;
437
438 return 0;
439}
440
441
442
419static int ssu100_ioctl(struct tty_struct *tty, struct file *file, 443static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
420 unsigned int cmd, unsigned long arg) 444 unsigned int cmd, unsigned long arg)
421{ 445{
422 struct usb_serial_port *port = tty->driver_data; 446 struct usb_serial_port *port = tty->driver_data;
423 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
424 void __user *user_arg = (void __user *)arg;
425 447
426 dbg("%s cmd 0x%04x", __func__, cmd); 448 dbg("%s cmd 0x%04x", __func__, cmd);
427 449
@@ -433,27 +455,6 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
433 case TIOCMIWAIT: 455 case TIOCMIWAIT:
434 return wait_modem_info(port, arg); 456 return wait_modem_info(port, arg);
435 457
436 case TIOCGICOUNT:
437 {
438 struct serial_icounter_struct icount;
439 struct async_icount cnow = priv->icount;
440 memset(&icount, 0, sizeof(icount));
441 icount.cts = cnow.cts;
442 icount.dsr = cnow.dsr;
443 icount.rng = cnow.rng;
444 icount.dcd = cnow.dcd;
445 icount.rx = cnow.rx;
446 icount.tx = cnow.tx;
447 icount.frame = cnow.frame;
448 icount.overrun = cnow.overrun;
449 icount.parity = cnow.parity;
450 icount.brk = cnow.brk;
451 icount.buf_overrun = cnow.buf_overrun;
452 if (copy_to_user(user_arg, &icount, sizeof(icount)))
453 return -EFAULT;
454 return 0;
455 }
456
457 default: 458 default:
458 break; 459 break;
459 } 460 }
@@ -726,6 +727,7 @@ static struct usb_serial_driver ssu100_device = {
726 .process_read_urb = ssu100_process_read_urb, 727 .process_read_urb = ssu100_process_read_urb,
727 .tiocmget = ssu100_tiocmget, 728 .tiocmget = ssu100_tiocmget,
728 .tiocmset = ssu100_tiocmset, 729 .tiocmset = ssu100_tiocmset,
730 .get_icount = ssu100_get_icount,
729 .ioctl = ssu100_ioctl, 731 .ioctl = ssu100_ioctl,
730 .set_termios = ssu100_set_termios, 732 .set_termios = ssu100_set_termios,
731 .disconnect = usb_serial_generic_disconnect, 733 .disconnect = usb_serial_generic_disconnect,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 90979a1f5311..b2902f307b47 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -108,6 +108,8 @@ static void ti_throttle(struct tty_struct *tty);
108static void ti_unthrottle(struct tty_struct *tty); 108static void ti_unthrottle(struct tty_struct *tty);
109static int ti_ioctl(struct tty_struct *tty, struct file *file, 109static int ti_ioctl(struct tty_struct *tty, struct file *file,
110 unsigned int cmd, unsigned long arg); 110 unsigned int cmd, unsigned long arg);
111static int ti_get_icount(struct tty_struct *tty,
112 struct serial_icounter_struct *icount);
111static void ti_set_termios(struct tty_struct *tty, 113static void ti_set_termios(struct tty_struct *tty,
112 struct usb_serial_port *port, struct ktermios *old_termios); 114 struct usb_serial_port *port, struct ktermios *old_termios);
113static int ti_tiocmget(struct tty_struct *tty, struct file *file); 115static int ti_tiocmget(struct tty_struct *tty, struct file *file);
@@ -237,6 +239,7 @@ static struct usb_serial_driver ti_1port_device = {
237 .set_termios = ti_set_termios, 239 .set_termios = ti_set_termios,
238 .tiocmget = ti_tiocmget, 240 .tiocmget = ti_tiocmget,
239 .tiocmset = ti_tiocmset, 241 .tiocmset = ti_tiocmset,
242 .get_icount = ti_get_icount,
240 .break_ctl = ti_break, 243 .break_ctl = ti_break,
241 .read_int_callback = ti_interrupt_callback, 244 .read_int_callback = ti_interrupt_callback,
242 .read_bulk_callback = ti_bulk_in_callback, 245 .read_bulk_callback = ti_bulk_in_callback,
@@ -265,6 +268,7 @@ static struct usb_serial_driver ti_2port_device = {
265 .set_termios = ti_set_termios, 268 .set_termios = ti_set_termios,
266 .tiocmget = ti_tiocmget, 269 .tiocmget = ti_tiocmget,
267 .tiocmset = ti_tiocmset, 270 .tiocmset = ti_tiocmset,
271 .get_icount = ti_get_icount,
268 .break_ctl = ti_break, 272 .break_ctl = ti_break,
269 .read_int_callback = ti_interrupt_callback, 273 .read_int_callback = ti_interrupt_callback,
270 .read_bulk_callback = ti_bulk_in_callback, 274 .read_bulk_callback = ti_bulk_in_callback,
@@ -788,6 +792,31 @@ static void ti_unthrottle(struct tty_struct *tty)
788 } 792 }
789} 793}
790 794
795static int ti_get_icount(struct tty_struct *tty,
796 struct serial_icounter_struct *icount)
797{
798 struct usb_serial_port *port = tty->driver_data;
799 struct ti_port *tport = usb_get_serial_port_data(port);
800 struct async_icount cnow = tport->tp_icount;
801
802 dbg("%s - (%d) TIOCGICOUNT RX=%d, TX=%d",
803 __func__, port->number,
804 cnow.rx, cnow.tx);
805
806 icount->cts = cnow.cts;
807 icount->dsr = cnow.dsr;
808 icount->rng = cnow.rng;
809 icount->dcd = cnow.dcd;
810 icount->rx = cnow.rx;
811 icount->tx = cnow.tx;
812 icount->frame = cnow.frame;
813 icount->overrun = cnow.overrun;
814 icount->parity = cnow.parity;
815 icount->brk = cnow.brk;
816 icount->buf_overrun = cnow.buf_overrun;
817
818 return 0;
819}
791 820
792static int ti_ioctl(struct tty_struct *tty, struct file *file, 821static int ti_ioctl(struct tty_struct *tty, struct file *file,
793 unsigned int cmd, unsigned long arg) 822 unsigned int cmd, unsigned long arg)
@@ -830,14 +859,6 @@ static int ti_ioctl(struct tty_struct *tty, struct file *file,
830 cprev = cnow; 859 cprev = cnow;
831 } 860 }
832 break; 861 break;
833 case TIOCGICOUNT:
834 dbg("%s - (%d) TIOCGICOUNT RX=%d, TX=%d",
835 __func__, port->number,
836 tport->tp_icount.rx, tport->tp_icount.tx);
837 if (copy_to_user((void __user *)arg, &tport->tp_icount,
838 sizeof(tport->tp_icount)))
839 return -EFAULT;
840 return 0;
841 } 862 }
842 return -ENOIOCTLCMD; 863 return -ENOIOCTLCMD;
843} 864}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 7a2177c79bde..e64da74bdcc5 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -519,6 +519,18 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
519 return -EINVAL; 519 return -EINVAL;
520} 520}
521 521
522static int serial_get_icount(struct tty_struct *tty,
523 struct serial_icounter_struct *icount)
524{
525 struct usb_serial_port *port = tty->driver_data;
526
527 dbg("%s - port %d", __func__, port->number);
528
529 if (port->serial->type->get_icount)
530 return port->serial->type->get_icount(tty, icount);
531 return -EINVAL;
532}
533
522/* 534/*
523 * We would be calling tty_wakeup here, but unfortunately some line 535 * We would be calling tty_wakeup here, but unfortunately some line
524 * disciplines have an annoying habit of calling tty->write from 536 * disciplines have an annoying habit of calling tty->write from
@@ -1195,6 +1207,7 @@ static const struct tty_operations serial_ops = {
1195 .chars_in_buffer = serial_chars_in_buffer, 1207 .chars_in_buffer = serial_chars_in_buffer,
1196 .tiocmget = serial_tiocmget, 1208 .tiocmget = serial_tiocmget,
1197 .tiocmset = serial_tiocmset, 1209 .tiocmset = serial_tiocmset,
1210 .get_icount = serial_get_icount,
1198 .cleanup = serial_cleanup, 1211 .cleanup = serial_cleanup,
1199 .install = serial_install, 1212 .install = serial_install,
1200 .proc_fops = &serial_proc_fops, 1213 .proc_fops = &serial_proc_fops,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index eb76aaef4268..15a5d89b7f39 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -606,6 +606,10 @@ static int treo_attach(struct usb_serial *serial)
606 606
607static int clie_5_attach(struct usb_serial *serial) 607static int clie_5_attach(struct usb_serial *serial)
608{ 608{
609 struct usb_serial_port *port;
610 unsigned int pipe;
611 int j;
612
609 dbg("%s", __func__); 613 dbg("%s", __func__);
610 614
611 /* TH55 registers 2 ports. 615 /* TH55 registers 2 ports.
@@ -621,9 +625,14 @@ static int clie_5_attach(struct usb_serial *serial)
621 return -1; 625 return -1;
622 626
623 /* port 0 now uses the modified endpoint Address */ 627 /* port 0 now uses the modified endpoint Address */
624 serial->port[0]->bulk_out_endpointAddress = 628 port = serial->port[0];
629 port->bulk_out_endpointAddress =
625 serial->port[1]->bulk_out_endpointAddress; 630 serial->port[1]->bulk_out_endpointAddress;
626 631
632 pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
633 for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
634 port->write_urbs[j]->pipe = pipe;
635
627 return 0; 636 return 0;
628} 637}
629 638
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8a372bac0e43..f2767cf2e229 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -172,6 +172,19 @@ config USB_STORAGE_CYPRESS_ATACB
172 172
173 If this driver is compiled as a module, it will be named ums-cypress. 173 If this driver is compiled as a module, it will be named ums-cypress.
174 174
175config USB_UAS
176 tristate "USB Attached SCSI"
177 depends on USB && SCSI
178 help
179 The USB Attached SCSI protocol is supported by some USB
180 storage devices. It permits higher performance by supporting
181 multiple outstanding commands.
182
183 If you don't know whether you have a UAS device, it is safe to
184 say 'Y' or 'M' here and the kernel will use the right driver.
185
186 If you compile this driver as a module, it will be named uas.
187
175config USB_LIBUSUAL 188config USB_LIBUSUAL
176 bool "The shared table of common (or usual) storage devices" 189 bool "The shared table of common (or usual) storage devices"
177 depends on USB 190 depends on USB
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index ef7e5a8ceab5..fcf14cdc4a04 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -5,20 +5,21 @@
5# Rewritten to use lists instead of if-statements. 5# Rewritten to use lists instead of if-statements.
6# 6#
7 7
8EXTRA_CFLAGS := -Idrivers/scsi 8ccflags-y := -Idrivers/scsi
9 9
10obj-$(CONFIG_USB_UAS) += uas.o
10obj-$(CONFIG_USB_STORAGE) += usb-storage.o 11obj-$(CONFIG_USB_STORAGE) += usb-storage.o
11 12
12usb-storage-obj-$(CONFIG_USB_STORAGE_DEBUG) += debug.o 13usb-storage-y := scsiglue.o protocol.o transport.o usb.o
14usb-storage-y += initializers.o sierra_ms.o option_ms.o
13 15
14usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ 16usb-storage-$(CONFIG_USB_STORAGE_DEBUG) += debug.o
15 initializers.o sierra_ms.o option_ms.o $(usb-storage-obj-y)
16 17
17ifeq ($(CONFIG_USB_LIBUSUAL),) 18ifeq ($(CONFIG_USB_LIBUSUAL),)
18 usb-storage-objs += usual-tables.o 19 usb-storage-y += usual-tables.o
19else 20else
20 obj-$(CONFIG_USB) += usb-libusual.o 21 obj-$(CONFIG_USB) += usb-libusual.o
21 usb-libusual-objs := libusual.o usual-tables.o 22 usb-libusual-y := libusual.o usual-tables.o
22endif 23endif
23 24
24obj-$(CONFIG_USB_STORAGE_ALAUDA) += ums-alauda.o 25obj-$(CONFIG_USB_STORAGE_ALAUDA) += ums-alauda.o
@@ -33,14 +34,14 @@ obj-$(CONFIG_USB_STORAGE_SDDR09) += ums-sddr09.o
33obj-$(CONFIG_USB_STORAGE_SDDR55) += ums-sddr55.o 34obj-$(CONFIG_USB_STORAGE_SDDR55) += ums-sddr55.o
34obj-$(CONFIG_USB_STORAGE_USBAT) += ums-usbat.o 35obj-$(CONFIG_USB_STORAGE_USBAT) += ums-usbat.o
35 36
36ums-alauda-objs := alauda.o 37ums-alauda-y := alauda.o
37ums-cypress-objs := cypress_atacb.o 38ums-cypress-y := cypress_atacb.o
38ums-datafab-objs := datafab.o 39ums-datafab-y := datafab.o
39ums-freecom-objs := freecom.o 40ums-freecom-y := freecom.o
40ums-isd200-objs := isd200.o 41ums-isd200-y := isd200.o
41ums-jumpshot-objs := jumpshot.o 42ums-jumpshot-y := jumpshot.o
42ums-karma-objs := karma.o 43ums-karma-y := karma.o
43ums-onetouch-objs := onetouch.o 44ums-onetouch-y := onetouch.o
44ums-sddr09-objs := sddr09.o 45ums-sddr09-y := sddr09.o
45ums-sddr55-objs := sddr55.o 46ums-sddr55-y := sddr55.o
46ums-usbat-objs := shuttle_usbat.o 47ums-usbat-y := shuttle_usbat.o
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index d8d98cfecada..a688b1e686ea 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -113,7 +113,7 @@ static int slave_alloc (struct scsi_device *sdev)
113 * Let the scanning code know if this target merely sets 113 * Let the scanning code know if this target merely sets
114 * Peripheral Device Type to 0x1f to indicate no LUN. 114 * Peripheral Device Type to 0x1f to indicate no LUN.
115 */ 115 */
116 if (us->subclass == US_SC_UFI) 116 if (us->subclass == USB_SC_UFI)
117 sdev->sdev_target->pdt_1f_for_no_lun = 1; 117 sdev->sdev_target->pdt_1f_for_no_lun = 1;
118 118
119 return 0; 119 return 0;
@@ -176,7 +176,7 @@ static int slave_configure(struct scsi_device *sdev)
176 /* Disk-type devices use MODE SENSE(6) if the protocol 176 /* Disk-type devices use MODE SENSE(6) if the protocol
177 * (SubClass) is Transparent SCSI, otherwise they use 177 * (SubClass) is Transparent SCSI, otherwise they use
178 * MODE SENSE(10). */ 178 * MODE SENSE(10). */
179 if (us->subclass != US_SC_SCSI && us->subclass != US_SC_CYP_ATACB) 179 if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB)
180 sdev->use_10_for_ms = 1; 180 sdev->use_10_for_ms = 1;
181 181
182 /* Many disks only accept MODE SENSE transfer lengths of 182 /* Many disks only accept MODE SENSE transfer lengths of
@@ -209,6 +209,10 @@ static int slave_configure(struct scsi_device *sdev)
209 if (us->fflags & US_FL_CAPACITY_HEURISTICS) 209 if (us->fflags & US_FL_CAPACITY_HEURISTICS)
210 sdev->guess_capacity = 1; 210 sdev->guess_capacity = 1;
211 211
212 /* Some devices cannot handle READ_CAPACITY_16 */
213 if (us->fflags & US_FL_NO_READ_CAPACITY_16)
214 sdev->no_read_capacity_16 = 1;
215
212 /* assume SPC3 or latter devices support sense size > 18 */ 216 /* assume SPC3 or latter devices support sense size > 18 */
213 if (sdev->scsi_level > SCSI_SPC_2) 217 if (sdev->scsi_level > SCSI_SPC_2)
214 us->fflags |= US_FL_SANE_SENSE; 218 us->fflags |= US_FL_SANE_SENSE;
@@ -245,7 +249,7 @@ static int slave_configure(struct scsi_device *sdev)
245 * capacity will be decremented or is correct. */ 249 * capacity will be decremented or is correct. */
246 if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK | 250 if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
247 US_FL_SCM_MULT_TARG)) && 251 US_FL_SCM_MULT_TARG)) &&
248 us->protocol == US_PR_BULK) 252 us->protocol == USB_PR_BULK)
249 us->use_last_sector_hacks = 1; 253 us->use_last_sector_hacks = 1;
250 } else { 254 } else {
251 255
@@ -253,6 +257,10 @@ static int slave_configure(struct scsi_device *sdev)
253 * or to force 192-byte transfer lengths for MODE SENSE. 257 * or to force 192-byte transfer lengths for MODE SENSE.
254 * But they do need to use MODE SENSE(10). */ 258 * But they do need to use MODE SENSE(10). */
255 sdev->use_10_for_ms = 1; 259 sdev->use_10_for_ms = 1;
260
261 /* Some (fake) usb cdrom devices don't like READ_DISC_INFO */
262 if (us->fflags & US_FL_NO_READ_DISC_INFO)
263 sdev->no_read_disc_info = 1;
256 } 264 }
257 265
258 /* The CB and CBI transports have no way to pass LUN values 266 /* The CB and CBI transports have no way to pass LUN values
@@ -261,7 +269,7 @@ static int slave_configure(struct scsi_device *sdev)
261 * scsi_level == 0 (UNKNOWN). Hence such devices must necessarily 269 * scsi_level == 0 (UNKNOWN). Hence such devices must necessarily
262 * be single-LUN. 270 * be single-LUN.
263 */ 271 */
264 if ((us->protocol == US_PR_CB || us->protocol == US_PR_CBI) && 272 if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_CBI) &&
265 sdev->scsi_level == SCSI_UNKNOWN) 273 sdev->scsi_level == SCSI_UNKNOWN)
266 us->max_lun = 0; 274 us->max_lun = 0;
267 275
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index ab5f9f37575a..bcb9a709d349 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1760,7 +1760,7 @@ static int sddr09_probe(struct usb_interface *intf,
1760 if (result) 1760 if (result)
1761 return result; 1761 return result;
1762 1762
1763 if (us->protocol == US_PR_DPCM_USB) { 1763 if (us->protocol == USB_PR_DPCM_USB) {
1764 us->transport_name = "Control/Bulk-EUSB/SDDR09"; 1764 us->transport_name = "Control/Bulk-EUSB/SDDR09";
1765 us->transport = dpcm_transport; 1765 us->transport = dpcm_transport;
1766 us->transport_reset = usb_stor_CB_reset; 1766 us->transport_reset = usb_stor_CB_reset;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 64ec073e89de..00418995d8e9 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -642,7 +642,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
642 * unless the operation involved a data-in transfer. Devices 642 * unless the operation involved a data-in transfer. Devices
643 * can signal most data-in errors by stalling the bulk-in pipe. 643 * can signal most data-in errors by stalling the bulk-in pipe.
644 */ 644 */
645 if ((us->protocol == US_PR_CB || us->protocol == US_PR_DPCM_USB) && 645 if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_DPCM_USB) &&
646 srb->sc_data_direction != DMA_FROM_DEVICE) { 646 srb->sc_data_direction != DMA_FROM_DEVICE) {
647 US_DEBUGP("-- CB transport device requiring auto-sense\n"); 647 US_DEBUGP("-- CB transport device requiring auto-sense\n");
648 need_auto_sense = 1; 648 need_auto_sense = 1;
@@ -701,8 +701,8 @@ Retry_Sense:
701 scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size); 701 scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);
702 702
703 /* FIXME: we must do the protocol translation here */ 703 /* FIXME: we must do the protocol translation here */
704 if (us->subclass == US_SC_RBC || us->subclass == US_SC_SCSI || 704 if (us->subclass == USB_SC_RBC || us->subclass == USB_SC_SCSI ||
705 us->subclass == US_SC_CYP_ATACB) 705 us->subclass == USB_SC_CYP_ATACB)
706 srb->cmd_len = 6; 706 srb->cmd_len = 6;
707 else 707 else
708 srb->cmd_len = 12; 708 srb->cmd_len = 12;
@@ -926,7 +926,7 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
926 /* NOTE: CB does not have a status stage. Silly, I know. So 926 /* NOTE: CB does not have a status stage. Silly, I know. So
927 * we have to catch this at a higher level. 927 * we have to catch this at a higher level.
928 */ 928 */
929 if (us->protocol != US_PR_CBI) 929 if (us->protocol != USB_PR_CBI)
930 return USB_STOR_TRANSPORT_GOOD; 930 return USB_STOR_TRANSPORT_GOOD;
931 931
932 result = usb_stor_intr_transfer(us, us->iobuf, 2); 932 result = usb_stor_intr_transfer(us, us->iobuf, 2);
@@ -942,7 +942,7 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
942 * that this means we could be ignoring a real error on these 942 * that this means we could be ignoring a real error on these
943 * commands, but that can't be helped. 943 * commands, but that can't be helped.
944 */ 944 */
945 if (us->subclass == US_SC_UFI) { 945 if (us->subclass == USB_SC_UFI) {
946 if (srb->cmnd[0] == REQUEST_SENSE || 946 if (srb->cmnd[0] == REQUEST_SENSE ||
947 srb->cmnd[0] == INQUIRY) 947 srb->cmnd[0] == INQUIRY)
948 return USB_STOR_TRANSPORT_GOOD; 948 return USB_STOR_TRANSPORT_GOOD;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
new file mode 100644
index 000000000000..2054b1e25a65
--- /dev/null
+++ b/drivers/usb/storage/uas.c
@@ -0,0 +1,751 @@
1/*
2 * USB Attached SCSI
3 * Note that this is not the same as the USB Mass Storage driver
4 *
5 * Copyright Matthew Wilcox for Intel Corp, 2010
6 * Copyright Sarah Sharp for Intel Corp, 2010
7 *
8 * Distributed under the terms of the GNU GPL, version two.
9 */
10
11#include <linux/blkdev.h>
12#include <linux/slab.h>
13#include <linux/types.h>
14#include <linux/usb.h>
15#include <linux/usb/storage.h>
16
17#include <scsi/scsi.h>
18#include <scsi/scsi_dbg.h>
19#include <scsi/scsi_cmnd.h>
20#include <scsi/scsi_device.h>
21#include <scsi/scsi_host.h>
22#include <scsi/scsi_tcq.h>
23
24/* Common header for all IUs */
25struct iu {
26 __u8 iu_id;
27 __u8 rsvd1;
28 __be16 tag;
29};
30
31enum {
32 IU_ID_COMMAND = 0x01,
33 IU_ID_STATUS = 0x03,
34 IU_ID_RESPONSE = 0x04,
35 IU_ID_TASK_MGMT = 0x05,
36 IU_ID_READ_READY = 0x06,
37 IU_ID_WRITE_READY = 0x07,
38};
39
40struct command_iu {
41 __u8 iu_id;
42 __u8 rsvd1;
43 __be16 tag;
44 __u8 prio_attr;
45 __u8 rsvd5;
46 __u8 len;
47 __u8 rsvd7;
48 struct scsi_lun lun;
49 __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */
50};
51
52struct sense_iu {
53 __u8 iu_id;
54 __u8 rsvd1;
55 __be16 tag;
56 __be16 status_qual;
57 __u8 status;
58 __u8 service_response;
59 __u8 rsvd8[6];
60 __be16 len;
61 __u8 sense[SCSI_SENSE_BUFFERSIZE];
62};
63
64/*
65 * The r00-r01c specs define this version of the SENSE IU data structure.
66 * It's still in use by several different firmware releases.
67 */
68struct sense_iu_old {
69 __u8 iu_id;
70 __u8 rsvd1;
71 __be16 tag;
72 __be16 len;
73 __u8 status;
74 __u8 service_response;
75 __u8 sense[SCSI_SENSE_BUFFERSIZE];
76};
77
78enum {
79 CMD_PIPE_ID = 1,
80 STATUS_PIPE_ID = 2,
81 DATA_IN_PIPE_ID = 3,
82 DATA_OUT_PIPE_ID = 4,
83
84 UAS_SIMPLE_TAG = 0,
85 UAS_HEAD_TAG = 1,
86 UAS_ORDERED_TAG = 2,
87 UAS_ACA = 4,
88};
89
90struct uas_dev_info {
91 struct usb_interface *intf;
92 struct usb_device *udev;
93 int qdepth;
94 unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
95 unsigned use_streams:1;
96 unsigned uas_sense_old:1;
97};
98
99enum {
100 ALLOC_SENSE_URB = (1 << 0),
101 SUBMIT_SENSE_URB = (1 << 1),
102 ALLOC_DATA_IN_URB = (1 << 2),
103 SUBMIT_DATA_IN_URB = (1 << 3),
104 ALLOC_DATA_OUT_URB = (1 << 4),
105 SUBMIT_DATA_OUT_URB = (1 << 5),
106 ALLOC_CMD_URB = (1 << 6),
107 SUBMIT_CMD_URB = (1 << 7),
108};
109
110/* Overrides scsi_pointer */
111struct uas_cmd_info {
112 unsigned int state;
113 unsigned int stream;
114 struct urb *cmd_urb;
115 struct urb *sense_urb;
116 struct urb *data_in_urb;
117 struct urb *data_out_urb;
118 struct list_head list;
119};
120
121/* I hate forward declarations, but I actually have a loop */
122static int uas_submit_urbs(struct scsi_cmnd *cmnd,
123 struct uas_dev_info *devinfo, gfp_t gfp);
124
125static DEFINE_SPINLOCK(uas_work_lock);
126static LIST_HEAD(uas_work_list);
127
128static void uas_do_work(struct work_struct *work)
129{
130 struct uas_cmd_info *cmdinfo;
131 struct list_head list;
132
133 spin_lock_irq(&uas_work_lock);
134 list_replace_init(&uas_work_list, &list);
135 spin_unlock_irq(&uas_work_lock);
136
137 list_for_each_entry(cmdinfo, &list, list) {
138 struct scsi_pointer *scp = (void *)cmdinfo;
139 struct scsi_cmnd *cmnd = container_of(scp,
140 struct scsi_cmnd, SCp);
141 uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_KERNEL);
142 }
143}
144
145static DECLARE_WORK(uas_work, uas_do_work);
146
147static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
148{
149 struct sense_iu *sense_iu = urb->transfer_buffer;
150 struct scsi_device *sdev = cmnd->device;
151
152 if (urb->actual_length > 16) {
153 unsigned len = be16_to_cpup(&sense_iu->len);
154 if (len + 16 != urb->actual_length) {
155 int newlen = min(len + 16, urb->actual_length) - 16;
156 if (newlen < 0)
157 newlen = 0;
158 sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
159 "disagrees with IU sense data length %d, "
160 "using %d bytes of sense data\n", __func__,
161 urb->actual_length, len, newlen);
162 len = newlen;
163 }
164 memcpy(cmnd->sense_buffer, sense_iu->sense, len);
165 }
166
167 cmnd->result = sense_iu->status;
168 if (sdev->current_cmnd)
169 sdev->current_cmnd = NULL;
170 cmnd->scsi_done(cmnd);
171 usb_free_urb(urb);
172}
173
174static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
175{
176 struct sense_iu_old *sense_iu = urb->transfer_buffer;
177 struct scsi_device *sdev = cmnd->device;
178
179 if (urb->actual_length > 8) {
180 unsigned len = be16_to_cpup(&sense_iu->len) - 2;
181 if (len + 8 != urb->actual_length) {
182 int newlen = min(len + 8, urb->actual_length) - 8;
183 if (newlen < 0)
184 newlen = 0;
185 sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
186 "disagrees with IU sense data length %d, "
187 "using %d bytes of sense data\n", __func__,
188 urb->actual_length, len, newlen);
189 len = newlen;
190 }
191 memcpy(cmnd->sense_buffer, sense_iu->sense, len);
192 }
193
194 cmnd->result = sense_iu->status;
195 if (sdev->current_cmnd)
196 sdev->current_cmnd = NULL;
197 cmnd->scsi_done(cmnd);
198 usb_free_urb(urb);
199}
200
201static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
202 unsigned direction)
203{
204 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
205 int err;
206
207 cmdinfo->state = direction | SUBMIT_SENSE_URB;
208 err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
209 if (err) {
210 spin_lock(&uas_work_lock);
211 list_add_tail(&cmdinfo->list, &uas_work_list);
212 spin_unlock(&uas_work_lock);
213 schedule_work(&uas_work);
214 }
215}
216
217static void uas_stat_cmplt(struct urb *urb)
218{
219 struct iu *iu = urb->transfer_buffer;
220 struct scsi_device *sdev = urb->context;
221 struct uas_dev_info *devinfo = sdev->hostdata;
222 struct scsi_cmnd *cmnd;
223 u16 tag;
224
225 if (urb->status) {
226 dev_err(&urb->dev->dev, "URB BAD STATUS %d\n", urb->status);
227 usb_free_urb(urb);
228 return;
229 }
230
231 tag = be16_to_cpup(&iu->tag) - 1;
232 if (sdev->current_cmnd)
233 cmnd = sdev->current_cmnd;
234 else
235 cmnd = scsi_find_tag(sdev, tag);
236 if (!cmnd)
237 return;
238
239 switch (iu->iu_id) {
240 case IU_ID_STATUS:
241 if (urb->actual_length < 16)
242 devinfo->uas_sense_old = 1;
243 if (devinfo->uas_sense_old)
244 uas_sense_old(urb, cmnd);
245 else
246 uas_sense(urb, cmnd);
247 break;
248 case IU_ID_READ_READY:
249 uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
250 break;
251 case IU_ID_WRITE_READY:
252 uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
253 break;
254 default:
255 scmd_printk(KERN_ERR, cmnd,
256 "Bogus IU (%d) received on status pipe\n", iu->iu_id);
257 }
258}
259
260static void uas_data_cmplt(struct urb *urb)
261{
262 struct scsi_data_buffer *sdb = urb->context;
263 sdb->resid = sdb->length - urb->actual_length;
264 usb_free_urb(urb);
265}
266
267static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
268 unsigned int pipe, u16 stream_id,
269 struct scsi_data_buffer *sdb,
270 enum dma_data_direction dir)
271{
272 struct usb_device *udev = devinfo->udev;
273 struct urb *urb = usb_alloc_urb(0, gfp);
274
275 if (!urb)
276 goto out;
277 usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length, uas_data_cmplt,
278 sdb);
279 if (devinfo->use_streams)
280 urb->stream_id = stream_id;
281 urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
282 urb->sg = sdb->table.sgl;
283 out:
284 return urb;
285}
286
287static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
288 struct scsi_cmnd *cmnd, u16 stream_id)
289{
290 struct usb_device *udev = devinfo->udev;
291 struct urb *urb = usb_alloc_urb(0, gfp);
292 struct sense_iu *iu;
293
294 if (!urb)
295 goto out;
296
297 iu = kmalloc(sizeof(*iu), gfp);
298 if (!iu)
299 goto free;
300
301 usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
302 uas_stat_cmplt, cmnd->device);
303 urb->stream_id = stream_id;
304 urb->transfer_flags |= URB_FREE_BUFFER;
305 out:
306 return urb;
307 free:
308 usb_free_urb(urb);
309 return NULL;
310}
311
312static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
313 struct scsi_cmnd *cmnd, u16 stream_id)
314{
315 struct usb_device *udev = devinfo->udev;
316 struct scsi_device *sdev = cmnd->device;
317 struct urb *urb = usb_alloc_urb(0, gfp);
318 struct command_iu *iu;
319 int len;
320
321 if (!urb)
322 goto out;
323
324 len = cmnd->cmd_len - 16;
325 if (len < 0)
326 len = 0;
327 len = ALIGN(len, 4);
328 iu = kmalloc(sizeof(*iu) + len, gfp);
329 if (!iu)
330 goto free;
331
332 iu->iu_id = IU_ID_COMMAND;
333 iu->tag = cpu_to_be16(stream_id);
334 if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER))
335 iu->prio_attr = UAS_ORDERED_TAG;
336 else
337 iu->prio_attr = UAS_SIMPLE_TAG;
338 iu->len = len;
339 int_to_scsilun(sdev->lun, &iu->lun);
340 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
341
342 usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
343 usb_free_urb, NULL);
344 urb->transfer_flags |= URB_FREE_BUFFER;
345 out:
346 return urb;
347 free:
348 usb_free_urb(urb);
349 return NULL;
350}
351
352/*
353 * Why should I request the Status IU before sending the Command IU? Spec
354 * says to, but also says the device may receive them in any order. Seems
355 * daft to me.
356 */
357
358static int uas_submit_urbs(struct scsi_cmnd *cmnd,
359 struct uas_dev_info *devinfo, gfp_t gfp)
360{
361 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
362
363 if (cmdinfo->state & ALLOC_SENSE_URB) {
364 cmdinfo->sense_urb = uas_alloc_sense_urb(devinfo, gfp, cmnd,
365 cmdinfo->stream);
366 if (!cmdinfo->sense_urb)
367 return SCSI_MLQUEUE_DEVICE_BUSY;
368 cmdinfo->state &= ~ALLOC_SENSE_URB;
369 }
370
371 if (cmdinfo->state & SUBMIT_SENSE_URB) {
372 if (usb_submit_urb(cmdinfo->sense_urb, gfp)) {
373 scmd_printk(KERN_INFO, cmnd,
374 "sense urb submission failure\n");
375 return SCSI_MLQUEUE_DEVICE_BUSY;
376 }
377 cmdinfo->state &= ~SUBMIT_SENSE_URB;
378 }
379
380 if (cmdinfo->state & ALLOC_DATA_IN_URB) {
381 cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp,
382 devinfo->data_in_pipe, cmdinfo->stream,
383 scsi_in(cmnd), DMA_FROM_DEVICE);
384 if (!cmdinfo->data_in_urb)
385 return SCSI_MLQUEUE_DEVICE_BUSY;
386 cmdinfo->state &= ~ALLOC_DATA_IN_URB;
387 }
388
389 if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
390 if (usb_submit_urb(cmdinfo->data_in_urb, gfp)) {
391 scmd_printk(KERN_INFO, cmnd,
392 "data in urb submission failure\n");
393 return SCSI_MLQUEUE_DEVICE_BUSY;
394 }
395 cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
396 }
397
398 if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
399 cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp,
400 devinfo->data_out_pipe, cmdinfo->stream,
401 scsi_out(cmnd), DMA_TO_DEVICE);
402 if (!cmdinfo->data_out_urb)
403 return SCSI_MLQUEUE_DEVICE_BUSY;
404 cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
405 }
406
407 if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
408 if (usb_submit_urb(cmdinfo->data_out_urb, gfp)) {
409 scmd_printk(KERN_INFO, cmnd,
410 "data out urb submission failure\n");
411 return SCSI_MLQUEUE_DEVICE_BUSY;
412 }
413 cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
414 }
415
416 if (cmdinfo->state & ALLOC_CMD_URB) {
417 cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd,
418 cmdinfo->stream);
419 if (!cmdinfo->cmd_urb)
420 return SCSI_MLQUEUE_DEVICE_BUSY;
421 cmdinfo->state &= ~ALLOC_CMD_URB;
422 }
423
424 if (cmdinfo->state & SUBMIT_CMD_URB) {
425 if (usb_submit_urb(cmdinfo->cmd_urb, gfp)) {
426 scmd_printk(KERN_INFO, cmnd,
427 "cmd urb submission failure\n");
428 return SCSI_MLQUEUE_DEVICE_BUSY;
429 }
430 cmdinfo->state &= ~SUBMIT_CMD_URB;
431 }
432
433 return 0;
434}
435
436static int uas_queuecommand(struct scsi_cmnd *cmnd,
437 void (*done)(struct scsi_cmnd *))
438{
439 struct scsi_device *sdev = cmnd->device;
440 struct uas_dev_info *devinfo = sdev->hostdata;
441 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
442 int err;
443
444 BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
445
446 if (!cmdinfo->sense_urb && sdev->current_cmnd)
447 return SCSI_MLQUEUE_DEVICE_BUSY;
448
449 if (blk_rq_tagged(cmnd->request)) {
450 cmdinfo->stream = cmnd->request->tag + 1;
451 } else {
452 sdev->current_cmnd = cmnd;
453 cmdinfo->stream = 1;
454 }
455
456 cmnd->scsi_done = done;
457
458 cmdinfo->state = ALLOC_SENSE_URB | SUBMIT_SENSE_URB |
459 ALLOC_CMD_URB | SUBMIT_CMD_URB;
460
461 switch (cmnd->sc_data_direction) {
462 case DMA_FROM_DEVICE:
463 cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
464 break;
465 case DMA_BIDIRECTIONAL:
466 cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
467 case DMA_TO_DEVICE:
468 cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
469 case DMA_NONE:
470 break;
471 }
472
473 if (!devinfo->use_streams) {
474 cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
475 cmdinfo->stream = 0;
476 }
477
478 err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
479 if (err) {
480 /* If we did nothing, give up now */
481 if (cmdinfo->state & SUBMIT_SENSE_URB) {
482 usb_free_urb(cmdinfo->sense_urb);
483 return SCSI_MLQUEUE_DEVICE_BUSY;
484 }
485 spin_lock(&uas_work_lock);
486 list_add_tail(&cmdinfo->list, &uas_work_list);
487 spin_unlock(&uas_work_lock);
488 schedule_work(&uas_work);
489 }
490
491 return 0;
492}
493
494static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
495{
496 struct scsi_device *sdev = cmnd->device;
497 sdev_printk(KERN_INFO, sdev, "%s tag %d\n", __func__,
498 cmnd->request->tag);
499
500/* XXX: Send ABORT TASK Task Management command */
501 return FAILED;
502}
503
504static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
505{
506 struct scsi_device *sdev = cmnd->device;
507 sdev_printk(KERN_INFO, sdev, "%s tag %d\n", __func__,
508 cmnd->request->tag);
509
510/* XXX: Send LOGICAL UNIT RESET Task Management command */
511 return FAILED;
512}
513
514static int uas_eh_target_reset_handler(struct scsi_cmnd *cmnd)
515{
516 struct scsi_device *sdev = cmnd->device;
517 sdev_printk(KERN_INFO, sdev, "%s tag %d\n", __func__,
518 cmnd->request->tag);
519
520/* XXX: Can we reset just the one USB interface?
521 * Would calling usb_set_interface() have the right effect?
522 */
523 return FAILED;
524}
525
526static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
527{
528 struct scsi_device *sdev = cmnd->device;
529 struct uas_dev_info *devinfo = sdev->hostdata;
530 struct usb_device *udev = devinfo->udev;
531
532 sdev_printk(KERN_INFO, sdev, "%s tag %d\n", __func__,
533 cmnd->request->tag);
534
535 if (usb_reset_device(udev))
536 return SUCCESS;
537
538 return FAILED;
539}
540
541static int uas_slave_alloc(struct scsi_device *sdev)
542{
543 sdev->hostdata = (void *)sdev->host->hostdata[0];
544 return 0;
545}
546
547static int uas_slave_configure(struct scsi_device *sdev)
548{
549 struct uas_dev_info *devinfo = sdev->hostdata;
550 scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
551 scsi_activate_tcq(sdev, devinfo->qdepth - 1);
552 return 0;
553}
554
555static struct scsi_host_template uas_host_template = {
556 .module = THIS_MODULE,
557 .name = "uas",
558 .queuecommand = uas_queuecommand,
559 .slave_alloc = uas_slave_alloc,
560 .slave_configure = uas_slave_configure,
561 .eh_abort_handler = uas_eh_abort_handler,
562 .eh_device_reset_handler = uas_eh_device_reset_handler,
563 .eh_target_reset_handler = uas_eh_target_reset_handler,
564 .eh_bus_reset_handler = uas_eh_bus_reset_handler,
565 .can_queue = 65536, /* Is there a limit on the _host_ ? */
566 .this_id = -1,
567 .sg_tablesize = SG_NONE,
568 .cmd_per_lun = 1, /* until we override it */
569 .skip_settle_delay = 1,
570 .ordered_tag = 1,
571};
572
573static struct usb_device_id uas_usb_ids[] = {
574 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
575 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
576 /* 0xaa is a prototype device I happen to have access to */
577 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, 0xaa) },
578 { }
579};
580MODULE_DEVICE_TABLE(usb, uas_usb_ids);
581
582static void uas_configure_endpoints(struct uas_dev_info *devinfo)
583{
584 struct usb_host_endpoint *eps[4] = { };
585 struct usb_interface *intf = devinfo->intf;
586 struct usb_device *udev = devinfo->udev;
587 struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
588 unsigned i, n_endpoints = intf->cur_altsetting->desc.bNumEndpoints;
589
590 devinfo->uas_sense_old = 0;
591
592 for (i = 0; i < n_endpoints; i++) {
593 unsigned char *extra = endpoint[i].extra;
594 int len = endpoint[i].extralen;
595 while (len > 1) {
596 if (extra[1] == USB_DT_PIPE_USAGE) {
597 unsigned pipe_id = extra[2];
598 if (pipe_id > 0 && pipe_id < 5)
599 eps[pipe_id - 1] = &endpoint[i];
600 break;
601 }
602 len -= extra[0];
603 extra += extra[0];
604 }
605 }
606
607 /*
608 * Assume that if we didn't find a control pipe descriptor, we're
609 * using a device with old firmware that happens to be set up like
610 * this.
611 */
612 if (!eps[0]) {
613 devinfo->cmd_pipe = usb_sndbulkpipe(udev, 1);
614 devinfo->status_pipe = usb_rcvbulkpipe(udev, 1);
615 devinfo->data_in_pipe = usb_rcvbulkpipe(udev, 2);
616 devinfo->data_out_pipe = usb_sndbulkpipe(udev, 2);
617
618 eps[1] = usb_pipe_endpoint(udev, devinfo->status_pipe);
619 eps[2] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
620 eps[3] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
621 } else {
622 devinfo->cmd_pipe = usb_sndbulkpipe(udev,
623 eps[0]->desc.bEndpointAddress);
624 devinfo->status_pipe = usb_rcvbulkpipe(udev,
625 eps[1]->desc.bEndpointAddress);
626 devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
627 eps[2]->desc.bEndpointAddress);
628 devinfo->data_out_pipe = usb_sndbulkpipe(udev,
629 eps[3]->desc.bEndpointAddress);
630 }
631
632 devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, 256,
633 GFP_KERNEL);
634 if (devinfo->qdepth < 0) {
635 devinfo->qdepth = 256;
636 devinfo->use_streams = 0;
637 } else {
638 devinfo->use_streams = 1;
639 }
640}
641
642/*
643 * XXX: What I'd like to do here is register a SCSI host for each USB host in
644 * the system. Follow usb-storage's design of registering a SCSI host for
645 * each USB device for the moment. Can implement this by walking up the
646 * USB hierarchy until we find a USB host.
647 */
648static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
649{
650 int result;
651 struct Scsi_Host *shost;
652 struct uas_dev_info *devinfo;
653 struct usb_device *udev = interface_to_usbdev(intf);
654
655 if (id->bInterfaceProtocol == 0x50) {
656 int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
657/* XXX: Shouldn't assume that 1 is the alternative we want */
658 int ret = usb_set_interface(udev, ifnum, 1);
659 if (ret)
660 return -ENODEV;
661 }
662
663 devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
664 if (!devinfo)
665 return -ENOMEM;
666
667 result = -ENOMEM;
668 shost = scsi_host_alloc(&uas_host_template, sizeof(void *));
669 if (!shost)
670 goto free;
671
672 shost->max_cmd_len = 16 + 252;
673 shost->max_id = 1;
674 shost->sg_tablesize = udev->bus->sg_tablesize;
675
676 result = scsi_add_host(shost, &intf->dev);
677 if (result)
678 goto free;
679 shost->hostdata[0] = (unsigned long)devinfo;
680
681 devinfo->intf = intf;
682 devinfo->udev = udev;
683 uas_configure_endpoints(devinfo);
684
685 scsi_scan_host(shost);
686 usb_set_intfdata(intf, shost);
687 return result;
688 free:
689 kfree(devinfo);
690 if (shost)
691 scsi_host_put(shost);
692 return result;
693}
694
695static int uas_pre_reset(struct usb_interface *intf)
696{
697/* XXX: Need to return 1 if it's not our device in error handling */
698 return 0;
699}
700
701static int uas_post_reset(struct usb_interface *intf)
702{
703/* XXX: Need to return 1 if it's not our device in error handling */
704 return 0;
705}
706
707static void uas_disconnect(struct usb_interface *intf)
708{
709 struct usb_device *udev = interface_to_usbdev(intf);
710 struct usb_host_endpoint *eps[3];
711 struct Scsi_Host *shost = usb_get_intfdata(intf);
712 struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
713
714 scsi_remove_host(shost);
715
716 eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe);
717 eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
718 eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
719 usb_free_streams(intf, eps, 3, GFP_KERNEL);
720
721 kfree(devinfo);
722}
723
724/*
725 * XXX: Should this plug into libusual so we can auto-upgrade devices from
726 * Bulk-Only to UAS?
727 */
728static struct usb_driver uas_driver = {
729 .name = "uas",
730 .probe = uas_probe,
731 .disconnect = uas_disconnect,
732 .pre_reset = uas_pre_reset,
733 .post_reset = uas_post_reset,
734 .id_table = uas_usb_ids,
735};
736
737static int uas_init(void)
738{
739 return usb_register(&uas_driver);
740}
741
742static void uas_exit(void)
743{
744 usb_deregister(&uas_driver);
745}
746
747module_init(uas_init);
748module_exit(uas_exit);
749
750MODULE_LICENSE("GPL");
751MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index 8c412f885dd2..fa3e9edaa2cf 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -21,11 +21,11 @@
21UNUSUAL_DEV( 0x0584, 0x0008, 0x0102, 0x0102, 21UNUSUAL_DEV( 0x0584, 0x0008, 0x0102, 0x0102,
22 "Fujifilm", 22 "Fujifilm",
23 "DPC-R1 (Alauda)", 23 "DPC-R1 (Alauda)",
24 US_SC_SCSI, US_PR_ALAUDA, init_alauda, 0), 24 USB_SC_SCSI, USB_PR_ALAUDA, init_alauda, 0),
25 25
26UNUSUAL_DEV( 0x07b4, 0x010a, 0x0102, 0x0102, 26UNUSUAL_DEV( 0x07b4, 0x010a, 0x0102, 0x0102,
27 "Olympus", 27 "Olympus",
28 "MAUSB-10 (Alauda)", 28 "MAUSB-10 (Alauda)",
29 US_SC_SCSI, US_PR_ALAUDA, init_alauda, 0), 29 USB_SC_SCSI, USB_PR_ALAUDA, init_alauda, 0),
30 30
31#endif /* defined(CONFIG_USB_STORAGE_ALAUDA) || ... */ 31#endif /* defined(CONFIG_USB_STORAGE_ALAUDA) || ... */
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 44be6d75dab6..c854fdebe0ae 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -23,12 +23,12 @@
23UNUSUAL_DEV( 0x04b4, 0x6830, 0x0000, 0x9999, 23UNUSUAL_DEV( 0x04b4, 0x6830, 0x0000, 0x9999,
24 "Cypress", 24 "Cypress",
25 "Cypress AT2LP", 25 "Cypress AT2LP",
26 US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), 26 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
27 27
28/* CY7C68310 : support atacb and atacb2 */ 28/* CY7C68310 : support atacb and atacb2 */
29UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, 29UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
30 "Cypress", 30 "Cypress",
31 "Cypress ISD-300LP", 31 "Cypress ISD-300LP",
32 US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), 32 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
33 33
34#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */ 34#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index c9298ce9f223..582a603c78be 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -21,7 +21,7 @@
21UNUSUAL_DEV( 0x07c4, 0xa000, 0x0000, 0x0015, 21UNUSUAL_DEV( 0x07c4, 0xa000, 0x0000, 0x0015,
22 "Datafab", 22 "Datafab",
23 "MDCFE-B USB CF Reader", 23 "MDCFE-B USB CF Reader",
24 US_SC_SCSI, US_PR_DATAFAB, NULL, 24 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
25 0), 25 0),
26 26
27/* 27/*
@@ -38,45 +38,45 @@ UNUSUAL_DEV( 0x07c4, 0xa000, 0x0000, 0x0015,
38UNUSUAL_DEV( 0x07c4, 0xa001, 0x0000, 0xffff, 38UNUSUAL_DEV( 0x07c4, 0xa001, 0x0000, 0xffff,
39 "SIIG/Datafab", 39 "SIIG/Datafab",
40 "SIIG/Datafab Memory Stick+CF Reader/Writer", 40 "SIIG/Datafab Memory Stick+CF Reader/Writer",
41 US_SC_SCSI, US_PR_DATAFAB, NULL, 41 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
42 0), 42 0),
43 43
44/* Reported by Josef Reisinger <josef.reisinger@netcologne.de> */ 44/* Reported by Josef Reisinger <josef.reisinger@netcologne.de> */
45UNUSUAL_DEV( 0x07c4, 0xa002, 0x0000, 0xffff, 45UNUSUAL_DEV( 0x07c4, 0xa002, 0x0000, 0xffff,
46 "Datafab/Unknown", 46 "Datafab/Unknown",
47 "MD2/MD3 Disk enclosure", 47 "MD2/MD3 Disk enclosure",
48 US_SC_SCSI, US_PR_DATAFAB, NULL, 48 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
49 US_FL_SINGLE_LUN), 49 US_FL_SINGLE_LUN),
50 50
51UNUSUAL_DEV( 0x07c4, 0xa003, 0x0000, 0xffff, 51UNUSUAL_DEV( 0x07c4, 0xa003, 0x0000, 0xffff,
52 "Datafab/Unknown", 52 "Datafab/Unknown",
53 "Datafab-based Reader", 53 "Datafab-based Reader",
54 US_SC_SCSI, US_PR_DATAFAB, NULL, 54 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
55 0), 55 0),
56 56
57UNUSUAL_DEV( 0x07c4, 0xa004, 0x0000, 0xffff, 57UNUSUAL_DEV( 0x07c4, 0xa004, 0x0000, 0xffff,
58 "Datafab/Unknown", 58 "Datafab/Unknown",
59 "Datafab-based Reader", 59 "Datafab-based Reader",
60 US_SC_SCSI, US_PR_DATAFAB, NULL, 60 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
61 0), 61 0),
62 62
63UNUSUAL_DEV( 0x07c4, 0xa005, 0x0000, 0xffff, 63UNUSUAL_DEV( 0x07c4, 0xa005, 0x0000, 0xffff,
64 "PNY/Datafab", 64 "PNY/Datafab",
65 "PNY/Datafab CF+SM Reader", 65 "PNY/Datafab CF+SM Reader",
66 US_SC_SCSI, US_PR_DATAFAB, NULL, 66 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
67 0), 67 0),
68 68
69UNUSUAL_DEV( 0x07c4, 0xa006, 0x0000, 0xffff, 69UNUSUAL_DEV( 0x07c4, 0xa006, 0x0000, 0xffff,
70 "Simple Tech/Datafab", 70 "Simple Tech/Datafab",
71 "Simple Tech/Datafab CF+SM Reader", 71 "Simple Tech/Datafab CF+SM Reader",
72 US_SC_SCSI, US_PR_DATAFAB, NULL, 72 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
73 0), 73 0),
74 74
75/* Submitted by Olaf Hering <olh@suse.de> */ 75/* Submitted by Olaf Hering <olh@suse.de> */
76UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff, 76UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff,
77 "Datafab Systems, Inc.", 77 "Datafab Systems, Inc.",
78 "USB to CF + SM Combo (LC1)", 78 "USB to CF + SM Combo (LC1)",
79 US_SC_SCSI, US_PR_DATAFAB, NULL, 79 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
80 0), 80 0),
81 81
82/* Reported by Felix Moeller <felix@derklecks.de> 82/* Reported by Felix Moeller <felix@derklecks.de>
@@ -86,13 +86,13 @@ UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff,
86UNUSUAL_DEV( 0x07c4, 0xa10b, 0x0000, 0xffff, 86UNUSUAL_DEV( 0x07c4, 0xa10b, 0x0000, 0xffff,
87 "DataFab Systems Inc.", 87 "DataFab Systems Inc.",
88 "USB CF+MS", 88 "USB CF+MS",
89 US_SC_SCSI, US_PR_DATAFAB, NULL, 89 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
90 0), 90 0),
91 91
92UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff, 92UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff,
93 "Acomdata", 93 "Acomdata",
94 "CF", 94 "CF",
95 US_SC_SCSI, US_PR_DATAFAB, NULL, 95 USB_SC_SCSI, USB_PR_DATAFAB, NULL,
96 US_FL_SINGLE_LUN), 96 US_FL_SINGLE_LUN),
97 97
98#endif /* defined(CONFIG_USB_STORAGE_DATAFAB) || ... */ 98#endif /* defined(CONFIG_USB_STORAGE_DATAFAB) || ... */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2c897eefadde..6ccdd3dd5259 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -63,26 +63,26 @@
63UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100, 63UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
64 "ATMEL", 64 "ATMEL",
65 "SND1 Storage", 65 "SND1 Storage",
66 US_SC_DEVICE, US_PR_DEVICE, NULL, 66 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
67 US_FL_IGNORE_RESIDUE), 67 US_FL_IGNORE_RESIDUE),
68 68
69/* Reported by Rodolfo Quesada <rquesada@roqz.net> */ 69/* Reported by Rodolfo Quesada <rquesada@roqz.net> */
70UNUSUAL_DEV( 0x03ee, 0x6906, 0x0003, 0x0003, 70UNUSUAL_DEV( 0x03ee, 0x6906, 0x0003, 0x0003,
71 "VIA Technologies Inc.", 71 "VIA Technologies Inc.",
72 "Mitsumi multi cardreader", 72 "Mitsumi multi cardreader",
73 US_SC_DEVICE, US_PR_DEVICE, NULL, 73 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
74 US_FL_IGNORE_RESIDUE ), 74 US_FL_IGNORE_RESIDUE ),
75 75
76UNUSUAL_DEV( 0x03f0, 0x0107, 0x0200, 0x0200, 76UNUSUAL_DEV( 0x03f0, 0x0107, 0x0200, 0x0200,
77 "HP", 77 "HP",
78 "CD-Writer+", 78 "CD-Writer+",
79 US_SC_8070, US_PR_CB, NULL, 0), 79 USB_SC_8070, USB_PR_CB, NULL, 0),
80 80
81/* Reported by Ben Efros <ben@pc-doctor.com> */ 81/* Reported by Ben Efros <ben@pc-doctor.com> */
82UNUSUAL_DEV( 0x03f0, 0x070c, 0x0000, 0x0000, 82UNUSUAL_DEV( 0x03f0, 0x070c, 0x0000, 0x0000,
83 "HP", 83 "HP",
84 "Personal Media Drive", 84 "Personal Media Drive",
85 US_SC_DEVICE, US_PR_DEVICE, NULL, 85 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
86 US_FL_SANE_SENSE ), 86 US_FL_SANE_SENSE ),
87 87
88/* Reported by Grant Grundler <grundler@parisc-linux.org> 88/* Reported by Grant Grundler <grundler@parisc-linux.org>
@@ -91,7 +91,7 @@ UNUSUAL_DEV( 0x03f0, 0x070c, 0x0000, 0x0000,
91UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001, 91UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001,
92 "HP", 92 "HP",
93 "PhotoSmart R707", 93 "PhotoSmart R707",
94 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), 94 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
95 95
96/* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net> 96/* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
97 * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product) 97 * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
@@ -100,14 +100,14 @@ UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001,
100UNUSUAL_DEV( 0x0409, 0x0040, 0x0000, 0x9999, 100UNUSUAL_DEV( 0x0409, 0x0040, 0x0000, 0x9999,
101 "NEC", 101 "NEC",
102 "NEC USB UF000x", 102 "NEC USB UF000x",
103 US_SC_DEVICE, US_PR_DEVICE, NULL, 103 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
104 US_FL_SINGLE_LUN ), 104 US_FL_SINGLE_LUN ),
105 105
106/* Patch submitted by Mihnea-Costin Grigore <mihnea@zulu.ro> */ 106/* Patch submitted by Mihnea-Costin Grigore <mihnea@zulu.ro> */
107UNUSUAL_DEV( 0x040d, 0x6205, 0x0003, 0x0003, 107UNUSUAL_DEV( 0x040d, 0x6205, 0x0003, 0x0003,
108 "VIA Technologies Inc.", 108 "VIA Technologies Inc.",
109 "USB 2.0 Card Reader", 109 "USB 2.0 Card Reader",
110 US_SC_DEVICE, US_PR_DEVICE, NULL, 110 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
111 US_FL_IGNORE_RESIDUE ), 111 US_FL_IGNORE_RESIDUE ),
112 112
113/* Deduced by Jonathan Woithe <jwoithe@physics.adelaide.edu.au> 113/* Deduced by Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
@@ -117,40 +117,40 @@ UNUSUAL_DEV( 0x040d, 0x6205, 0x0003, 0x0003,
117UNUSUAL_DEV( 0x0411, 0x001c, 0x0113, 0x0113, 117UNUSUAL_DEV( 0x0411, 0x001c, 0x0113, 0x0113,
118 "Buffalo", 118 "Buffalo",
119 "DUB-P40G HDD", 119 "DUB-P40G HDD",
120 US_SC_DEVICE, US_PR_DEVICE, NULL, 120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_FIX_INQUIRY ), 121 US_FL_FIX_INQUIRY ),
122 122
123/* Submitted by Ernestas Vaiciukevicius <ernisv@gmail.com> */ 123/* Submitted by Ernestas Vaiciukevicius <ernisv@gmail.com> */
124UNUSUAL_DEV( 0x0419, 0x0100, 0x0100, 0x0100, 124UNUSUAL_DEV( 0x0419, 0x0100, 0x0100, 0x0100,
125 "Samsung Info. Systems America, Inc.", 125 "Samsung Info. Systems America, Inc.",
126 "MP3 Player", 126 "MP3 Player",
127 US_SC_DEVICE, US_PR_DEVICE, NULL, 127 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
128 US_FL_IGNORE_RESIDUE ), 128 US_FL_IGNORE_RESIDUE ),
129 129
130/* Reported by Orgad Shaneh <orgads@gmail.com> */ 130/* Reported by Orgad Shaneh <orgads@gmail.com> */
131UNUSUAL_DEV( 0x0419, 0xaace, 0x0100, 0x0100, 131UNUSUAL_DEV( 0x0419, 0xaace, 0x0100, 0x0100,
132 "Samsung", "MP3 Player", 132 "Samsung", "MP3 Player",
133 US_SC_DEVICE, US_PR_DEVICE, NULL, 133 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
134 US_FL_IGNORE_RESIDUE ), 134 US_FL_IGNORE_RESIDUE ),
135 135
136/* Reported by Christian Leber <christian@leber.de> */ 136/* Reported by Christian Leber <christian@leber.de> */
137UNUSUAL_DEV( 0x0419, 0xaaf5, 0x0100, 0x0100, 137UNUSUAL_DEV( 0x0419, 0xaaf5, 0x0100, 0x0100,
138 "TrekStor", 138 "TrekStor",
139 "i.Beat 115 2.0", 139 "i.Beat 115 2.0",
140 US_SC_DEVICE, US_PR_DEVICE, NULL, 140 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
141 US_FL_IGNORE_RESIDUE | US_FL_NOT_LOCKABLE ), 141 US_FL_IGNORE_RESIDUE | US_FL_NOT_LOCKABLE ),
142 142
143/* Reported by Stefan Werner <dustbln@gmx.de> */ 143/* Reported by Stefan Werner <dustbln@gmx.de> */
144UNUSUAL_DEV( 0x0419, 0xaaf6, 0x0100, 0x0100, 144UNUSUAL_DEV( 0x0419, 0xaaf6, 0x0100, 0x0100,
145 "TrekStor", 145 "TrekStor",
146 "i.Beat Joy 2.0", 146 "i.Beat Joy 2.0",
147 US_SC_DEVICE, US_PR_DEVICE, NULL, 147 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
148 US_FL_IGNORE_RESIDUE ), 148 US_FL_IGNORE_RESIDUE ),
149 149
150/* Reported by Pete Zaitcev <zaitcev@redhat.com>, bz#176584 */ 150/* Reported by Pete Zaitcev <zaitcev@redhat.com>, bz#176584 */
151UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100, 151UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100,
152 "GENERIC", "MP3 PLAYER", /* MyMusix PD-205 on the outside. */ 152 "GENERIC", "MP3 PLAYER", /* MyMusix PD-205 on the outside. */
153 US_SC_DEVICE, US_PR_DEVICE, NULL, 153 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
154 US_FL_IGNORE_RESIDUE ), 154 US_FL_IGNORE_RESIDUE ),
155 155
156/* Reported by Andrew Nayenko <relan@bk.ru> 156/* Reported by Andrew Nayenko <relan@bk.ru>
@@ -158,28 +158,28 @@ UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100,
158UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0610, 158UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0610,
159 "Nokia", 159 "Nokia",
160 "Nokia 6288", 160 "Nokia 6288",
161 US_SC_DEVICE, US_PR_DEVICE, NULL, 161 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
162 US_FL_MAX_SECTORS_64 ), 162 US_FL_MAX_SECTORS_64 ),
163 163
164/* Reported by Mario Rettig <mariorettig@web.de> */ 164/* Reported by Mario Rettig <mariorettig@web.de> */
165UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, 165UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
166 "Nokia", 166 "Nokia",
167 "Nokia 3250", 167 "Nokia 3250",
168 US_SC_DEVICE, US_PR_DEVICE, NULL, 168 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
169 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 169 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
170 170
171/* Reported by <honkkis@gmail.com> */ 171/* Reported by <honkkis@gmail.com> */
172UNUSUAL_DEV( 0x0421, 0x0433, 0x0100, 0x0100, 172UNUSUAL_DEV( 0x0421, 0x0433, 0x0100, 0x0100,
173 "Nokia", 173 "Nokia",
174 "E70", 174 "E70",
175 US_SC_DEVICE, US_PR_DEVICE, NULL, 175 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
176 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 176 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
177 177
178/* Reported by Jon Hart <Jon.Hart@web.de> */ 178/* Reported by Jon Hart <Jon.Hart@web.de> */
179UNUSUAL_DEV( 0x0421, 0x0434, 0x0100, 0x0100, 179UNUSUAL_DEV( 0x0421, 0x0434, 0x0100, 0x0100,
180 "Nokia", 180 "Nokia",
181 "E60", 181 "E60",
182 US_SC_DEVICE, US_PR_DEVICE, NULL, 182 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
183 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), 183 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
184 184
185/* Reported by Sumedha Swamy <sumedhaswamy@gmail.com> and 185/* Reported by Sumedha Swamy <sumedhaswamy@gmail.com> and
@@ -187,7 +187,7 @@ UNUSUAL_DEV( 0x0421, 0x0434, 0x0100, 0x0100,
187UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100, 187UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100,
188 "Nokia", 188 "Nokia",
189 "N91", 189 "N91",
190 US_SC_DEVICE, US_PR_DEVICE, NULL, 190 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
191 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 191 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
192 192
193/* Reported by Jiri Slaby <jirislaby@gmail.com> and 193/* Reported by Jiri Slaby <jirislaby@gmail.com> and
@@ -195,42 +195,42 @@ UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100,
195UNUSUAL_DEV( 0x0421, 0x0446, 0x0100, 0x0100, 195UNUSUAL_DEV( 0x0421, 0x0446, 0x0100, 0x0100,
196 "Nokia", 196 "Nokia",
197 "N80", 197 "N80",
198 US_SC_DEVICE, US_PR_DEVICE, NULL, 198 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
199 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 199 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
200 200
201/* Reported by Matthew Bloch <matthew@bytemark.co.uk> */ 201/* Reported by Matthew Bloch <matthew@bytemark.co.uk> */
202UNUSUAL_DEV( 0x0421, 0x044e, 0x0100, 0x0100, 202UNUSUAL_DEV( 0x0421, 0x044e, 0x0100, 0x0100,
203 "Nokia", 203 "Nokia",
204 "E61", 204 "E61",
205 US_SC_DEVICE, US_PR_DEVICE, NULL, 205 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
206 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 206 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
207 207
208/* Reported by Bardur Arantsson <bardur@scientician.net> */ 208/* Reported by Bardur Arantsson <bardur@scientician.net> */
209UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0610, 209UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0610,
210 "Nokia", 210 "Nokia",
211 "6131", 211 "6131",
212 US_SC_DEVICE, US_PR_DEVICE, NULL, 212 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
213 US_FL_MAX_SECTORS_64 ), 213 US_FL_MAX_SECTORS_64 ),
214 214
215/* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */ 215/* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */
216UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x9999, 216UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x9999,
217 "Nokia", 217 "Nokia",
218 "Nokia 6233", 218 "Nokia 6233",
219 US_SC_DEVICE, US_PR_DEVICE, NULL, 219 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
220 US_FL_MAX_SECTORS_64 ), 220 US_FL_MAX_SECTORS_64 ),
221 221
222/* Reported by Alex Corcoles <alex@corcoles.net> */ 222/* Reported by Alex Corcoles <alex@corcoles.net> */
223UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370, 223UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
224 "Nokia", 224 "Nokia",
225 "6234", 225 "6234",
226 US_SC_DEVICE, US_PR_DEVICE, NULL, 226 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
227 US_FL_MAX_SECTORS_64 ), 227 US_FL_MAX_SECTORS_64 ),
228 228
229#ifdef NO_SDDR09 229#ifdef NO_SDDR09
230UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100, 230UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
231 "Microtech", 231 "Microtech",
232 "CameraMate", 232 "CameraMate",
233 US_SC_SCSI, US_PR_CB, NULL, 233 USB_SC_SCSI, USB_PR_CB, NULL,
234 US_FL_SINGLE_LUN ), 234 US_FL_SINGLE_LUN ),
235#endif 235#endif
236 236
@@ -239,7 +239,7 @@ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
239UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100, 239UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100,
240 "Neuros Audio", 240 "Neuros Audio",
241 "USB 2.0 HD 2.5", 241 "USB 2.0 HD 2.5",
242 US_SC_DEVICE, US_PR_BULK, NULL, 242 USB_SC_DEVICE, USB_PR_BULK, NULL,
243 US_FL_NEED_OVERRIDE ), 243 US_FL_NEED_OVERRIDE ),
244 244
245/* 245/*
@@ -250,7 +250,7 @@ UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100,
250UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100, 250UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100,
251 "USBest Technology", /* sold by Transcend */ 251 "USBest Technology", /* sold by Transcend */
252 "USB Mass Storage Device", 252 "USB Mass Storage Device",
253 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), 253 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
254 254
255/* 255/*
256* Bohdan Linda <bohdan.linda@gmail.com> 256* Bohdan Linda <bohdan.linda@gmail.com>
@@ -260,7 +260,7 @@ UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100,
260UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100, 260UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100,
261 "USB 2.0", 261 "USB 2.0",
262 "Flash Disk", 262 "Flash Disk",
263 US_SC_DEVICE, US_PR_DEVICE, NULL, 263 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
264 US_FL_NOT_LOCKABLE ), 264 US_FL_NOT_LOCKABLE ),
265 265
266/* Reported by Tamas Kerecsen <kerecsen@bigfoot.com> 266/* Reported by Tamas Kerecsen <kerecsen@bigfoot.com>
@@ -272,7 +272,7 @@ UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100,
272UNUSUAL_DEV( 0x045e, 0xffff, 0x0000, 0x0000, 272UNUSUAL_DEV( 0x045e, 0xffff, 0x0000, 0x0000,
273 "Mitac", 273 "Mitac",
274 "GPS", 274 "GPS",
275 US_SC_DEVICE, US_PR_DEVICE, NULL, 275 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
276 US_FL_MAX_SECTORS_64 ), 276 US_FL_MAX_SECTORS_64 ),
277 277
278/* 278/*
@@ -284,32 +284,32 @@ UNUSUAL_DEV( 0x045e, 0xffff, 0x0000, 0x0000,
284UNUSUAL_DEV( 0x046b, 0xff40, 0x0100, 0x0100, 284UNUSUAL_DEV( 0x046b, 0xff40, 0x0100, 0x0100,
285 "AMI", 285 "AMI",
286 "Virtual Floppy", 286 "Virtual Floppy",
287 US_SC_DEVICE, US_PR_DEVICE, NULL, 287 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
288 US_FL_NO_WP_DETECT), 288 US_FL_NO_WP_DETECT),
289 289
290/* Patch submitted by Philipp Friedrich <philipp@void.at> */ 290/* Patch submitted by Philipp Friedrich <philipp@void.at> */
291UNUSUAL_DEV( 0x0482, 0x0100, 0x0100, 0x0100, 291UNUSUAL_DEV( 0x0482, 0x0100, 0x0100, 0x0100,
292 "Kyocera", 292 "Kyocera",
293 "Finecam S3x", 293 "Finecam S3x",
294 US_SC_8070, US_PR_CB, NULL, US_FL_FIX_INQUIRY), 294 USB_SC_8070, USB_PR_CB, NULL, US_FL_FIX_INQUIRY),
295 295
296/* Patch submitted by Philipp Friedrich <philipp@void.at> */ 296/* Patch submitted by Philipp Friedrich <philipp@void.at> */
297UNUSUAL_DEV( 0x0482, 0x0101, 0x0100, 0x0100, 297UNUSUAL_DEV( 0x0482, 0x0101, 0x0100, 0x0100,
298 "Kyocera", 298 "Kyocera",
299 "Finecam S4", 299 "Finecam S4",
300 US_SC_8070, US_PR_CB, NULL, US_FL_FIX_INQUIRY), 300 USB_SC_8070, USB_PR_CB, NULL, US_FL_FIX_INQUIRY),
301 301
302/* Patch submitted by Stephane Galles <stephane.galles@free.fr> */ 302/* Patch submitted by Stephane Galles <stephane.galles@free.fr> */
303UNUSUAL_DEV( 0x0482, 0x0103, 0x0100, 0x0100, 303UNUSUAL_DEV( 0x0482, 0x0103, 0x0100, 0x0100,
304 "Kyocera", 304 "Kyocera",
305 "Finecam S5", 305 "Finecam S5",
306 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY), 306 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
307 307
308/* Patch submitted by Jens Taprogge <jens.taprogge@taprogge.org> */ 308/* Patch submitted by Jens Taprogge <jens.taprogge@taprogge.org> */
309UNUSUAL_DEV( 0x0482, 0x0107, 0x0100, 0x0100, 309UNUSUAL_DEV( 0x0482, 0x0107, 0x0100, 0x0100,
310 "Kyocera", 310 "Kyocera",
311 "CONTAX SL300R T*", 311 "CONTAX SL300R T*",
312 US_SC_DEVICE, US_PR_DEVICE, NULL, 312 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
313 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE), 313 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE),
314 314
315/* Reported by Paul Stewart <stewart@wetlogic.net> 315/* Reported by Paul Stewart <stewart@wetlogic.net>
@@ -317,7 +317,7 @@ UNUSUAL_DEV( 0x0482, 0x0107, 0x0100, 0x0100,
317UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001, 317UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001,
318 "Hitachi", 318 "Hitachi",
319 "DVD-CAM DZ-MV100A Camcorder", 319 "DVD-CAM DZ-MV100A Camcorder",
320 US_SC_SCSI, US_PR_CB, NULL, US_FL_SINGLE_LUN), 320 USB_SC_SCSI, USB_PR_CB, NULL, US_FL_SINGLE_LUN),
321 321
322/* BENQ DC5330 322/* BENQ DC5330
323 * Reported by Manuel Fombuena <mfombuena@ya.com> and 323 * Reported by Manuel Fombuena <mfombuena@ya.com> and
@@ -325,7 +325,7 @@ UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001,
325UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100, 325UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100,
326 "Tekom Technologies, Inc", 326 "Tekom Technologies, Inc",
327 "300_CAMERA", 327 "300_CAMERA",
328 US_SC_DEVICE, US_PR_DEVICE, NULL, 328 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
329 US_FL_IGNORE_RESIDUE ), 329 US_FL_IGNORE_RESIDUE ),
330 330
331/* Patch for Nikon coolpix 2000 331/* Patch for Nikon coolpix 2000
@@ -333,14 +333,14 @@ UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100,
333UNUSUAL_DEV( 0x04b0, 0x0301, 0x0010, 0x0010, 333UNUSUAL_DEV( 0x04b0, 0x0301, 0x0010, 0x0010,
334 "NIKON", 334 "NIKON",
335 "NIKON DSC E2000", 335 "NIKON DSC E2000",
336 US_SC_DEVICE, US_PR_DEVICE,NULL, 336 USB_SC_DEVICE, USB_PR_DEVICE,NULL,
337 US_FL_NOT_LOCKABLE ), 337 US_FL_NOT_LOCKABLE ),
338 338
339/* Reported by Doug Maxey (dwm@austin.ibm.com) */ 339/* Reported by Doug Maxey (dwm@austin.ibm.com) */
340UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110, 340UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110,
341 "IBM", 341 "IBM",
342 "IBM RSA2", 342 "IBM RSA2",
343 US_SC_DEVICE, US_PR_CB, NULL, 343 USB_SC_DEVICE, USB_PR_CB, NULL,
344 US_FL_MAX_SECTORS_MIN), 344 US_FL_MAX_SECTORS_MIN),
345 345
346/* Reported by Simon Levitt <simon@whattf.com> 346/* Reported by Simon Levitt <simon@whattf.com>
@@ -348,14 +348,14 @@ UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110,
348UNUSUAL_DEV( 0x04b8, 0x0601, 0x0100, 0x0100, 348UNUSUAL_DEV( 0x04b8, 0x0601, 0x0100, 0x0100,
349 "Epson", 349 "Epson",
350 "875DC Storage", 350 "875DC Storage",
351 US_SC_SCSI, US_PR_CB, NULL, US_FL_FIX_INQUIRY), 351 USB_SC_SCSI, USB_PR_CB, NULL, US_FL_FIX_INQUIRY),
352 352
353/* Reported by Khalid Aziz <khalid@gonehiking.org> 353/* Reported by Khalid Aziz <khalid@gonehiking.org>
354 * This entry is needed because the device reports Sub=ff */ 354 * This entry is needed because the device reports Sub=ff */
355UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110, 355UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110,
356 "Epson", 356 "Epson",
357 "785EPX Storage", 357 "785EPX Storage",
358 US_SC_SCSI, US_PR_BULK, NULL, US_FL_SINGLE_LUN), 358 USB_SC_SCSI, USB_PR_BULK, NULL, US_FL_SINGLE_LUN),
359 359
360/* Not sure who reported this originally but 360/* Not sure who reported this originally but
361 * Pavel Machek <pavel@ucw.cz> reported that the extra US_FL_SINGLE_LUN 361 * Pavel Machek <pavel@ucw.cz> reported that the extra US_FL_SINGLE_LUN
@@ -363,7 +363,7 @@ UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110,
363UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210, 363UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
364 "Fujifilm", 364 "Fujifilm",
365 "FinePix 1400Zoom", 365 "FinePix 1400Zoom",
366 US_SC_UFI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN), 366 USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
367 367
368/* Reported by Ondrej Zary <linux@rainbow-software.org> 368/* Reported by Ondrej Zary <linux@rainbow-software.org>
369 * The device reports one sector more and breaks when that sector is accessed 369 * The device reports one sector more and breaks when that sector is accessed
@@ -371,7 +371,7 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
371UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c, 371UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
372 "ScanLogic", 372 "ScanLogic",
373 "SL11R-IDE", 373 "SL11R-IDE",
374 US_SC_DEVICE, US_PR_DEVICE, NULL, 374 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
375 US_FL_FIX_CAPACITY), 375 US_FL_FIX_CAPACITY),
376 376
377/* Reported by Kriston Fincher <kriston@airmail.net> 377/* Reported by Kriston Fincher <kriston@airmail.net>
@@ -382,27 +382,27 @@ UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
382UNUSUAL_DEV( 0x04da, 0x0901, 0x0100, 0x0200, 382UNUSUAL_DEV( 0x04da, 0x0901, 0x0100, 0x0200,
383 "Panasonic", 383 "Panasonic",
384 "LS-120 Camera", 384 "LS-120 Camera",
385 US_SC_UFI, US_PR_DEVICE, NULL, 0), 385 USB_SC_UFI, USB_PR_DEVICE, NULL, 0),
386 386
387/* From Yukihiro Nakai, via zaitcev@yahoo.com. 387/* From Yukihiro Nakai, via zaitcev@yahoo.com.
388 * This is needed for CB instead of CBI */ 388 * This is needed for CB instead of CBI */
389UNUSUAL_DEV( 0x04da, 0x0d05, 0x0000, 0x0000, 389UNUSUAL_DEV( 0x04da, 0x0d05, 0x0000, 0x0000,
390 "Sharp CE-CW05", 390 "Sharp CE-CW05",
391 "CD-R/RW Drive", 391 "CD-R/RW Drive",
392 US_SC_8070, US_PR_CB, NULL, 0), 392 USB_SC_8070, USB_PR_CB, NULL, 0),
393 393
394/* Reported by Adriaan Penning <a.penning@luon.net> */ 394/* Reported by Adriaan Penning <a.penning@luon.net> */
395UNUSUAL_DEV( 0x04da, 0x2372, 0x0000, 0x9999, 395UNUSUAL_DEV( 0x04da, 0x2372, 0x0000, 0x9999,
396 "Panasonic", 396 "Panasonic",
397 "DMC-LCx Camera", 397 "DMC-LCx Camera",
398 US_SC_DEVICE, US_PR_DEVICE, NULL, 398 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
399 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ), 399 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
400 400
401/* Reported by Simeon Simeonov <simeonov_2000@yahoo.com> */ 401/* Reported by Simeon Simeonov <simeonov_2000@yahoo.com> */
402UNUSUAL_DEV( 0x04da, 0x2373, 0x0000, 0x9999, 402UNUSUAL_DEV( 0x04da, 0x2373, 0x0000, 0x9999,
403 "LEICA", 403 "LEICA",
404 "D-LUX Camera", 404 "D-LUX Camera",
405 US_SC_DEVICE, US_PR_DEVICE, NULL, 405 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
406 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ), 406 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
407 407
408/* Most of the following entries were developed with the help of 408/* Most of the following entries were developed with the help of
@@ -411,19 +411,19 @@ UNUSUAL_DEV( 0x04da, 0x2373, 0x0000, 0x9999,
411UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200, 411UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200,
412 "Matshita", 412 "Matshita",
413 "LS-120", 413 "LS-120",
414 US_SC_8020, US_PR_CB, NULL, 0), 414 USB_SC_8020, USB_PR_CB, NULL, 0),
415 415
416UNUSUAL_DEV( 0x04e6, 0x0002, 0x0100, 0x0100, 416UNUSUAL_DEV( 0x04e6, 0x0002, 0x0100, 0x0100,
417 "Shuttle", 417 "Shuttle",
418 "eUSCSI Bridge", 418 "eUSCSI Bridge",
419 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, 419 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
420 US_FL_SCM_MULT_TARG ), 420 US_FL_SCM_MULT_TARG ),
421 421
422#ifdef NO_SDDR09 422#ifdef NO_SDDR09
423UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208, 423UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208,
424 "SCM Microsystems", 424 "SCM Microsystems",
425 "eUSB CompactFlash Adapter", 425 "eUSB CompactFlash Adapter",
426 US_SC_SCSI, US_PR_CB, NULL, 426 USB_SC_SCSI, USB_PR_CB, NULL,
427 US_FL_SINGLE_LUN), 427 US_FL_SINGLE_LUN),
428#endif 428#endif
429 429
@@ -431,54 +431,54 @@ UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208,
431UNUSUAL_DEV( 0x04e6, 0x0006, 0x0100, 0x0100, 431UNUSUAL_DEV( 0x04e6, 0x0006, 0x0100, 0x0100,
432 "SCM Microsystems Inc.", 432 "SCM Microsystems Inc.",
433 "eUSB MMC Adapter", 433 "eUSB MMC Adapter",
434 US_SC_SCSI, US_PR_CB, NULL, 434 USB_SC_SCSI, USB_PR_CB, NULL,
435 US_FL_SINGLE_LUN), 435 US_FL_SINGLE_LUN),
436 436
437/* Reported by Daniel Nouri <dpunktnpunkt@web.de> */ 437/* Reported by Daniel Nouri <dpunktnpunkt@web.de> */
438UNUSUAL_DEV( 0x04e6, 0x0006, 0x0205, 0x0205, 438UNUSUAL_DEV( 0x04e6, 0x0006, 0x0205, 0x0205,
439 "Shuttle", 439 "Shuttle",
440 "eUSB MMC Adapter", 440 "eUSB MMC Adapter",
441 US_SC_SCSI, US_PR_DEVICE, NULL, 441 USB_SC_SCSI, USB_PR_DEVICE, NULL,
442 US_FL_SINGLE_LUN), 442 US_FL_SINGLE_LUN),
443 443
444UNUSUAL_DEV( 0x04e6, 0x0007, 0x0100, 0x0200, 444UNUSUAL_DEV( 0x04e6, 0x0007, 0x0100, 0x0200,
445 "Sony", 445 "Sony",
446 "Hifd", 446 "Hifd",
447 US_SC_SCSI, US_PR_CB, NULL, 447 USB_SC_SCSI, USB_PR_CB, NULL,
448 US_FL_SINGLE_LUN), 448 US_FL_SINGLE_LUN),
449 449
450UNUSUAL_DEV( 0x04e6, 0x0009, 0x0200, 0x0200, 450UNUSUAL_DEV( 0x04e6, 0x0009, 0x0200, 0x0200,
451 "Shuttle", 451 "Shuttle",
452 "eUSB ATA/ATAPI Adapter", 452 "eUSB ATA/ATAPI Adapter",
453 US_SC_8020, US_PR_CB, NULL, 0), 453 USB_SC_8020, USB_PR_CB, NULL, 0),
454 454
455UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200, 455UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200,
456 "Shuttle", 456 "Shuttle",
457 "eUSB CompactFlash Adapter", 457 "eUSB CompactFlash Adapter",
458 US_SC_8020, US_PR_CB, NULL, 0), 458 USB_SC_8020, USB_PR_CB, NULL, 0),
459 459
460UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100, 460UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100,
461 "Shuttle", 461 "Shuttle",
462 "eUSCSI Bridge", 462 "eUSCSI Bridge",
463 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 463 USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
464 US_FL_SCM_MULT_TARG ), 464 US_FL_SCM_MULT_TARG ),
465 465
466UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100, 466UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100,
467 "Shuttle", 467 "Shuttle",
468 "eUSCSI Bridge", 468 "eUSCSI Bridge",
469 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 469 USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
470 US_FL_SCM_MULT_TARG ), 470 US_FL_SCM_MULT_TARG ),
471 471
472UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, 472UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
473 "Shuttle", 473 "Shuttle",
474 "CD-RW Device", 474 "CD-RW Device",
475 US_SC_8020, US_PR_CB, NULL, 0), 475 USB_SC_8020, USB_PR_CB, NULL, 0),
476 476
477/* Reported by Dmitry Khlystov <adminimus@gmail.com> */ 477/* Reported by Dmitry Khlystov <adminimus@gmail.com> */
478UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220, 478UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220,
479 "Samsung", 479 "Samsung",
480 "YP-U3", 480 "YP-U3",
481 US_SC_DEVICE, US_PR_DEVICE, NULL, 481 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
482 US_FL_MAX_SECTORS_64), 482 US_FL_MAX_SECTORS_64),
483 483
484/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 484/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
@@ -488,14 +488,14 @@ UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220,
488UNUSUAL_DEV( 0x04fc, 0x80c2, 0x0100, 0x0100, 488UNUSUAL_DEV( 0x04fc, 0x80c2, 0x0100, 0x0100,
489 "Kobian Mercury", 489 "Kobian Mercury",
490 "Binocam DCB-132", 490 "Binocam DCB-132",
491 US_SC_DEVICE, US_PR_DEVICE, NULL, 491 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
492 US_FL_BULK32), 492 US_FL_BULK32),
493 493
494/* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */ 494/* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */
495UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133, 495UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
496 "Belkin", 496 "Belkin",
497 "USB SCSI Adaptor", 497 "USB SCSI Adaptor",
498 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 498 USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
499 US_FL_SCM_MULT_TARG ), 499 US_FL_SCM_MULT_TARG ),
500 500
501/* Iomega Clik! Drive 501/* Iomega Clik! Drive
@@ -505,14 +505,14 @@ UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
505UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100, 505UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
506 "Iomega", 506 "Iomega",
507 "USB Clik! 40", 507 "USB Clik! 40",
508 US_SC_8070, US_PR_DEVICE, NULL, 508 USB_SC_8070, USB_PR_DEVICE, NULL,
509 US_FL_FIX_INQUIRY ), 509 US_FL_FIX_INQUIRY ),
510 510
511/* Added by Alan Stern <stern@rowland.harvard.edu> */ 511/* Added by Alan Stern <stern@rowland.harvard.edu> */
512COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999, 512COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
513 "Linux", 513 "Linux",
514 "File-backed Storage Gadget", 514 "File-backed Storage Gadget",
515 US_SC_DEVICE, US_PR_DEVICE, NULL, 515 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
516 US_FL_CAPACITY_OK ), 516 US_FL_CAPACITY_OK ),
517 517
518/* Yakumo Mega Image 37 518/* Yakumo Mega Image 37
@@ -520,7 +520,7 @@ COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
520UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100, 520UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
521 "Tekom Technologies, Inc", 521 "Tekom Technologies, Inc",
522 "300_CAMERA", 522 "300_CAMERA",
523 US_SC_DEVICE, US_PR_DEVICE, NULL, 523 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
524 US_FL_IGNORE_RESIDUE ), 524 US_FL_IGNORE_RESIDUE ),
525 525
526/* Another Yakumo camera. 526/* Another Yakumo camera.
@@ -528,14 +528,14 @@ UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
528UNUSUAL_DEV( 0x052b, 0x1804, 0x0100, 0x0100, 528UNUSUAL_DEV( 0x052b, 0x1804, 0x0100, 0x0100,
529 "Tekom Technologies, Inc", 529 "Tekom Technologies, Inc",
530 "300_CAMERA", 530 "300_CAMERA",
531 US_SC_DEVICE, US_PR_DEVICE, NULL, 531 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
532 US_FL_IGNORE_RESIDUE ), 532 US_FL_IGNORE_RESIDUE ),
533 533
534/* Reported by Iacopo Spalletti <avvisi@spalletti.it> */ 534/* Reported by Iacopo Spalletti <avvisi@spalletti.it> */
535UNUSUAL_DEV( 0x052b, 0x1807, 0x0100, 0x0100, 535UNUSUAL_DEV( 0x052b, 0x1807, 0x0100, 0x0100,
536 "Tekom Technologies, Inc", 536 "Tekom Technologies, Inc",
537 "300_CAMERA", 537 "300_CAMERA",
538 US_SC_DEVICE, US_PR_DEVICE, NULL, 538 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
539 US_FL_IGNORE_RESIDUE ), 539 US_FL_IGNORE_RESIDUE ),
540 540
541/* Yakumo Mega Image 47 541/* Yakumo Mega Image 47
@@ -543,7 +543,7 @@ UNUSUAL_DEV( 0x052b, 0x1807, 0x0100, 0x0100,
543UNUSUAL_DEV( 0x052b, 0x1905, 0x0100, 0x0100, 543UNUSUAL_DEV( 0x052b, 0x1905, 0x0100, 0x0100,
544 "Tekom Technologies, Inc", 544 "Tekom Technologies, Inc",
545 "400_CAMERA", 545 "400_CAMERA",
546 US_SC_DEVICE, US_PR_DEVICE, NULL, 546 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
547 US_FL_IGNORE_RESIDUE ), 547 US_FL_IGNORE_RESIDUE ),
548 548
549/* Reported by Paul Ortyl <ortylp@3miasto.net> 549/* Reported by Paul Ortyl <ortylp@3miasto.net>
@@ -551,13 +551,13 @@ UNUSUAL_DEV( 0x052b, 0x1905, 0x0100, 0x0100,
551UNUSUAL_DEV( 0x052b, 0x1911, 0x0100, 0x0100, 551UNUSUAL_DEV( 0x052b, 0x1911, 0x0100, 0x0100,
552 "Tekom Technologies, Inc", 552 "Tekom Technologies, Inc",
553 "400_CAMERA", 553 "400_CAMERA",
554 US_SC_DEVICE, US_PR_DEVICE, NULL, 554 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
555 US_FL_IGNORE_RESIDUE ), 555 US_FL_IGNORE_RESIDUE ),
556 556
557UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450, 557UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
558 "Sony", 558 "Sony",
559 "DSC-S30/S70/S75/505V/F505/F707/F717/P8", 559 "DSC-S30/S70/S75/505V/F505/F707/F717/P8",
560 US_SC_SCSI, US_PR_DEVICE, NULL, 560 USB_SC_SCSI, USB_PR_DEVICE, NULL,
561 US_FL_SINGLE_LUN | US_FL_NOT_LOCKABLE | US_FL_NO_WP_DETECT ), 561 US_FL_SINGLE_LUN | US_FL_NOT_LOCKABLE | US_FL_NO_WP_DETECT ),
562 562
563/* Submitted by Lars Jacob <jacob.lars@googlemail.com> 563/* Submitted by Lars Jacob <jacob.lars@googlemail.com>
@@ -565,7 +565,7 @@ UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
565UNUSUAL_DEV( 0x054c, 0x0010, 0x0500, 0x0610, 565UNUSUAL_DEV( 0x054c, 0x0010, 0x0500, 0x0610,
566 "Sony", 566 "Sony",
567 "DSC-T1/T5/H5", 567 "DSC-T1/T5/H5",
568 US_SC_8070, US_PR_DEVICE, NULL, 568 USB_SC_8070, USB_PR_DEVICE, NULL,
569 US_FL_SINGLE_LUN ), 569 US_FL_SINGLE_LUN ),
570 570
571 571
@@ -573,88 +573,88 @@ UNUSUAL_DEV( 0x054c, 0x0010, 0x0500, 0x0610,
573UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100, 573UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100,
574 "Sony", 574 "Sony",
575 "Memorystick NW-MS7", 575 "Memorystick NW-MS7",
576 US_SC_DEVICE, US_PR_DEVICE, NULL, 576 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
577 US_FL_SINGLE_LUN ), 577 US_FL_SINGLE_LUN ),
578 578
579/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */ 579/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */
580UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x2000, 580UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x2000,
581 "Sony", 581 "Sony",
582 "USB Floppy Drive", 582 "USB Floppy Drive",
583 US_SC_DEVICE, US_PR_DEVICE, NULL, 583 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
584 US_FL_SINGLE_LUN ), 584 US_FL_SINGLE_LUN ),
585 585
586UNUSUAL_DEV( 0x054c, 0x002d, 0x0100, 0x0100, 586UNUSUAL_DEV( 0x054c, 0x002d, 0x0100, 0x0100,
587 "Sony", 587 "Sony",
588 "Memorystick MSAC-US1", 588 "Memorystick MSAC-US1",
589 US_SC_DEVICE, US_PR_DEVICE, NULL, 589 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
590 US_FL_SINGLE_LUN ), 590 US_FL_SINGLE_LUN ),
591 591
592/* Submitted by Klaus Mueller <k.mueller@intershop.de> */ 592/* Submitted by Klaus Mueller <k.mueller@intershop.de> */
593UNUSUAL_DEV( 0x054c, 0x002e, 0x0106, 0x0310, 593UNUSUAL_DEV( 0x054c, 0x002e, 0x0106, 0x0310,
594 "Sony", 594 "Sony",
595 "Handycam", 595 "Handycam",
596 US_SC_SCSI, US_PR_DEVICE, NULL, 596 USB_SC_SCSI, USB_PR_DEVICE, NULL,
597 US_FL_SINGLE_LUN ), 597 US_FL_SINGLE_LUN ),
598 598
599/* Submitted by Rajesh Kumble Nayak <nayak@obs-nice.fr> */ 599/* Submitted by Rajesh Kumble Nayak <nayak@obs-nice.fr> */
600UNUSUAL_DEV( 0x054c, 0x002e, 0x0500, 0x0500, 600UNUSUAL_DEV( 0x054c, 0x002e, 0x0500, 0x0500,
601 "Sony", 601 "Sony",
602 "Handycam HC-85", 602 "Handycam HC-85",
603 US_SC_UFI, US_PR_DEVICE, NULL, 603 USB_SC_UFI, USB_PR_DEVICE, NULL,
604 US_FL_SINGLE_LUN ), 604 US_FL_SINGLE_LUN ),
605 605
606UNUSUAL_DEV( 0x054c, 0x0032, 0x0000, 0x9999, 606UNUSUAL_DEV( 0x054c, 0x0032, 0x0000, 0x9999,
607 "Sony", 607 "Sony",
608 "Memorystick MSC-U01N", 608 "Memorystick MSC-U01N",
609 US_SC_DEVICE, US_PR_DEVICE, NULL, 609 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
610 US_FL_SINGLE_LUN ), 610 US_FL_SINGLE_LUN ),
611 611
612/* Submitted by Michal Mlotek <mlotek@foobar.pl> */ 612/* Submitted by Michal Mlotek <mlotek@foobar.pl> */
613UNUSUAL_DEV( 0x054c, 0x0058, 0x0000, 0x9999, 613UNUSUAL_DEV( 0x054c, 0x0058, 0x0000, 0x9999,
614 "Sony", 614 "Sony",
615 "PEG N760c Memorystick", 615 "PEG N760c Memorystick",
616 US_SC_DEVICE, US_PR_DEVICE, NULL, 616 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
617 US_FL_FIX_INQUIRY ), 617 US_FL_FIX_INQUIRY ),
618 618
619UNUSUAL_DEV( 0x054c, 0x0069, 0x0000, 0x9999, 619UNUSUAL_DEV( 0x054c, 0x0069, 0x0000, 0x9999,
620 "Sony", 620 "Sony",
621 "Memorystick MSC-U03", 621 "Memorystick MSC-U03",
622 US_SC_UFI, US_PR_CB, NULL, 622 USB_SC_UFI, USB_PR_CB, NULL,
623 US_FL_SINGLE_LUN ), 623 US_FL_SINGLE_LUN ),
624 624
625/* Submitted by Nathan Babb <nathan@lexi.com> */ 625/* Submitted by Nathan Babb <nathan@lexi.com> */
626UNUSUAL_DEV( 0x054c, 0x006d, 0x0000, 0x9999, 626UNUSUAL_DEV( 0x054c, 0x006d, 0x0000, 0x9999,
627 "Sony", 627 "Sony",
628 "PEG Mass Storage", 628 "PEG Mass Storage",
629 US_SC_DEVICE, US_PR_DEVICE, NULL, 629 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
630 US_FL_FIX_INQUIRY ), 630 US_FL_FIX_INQUIRY ),
631 631
632/* Submitted by Frank Engel <frankie@cse.unsw.edu.au> */ 632/* Submitted by Frank Engel <frankie@cse.unsw.edu.au> */
633UNUSUAL_DEV( 0x054c, 0x0099, 0x0000, 0x9999, 633UNUSUAL_DEV( 0x054c, 0x0099, 0x0000, 0x9999,
634 "Sony", 634 "Sony",
635 "PEG Mass Storage", 635 "PEG Mass Storage",
636 US_SC_DEVICE, US_PR_DEVICE, NULL, 636 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
637 US_FL_FIX_INQUIRY ), 637 US_FL_FIX_INQUIRY ),
638 638
639/* Submitted by Mike Alborn <malborn@deandra.homeip.net> */ 639/* Submitted by Mike Alborn <malborn@deandra.homeip.net> */
640UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999, 640UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
641 "Sony", 641 "Sony",
642 "PEG Mass Storage", 642 "PEG Mass Storage",
643 US_SC_DEVICE, US_PR_DEVICE, NULL, 643 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
644 US_FL_FIX_INQUIRY ), 644 US_FL_FIX_INQUIRY ),
645 645
646/* floppy reports multiple luns */ 646/* floppy reports multiple luns */
647UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, 647UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
648 "SAMSUNG", 648 "SAMSUNG",
649 "SFD-321U [FW 0C]", 649 "SFD-321U [FW 0C]",
650 US_SC_DEVICE, US_PR_DEVICE, NULL, 650 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
651 US_FL_SINGLE_LUN ), 651 US_FL_SINGLE_LUN ),
652 652
653/* We keep this entry to force the transport; firmware 3.00 and later is ok. */ 653/* We keep this entry to force the transport; firmware 3.00 and later is ok. */
654UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299, 654UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
655 "Y-E Data", 655 "Y-E Data",
656 "Flashbuster-U", 656 "Flashbuster-U",
657 US_SC_DEVICE, US_PR_CB, NULL, 657 USB_SC_DEVICE, USB_PR_CB, NULL,
658 US_FL_SINGLE_LUN), 658 US_FL_SINGLE_LUN),
659 659
660/* Reported by Johann Cardon <johann.cardon@free.fr> 660/* Reported by Johann Cardon <johann.cardon@free.fr>
@@ -664,20 +664,20 @@ UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
664UNUSUAL_DEV( 0x057b, 0x0022, 0x0000, 0x9999, 664UNUSUAL_DEV( 0x057b, 0x0022, 0x0000, 0x9999,
665 "Y-E Data", 665 "Y-E Data",
666 "Silicon Media R/W", 666 "Silicon Media R/W",
667 US_SC_DEVICE, US_PR_DEVICE, NULL, 0), 667 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
668 668
669/* Reported by RTE <raszilki@yandex.ru> */ 669/* Reported by RTE <raszilki@yandex.ru> */
670UNUSUAL_DEV( 0x058f, 0x6387, 0x0141, 0x0141, 670UNUSUAL_DEV( 0x058f, 0x6387, 0x0141, 0x0141,
671 "JetFlash", 671 "JetFlash",
672 "TS1GJF2A/120", 672 "TS1GJF2A/120",
673 US_SC_DEVICE, US_PR_DEVICE, NULL, 673 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
674 US_FL_MAX_SECTORS_64 ), 674 US_FL_MAX_SECTORS_64 ),
675 675
676/* Fabrizio Fellini <fello@libero.it> */ 676/* Fabrizio Fellini <fello@libero.it> */
677UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210, 677UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210,
678 "Fujifilm", 678 "Fujifilm",
679 "Digital Camera EX-20 DSC", 679 "Digital Camera EX-20 DSC",
680 US_SC_8070, US_PR_DEVICE, NULL, 0 ), 680 USB_SC_8070, USB_PR_DEVICE, NULL, 0 ),
681 681
682/* Reported by Andre Welter <a.r.welter@gmx.de> 682/* Reported by Andre Welter <a.r.welter@gmx.de>
683 * This antique device predates the release of the Bulk-only Transport 683 * This antique device predates the release of the Bulk-only Transport
@@ -688,14 +688,14 @@ UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210,
688UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100, 688UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100,
689 "Iomega", 689 "Iomega",
690 "ZIP 100", 690 "ZIP 100",
691 US_SC_DEVICE, US_PR_DEVICE, NULL, 691 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
692 US_FL_SINGLE_LUN ), 692 US_FL_SINGLE_LUN ),
693 693
694/* Reported by <Hendryk.Pfeiffer@gmx.de> */ 694/* Reported by <Hendryk.Pfeiffer@gmx.de> */
695UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, 695UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
696 "LaCie", 696 "LaCie",
697 "DVD+-RW", 697 "DVD+-RW",
698 US_SC_DEVICE, US_PR_DEVICE, NULL, 698 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
699 US_FL_GO_SLOW ), 699 US_FL_GO_SLOW ),
700 700
701/* Submitted by Joel Bourquard <numlock@freesurf.ch> 701/* Submitted by Joel Bourquard <numlock@freesurf.ch>
@@ -705,7 +705,7 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
705UNUSUAL_DEV( 0x05ab, 0x0060, 0x1104, 0x1110, 705UNUSUAL_DEV( 0x05ab, 0x0060, 0x1104, 0x1110,
706 "In-System", 706 "In-System",
707 "PyroGate External CD-ROM Enclosure (FCD-523)", 707 "PyroGate External CD-ROM Enclosure (FCD-523)",
708 US_SC_SCSI, US_PR_BULK, NULL, 708 USB_SC_SCSI, USB_PR_BULK, NULL,
709 US_FL_NEED_OVERRIDE ), 709 US_FL_NEED_OVERRIDE ),
710 710
711/* Submitted by Sven Anderson <sven-linux@anderson.de> 711/* Submitted by Sven Anderson <sven-linux@anderson.de>
@@ -717,26 +717,26 @@ UNUSUAL_DEV( 0x05ab, 0x0060, 0x1104, 0x1110,
717UNUSUAL_DEV( 0x05ac, 0x1202, 0x0000, 0x9999, 717UNUSUAL_DEV( 0x05ac, 0x1202, 0x0000, 0x9999,
718 "Apple", 718 "Apple",
719 "iPod", 719 "iPod",
720 US_SC_DEVICE, US_PR_DEVICE, NULL, 720 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
721 US_FL_FIX_CAPACITY ), 721 US_FL_FIX_CAPACITY ),
722 722
723/* Reported by Avi Kivity <avi@argo.co.il> */ 723/* Reported by Avi Kivity <avi@argo.co.il> */
724UNUSUAL_DEV( 0x05ac, 0x1203, 0x0000, 0x9999, 724UNUSUAL_DEV( 0x05ac, 0x1203, 0x0000, 0x9999,
725 "Apple", 725 "Apple",
726 "iPod", 726 "iPod",
727 US_SC_DEVICE, US_PR_DEVICE, NULL, 727 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
728 US_FL_FIX_CAPACITY ), 728 US_FL_FIX_CAPACITY ),
729 729
730UNUSUAL_DEV( 0x05ac, 0x1204, 0x0000, 0x9999, 730UNUSUAL_DEV( 0x05ac, 0x1204, 0x0000, 0x9999,
731 "Apple", 731 "Apple",
732 "iPod", 732 "iPod",
733 US_SC_DEVICE, US_PR_DEVICE, NULL, 733 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
734 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ), 734 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
735 735
736UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999, 736UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999,
737 "Apple", 737 "Apple",
738 "iPod", 738 "iPod",
739 US_SC_DEVICE, US_PR_DEVICE, NULL, 739 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
740 US_FL_FIX_CAPACITY ), 740 US_FL_FIX_CAPACITY ),
741 741
742/* 742/*
@@ -746,7 +746,7 @@ UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999,
746UNUSUAL_DEV( 0x05ac, 0x120a, 0x0000, 0x9999, 746UNUSUAL_DEV( 0x05ac, 0x120a, 0x0000, 0x9999,
747 "Apple", 747 "Apple",
748 "iPod", 748 "iPod",
749 US_SC_DEVICE, US_PR_DEVICE, NULL, 749 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
750 US_FL_FIX_CAPACITY ), 750 US_FL_FIX_CAPACITY ),
751 751
752/* Reported by Dan Williams <dcbw@redhat.com> 752/* Reported by Dan Williams <dcbw@redhat.com>
@@ -758,14 +758,14 @@ UNUSUAL_DEV( 0x05ac, 0x120a, 0x0000, 0x9999,
758UNUSUAL_DEV( 0x05c6, 0x1000, 0x0000, 0x9999, 758UNUSUAL_DEV( 0x05c6, 0x1000, 0x0000, 0x9999,
759 "Option N.V.", 759 "Option N.V.",
760 "Mass Storage", 760 "Mass Storage",
761 US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 761 USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init,
762 0), 762 0),
763 763
764/* Reported by Blake Matheny <bmatheny@purdue.edu> */ 764/* Reported by Blake Matheny <bmatheny@purdue.edu> */
765UNUSUAL_DEV( 0x05dc, 0xb002, 0x0000, 0x0113, 765UNUSUAL_DEV( 0x05dc, 0xb002, 0x0000, 0x0113,
766 "Lexar", 766 "Lexar",
767 "USB CF Reader", 767 "USB CF Reader",
768 US_SC_DEVICE, US_PR_DEVICE, NULL, 768 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
769 US_FL_FIX_INQUIRY ), 769 US_FL_FIX_INQUIRY ),
770 770
771/* The following two entries are for a Genesys USB to IDE 771/* The following two entries are for a Genesys USB to IDE
@@ -782,20 +782,20 @@ UNUSUAL_DEV( 0x05dc, 0xb002, 0x0000, 0x0113,
782UNUSUAL_DEV( 0x05e3, 0x0701, 0x0000, 0xffff, 782UNUSUAL_DEV( 0x05e3, 0x0701, 0x0000, 0xffff,
783 "Genesys Logic", 783 "Genesys Logic",
784 "USB to IDE Optical", 784 "USB to IDE Optical",
785 US_SC_DEVICE, US_PR_DEVICE, NULL, 785 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
786 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ), 786 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
787 787
788UNUSUAL_DEV( 0x05e3, 0x0702, 0x0000, 0xffff, 788UNUSUAL_DEV( 0x05e3, 0x0702, 0x0000, 0xffff,
789 "Genesys Logic", 789 "Genesys Logic",
790 "USB to IDE Disk", 790 "USB to IDE Disk",
791 US_SC_DEVICE, US_PR_DEVICE, NULL, 791 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
792 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ), 792 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
793 793
794/* Reported by Ben Efros <ben@pc-doctor.com> */ 794/* Reported by Ben Efros <ben@pc-doctor.com> */
795UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, 795UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451,
796 "Genesys Logic", 796 "Genesys Logic",
797 "USB to SATA", 797 "USB to SATA",
798 US_SC_DEVICE, US_PR_DEVICE, NULL, 798 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
799 US_FL_SANE_SENSE ), 799 US_FL_SANE_SENSE ),
800 800
801/* Reported by Hanno Boeck <hanno@gmx.de> 801/* Reported by Hanno Boeck <hanno@gmx.de>
@@ -803,33 +803,33 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451,
803UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999, 803UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999,
804 "Vivitar", 804 "Vivitar",
805 "Vivicam 35Xx", 805 "Vivicam 35Xx",
806 US_SC_SCSI, US_PR_BULK, NULL, 806 USB_SC_SCSI, USB_PR_BULK, NULL,
807 US_FL_FIX_INQUIRY ), 807 US_FL_FIX_INQUIRY ),
808 808
809UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100, 809UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100,
810 "TEAC", 810 "TEAC",
811 "Floppy Drive", 811 "Floppy Drive",
812 US_SC_UFI, US_PR_CB, NULL, 0 ), 812 USB_SC_UFI, USB_PR_CB, NULL, 0 ),
813 813
814/* Reported by Darsen Lu <darsen@micro.ee.nthu.edu.tw> */ 814/* Reported by Darsen Lu <darsen@micro.ee.nthu.edu.tw> */
815UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, 815UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
816 "SigmaTel", 816 "SigmaTel",
817 "USBMSC Audio Player", 817 "USBMSC Audio Player",
818 US_SC_DEVICE, US_PR_DEVICE, NULL, 818 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
819 US_FL_FIX_CAPACITY ), 819 US_FL_FIX_CAPACITY ),
820 820
821/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */ 821/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
822UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100, 822UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
823 "Prolific Technology, Inc.", 823 "Prolific Technology, Inc.",
824 "Prolific Storage Gadget", 824 "Prolific Storage Gadget",
825 US_SC_DEVICE, US_PR_DEVICE, NULL, 825 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
826 US_FL_BAD_SENSE ), 826 US_FL_BAD_SENSE ),
827 827
828/* Reported by Rogerio Brito <rbrito@ime.usp.br> */ 828/* Reported by Rogerio Brito <rbrito@ime.usp.br> */
829UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, 829UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
830 "Prolific Technology, Inc.", 830 "Prolific Technology, Inc.",
831 "Mass Storage Device", 831 "Mass Storage Device",
832 US_SC_DEVICE, US_PR_DEVICE, NULL, 832 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
833 US_FL_NOT_LOCKABLE ), 833 US_FL_NOT_LOCKABLE ),
834 834
835/* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ 835/* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */
@@ -838,46 +838,47 @@ UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
838UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100, 838UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100,
839 "Prolific Technology Inc.", 839 "Prolific Technology Inc.",
840 "Mass Storage Device", 840 "Mass Storage Device",
841 US_SC_DEVICE, US_PR_DEVICE, NULL, 841 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
842 US_FL_FIX_CAPACITY | US_FL_GO_SLOW ), 842 US_FL_FIX_CAPACITY | US_FL_GO_SLOW ),
843 843
844/* Reported by Alex Butcher <alex.butcher@assursys.co.uk> */ 844/* Reported by Alex Butcher <alex.butcher@assursys.co.uk> */
845UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0101, 845UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0101,
846 "Prolific Technology Inc.", 846 "Prolific Technology Inc.",
847 "ATAPI-6 Bridge Controller", 847 "ATAPI-6 Bridge Controller",
848 US_SC_DEVICE, US_PR_DEVICE, NULL, 848 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
849 US_FL_FIX_CAPACITY | US_FL_GO_SLOW ), 849 US_FL_FIX_CAPACITY | US_FL_GO_SLOW ),
850 850
851/* Submitted by Benny Sjostrand <benny@hostmobility.com> */ 851/* Submitted by Benny Sjostrand <benny@hostmobility.com> */
852UNUSUAL_DEV( 0x0686, 0x4011, 0x0001, 0x0001, 852UNUSUAL_DEV( 0x0686, 0x4011, 0x0001, 0x0001,
853 "Minolta", 853 "Minolta",
854 "Dimage F300", 854 "Dimage F300",
855 US_SC_SCSI, US_PR_BULK, NULL, 0 ), 855 USB_SC_SCSI, USB_PR_BULK, NULL, 0 ),
856 856
857/* Reported by Miguel A. Fosas <amn3s1a@ono.com> */ 857/* Reported by Miguel A. Fosas <amn3s1a@ono.com> */
858UNUSUAL_DEV( 0x0686, 0x4017, 0x0001, 0x0001, 858UNUSUAL_DEV( 0x0686, 0x4017, 0x0001, 0x0001,
859 "Minolta", 859 "Minolta",
860 "DIMAGE E223", 860 "DIMAGE E223",
861 US_SC_SCSI, US_PR_DEVICE, NULL, 0 ), 861 USB_SC_SCSI, USB_PR_DEVICE, NULL, 0 ),
862 862
863UNUSUAL_DEV( 0x0693, 0x0005, 0x0100, 0x0100, 863UNUSUAL_DEV( 0x0693, 0x0005, 0x0100, 0x0100,
864 "Hagiwara", 864 "Hagiwara",
865 "Flashgate", 865 "Flashgate",
866 US_SC_SCSI, US_PR_BULK, NULL, 0 ), 866 USB_SC_SCSI, USB_PR_BULK, NULL, 0 ),
867 867
868/* Reported by David Hamilton <niftimusmaximus@lycos.com> */ 868/* Reported by David Hamilton <niftimusmaximus@lycos.com> */
869UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001, 869UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001,
870 "Thomson Multimedia Inc.", 870 "Thomson Multimedia Inc.",
871 "RCA RD1080 MP3 Player", 871 "RCA RD1080 MP3 Player",
872 US_SC_DEVICE, US_PR_DEVICE, NULL, 872 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
873 US_FL_FIX_CAPACITY ), 873 US_FL_FIX_CAPACITY ),
874 874
875/* Reported by Adrian Pilchowiec <adi1981@epf.pl> */ 875/* Reported by Adrian Pilchowiec <adi1981@epf.pl> */
876UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000, 876UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000,
877 "RockChip", 877 "RockChip",
878 "MP3", 878 "MP3",
879 US_SC_DEVICE, US_PR_DEVICE, NULL, 879 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
880 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64), 880 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64 |
881 US_FL_NO_READ_CAPACITY_16),
881 882
882/* Reported by Jean-Baptiste Onofre <jb@nanthrax.net> 883/* Reported by Jean-Baptiste Onofre <jb@nanthrax.net>
883 * Support the following product : 884 * Support the following product :
@@ -886,7 +887,7 @@ UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000,
886UNUSUAL_DEV( 0x071b, 0x32bb, 0x0000, 0x0000, 887UNUSUAL_DEV( 0x071b, 0x32bb, 0x0000, 0x0000,
887 "RockChip", 888 "RockChip",
888 "MTP", 889 "MTP",
889 US_SC_DEVICE, US_PR_DEVICE, NULL, 890 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
890 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64), 891 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
891 892
892/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com> 893/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
@@ -902,59 +903,59 @@ UNUSUAL_DEV( 0x071b, 0x32bb, 0x0000, 0x0000,
902UNUSUAL_DEV( 0x071b, 0x3203, 0x0100, 0x0100, 903UNUSUAL_DEV( 0x071b, 0x3203, 0x0100, 0x0100,
903 "RockChip", 904 "RockChip",
904 "ROCK MP3", 905 "ROCK MP3",
905 US_SC_DEVICE, US_PR_DEVICE, NULL, 906 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
906 US_FL_MAX_SECTORS_64), 907 US_FL_MAX_SECTORS_64),
907 908
908/* Reported by Olivier Blondeau <zeitoun@gmail.com> */ 909/* Reported by Olivier Blondeau <zeitoun@gmail.com> */
909UNUSUAL_DEV( 0x0727, 0x0306, 0x0100, 0x0100, 910UNUSUAL_DEV( 0x0727, 0x0306, 0x0100, 0x0100,
910 "ATMEL", 911 "ATMEL",
911 "SND1 Storage", 912 "SND1 Storage",
912 US_SC_DEVICE, US_PR_DEVICE, NULL, 913 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
913 US_FL_IGNORE_RESIDUE), 914 US_FL_IGNORE_RESIDUE),
914 915
915/* Submitted by Roman Hodek <roman@hodek.net> */ 916/* Submitted by Roman Hodek <roman@hodek.net> */
916UNUSUAL_DEV( 0x0781, 0x0001, 0x0200, 0x0200, 917UNUSUAL_DEV( 0x0781, 0x0001, 0x0200, 0x0200,
917 "Sandisk", 918 "Sandisk",
918 "ImageMate SDDR-05a", 919 "ImageMate SDDR-05a",
919 US_SC_SCSI, US_PR_CB, NULL, 920 USB_SC_SCSI, USB_PR_CB, NULL,
920 US_FL_SINGLE_LUN ), 921 US_FL_SINGLE_LUN ),
921 922
922UNUSUAL_DEV( 0x0781, 0x0002, 0x0009, 0x0009, 923UNUSUAL_DEV( 0x0781, 0x0002, 0x0009, 0x0009,
923 "SanDisk Corporation", 924 "SanDisk Corporation",
924 "ImageMate CompactFlash USB", 925 "ImageMate CompactFlash USB",
925 US_SC_DEVICE, US_PR_DEVICE, NULL, 926 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
926 US_FL_FIX_CAPACITY ), 927 US_FL_FIX_CAPACITY ),
927 928
928UNUSUAL_DEV( 0x0781, 0x0100, 0x0100, 0x0100, 929UNUSUAL_DEV( 0x0781, 0x0100, 0x0100, 0x0100,
929 "Sandisk", 930 "Sandisk",
930 "ImageMate SDDR-12", 931 "ImageMate SDDR-12",
931 US_SC_SCSI, US_PR_CB, NULL, 932 USB_SC_SCSI, USB_PR_CB, NULL,
932 US_FL_SINGLE_LUN ), 933 US_FL_SINGLE_LUN ),
933 934
934/* Reported by Eero Volotinen <eero@ping-viini.org> */ 935/* Reported by Eero Volotinen <eero@ping-viini.org> */
935UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999, 936UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
936 "Freecom Technologies", 937 "Freecom Technologies",
937 "FHD-Classic", 938 "FHD-Classic",
938 US_SC_DEVICE, US_PR_DEVICE, NULL, 939 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
939 US_FL_FIX_CAPACITY), 940 US_FL_FIX_CAPACITY),
940 941
941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, 942UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
942 "Microtech", 943 "Microtech",
943 "USB-SCSI-DB25", 944 "USB-SCSI-DB25",
944 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, 945 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
945 US_FL_SCM_MULT_TARG ), 946 US_FL_SCM_MULT_TARG ),
946 947
947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, 948UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
948 "Microtech", 949 "Microtech",
949 "USB-SCSI-HD50", 950 "USB-SCSI-HD50",
950 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, 951 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
951 US_FL_SCM_MULT_TARG ), 952 US_FL_SCM_MULT_TARG ),
952 953
953#ifdef NO_SDDR09 954#ifdef NO_SDDR09
954UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100, 955UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
955 "Microtech", 956 "Microtech",
956 "CameraMate", 957 "CameraMate",
957 US_SC_SCSI, US_PR_CB, NULL, 958 USB_SC_SCSI, USB_PR_CB, NULL,
958 US_FL_SINGLE_LUN ), 959 US_FL_SINGLE_LUN ),
959#endif 960#endif
960 961
@@ -967,7 +968,7 @@ UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
967UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, 968UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
968 "Datafab", 969 "Datafab",
969 "KECF-USB", 970 "KECF-USB",
970 US_SC_DEVICE, US_PR_DEVICE, NULL, 971 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
971 US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ), 972 US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
972 973
973/* Reported by Rauch Wolke <rauchwolke@gmx.net> 974/* Reported by Rauch Wolke <rauchwolke@gmx.net>
@@ -976,7 +977,7 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
976UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, 977UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
977 "Simple Tech/Datafab", 978 "Simple Tech/Datafab",
978 "CF+SM Reader", 979 "CF+SM Reader",
979 US_SC_DEVICE, US_PR_DEVICE, NULL, 980 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
980 US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ), 981 US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
981 982
982/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant 983/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
@@ -986,42 +987,42 @@ UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
986 * - They don't like the INQUIRY command. So we must handle this command 987 * - They don't like the INQUIRY command. So we must handle this command
987 * of the SCSI layer ourselves. 988 * of the SCSI layer ourselves.
988 * - Some cameras with idProduct=0x1001 and bcdDevice=0x1000 have 989 * - Some cameras with idProduct=0x1001 and bcdDevice=0x1000 have
989 * bInterfaceProtocol=0x00 (US_PR_CBI) while others have 0x01 (US_PR_CB). 990 * bInterfaceProtocol=0x00 (USB_PR_CBI) while others have 0x01 (USB_PR_CB).
990 * So don't remove the US_PR_CB override! 991 * So don't remove the USB_PR_CB override!
991 * - Cameras with bcdDevice=0x9009 require the US_SC_8070 override. 992 * - Cameras with bcdDevice=0x9009 require the USB_SC_8070 override.
992 */ 993 */
993UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999, 994UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999,
994 "Casio", 995 "Casio",
995 "QV DigitalCamera", 996 "QV DigitalCamera",
996 US_SC_8070, US_PR_CB, NULL, 997 USB_SC_8070, USB_PR_CB, NULL,
997 US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ), 998 US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ),
998 999
999/* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/ 1000/* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/
1000UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001, 1001UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
1001 "Samsung", 1002 "Samsung",
1002 "Digimax 410", 1003 "Digimax 410",
1003 US_SC_DEVICE, US_PR_DEVICE, NULL, 1004 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1004 US_FL_FIX_INQUIRY), 1005 US_FL_FIX_INQUIRY),
1005 1006
1006/* Reported by Luciano Rocha <luciano@eurotux.com> */ 1007/* Reported by Luciano Rocha <luciano@eurotux.com> */
1007UNUSUAL_DEV( 0x0840, 0x0082, 0x0001, 0x0001, 1008UNUSUAL_DEV( 0x0840, 0x0082, 0x0001, 0x0001,
1008 "Argosy", 1009 "Argosy",
1009 "Storage", 1010 "Storage",
1010 US_SC_DEVICE, US_PR_DEVICE, NULL, 1011 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1011 US_FL_FIX_CAPACITY), 1012 US_FL_FIX_CAPACITY),
1012 1013
1013/* Reported and patched by Nguyen Anh Quynh <aquynh@gmail.com> */ 1014/* Reported and patched by Nguyen Anh Quynh <aquynh@gmail.com> */
1014UNUSUAL_DEV( 0x0840, 0x0084, 0x0001, 0x0001, 1015UNUSUAL_DEV( 0x0840, 0x0084, 0x0001, 0x0001,
1015 "Argosy", 1016 "Argosy",
1016 "Storage", 1017 "Storage",
1017 US_SC_DEVICE, US_PR_DEVICE, NULL, 1018 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1018 US_FL_FIX_CAPACITY), 1019 US_FL_FIX_CAPACITY),
1019 1020
1020/* Reported by Martijn Hijdra <martijn.hijdra@gmail.com> */ 1021/* Reported by Martijn Hijdra <martijn.hijdra@gmail.com> */
1021UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001, 1022UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001,
1022 "Argosy", 1023 "Argosy",
1023 "Storage", 1024 "Storage",
1024 US_SC_DEVICE, US_PR_DEVICE, NULL, 1025 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1025 US_FL_FIX_CAPACITY), 1026 US_FL_FIX_CAPACITY),
1026 1027
1027/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 1028/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
@@ -1033,7 +1034,7 @@ UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001,
1033UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110, 1034UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
1034 "Grandtech", 1035 "Grandtech",
1035 "DC2MEGA", 1036 "DC2MEGA",
1036 US_SC_DEVICE, US_PR_DEVICE, NULL, 1037 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1037 US_FL_BULK32), 1038 US_FL_BULK32),
1038 1039
1039/* Andrew Lunn <andrew@lunn.ch> 1040/* Andrew Lunn <andrew@lunn.ch>
@@ -1044,14 +1045,14 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
1044UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200, 1045UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200,
1045 "PanDigital", 1046 "PanDigital",
1046 "Photo Frame", 1047 "Photo Frame",
1047 US_SC_DEVICE, US_PR_DEVICE, NULL, 1048 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1048 US_FL_NOT_LOCKABLE), 1049 US_FL_NOT_LOCKABLE),
1049 1050
1050/* Submitted by Jan De Luyck <lkml@kcore.org> */ 1051/* Submitted by Jan De Luyck <lkml@kcore.org> */
1051UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, 1052UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
1052 "CITIZEN", 1053 "CITIZEN",
1053 "X1DE-USB", 1054 "X1DE-USB",
1054 US_SC_DEVICE, US_PR_DEVICE, NULL, 1055 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1055 US_FL_SINGLE_LUN), 1056 US_FL_SINGLE_LUN),
1056 1057
1057/* Submitted by Dylan Taft <d13f00l@gmail.com> 1058/* Submitted by Dylan Taft <d13f00l@gmail.com>
@@ -1060,7 +1061,7 @@ UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
1060UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100, 1061UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100,
1061 "AIPTEK", 1062 "AIPTEK",
1062 "Aiptek USB Keychain MP3 Player", 1063 "Aiptek USB Keychain MP3 Player",
1063 US_SC_DEVICE, US_PR_DEVICE, NULL, 1064 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1064 US_FL_IGNORE_RESIDUE), 1065 US_FL_IGNORE_RESIDUE),
1065 1066
1066/* Entry needed for flags. Moreover, all devices with this ID use 1067/* Entry needed for flags. Moreover, all devices with this ID use
@@ -1071,7 +1072,7 @@ UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100,
1071UNUSUAL_DEV( 0x090a, 0x1001, 0x0100, 0x0100, 1072UNUSUAL_DEV( 0x090a, 0x1001, 0x0100, 0x0100,
1072 "Trumpion", 1073 "Trumpion",
1073 "t33520 USB Flash Card Controller", 1074 "t33520 USB Flash Card Controller",
1074 US_SC_DEVICE, US_PR_BULK, NULL, 1075 USB_SC_DEVICE, USB_PR_BULK, NULL,
1075 US_FL_NEED_OVERRIDE ), 1076 US_FL_NEED_OVERRIDE ),
1076 1077
1077/* Reported by Filippo Bardelli <filibard@libero.it> 1078/* Reported by Filippo Bardelli <filibard@libero.it>
@@ -1080,21 +1081,21 @@ UNUSUAL_DEV( 0x090a, 0x1001, 0x0100, 0x0100,
1080UNUSUAL_DEV( 0x090a, 0x1050, 0x0100, 0x0100, 1081UNUSUAL_DEV( 0x090a, 0x1050, 0x0100, 0x0100,
1081 "Trumpion Microelectronics, Inc.", 1082 "Trumpion Microelectronics, Inc.",
1082 "33520 USB Digital Voice Recorder", 1083 "33520 USB Digital Voice Recorder",
1083 US_SC_UFI, US_PR_DEVICE, NULL, 1084 USB_SC_UFI, USB_PR_DEVICE, NULL,
1084 0), 1085 0),
1085 1086
1086/* Trumpion Microelectronics MP3 player (felipe_alfaro@linuxmail.org) */ 1087/* Trumpion Microelectronics MP3 player (felipe_alfaro@linuxmail.org) */
1087UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, 1088UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
1088 "Trumpion", 1089 "Trumpion",
1089 "MP3 player", 1090 "MP3 player",
1090 US_SC_RBC, US_PR_BULK, NULL, 1091 USB_SC_RBC, USB_PR_BULK, NULL,
1091 0 ), 1092 0 ),
1092 1093
1093/* aeb */ 1094/* aeb */
1094UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, 1095UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1095 "Feiya", 1096 "Feiya",
1096 "5-in-1 Card Reader", 1097 "5-in-1 Card Reader",
1097 US_SC_DEVICE, US_PR_DEVICE, NULL, 1098 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1098 US_FL_FIX_CAPACITY ), 1099 US_FL_FIX_CAPACITY ),
1099 1100
1100/* This Pentax still camera is not conformant 1101/* This Pentax still camera is not conformant
@@ -1107,7 +1108,7 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1107UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000, 1108UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
1108 "Pentax", 1109 "Pentax",
1109 "Optio 2/3/400", 1110 "Optio 2/3/400",
1110 US_SC_DEVICE, US_PR_DEVICE, NULL, 1111 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1111 US_FL_FIX_INQUIRY ), 1112 US_FL_FIX_INQUIRY ),
1112 1113
1113/* These are virtual windows driver CDs, which the zd1211rw driver 1114/* These are virtual windows driver CDs, which the zd1211rw driver
@@ -1115,13 +1116,13 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
1115UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101, 1116UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
1116 "ZyXEL", 1117 "ZyXEL",
1117 "G-220F USB-WLAN Install", 1118 "G-220F USB-WLAN Install",
1118 US_SC_DEVICE, US_PR_DEVICE, NULL, 1119 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1119 US_FL_IGNORE_DEVICE ), 1120 US_FL_IGNORE_DEVICE ),
1120 1121
1121UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101, 1122UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101,
1122 "SiteCom", 1123 "SiteCom",
1123 "WL-117 USB-WLAN Install", 1124 "WL-117 USB-WLAN Install",
1124 US_SC_DEVICE, US_PR_DEVICE, NULL, 1125 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1125 US_FL_IGNORE_DEVICE ), 1126 US_FL_IGNORE_DEVICE ),
1126 1127
1127/* Reported by Dan Williams <dcbw@redhat.com> 1128/* Reported by Dan Williams <dcbw@redhat.com>
@@ -1133,7 +1134,7 @@ UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101,
1133UNUSUAL_DEV( 0x0af0, 0x6971, 0x0000, 0x9999, 1134UNUSUAL_DEV( 0x0af0, 0x6971, 0x0000, 0x9999,
1134 "Option N.V.", 1135 "Option N.V.",
1135 "Mass Storage", 1136 "Mass Storage",
1136 US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 1137 USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init,
1137 0), 1138 0),
1138 1139
1139/* Reported by F. Aben <f.aben@option.com> 1140/* Reported by F. Aben <f.aben@option.com>
@@ -1143,7 +1144,7 @@ UNUSUAL_DEV( 0x0af0, 0x6971, 0x0000, 0x9999,
1143UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000, 1144UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
1144 "Option", 1145 "Option",
1145 "GI 0401 SD-Card", 1146 "GI 0401 SD-Card",
1146 US_SC_DEVICE, US_PR_DEVICE, NULL, 1147 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1147 0 ), 1148 0 ),
1148 1149
1149/* Reported by Jan Dumon <j.dumon@option.com> 1150/* Reported by Jan Dumon <j.dumon@option.com>
@@ -1153,104 +1154,104 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
1153UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000, 1154UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
1154 "Option", 1155 "Option",
1155 "GI 0431 SD-Card", 1156 "GI 0431 SD-Card",
1156 US_SC_DEVICE, US_PR_DEVICE, NULL, 1157 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1157 0 ), 1158 0 ),
1158 1159
1159UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000, 1160UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
1160 "Option", 1161 "Option",
1161 "GI 0451 SD-Card", 1162 "GI 0451 SD-Card",
1162 US_SC_DEVICE, US_PR_DEVICE, NULL, 1163 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1163 0 ), 1164 0 ),
1164 1165
1165UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000, 1166UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
1166 "Option", 1167 "Option",
1167 "GI 0451 SD-Card", 1168 "GI 0451 SD-Card",
1168 US_SC_DEVICE, US_PR_DEVICE, NULL, 1169 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1169 0 ), 1170 0 ),
1170 1171
1171UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000, 1172UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
1172 "Option", 1173 "Option",
1173 "GI 0452 SD-Card", 1174 "GI 0452 SD-Card",
1174 US_SC_DEVICE, US_PR_DEVICE, NULL, 1175 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1175 0 ), 1176 0 ),
1176 1177
1177UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000, 1178UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
1178 "Option", 1179 "Option",
1179 "GI 0461 SD-Card", 1180 "GI 0461 SD-Card",
1180 US_SC_DEVICE, US_PR_DEVICE, NULL, 1181 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1181 0 ), 1182 0 ),
1182 1183
1183UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000, 1184UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
1184 "Option", 1185 "Option",
1185 "GI 0461 SD-Card", 1186 "GI 0461 SD-Card",
1186 US_SC_DEVICE, US_PR_DEVICE, NULL, 1187 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1187 0 ), 1188 0 ),
1188 1189
1189UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000, 1190UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
1190 "Option", 1191 "Option",
1191 "GI 033x SD-Card", 1192 "GI 033x SD-Card",
1192 US_SC_DEVICE, US_PR_DEVICE, NULL, 1193 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1193 0 ), 1194 0 ),
1194 1195
1195UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000, 1196UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
1196 "Option", 1197 "Option",
1197 "GI 033x SD-Card", 1198 "GI 033x SD-Card",
1198 US_SC_DEVICE, US_PR_DEVICE, NULL, 1199 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1199 0 ), 1200 0 ),
1200 1201
1201UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000, 1202UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
1202 "Option", 1203 "Option",
1203 "GI 033x SD-Card", 1204 "GI 033x SD-Card",
1204 US_SC_DEVICE, US_PR_DEVICE, NULL, 1205 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1205 0 ), 1206 0 ),
1206 1207
1207UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000, 1208UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
1208 "Option", 1209 "Option",
1209 "GI 070x SD-Card", 1210 "GI 070x SD-Card",
1210 US_SC_DEVICE, US_PR_DEVICE, NULL, 1211 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1211 0 ), 1212 0 ),
1212 1213
1213UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000, 1214UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
1214 "Option", 1215 "Option",
1215 "GI 1505 SD-Card", 1216 "GI 1505 SD-Card",
1216 US_SC_DEVICE, US_PR_DEVICE, NULL, 1217 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1217 0 ), 1218 0 ),
1218 1219
1219UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000, 1220UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
1220 "Option", 1221 "Option",
1221 "GI 1509 SD-Card", 1222 "GI 1509 SD-Card",
1222 US_SC_DEVICE, US_PR_DEVICE, NULL, 1223 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1223 0 ), 1224 0 ),
1224 1225
1225UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000, 1226UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
1226 "Option", 1227 "Option",
1227 "GI 1515 SD-Card", 1228 "GI 1515 SD-Card",
1228 US_SC_DEVICE, US_PR_DEVICE, NULL, 1229 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1229 0 ), 1230 0 ),
1230 1231
1231UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000, 1232UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
1232 "Option", 1233 "Option",
1233 "GI 1215 SD-Card", 1234 "GI 1215 SD-Card",
1234 US_SC_DEVICE, US_PR_DEVICE, NULL, 1235 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1235 0 ), 1236 0 ),
1236 1237
1237UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000, 1238UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
1238 "Option", 1239 "Option",
1239 "GI 1505 SD-Card", 1240 "GI 1505 SD-Card",
1240 US_SC_DEVICE, US_PR_DEVICE, NULL, 1241 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1241 0 ), 1242 0 ),
1242 1243
1243/* Reported by Ben Efros <ben@pc-doctor.com> */ 1244/* Reported by Ben Efros <ben@pc-doctor.com> */
1244UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, 1245UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
1245 "Seagate", 1246 "Seagate",
1246 "FreeAgent Pro", 1247 "FreeAgent Pro",
1247 US_SC_DEVICE, US_PR_DEVICE, NULL, 1248 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1248 US_FL_SANE_SENSE ), 1249 US_FL_SANE_SENSE ),
1249 1250
1250UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, 1251UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
1251 "Maxtor", 1252 "Maxtor",
1252 "USB to SATA", 1253 "USB to SATA",
1253 US_SC_DEVICE, US_PR_DEVICE, NULL, 1254 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1254 US_FL_SANE_SENSE), 1255 US_FL_SANE_SENSE),
1255 1256
1256/* 1257/*
@@ -1260,14 +1261,14 @@ UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
1260UNUSUAL_DEV( 0x0c45, 0x1060, 0x0100, 0x0100, 1261UNUSUAL_DEV( 0x0c45, 0x1060, 0x0100, 0x0100,
1261 "Unknown", 1262 "Unknown",
1262 "Unknown", 1263 "Unknown",
1263 US_SC_DEVICE, US_PR_DEVICE, NULL, 1264 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1264 US_FL_SINGLE_LUN ), 1265 US_FL_SINGLE_LUN ),
1265 1266
1266/* Submitted by Joris Struyve <joris@struyve.be> */ 1267/* Submitted by Joris Struyve <joris@struyve.be> */
1267UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff, 1268UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff,
1268 "Medion", 1269 "Medion",
1269 "MD 7425", 1270 "MD 7425",
1270 US_SC_DEVICE, US_PR_DEVICE, NULL, 1271 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1271 US_FL_FIX_INQUIRY), 1272 US_FL_FIX_INQUIRY),
1272 1273
1273/* 1274/*
@@ -1278,13 +1279,13 @@ UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff,
1278UNUSUAL_DEV( 0x0d96, 0x5200, 0x0001, 0x0200, 1279UNUSUAL_DEV( 0x0d96, 0x5200, 0x0001, 0x0200,
1279 "Jenoptik", 1280 "Jenoptik",
1280 "JD 5200 z3", 1281 "JD 5200 z3",
1281 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY), 1282 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
1282 1283
1283/* Reported by Jason Johnston <killean@shaw.ca> */ 1284/* Reported by Jason Johnston <killean@shaw.ca> */
1284UNUSUAL_DEV( 0x0dc4, 0x0073, 0x0000, 0x0000, 1285UNUSUAL_DEV( 0x0dc4, 0x0073, 0x0000, 0x0000,
1285 "Macpower Technology Co.LTD.", 1286 "Macpower Technology Co.LTD.",
1286 "USB 2.0 3.5\" DEVICE", 1287 "USB 2.0 3.5\" DEVICE",
1287 US_SC_DEVICE, US_PR_DEVICE, NULL, 1288 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1288 US_FL_FIX_CAPACITY), 1289 US_FL_FIX_CAPACITY),
1289 1290
1290/* Reported by Lubomir Blaha <tritol@trilogic.cz> 1291/* Reported by Lubomir Blaha <tritol@trilogic.cz>
@@ -1295,7 +1296,7 @@ UNUSUAL_DEV( 0x0dc4, 0x0073, 0x0000, 0x0000,
1295UNUSUAL_DEV( 0x0dd8, 0x1060, 0x0000, 0xffff, 1296UNUSUAL_DEV( 0x0dd8, 0x1060, 0x0000, 0xffff,
1296 "Netac", 1297 "Netac",
1297 "USB-CF-Card", 1298 "USB-CF-Card",
1298 US_SC_DEVICE, US_PR_DEVICE, NULL, 1299 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1299 US_FL_FIX_INQUIRY ), 1300 US_FL_FIX_INQUIRY ),
1300 1301
1301/* Reported by Edward Chapman (taken from linux-usb mailing list) 1302/* Reported by Edward Chapman (taken from linux-usb mailing list)
@@ -1303,7 +1304,7 @@ UNUSUAL_DEV( 0x0dd8, 0x1060, 0x0000, 0xffff,
1303UNUSUAL_DEV( 0x0dd8, 0xd202, 0x0000, 0x9999, 1304UNUSUAL_DEV( 0x0dd8, 0xd202, 0x0000, 0x9999,
1304 "Netac", 1305 "Netac",
1305 "USB Flash Disk", 1306 "USB Flash Disk",
1306 US_SC_DEVICE, US_PR_DEVICE, NULL, 1307 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1307 US_FL_IGNORE_RESIDUE ), 1308 US_FL_IGNORE_RESIDUE ),
1308 1309
1309 1310
@@ -1312,28 +1313,28 @@ UNUSUAL_DEV( 0x0dd8, 0xd202, 0x0000, 0x9999,
1312UNUSUAL_DEV( 0x0dda, 0x0001, 0x0012, 0x0012, 1313UNUSUAL_DEV( 0x0dda, 0x0001, 0x0012, 0x0012,
1313 "WINWARD", 1314 "WINWARD",
1314 "Music Disk", 1315 "Music Disk",
1315 US_SC_DEVICE, US_PR_DEVICE, NULL, 1316 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1316 US_FL_IGNORE_RESIDUE ), 1317 US_FL_IGNORE_RESIDUE ),
1317 1318
1318/* Reported by Ian McConnell <ian at emit.demon.co.uk> */ 1319/* Reported by Ian McConnell <ian at emit.demon.co.uk> */
1319UNUSUAL_DEV( 0x0dda, 0x0301, 0x0012, 0x0012, 1320UNUSUAL_DEV( 0x0dda, 0x0301, 0x0012, 0x0012,
1320 "PNP_MP3", 1321 "PNP_MP3",
1321 "PNP_MP3 PLAYER", 1322 "PNP_MP3 PLAYER",
1322 US_SC_DEVICE, US_PR_DEVICE, NULL, 1323 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1323 US_FL_IGNORE_RESIDUE ), 1324 US_FL_IGNORE_RESIDUE ),
1324 1325
1325/* Reported by Jim McCloskey <mcclosk@ucsc.edu> */ 1326/* Reported by Jim McCloskey <mcclosk@ucsc.edu> */
1326UNUSUAL_DEV( 0x0e21, 0x0520, 0x0100, 0x0100, 1327UNUSUAL_DEV( 0x0e21, 0x0520, 0x0100, 0x0100,
1327 "Cowon Systems", 1328 "Cowon Systems",
1328 "iAUDIO M5", 1329 "iAUDIO M5",
1329 US_SC_DEVICE, US_PR_BULK, NULL, 1330 USB_SC_DEVICE, USB_PR_BULK, NULL,
1330 US_FL_NEED_OVERRIDE ), 1331 US_FL_NEED_OVERRIDE ),
1331 1332
1332/* Submitted by Antoine Mairesse <antoine.mairesse@free.fr> */ 1333/* Submitted by Antoine Mairesse <antoine.mairesse@free.fr> */
1333UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300, 1334UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300,
1334 "USB", 1335 "USB",
1335 "Solid state disk", 1336 "Solid state disk",
1336 US_SC_DEVICE, US_PR_DEVICE, NULL, 1337 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1337 US_FL_FIX_INQUIRY ), 1338 US_FL_FIX_INQUIRY ),
1338 1339
1339/* Submitted by Daniel Drake <dsd@gentoo.org> 1340/* Submitted by Daniel Drake <dsd@gentoo.org>
@@ -1341,14 +1342,14 @@ UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300,
1341UNUSUAL_DEV( 0x0ea0, 0x2168, 0x0110, 0x0110, 1342UNUSUAL_DEV( 0x0ea0, 0x2168, 0x0110, 0x0110,
1342 "Ours Technology", 1343 "Ours Technology",
1343 "Flash Disk", 1344 "Flash Disk",
1344 US_SC_DEVICE, US_PR_DEVICE, NULL, 1345 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1345 US_FL_IGNORE_RESIDUE ), 1346 US_FL_IGNORE_RESIDUE ),
1346 1347
1347/* Reported by Rastislav Stanik <rs_kernel@yahoo.com> */ 1348/* Reported by Rastislav Stanik <rs_kernel@yahoo.com> */
1348UNUSUAL_DEV( 0x0ea0, 0x6828, 0x0110, 0x0110, 1349UNUSUAL_DEV( 0x0ea0, 0x6828, 0x0110, 0x0110,
1349 "USB", 1350 "USB",
1350 "Flash Disk", 1351 "Flash Disk",
1351 US_SC_DEVICE, US_PR_DEVICE, NULL, 1352 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1352 US_FL_IGNORE_RESIDUE ), 1353 US_FL_IGNORE_RESIDUE ),
1353 1354
1354/* Reported by Benjamin Schiller <sbenni@gmx.de> 1355/* Reported by Benjamin Schiller <sbenni@gmx.de>
@@ -1356,7 +1357,7 @@ UNUSUAL_DEV( 0x0ea0, 0x6828, 0x0110, 0x0110,
1356UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103, 1357UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103,
1357 "Typhoon", 1358 "Typhoon",
1358 "My DJ 1820", 1359 "My DJ 1820",
1359 US_SC_DEVICE, US_PR_DEVICE, NULL, 1360 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1360 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64), 1361 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64),
1361 1362
1362/* Patch by Leonid Petrov mail at lpetrov.net 1363/* Patch by Leonid Petrov mail at lpetrov.net
@@ -1367,7 +1368,7 @@ UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103,
1367UNUSUAL_DEV( 0x0f19, 0x0103, 0x0100, 0x0100, 1368UNUSUAL_DEV( 0x0f19, 0x0103, 0x0100, 0x0100,
1368 "Oracom Co., Ltd", 1369 "Oracom Co., Ltd",
1369 "ORC-200M", 1370 "ORC-200M",
1370 US_SC_DEVICE, US_PR_DEVICE, NULL, 1371 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1371 US_FL_IGNORE_RESIDUE ), 1372 US_FL_IGNORE_RESIDUE ),
1372 1373
1373/* David Kuehling <dvdkhlng@gmx.de>: 1374/* David Kuehling <dvdkhlng@gmx.de>:
@@ -1377,21 +1378,21 @@ UNUSUAL_DEV( 0x0f19, 0x0103, 0x0100, 0x0100,
1377UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, 1378UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
1378 "C-MEX", 1379 "C-MEX",
1379 "A-VOX", 1380 "A-VOX",
1380 US_SC_DEVICE, US_PR_DEVICE, NULL, 1381 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1381 US_FL_IGNORE_RESIDUE ), 1382 US_FL_IGNORE_RESIDUE ),
1382 1383
1383/* Reported by Michael Stattmann <michael@stattmann.com> */ 1384/* Reported by Michael Stattmann <michael@stattmann.com> */
1384UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1385UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1385 "Sony Ericsson", 1386 "Sony Ericsson",
1386 "V800-Vodafone 802", 1387 "V800-Vodafone 802",
1387 US_SC_DEVICE, US_PR_DEVICE, NULL, 1388 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1388 US_FL_NO_WP_DETECT ), 1389 US_FL_NO_WP_DETECT ),
1389 1390
1390/* Reported by The Solutor <thesolutor@gmail.com> */ 1391/* Reported by The Solutor <thesolutor@gmail.com> */
1391UNUSUAL_DEV( 0x0fce, 0xd0e1, 0x0000, 0x0000, 1392UNUSUAL_DEV( 0x0fce, 0xd0e1, 0x0000, 0x0000,
1392 "Sony Ericsson", 1393 "Sony Ericsson",
1393 "MD400", 1394 "MD400",
1394 US_SC_DEVICE, US_PR_DEVICE, NULL, 1395 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1395 US_FL_IGNORE_DEVICE), 1396 US_FL_IGNORE_DEVICE),
1396 1397
1397/* Reported by Jan Mate <mate@fiit.stuba.sk> 1398/* Reported by Jan Mate <mate@fiit.stuba.sk>
@@ -1399,21 +1400,21 @@ UNUSUAL_DEV( 0x0fce, 0xd0e1, 0x0000, 0x0000,
1399UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000, 1400UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000,
1400 "Sony Ericsson", 1401 "Sony Ericsson",
1401 "P990i", 1402 "P990i",
1402 US_SC_DEVICE, US_PR_DEVICE, NULL, 1403 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1403 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), 1404 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
1404 1405
1405/* Reported by Emmanuel Vasilakis <evas@forthnet.gr> */ 1406/* Reported by Emmanuel Vasilakis <evas@forthnet.gr> */
1406UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000, 1407UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000,
1407 "Sony Ericsson", 1408 "Sony Ericsson",
1408 "M600i", 1409 "M600i",
1409 US_SC_DEVICE, US_PR_DEVICE, NULL, 1410 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1410 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 1411 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
1411 1412
1412/* Reported by Ricardo Barberis <ricardo@dattatec.com> */ 1413/* Reported by Ricardo Barberis <ricardo@dattatec.com> */
1413UNUSUAL_DEV( 0x0fce, 0xe092, 0x0000, 0x0000, 1414UNUSUAL_DEV( 0x0fce, 0xe092, 0x0000, 0x0000,
1414 "Sony Ericsson", 1415 "Sony Ericsson",
1415 "P1i", 1416 "P1i",
1416 US_SC_DEVICE, US_PR_DEVICE, NULL, 1417 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1417 US_FL_IGNORE_RESIDUE ), 1418 US_FL_IGNORE_RESIDUE ),
1418 1419
1419/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu> 1420/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
@@ -1425,13 +1426,13 @@ UNUSUAL_DEV( 0x0fce, 0xe092, 0x0000, 0x0000,
1425UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x0110, 1426UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x0110,
1426 "Desknote", 1427 "Desknote",
1427 "UCR-61S2B", 1428 "UCR-61S2B",
1428 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 1429 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_ucr61s2b_init,
1429 0 ), 1430 0 ),
1430 1431
1431UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999, 1432UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999,
1432 "Western Digital", 1433 "Western Digital",
1433 "External HDD", 1434 "External HDD",
1434 US_SC_DEVICE, US_PR_DEVICE, NULL, 1435 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1435 US_FL_SANE_SENSE), 1436 US_FL_SANE_SENSE),
1436 1437
1437/* Reported by Fabio Venturi <f.venturi@tdnet.it> 1438/* Reported by Fabio Venturi <f.venturi@tdnet.it>
@@ -1440,7 +1441,7 @@ UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999,
1440UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, 1441UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1441 "Actions Semiconductor", 1442 "Actions Semiconductor",
1442 "Mtp device", 1443 "Mtp device",
1443 US_SC_DEVICE, US_PR_DEVICE, NULL, 1444 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1444 0), 1445 0),
1445 1446
1446/* Reported by Pascal Terjan <pterjan@mandriva.com> 1447/* Reported by Pascal Terjan <pterjan@mandriva.com>
@@ -1449,7 +1450,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1449UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000, 1450UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
1450 "D-Link", 1451 "D-Link",
1451 "USB Mass Storage", 1452 "USB Mass Storage",
1452 US_SC_DEVICE, US_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE), 1453 USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
1453 1454
1454/* Reported by Kevin Lloyd <linux@sierrawireless.com> 1455/* Reported by Kevin Lloyd <linux@sierrawireless.com>
1455 * Entry is needed for the initializer function override, 1456 * Entry is needed for the initializer function override,
@@ -1459,7 +1460,7 @@ UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
1459UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, 1460UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
1460 "Sierra Wireless", 1461 "Sierra Wireless",
1461 "USB MMC Storage", 1462 "USB MMC Storage",
1462 US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, 1463 USB_SC_DEVICE, USB_PR_DEVICE, sierra_ms_init,
1463 0), 1464 0),
1464 1465
1465/* Reported by Jaco Kroon <jaco@kroon.co.za> 1466/* Reported by Jaco Kroon <jaco@kroon.co.za>
@@ -1469,7 +1470,7 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
1469UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, 1470UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1470 "Digitech HMG", 1471 "Digitech HMG",
1471 "DigiTech Mass Storage", 1472 "DigiTech Mass Storage",
1472 US_SC_DEVICE, US_PR_DEVICE, NULL, 1473 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1473 US_FL_IGNORE_RESIDUE ), 1474 US_FL_IGNORE_RESIDUE ),
1474 1475
1475/* Reported by fangxiaozhi <huananhu@huawei.com> 1476/* Reported by fangxiaozhi <huananhu@huawei.com>
@@ -1478,353 +1479,353 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1478UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1479UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
1479 "HUAWEI MOBILE", 1480 "HUAWEI MOBILE",
1480 "Mass Storage", 1481 "Mass Storage",
1481 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1482 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1482 0), 1483 0),
1483UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1484UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
1484 "HUAWEI MOBILE", 1485 "HUAWEI MOBILE",
1485 "Mass Storage", 1486 "Mass Storage",
1486 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1487 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1487 0), 1488 0),
1488UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1489UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
1489 "HUAWEI MOBILE", 1490 "HUAWEI MOBILE",
1490 "Mass Storage", 1491 "Mass Storage",
1491 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1492 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1492 0), 1493 0),
1493UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1494UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
1494 "HUAWEI MOBILE", 1495 "HUAWEI MOBILE",
1495 "Mass Storage", 1496 "Mass Storage",
1496 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1497 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1497 0), 1498 0),
1498UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, 1499UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
1499 "HUAWEI MOBILE", 1500 "HUAWEI MOBILE",
1500 "Mass Storage", 1501 "Mass Storage",
1501 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1502 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1502 0), 1503 0),
1503UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1504UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
1504 "HUAWEI MOBILE", 1505 "HUAWEI MOBILE",
1505 "Mass Storage", 1506 "Mass Storage",
1506 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1507 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1507 0), 1508 0),
1508UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, 1509UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
1509 "HUAWEI MOBILE", 1510 "HUAWEI MOBILE",
1510 "Mass Storage", 1511 "Mass Storage",
1511 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1512 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1512 0), 1513 0),
1513UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1514UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
1514 "HUAWEI MOBILE", 1515 "HUAWEI MOBILE",
1515 "Mass Storage", 1516 "Mass Storage",
1516 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1517 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1517 0), 1518 0),
1518UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1519UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
1519 "HUAWEI MOBILE", 1520 "HUAWEI MOBILE",
1520 "Mass Storage", 1521 "Mass Storage",
1521 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1522 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1522 0), 1523 0),
1523UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, 1524UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
1524 "HUAWEI MOBILE", 1525 "HUAWEI MOBILE",
1525 "Mass Storage", 1526 "Mass Storage",
1526 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1527 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1527 0), 1528 0),
1528UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, 1529UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
1529 "HUAWEI MOBILE", 1530 "HUAWEI MOBILE",
1530 "Mass Storage", 1531 "Mass Storage",
1531 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1532 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1532 0), 1533 0),
1533UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1534UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
1534 "HUAWEI MOBILE", 1535 "HUAWEI MOBILE",
1535 "Mass Storage", 1536 "Mass Storage",
1536 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1537 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1537 0), 1538 0),
1538UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, 1539UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
1539 "HUAWEI MOBILE", 1540 "HUAWEI MOBILE",
1540 "Mass Storage", 1541 "Mass Storage",
1541 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1542 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1542 0), 1543 0),
1543UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, 1544UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
1544 "HUAWEI MOBILE", 1545 "HUAWEI MOBILE",
1545 "Mass Storage", 1546 "Mass Storage",
1546 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1547 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1547 0), 1548 0),
1548UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, 1549UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
1549 "HUAWEI MOBILE", 1550 "HUAWEI MOBILE",
1550 "Mass Storage", 1551 "Mass Storage",
1551 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1552 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1552 0), 1553 0),
1553UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, 1554UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
1554 "HUAWEI MOBILE", 1555 "HUAWEI MOBILE",
1555 "Mass Storage", 1556 "Mass Storage",
1556 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1557 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1557 0), 1558 0),
1558UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, 1559UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
1559 "HUAWEI MOBILE", 1560 "HUAWEI MOBILE",
1560 "Mass Storage", 1561 "Mass Storage",
1561 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1562 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1562 0), 1563 0),
1563UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, 1564UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
1564 "HUAWEI MOBILE", 1565 "HUAWEI MOBILE",
1565 "Mass Storage", 1566 "Mass Storage",
1566 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1567 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1567 0), 1568 0),
1568UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1569UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
1569 "HUAWEI MOBILE", 1570 "HUAWEI MOBILE",
1570 "Mass Storage", 1571 "Mass Storage",
1571 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1572 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1572 0), 1573 0),
1573UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1574UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
1574 "HUAWEI MOBILE", 1575 "HUAWEI MOBILE",
1575 "Mass Storage", 1576 "Mass Storage",
1576 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1577 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1577 0), 1578 0),
1578UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1579UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
1579 "HUAWEI MOBILE", 1580 "HUAWEI MOBILE",
1580 "Mass Storage", 1581 "Mass Storage",
1581 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1582 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1582 0), 1583 0),
1583UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1584UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
1584 "HUAWEI MOBILE", 1585 "HUAWEI MOBILE",
1585 "Mass Storage", 1586 "Mass Storage",
1586 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1587 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1587 0), 1588 0),
1588UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1589UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
1589 "HUAWEI MOBILE", 1590 "HUAWEI MOBILE",
1590 "Mass Storage", 1591 "Mass Storage",
1591 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1592 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1592 0), 1593 0),
1593UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1594UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
1594 "HUAWEI MOBILE", 1595 "HUAWEI MOBILE",
1595 "Mass Storage", 1596 "Mass Storage",
1596 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1597 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1597 0), 1598 0),
1598UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1599UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
1599 "HUAWEI MOBILE", 1600 "HUAWEI MOBILE",
1600 "Mass Storage", 1601 "Mass Storage",
1601 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1602 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1602 0), 1603 0),
1603UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1604UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
1604 "HUAWEI MOBILE", 1605 "HUAWEI MOBILE",
1605 "Mass Storage", 1606 "Mass Storage",
1606 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1607 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1607 0), 1608 0),
1608UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1609UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
1609 "HUAWEI MOBILE", 1610 "HUAWEI MOBILE",
1610 "Mass Storage", 1611 "Mass Storage",
1611 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1612 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1612 0), 1613 0),
1613UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1614UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
1614 "HUAWEI MOBILE", 1615 "HUAWEI MOBILE",
1615 "Mass Storage", 1616 "Mass Storage",
1616 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1617 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1617 0), 1618 0),
1618UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, 1619UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
1619 "HUAWEI MOBILE", 1620 "HUAWEI MOBILE",
1620 "Mass Storage", 1621 "Mass Storage",
1621 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1622 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1622 0), 1623 0),
1623UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, 1624UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
1624 "HUAWEI MOBILE", 1625 "HUAWEI MOBILE",
1625 "Mass Storage", 1626 "Mass Storage",
1626 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1627 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1627 0), 1628 0),
1628UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, 1629UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
1629 "HUAWEI MOBILE", 1630 "HUAWEI MOBILE",
1630 "Mass Storage", 1631 "Mass Storage",
1631 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1632 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1632 0), 1633 0),
1633UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, 1634UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
1634 "HUAWEI MOBILE", 1635 "HUAWEI MOBILE",
1635 "Mass Storage", 1636 "Mass Storage",
1636 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1637 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1637 0), 1638 0),
1638UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, 1639UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
1639 "HUAWEI MOBILE", 1640 "HUAWEI MOBILE",
1640 "Mass Storage", 1641 "Mass Storage",
1641 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1642 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1642 0), 1643 0),
1643UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, 1644UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
1644 "HUAWEI MOBILE", 1645 "HUAWEI MOBILE",
1645 "Mass Storage", 1646 "Mass Storage",
1646 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1647 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1647 0), 1648 0),
1648UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, 1649UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
1649 "HUAWEI MOBILE", 1650 "HUAWEI MOBILE",
1650 "Mass Storage", 1651 "Mass Storage",
1651 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1652 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1652 0), 1653 0),
1653UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, 1654UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
1654 "HUAWEI MOBILE", 1655 "HUAWEI MOBILE",
1655 "Mass Storage", 1656 "Mass Storage",
1656 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1657 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1657 0), 1658 0),
1658UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, 1659UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
1659 "HUAWEI MOBILE", 1660 "HUAWEI MOBILE",
1660 "Mass Storage", 1661 "Mass Storage",
1661 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1662 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1662 0), 1663 0),
1663UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, 1664UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
1664 "HUAWEI MOBILE", 1665 "HUAWEI MOBILE",
1665 "Mass Storage", 1666 "Mass Storage",
1666 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1667 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1667 0), 1668 0),
1668UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, 1669UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
1669 "HUAWEI MOBILE", 1670 "HUAWEI MOBILE",
1670 "Mass Storage", 1671 "Mass Storage",
1671 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1672 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1672 0), 1673 0),
1673UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, 1674UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
1674 "HUAWEI MOBILE", 1675 "HUAWEI MOBILE",
1675 "Mass Storage", 1676 "Mass Storage",
1676 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1677 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1677 0), 1678 0),
1678UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, 1679UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
1679 "HUAWEI MOBILE", 1680 "HUAWEI MOBILE",
1680 "Mass Storage", 1681 "Mass Storage",
1681 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1682 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1682 0), 1683 0),
1683UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, 1684UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
1684 "HUAWEI MOBILE", 1685 "HUAWEI MOBILE",
1685 "Mass Storage", 1686 "Mass Storage",
1686 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1687 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1687 0), 1688 0),
1688UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, 1689UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
1689 "HUAWEI MOBILE", 1690 "HUAWEI MOBILE",
1690 "Mass Storage", 1691 "Mass Storage",
1691 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1692 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1692 0), 1693 0),
1693UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, 1694UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
1694 "HUAWEI MOBILE", 1695 "HUAWEI MOBILE",
1695 "Mass Storage", 1696 "Mass Storage",
1696 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1697 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1697 0), 1698 0),
1698UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, 1699UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
1699 "HUAWEI MOBILE", 1700 "HUAWEI MOBILE",
1700 "Mass Storage", 1701 "Mass Storage",
1701 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1702 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1702 0), 1703 0),
1703UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, 1704UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
1704 "HUAWEI MOBILE", 1705 "HUAWEI MOBILE",
1705 "Mass Storage", 1706 "Mass Storage",
1706 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1707 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1707 0), 1708 0),
1708UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, 1709UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
1709 "HUAWEI MOBILE", 1710 "HUAWEI MOBILE",
1710 "Mass Storage", 1711 "Mass Storage",
1711 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1712 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1712 0), 1713 0),
1713UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, 1714UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
1714 "HUAWEI MOBILE", 1715 "HUAWEI MOBILE",
1715 "Mass Storage", 1716 "Mass Storage",
1716 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1717 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1717 0), 1718 0),
1718UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, 1719UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
1719 "HUAWEI MOBILE", 1720 "HUAWEI MOBILE",
1720 "Mass Storage", 1721 "Mass Storage",
1721 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1722 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1722 0), 1723 0),
1723UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, 1724UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
1724 "HUAWEI MOBILE", 1725 "HUAWEI MOBILE",
1725 "Mass Storage", 1726 "Mass Storage",
1726 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1727 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1727 0), 1728 0),
1728UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, 1729UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
1729 "HUAWEI MOBILE", 1730 "HUAWEI MOBILE",
1730 "Mass Storage", 1731 "Mass Storage",
1731 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1732 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1732 0), 1733 0),
1733UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, 1734UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
1734 "HUAWEI MOBILE", 1735 "HUAWEI MOBILE",
1735 "Mass Storage", 1736 "Mass Storage",
1736 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1737 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1737 0), 1738 0),
1738UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, 1739UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
1739 "HUAWEI MOBILE", 1740 "HUAWEI MOBILE",
1740 "Mass Storage", 1741 "Mass Storage",
1741 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1742 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1742 0), 1743 0),
1743UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, 1744UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
1744 "HUAWEI MOBILE", 1745 "HUAWEI MOBILE",
1745 "Mass Storage", 1746 "Mass Storage",
1746 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1747 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1747 0), 1748 0),
1748UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, 1749UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
1749 "HUAWEI MOBILE", 1750 "HUAWEI MOBILE",
1750 "Mass Storage", 1751 "Mass Storage",
1751 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1752 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1752 0), 1753 0),
1753UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, 1754UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
1754 "HUAWEI MOBILE", 1755 "HUAWEI MOBILE",
1755 "Mass Storage", 1756 "Mass Storage",
1756 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1757 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1757 0), 1758 0),
1758UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, 1759UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
1759 "HUAWEI MOBILE", 1760 "HUAWEI MOBILE",
1760 "Mass Storage", 1761 "Mass Storage",
1761 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1762 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1762 0), 1763 0),
1763UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, 1764UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
1764 "HUAWEI MOBILE", 1765 "HUAWEI MOBILE",
1765 "Mass Storage", 1766 "Mass Storage",
1766 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1767 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1767 0), 1768 0),
1768UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, 1769UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
1769 "HUAWEI MOBILE", 1770 "HUAWEI MOBILE",
1770 "Mass Storage", 1771 "Mass Storage",
1771 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1772 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1772 0), 1773 0),
1773UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, 1774UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
1774 "HUAWEI MOBILE", 1775 "HUAWEI MOBILE",
1775 "Mass Storage", 1776 "Mass Storage",
1776 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1777 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1777 0), 1778 0),
1778UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, 1779UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
1779 "HUAWEI MOBILE", 1780 "HUAWEI MOBILE",
1780 "Mass Storage", 1781 "Mass Storage",
1781 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1782 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1782 0), 1783 0),
1783UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, 1784UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
1784 "HUAWEI MOBILE", 1785 "HUAWEI MOBILE",
1785 "Mass Storage", 1786 "Mass Storage",
1786 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1787 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1787 0), 1788 0),
1788UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, 1789UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
1789 "HUAWEI MOBILE", 1790 "HUAWEI MOBILE",
1790 "Mass Storage", 1791 "Mass Storage",
1791 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1792 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1792 0), 1793 0),
1793UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, 1794UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
1794 "HUAWEI MOBILE", 1795 "HUAWEI MOBILE",
1795 "Mass Storage", 1796 "Mass Storage",
1796 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1797 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1797 0), 1798 0),
1798UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, 1799UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
1799 "HUAWEI MOBILE", 1800 "HUAWEI MOBILE",
1800 "Mass Storage", 1801 "Mass Storage",
1801 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1802 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1802 0), 1803 0),
1803UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, 1804UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
1804 "HUAWEI MOBILE", 1805 "HUAWEI MOBILE",
1805 "Mass Storage", 1806 "Mass Storage",
1806 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1807 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1807 0), 1808 0),
1808 1809
1809/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ 1810/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
1810UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001, 1811UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001,
1811 "Minolta", 1812 "Minolta",
1812 "Dimage Z10", 1813 "Dimage Z10",
1813 US_SC_DEVICE, US_PR_DEVICE, NULL, 1814 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1814 0 ), 1815 0 ),
1815 1816
1816/* Reported by Kotrla Vitezslav <kotrla@ceb.cz> */ 1817/* Reported by Kotrla Vitezslav <kotrla@ceb.cz> */
1817UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, 1818UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
1818 "SWISSBIT", 1819 "SWISSBIT",
1819 "Black Silver", 1820 "Black Silver",
1820 US_SC_DEVICE, US_PR_DEVICE, NULL, 1821 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1821 US_FL_IGNORE_RESIDUE ), 1822 US_FL_IGNORE_RESIDUE ),
1822 1823
1823/* Reported by Francesco Foresti <frafore@tiscali.it> */ 1824/* Reported by Francesco Foresti <frafore@tiscali.it> */
1824UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, 1825UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1825 "Super Top", 1826 "Super Top",
1826 "IDE DEVICE", 1827 "IDE DEVICE",
1827 US_SC_DEVICE, US_PR_DEVICE, NULL, 1828 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1828 US_FL_IGNORE_RESIDUE ), 1829 US_FL_IGNORE_RESIDUE ),
1829 1830
1830/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> 1831/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
@@ -1833,7 +1834,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1833UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, 1834UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
1834 "JMicron", 1835 "JMicron",
1835 "USB to ATA/ATAPI Bridge", 1836 "USB to ATA/ATAPI Bridge",
1836 US_SC_DEVICE, US_PR_DEVICE, NULL, 1837 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1837 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), 1838 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
1838 1839
1839/* Reported by Robert Schedel <r.schedel@yahoo.de> 1840/* Reported by Robert Schedel <r.schedel@yahoo.de>
@@ -1841,7 +1842,7 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
1841UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, 1842UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1842 "Teac", 1843 "Teac",
1843 "HD-35PUK-B", 1844 "HD-35PUK-B",
1844 US_SC_DEVICE, US_PR_DEVICE, NULL, 1845 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1845 US_FL_IGNORE_RESIDUE ), 1846 US_FL_IGNORE_RESIDUE ),
1846 1847
1847/* Reported by Hans de Goede <hdegoede@redhat.com> 1848/* Reported by Hans de Goede <hdegoede@redhat.com>
@@ -1851,18 +1852,23 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1851UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000, 1852UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
1852 "BUILDWIN", 1853 "BUILDWIN",
1853 "Photo Frame", 1854 "Photo Frame",
1854 US_SC_DEVICE, US_PR_DEVICE, NULL, 1855 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1855 US_FL_BAD_SENSE ), 1856 US_FL_BAD_SENSE ),
1856UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000, 1857UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
1857 "BUILDWIN", 1858 "BUILDWIN",
1858 "Photo Frame", 1859 "Photo Frame",
1859 US_SC_DEVICE, US_PR_DEVICE, NULL, 1860 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1860 US_FL_BAD_SENSE ), 1861 US_FL_BAD_SENSE ),
1862UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
1863 "BUILDWIN",
1864 "Photo Frame",
1865 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1866 US_FL_NO_READ_DISC_INFO ),
1861 1867
1862UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, 1868UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1863 "ST", 1869 "ST",
1864 "2A", 1870 "2A",
1865 US_SC_DEVICE, US_PR_DEVICE, NULL, 1871 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1866 US_FL_FIX_CAPACITY), 1872 US_FL_FIX_CAPACITY),
1867 1873
1868/* patch submitted by Davide Perini <perini.davide@dpsoftware.org> 1874/* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
@@ -1871,7 +1877,7 @@ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1871UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001, 1877UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
1872 "Motorola", 1878 "Motorola",
1873 "RAZR V3x", 1879 "RAZR V3x",
1874 US_SC_DEVICE, US_PR_DEVICE, NULL, 1880 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1875 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), 1881 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
1876 1882
1877/* 1883/*
@@ -1882,14 +1888,14 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
1882UNUSUAL_DEV( 0x22b8, 0x6426, 0x0101, 0x0101, 1888UNUSUAL_DEV( 0x22b8, 0x6426, 0x0101, 0x0101,
1883 "Motorola", 1889 "Motorola",
1884 "MSnc.", 1890 "MSnc.",
1885 US_SC_DEVICE, US_PR_DEVICE, NULL, 1891 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1886 US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG), 1892 US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
1887 1893
1888/* Reported by Radovan Garabik <garabik@kassiopeia.juls.savba.sk> */ 1894/* Reported by Radovan Garabik <garabik@kassiopeia.juls.savba.sk> */
1889UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, 1895UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1890 "MPIO", 1896 "MPIO",
1891 "HS200", 1897 "HS200",
1892 US_SC_DEVICE, US_PR_DEVICE, NULL, 1898 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1893 US_FL_GO_SLOW ), 1899 US_FL_GO_SLOW ),
1894 1900
1895/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com> 1901/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
@@ -1898,21 +1904,21 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1898UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000, 1904UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
1899 "Mitac", 1905 "Mitac",
1900 "Mio DigiWalker USB Sync", 1906 "Mio DigiWalker USB Sync",
1901 US_SC_DEVICE,US_PR_DEVICE,NULL, 1907 USB_SC_DEVICE,USB_PR_DEVICE,NULL,
1902 US_FL_MAX_SECTORS_64 ), 1908 US_FL_MAX_SECTORS_64 ),
1903 1909
1904/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ 1910/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1905UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, 1911UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1906 "iRiver", 1912 "iRiver",
1907 "MP3 T10", 1913 "MP3 T10",
1908 US_SC_DEVICE, US_PR_DEVICE, NULL, 1914 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1909 US_FL_IGNORE_RESIDUE ), 1915 US_FL_IGNORE_RESIDUE ),
1910 1916
1911/* Reported by Sergey Pinaev <dfo@antex.ru> */ 1917/* Reported by Sergey Pinaev <dfo@antex.ru> */
1912UNUSUAL_DEV( 0x4102, 0x1059, 0x0000, 0x0000, 1918UNUSUAL_DEV( 0x4102, 0x1059, 0x0000, 0x0000,
1913 "iRiver", 1919 "iRiver",
1914 "P7K", 1920 "P7K",
1915 US_SC_DEVICE, US_PR_DEVICE, NULL, 1921 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1916 US_FL_MAX_SECTORS_64 ), 1922 US_FL_MAX_SECTORS_64 ),
1917 1923
1918/* 1924/*
@@ -1922,41 +1928,41 @@ UNUSUAL_DEV( 0x4102, 0x1059, 0x0000, 0x0000,
1922UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, 1928UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
1923 "Iomega", 1929 "Iomega",
1924 "Micro Mini 1GB", 1930 "Micro Mini 1GB",
1925 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), 1931 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1926 1932
1927/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ 1933/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
1928UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, 1934UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
1929 "DataStor", 1935 "DataStor",
1930 "USB4500 FW1.04", 1936 "USB4500 FW1.04",
1931 US_SC_DEVICE, US_PR_DEVICE, NULL, 1937 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1932 US_FL_CAPACITY_HEURISTICS), 1938 US_FL_CAPACITY_HEURISTICS),
1933 1939
1934/* Reported by Alessio Treglia <quadrispro@ubuntu.com> */ 1940/* Reported by Alessio Treglia <quadrispro@ubuntu.com> */
1935UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001, 1941UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
1936 "TGE", 1942 "TGE",
1937 "Digital MP3 Audio Player", 1943 "Digital MP3 Audio Player",
1938 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), 1944 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1939 1945
1940/* Control/Bulk transport for all SubClass values */ 1946/* Control/Bulk transport for all SubClass values */
1941USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR), 1947USUAL_DEV(USB_SC_RBC, USB_PR_CB, USB_US_TYPE_STOR),
1942USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR), 1948USUAL_DEV(USB_SC_8020, USB_PR_CB, USB_US_TYPE_STOR),
1943USUAL_DEV(US_SC_QIC, US_PR_CB, USB_US_TYPE_STOR), 1949USUAL_DEV(USB_SC_QIC, USB_PR_CB, USB_US_TYPE_STOR),
1944USUAL_DEV(US_SC_UFI, US_PR_CB, USB_US_TYPE_STOR), 1950USUAL_DEV(USB_SC_UFI, USB_PR_CB, USB_US_TYPE_STOR),
1945USUAL_DEV(US_SC_8070, US_PR_CB, USB_US_TYPE_STOR), 1951USUAL_DEV(USB_SC_8070, USB_PR_CB, USB_US_TYPE_STOR),
1946USUAL_DEV(US_SC_SCSI, US_PR_CB, USB_US_TYPE_STOR), 1952USUAL_DEV(USB_SC_SCSI, USB_PR_CB, USB_US_TYPE_STOR),
1947 1953
1948/* Control/Bulk/Interrupt transport for all SubClass values */ 1954/* Control/Bulk/Interrupt transport for all SubClass values */
1949USUAL_DEV(US_SC_RBC, US_PR_CBI, USB_US_TYPE_STOR), 1955USUAL_DEV(USB_SC_RBC, USB_PR_CBI, USB_US_TYPE_STOR),
1950USUAL_DEV(US_SC_8020, US_PR_CBI, USB_US_TYPE_STOR), 1956USUAL_DEV(USB_SC_8020, USB_PR_CBI, USB_US_TYPE_STOR),
1951USUAL_DEV(US_SC_QIC, US_PR_CBI, USB_US_TYPE_STOR), 1957USUAL_DEV(USB_SC_QIC, USB_PR_CBI, USB_US_TYPE_STOR),
1952USUAL_DEV(US_SC_UFI, US_PR_CBI, USB_US_TYPE_STOR), 1958USUAL_DEV(USB_SC_UFI, USB_PR_CBI, USB_US_TYPE_STOR),
1953USUAL_DEV(US_SC_8070, US_PR_CBI, USB_US_TYPE_STOR), 1959USUAL_DEV(USB_SC_8070, USB_PR_CBI, USB_US_TYPE_STOR),
1954USUAL_DEV(US_SC_SCSI, US_PR_CBI, USB_US_TYPE_STOR), 1960USUAL_DEV(USB_SC_SCSI, USB_PR_CBI, USB_US_TYPE_STOR),
1955 1961
1956/* Bulk-only transport for all SubClass values */ 1962/* Bulk-only transport for all SubClass values */
1957USUAL_DEV(US_SC_RBC, US_PR_BULK, USB_US_TYPE_STOR), 1963USUAL_DEV(USB_SC_RBC, USB_PR_BULK, USB_US_TYPE_STOR),
1958USUAL_DEV(US_SC_8020, US_PR_BULK, USB_US_TYPE_STOR), 1964USUAL_DEV(USB_SC_8020, USB_PR_BULK, USB_US_TYPE_STOR),
1959USUAL_DEV(US_SC_QIC, US_PR_BULK, USB_US_TYPE_STOR), 1965USUAL_DEV(USB_SC_QIC, USB_PR_BULK, USB_US_TYPE_STOR),
1960USUAL_DEV(US_SC_UFI, US_PR_BULK, USB_US_TYPE_STOR), 1966USUAL_DEV(USB_SC_UFI, USB_PR_BULK, USB_US_TYPE_STOR),
1961USUAL_DEV(US_SC_8070, US_PR_BULK, USB_US_TYPE_STOR), 1967USUAL_DEV(USB_SC_8070, USB_PR_BULK, USB_US_TYPE_STOR),
1962USUAL_DEV(US_SC_SCSI, US_PR_BULK, 0), 1968USUAL_DEV(USB_SC_SCSI, USB_PR_BULK, 0),
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 375867942391..59a261155b98 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -21,6 +21,6 @@
21UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999, 21UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999,
22 "Freecom", 22 "Freecom",
23 "USB-IDE", 23 "USB-IDE",
24 US_SC_QIC, US_PR_FREECOM, init_freecom, 0), 24 USB_SC_QIC, USB_PR_FREECOM, init_freecom, 0),
25 25
26#endif /* defined(CONFIG_USB_STORAGE_FREECOM) || ... */ 26#endif /* defined(CONFIG_USB_STORAGE_FREECOM) || ... */
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index 0d99dde3382a..14cca0c48302 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -21,37 +21,37 @@
21UNUSUAL_DEV( 0x054c, 0x002b, 0x0100, 0x0110, 21UNUSUAL_DEV( 0x054c, 0x002b, 0x0100, 0x0110,
22 "Sony", 22 "Sony",
23 "Portable USB Harddrive V2", 23 "Portable USB Harddrive V2",
24 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 24 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
25 0), 25 0),
26 26
27UNUSUAL_DEV( 0x05ab, 0x0031, 0x0100, 0x0110, 27UNUSUAL_DEV( 0x05ab, 0x0031, 0x0100, 0x0110,
28 "In-System", 28 "In-System",
29 "USB/IDE Bridge (ATA/ATAPI)", 29 "USB/IDE Bridge (ATA/ATAPI)",
30 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 30 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
31 0), 31 0),
32 32
33UNUSUAL_DEV( 0x05ab, 0x0301, 0x0100, 0x0110, 33UNUSUAL_DEV( 0x05ab, 0x0301, 0x0100, 0x0110,
34 "In-System", 34 "In-System",
35 "Portable USB Harddrive V2", 35 "Portable USB Harddrive V2",
36 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 36 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
37 0), 37 0),
38 38
39UNUSUAL_DEV( 0x05ab, 0x0351, 0x0100, 0x0110, 39UNUSUAL_DEV( 0x05ab, 0x0351, 0x0100, 0x0110,
40 "In-System", 40 "In-System",
41 "Portable USB Harddrive V2", 41 "Portable USB Harddrive V2",
42 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 42 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
43 0), 43 0),
44 44
45UNUSUAL_DEV( 0x05ab, 0x5701, 0x0100, 0x0110, 45UNUSUAL_DEV( 0x05ab, 0x5701, 0x0100, 0x0110,
46 "In-System", 46 "In-System",
47 "USB Storage Adapter V2", 47 "USB Storage Adapter V2",
48 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 48 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
49 0), 49 0),
50 50
51UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110, 51UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110,
52 "ATI", 52 "ATI",
53 "USB Cable 205", 53 "USB Cable 205",
54 US_SC_ISD200, US_PR_BULK, isd200_Initialization, 54 USB_SC_ISD200, USB_PR_BULK, isd200_Initialization,
55 0), 55 0),
56 56
57#endif /* defined(CONFIG_USB_STORAGE_ISD200) || ... */ 57#endif /* defined(CONFIG_USB_STORAGE_ISD200) || ... */
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index 2e549b1c2c62..54be78b5d643 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -21,7 +21,7 @@
21UNUSUAL_DEV( 0x05dc, 0x0001, 0x0000, 0x0001, 21UNUSUAL_DEV( 0x05dc, 0x0001, 0x0000, 0x0001,
22 "Lexar", 22 "Lexar",
23 "Jumpshot USB CF Reader", 23 "Jumpshot USB CF Reader",
24 US_SC_SCSI, US_PR_JUMPSHOT, NULL, 24 USB_SC_SCSI, USB_PR_JUMPSHOT, NULL,
25 US_FL_NEED_OVERRIDE), 25 US_FL_NEED_OVERRIDE),
26 26
27#endif /* defined(CONFIG_USB_STORAGE_JUMPSHOT) || ... */ 27#endif /* defined(CONFIG_USB_STORAGE_JUMPSHOT) || ... */
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index 12ae3a03e802..6df03972a22c 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -21,6 +21,6 @@
21UNUSUAL_DEV( 0x045a, 0x5210, 0x0101, 0x0101, 21UNUSUAL_DEV( 0x045a, 0x5210, 0x0101, 0x0101,
22 "Rio", 22 "Rio",
23 "Rio Karma", 23 "Rio Karma",
24 US_SC_SCSI, US_PR_KARMA, rio_karma_init, 0), 24 USB_SC_SCSI, USB_PR_KARMA, rio_karma_init, 0),
25 25
26#endif /* defined(CONFIG_USB_STORAGE_KARMA) || ... */ 26#endif /* defined(CONFIG_USB_STORAGE_KARMA) || ... */
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index bd9306b637df..0abb819c7405 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -24,13 +24,13 @@
24UNUSUAL_DEV( 0x0d49, 0x7000, 0x0000, 0x9999, 24UNUSUAL_DEV( 0x0d49, 0x7000, 0x0000, 0x9999,
25 "Maxtor", 25 "Maxtor",
26 "OneTouch External Harddrive", 26 "OneTouch External Harddrive",
27 US_SC_DEVICE, US_PR_DEVICE, onetouch_connect_input, 27 USB_SC_DEVICE, USB_PR_DEVICE, onetouch_connect_input,
28 0), 28 0),
29 29
30UNUSUAL_DEV( 0x0d49, 0x7010, 0x0000, 0x9999, 30UNUSUAL_DEV( 0x0d49, 0x7010, 0x0000, 0x9999,
31 "Maxtor", 31 "Maxtor",
32 "OneTouch External Harddrive", 32 "OneTouch External Harddrive",
33 US_SC_DEVICE, US_PR_DEVICE, onetouch_connect_input, 33 USB_SC_DEVICE, USB_PR_DEVICE, onetouch_connect_input,
34 0), 34 0),
35 35
36#endif /* defined(CONFIG_USB_STORAGE_ONETOUCH) || ... */ 36#endif /* defined(CONFIG_USB_STORAGE_ONETOUCH) || ... */
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index 50cab511a4d7..59a7e37b6c11 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -21,36 +21,36 @@
21UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100, 21UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
22 "Microtech", 22 "Microtech",
23 "CameraMate (DPCM_USB)", 23 "CameraMate (DPCM_USB)",
24 US_SC_SCSI, US_PR_DPCM_USB, NULL, 0), 24 USB_SC_SCSI, USB_PR_DPCM_USB, NULL, 0),
25 25
26UNUSUAL_DEV( 0x04e6, 0x0003, 0x0000, 0x9999, 26UNUSUAL_DEV( 0x04e6, 0x0003, 0x0000, 0x9999,
27 "Sandisk", 27 "Sandisk",
28 "ImageMate SDDR09", 28 "ImageMate SDDR09",
29 US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init, 29 USB_SC_SCSI, USB_PR_EUSB_SDDR09, usb_stor_sddr09_init,
30 0), 30 0),
31 31
32/* This entry is from Andries.Brouwer@cwi.nl */ 32/* This entry is from Andries.Brouwer@cwi.nl */
33UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208, 33UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208,
34 "SCM Microsystems", 34 "SCM Microsystems",
35 "eUSB SmartMedia / CompactFlash Adapter", 35 "eUSB SmartMedia / CompactFlash Adapter",
36 US_SC_SCSI, US_PR_DPCM_USB, usb_stor_sddr09_dpcm_init, 36 USB_SC_SCSI, USB_PR_DPCM_USB, usb_stor_sddr09_dpcm_init,
37 0), 37 0),
38 38
39UNUSUAL_DEV( 0x066b, 0x0105, 0x0100, 0x0100, 39UNUSUAL_DEV( 0x066b, 0x0105, 0x0100, 0x0100,
40 "Olympus", 40 "Olympus",
41 "Camedia MAUSB-2", 41 "Camedia MAUSB-2",
42 US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init, 42 USB_SC_SCSI, USB_PR_EUSB_SDDR09, usb_stor_sddr09_init,
43 0), 43 0),
44 44
45UNUSUAL_DEV( 0x0781, 0x0200, 0x0000, 0x9999, 45UNUSUAL_DEV( 0x0781, 0x0200, 0x0000, 0x9999,
46 "Sandisk", 46 "Sandisk",
47 "ImageMate SDDR-09", 47 "ImageMate SDDR-09",
48 US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init, 48 USB_SC_SCSI, USB_PR_EUSB_SDDR09, usb_stor_sddr09_init,
49 0), 49 0),
50 50
51UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100, 51UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
52 "Microtech", 52 "Microtech",
53 "CameraMate (DPCM_USB)", 53 "CameraMate (DPCM_USB)",
54 US_SC_SCSI, US_PR_DPCM_USB, NULL, 0), 54 USB_SC_SCSI, USB_PR_DPCM_USB, NULL, 0),
55 55
56#endif /* defined(CONFIG_USB_STORAGE_SDDR09) || ... */ 56#endif /* defined(CONFIG_USB_STORAGE_SDDR09) || ... */
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index ae81ef7a1cfd..fcb7e12c598f 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -22,23 +22,23 @@
22UNUSUAL_DEV( 0x07c4, 0xa103, 0x0000, 0x9999, 22UNUSUAL_DEV( 0x07c4, 0xa103, 0x0000, 0x9999,
23 "Datafab", 23 "Datafab",
24 "MDSM-B reader", 24 "MDSM-B reader",
25 US_SC_SCSI, US_PR_SDDR55, NULL, 25 USB_SC_SCSI, USB_PR_SDDR55, NULL,
26 US_FL_FIX_INQUIRY), 26 US_FL_FIX_INQUIRY),
27 27
28/* SM part - aeb <Andries.Brouwer@cwi.nl> */ 28/* SM part - aeb <Andries.Brouwer@cwi.nl> */
29UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff, 29UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff,
30 "Datafab Systems, Inc.", 30 "Datafab Systems, Inc.",
31 "USB to CF + SM Combo (LC1)", 31 "USB to CF + SM Combo (LC1)",
32 US_SC_SCSI, US_PR_SDDR55, NULL, 0), 32 USB_SC_SCSI, USB_PR_SDDR55, NULL, 0),
33 33
34UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff, 34UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff,
35 "Acomdata", 35 "Acomdata",
36 "SM", 36 "SM",
37 US_SC_SCSI, US_PR_SDDR55, NULL, 0), 37 USB_SC_SCSI, USB_PR_SDDR55, NULL, 0),
38 38
39UNUSUAL_DEV( 0x55aa, 0xa103, 0x0000, 0x9999, 39UNUSUAL_DEV( 0x55aa, 0xa103, 0x0000, 0x9999,
40 "Sandisk", 40 "Sandisk",
41 "ImageMate SDDR55", 41 "ImageMate SDDR55",
42 US_SC_SCSI, US_PR_SDDR55, NULL, 0), 42 USB_SC_SCSI, USB_PR_SDDR55, NULL, 0),
43 43
44#endif /* defined(CONFIG_USB_STORAGE_SDDR55) || ... */ 44#endif /* defined(CONFIG_USB_STORAGE_SDDR55) || ... */
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 80e869f10180..38e79c4e6d6a 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -21,23 +21,23 @@
21UNUSUAL_DEV( 0x03f0, 0x0207, 0x0001, 0x0001, 21UNUSUAL_DEV( 0x03f0, 0x0207, 0x0001, 0x0001,
22 "HP", 22 "HP",
23 "CD-Writer+ 8200e", 23 "CD-Writer+ 8200e",
24 US_SC_8070, US_PR_USBAT, init_usbat_cd, 0), 24 USB_SC_8070, USB_PR_USBAT, init_usbat_cd, 0),
25 25
26UNUSUAL_DEV( 0x03f0, 0x0307, 0x0001, 0x0001, 26UNUSUAL_DEV( 0x03f0, 0x0307, 0x0001, 0x0001,
27 "HP", 27 "HP",
28 "CD-Writer+ CD-4e", 28 "CD-Writer+ CD-4e",
29 US_SC_8070, US_PR_USBAT, init_usbat_cd, 0), 29 USB_SC_8070, USB_PR_USBAT, init_usbat_cd, 0),
30 30
31UNUSUAL_DEV( 0x04e6, 0x1010, 0x0000, 0x9999, 31UNUSUAL_DEV( 0x04e6, 0x1010, 0x0000, 0x9999,
32 "Shuttle/SCM", 32 "Shuttle/SCM",
33 "USBAT-02", 33 "USBAT-02",
34 US_SC_SCSI, US_PR_USBAT, init_usbat_flash, 34 USB_SC_SCSI, USB_PR_USBAT, init_usbat_flash,
35 US_FL_SINGLE_LUN), 35 US_FL_SINGLE_LUN),
36 36
37UNUSUAL_DEV( 0x0781, 0x0005, 0x0005, 0x0005, 37UNUSUAL_DEV( 0x0781, 0x0005, 0x0005, 0x0005,
38 "Sandisk", 38 "Sandisk",
39 "ImageMate SDDR-05b", 39 "ImageMate SDDR-05b",
40 US_SC_SCSI, US_PR_USBAT, init_usbat_flash, 40 USB_SC_SCSI, USB_PR_USBAT, init_usbat_flash,
41 US_FL_SINGLE_LUN), 41 US_FL_SINGLE_LUN),
42 42
43#endif /* defined(CONFIG_USB_STORAGE_USBAT) || ... */ 43#endif /* defined(CONFIG_USB_STORAGE_USBAT) || ... */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 90bb0175a152..4219c197cb08 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -512,10 +512,10 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
512 512
513 /* Store the entries */ 513 /* Store the entries */
514 us->unusual_dev = unusual_dev; 514 us->unusual_dev = unusual_dev;
515 us->subclass = (unusual_dev->useProtocol == US_SC_DEVICE) ? 515 us->subclass = (unusual_dev->useProtocol == USB_SC_DEVICE) ?
516 idesc->bInterfaceSubClass : 516 idesc->bInterfaceSubClass :
517 unusual_dev->useProtocol; 517 unusual_dev->useProtocol;
518 us->protocol = (unusual_dev->useTransport == US_PR_DEVICE) ? 518 us->protocol = (unusual_dev->useTransport == USB_PR_DEVICE) ?
519 idesc->bInterfaceProtocol : 519 idesc->bInterfaceProtocol :
520 unusual_dev->useTransport; 520 unusual_dev->useTransport;
521 us->fflags = USB_US_ORIG_FLAGS(id->driver_info); 521 us->fflags = USB_US_ORIG_FLAGS(id->driver_info);
@@ -552,10 +552,10 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
552 struct usb_device_descriptor *ddesc = &dev->descriptor; 552 struct usb_device_descriptor *ddesc = &dev->descriptor;
553 int msg = -1; 553 int msg = -1;
554 554
555 if (unusual_dev->useProtocol != US_SC_DEVICE && 555 if (unusual_dev->useProtocol != USB_SC_DEVICE &&
556 us->subclass == idesc->bInterfaceSubClass) 556 us->subclass == idesc->bInterfaceSubClass)
557 msg += 1; 557 msg += 1;
558 if (unusual_dev->useTransport != US_PR_DEVICE && 558 if (unusual_dev->useTransport != USB_PR_DEVICE &&
559 us->protocol == idesc->bInterfaceProtocol) 559 us->protocol == idesc->bInterfaceProtocol)
560 msg += 2; 560 msg += 2;
561 if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE)) 561 if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE))
@@ -582,21 +582,21 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
582static void get_transport(struct us_data *us) 582static void get_transport(struct us_data *us)
583{ 583{
584 switch (us->protocol) { 584 switch (us->protocol) {
585 case US_PR_CB: 585 case USB_PR_CB:
586 us->transport_name = "Control/Bulk"; 586 us->transport_name = "Control/Bulk";
587 us->transport = usb_stor_CB_transport; 587 us->transport = usb_stor_CB_transport;
588 us->transport_reset = usb_stor_CB_reset; 588 us->transport_reset = usb_stor_CB_reset;
589 us->max_lun = 7; 589 us->max_lun = 7;
590 break; 590 break;
591 591
592 case US_PR_CBI: 592 case USB_PR_CBI:
593 us->transport_name = "Control/Bulk/Interrupt"; 593 us->transport_name = "Control/Bulk/Interrupt";
594 us->transport = usb_stor_CB_transport; 594 us->transport = usb_stor_CB_transport;
595 us->transport_reset = usb_stor_CB_reset; 595 us->transport_reset = usb_stor_CB_reset;
596 us->max_lun = 7; 596 us->max_lun = 7;
597 break; 597 break;
598 598
599 case US_PR_BULK: 599 case USB_PR_BULK:
600 us->transport_name = "Bulk"; 600 us->transport_name = "Bulk";
601 us->transport = usb_stor_Bulk_transport; 601 us->transport = usb_stor_Bulk_transport;
602 us->transport_reset = usb_stor_Bulk_reset; 602 us->transport_reset = usb_stor_Bulk_reset;
@@ -608,35 +608,35 @@ static void get_transport(struct us_data *us)
608static void get_protocol(struct us_data *us) 608static void get_protocol(struct us_data *us)
609{ 609{
610 switch (us->subclass) { 610 switch (us->subclass) {
611 case US_SC_RBC: 611 case USB_SC_RBC:
612 us->protocol_name = "Reduced Block Commands (RBC)"; 612 us->protocol_name = "Reduced Block Commands (RBC)";
613 us->proto_handler = usb_stor_transparent_scsi_command; 613 us->proto_handler = usb_stor_transparent_scsi_command;
614 break; 614 break;
615 615
616 case US_SC_8020: 616 case USB_SC_8020:
617 us->protocol_name = "8020i"; 617 us->protocol_name = "8020i";
618 us->proto_handler = usb_stor_pad12_command; 618 us->proto_handler = usb_stor_pad12_command;
619 us->max_lun = 0; 619 us->max_lun = 0;
620 break; 620 break;
621 621
622 case US_SC_QIC: 622 case USB_SC_QIC:
623 us->protocol_name = "QIC-157"; 623 us->protocol_name = "QIC-157";
624 us->proto_handler = usb_stor_pad12_command; 624 us->proto_handler = usb_stor_pad12_command;
625 us->max_lun = 0; 625 us->max_lun = 0;
626 break; 626 break;
627 627
628 case US_SC_8070: 628 case USB_SC_8070:
629 us->protocol_name = "8070i"; 629 us->protocol_name = "8070i";
630 us->proto_handler = usb_stor_pad12_command; 630 us->proto_handler = usb_stor_pad12_command;
631 us->max_lun = 0; 631 us->max_lun = 0;
632 break; 632 break;
633 633
634 case US_SC_SCSI: 634 case USB_SC_SCSI:
635 us->protocol_name = "Transparent SCSI"; 635 us->protocol_name = "Transparent SCSI";
636 us->proto_handler = usb_stor_transparent_scsi_command; 636 us->proto_handler = usb_stor_transparent_scsi_command;
637 break; 637 break;
638 638
639 case US_SC_UFI: 639 case USB_SC_UFI:
640 us->protocol_name = "Uniform Floppy Interface (UFI)"; 640 us->protocol_name = "Uniform Floppy Interface (UFI)";
641 us->proto_handler = usb_stor_ufi_command; 641 us->proto_handler = usb_stor_ufi_command;
642 break; 642 break;
@@ -679,7 +679,7 @@ static int get_pipes(struct us_data *us)
679 } 679 }
680 } 680 }
681 681
682 if (!ep_in || !ep_out || (us->protocol == US_PR_CBI && !ep_int)) { 682 if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int)) {
683 US_DEBUGP("Endpoint sanity check failed! Rejecting dev.\n"); 683 US_DEBUGP("Endpoint sanity check failed! Rejecting dev.\n");
684 return -EIO; 684 return -EIO;
685 } 685 }
@@ -834,7 +834,7 @@ static int usb_stor_scan_thread(void * __us)
834 if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) { 834 if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
835 835
836 /* For bulk-only devices, determine the max LUN value */ 836 /* For bulk-only devices, determine the max LUN value */
837 if (us->protocol == US_PR_BULK && 837 if (us->protocol == USB_PR_BULK &&
838 !(us->fflags & US_FL_SINGLE_LUN)) { 838 !(us->fflags & US_FL_SINGLE_LUN)) {
839 mutex_lock(&us->dev_mutex); 839 mutex_lock(&us->dev_mutex);
840 us->max_lun = usb_stor_Bulk_max_lun(us); 840 us->max_lun = usb_stor_Bulk_max_lun(us);
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
index 75f1ade66258..b3bd313032b1 100644
--- a/drivers/usb/wusbcore/Makefile
+++ b/drivers/usb/wusbcore/Makefile
@@ -1,9 +1,11 @@
1ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG
2
1obj-$(CONFIG_USB_WUSB) += wusbcore.o 3obj-$(CONFIG_USB_WUSB) += wusbcore.o
2obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o 4obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
3obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o 5obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
4 6
5 7
6wusbcore-objs := \ 8wusbcore-y := \
7 crypto.o \ 9 crypto.o \
8 devconnect.o \ 10 devconnect.o \
9 dev-sysfs.o \ 11 dev-sysfs.o \
@@ -14,13 +16,10 @@ wusbcore-objs := \
14 security.o \ 16 security.o \
15 wusbhc.o 17 wusbhc.o
16 18
17wusb-cbaf-objs := cbaf.o 19wusb-cbaf-y := cbaf.o
18
19wusb-wa-objs := wa-hc.o \
20 wa-nep.o \
21 wa-rpipe.o \
22 wa-xfer.o
23 20
24ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y) 21wusb-wa-y := \
25EXTRA_CFLAGS += -DDEBUG 22 wa-hc.o \
26endif 23 wa-nep.o \
24 wa-rpipe.o \
25 wa-xfer.o
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
index 973321327c44..8739c4f4d015 100644
--- a/drivers/uwb/address.c
+++ b/drivers/uwb/address.c
@@ -363,10 +363,7 @@ size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
363{ 363{
364 size_t result; 364 size_t result;
365 if (type) 365 if (type)
366 result = scnprintf(buf, buf_size, 366 result = scnprintf(buf, buf_size, "%pM", addr);
367 "%02x:%02x:%02x:%02x:%02x:%02x",
368 addr[0], addr[1], addr[2],
369 addr[3], addr[4], addr[5]);
370 else 367 else
371 result = scnprintf(buf, buf_size, "%02x:%02x", 368 result = scnprintf(buf, buf_size, "%02x:%02x",
372 addr[1], addr[0]); 369 addr[1], addr[0]);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
index a005d2a03b5d..67872c83b679 100644
--- a/drivers/uwb/wlp/wss-lc.c
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -791,11 +791,8 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
791 } else { 791 } else {
792 if (printk_ratelimit()) 792 if (printk_ratelimit())
793 dev_err(dev, "WLP: Destination neighbor (Ethernet: " 793 dev_err(dev, "WLP: Destination neighbor (Ethernet: "
794 "%02x:%02x:%02x:%02x:%02x:%02x, Dev: " 794 "%pM, Dev: %02x:%02x) is not connected.\n",
795 "%02x:%02x) is not connected. \n", eth_addr[0], 795 eth_addr, dev_addr->data[1], dev_addr->data[0]);
796 eth_addr[1], eth_addr[2], eth_addr[3],
797 eth_addr[4], eth_addr[5], dev_addr->data[1],
798 dev_addr->data[0]);
799 result = -EINVAL; 796 result = -EINVAL;
800 } 797 }
801 return result; 798 return result;
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index f3aada20fa02..5b2b5ef4edba 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -1718,11 +1718,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red,
1718 (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) | 1718 (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
1719 (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) | 1719 (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
1720 ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); 1720 ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
1721#ifdef ATAFB_FALCON
1722 ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) | 1721 ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
1723 ((green & 0xfc00) >> 5) | 1722 ((green & 0xfc00) >> 5) |
1724 ((blue & 0xf800) >> 11)); 1723 ((blue & 0xf800) >> 11));
1725#endif
1726 } 1724 }
1727 return 0; 1725 return 0;
1728} 1726}
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index fc32c323a381..f5a39f5aa900 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -28,7 +28,7 @@
28 28
29#define Q40_PHYS_SCREEN_ADDR 0xFE800000 29#define Q40_PHYS_SCREEN_ADDR 0xFE800000
30 30
31static struct fb_fix_screeninfo q40fb_fix __initdata = { 31static struct fb_fix_screeninfo q40fb_fix __devinitdata = {
32 .id = "Q40", 32 .id = "Q40",
33 .smem_len = 1024*1024, 33 .smem_len = 1024*1024,
34 .type = FB_TYPE_PACKED_PIXELS, 34 .type = FB_TYPE_PACKED_PIXELS,
@@ -37,7 +37,7 @@ static struct fb_fix_screeninfo q40fb_fix __initdata = {
37 .accel = FB_ACCEL_NONE, 37 .accel = FB_ACCEL_NONE,
38}; 38};
39 39
40static struct fb_var_screeninfo q40fb_var __initdata = { 40static struct fb_var_screeninfo q40fb_var __devinitdata = {
41 .xres = 1024, 41 .xres = 1024,
42 .yres = 512, 42 .yres = 512,
43 .xres_virtual = 1024, 43 .xres_virtual = 1024,
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 6455f3a244c5..e0c2807b0970 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -142,6 +142,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
142 error = device_register(&bus->dev); 142 error = device_register(&bus->dev);
143 if (error) { 143 if (error) {
144 pr_err("Zorro: Error registering zorro_bus\n"); 144 pr_err("Zorro: Error registering zorro_bus\n");
145 put_device(&bus->dev);
145 kfree(bus); 146 kfree(bus);
146 return error; 147 return error;
147 } 148 }
@@ -175,6 +176,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
175 if (error) { 176 if (error) {
176 dev_err(&bus->dev, "Error registering device %s\n", 177 dev_err(&bus->dev, "Error registering device %s\n",
177 z->name); 178 z->name);
179 put_device(&z->dev);
178 continue; 180 continue;
179 } 181 }
180 error = zorro_create_sysfs_dev_files(z); 182 error = zorro_create_sysfs_dev_files(z);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 50e8c8582faa..b737451e2e9d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -370,7 +370,7 @@ int blkdev_fsync(struct file *filp, int datasync)
370 */ 370 */
371 mutex_unlock(&bd_inode->i_mutex); 371 mutex_unlock(&bd_inode->i_mutex);
372 372
373 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT); 373 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
374 if (error == -EOPNOTSUPP) 374 if (error == -EOPNOTSUPP)
375 error = 0; 375 error = 0;
376 376
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 64f10082f048..5e789f4a3ed0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2063,7 +2063,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2063 if (uptodate) { 2063 if (uptodate) {
2064 set_buffer_uptodate(bh); 2064 set_buffer_uptodate(bh);
2065 } else { 2065 } else {
2066 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) { 2066 if (printk_ratelimit()) {
2067 printk(KERN_WARNING "lost page write due to " 2067 printk(KERN_WARNING "lost page write due to "
2068 "I/O error on %s\n", 2068 "I/O error on %s\n",
2069 bdevname(bh->b_bdev, b)); 2069 bdevname(bh->b_bdev, b));
@@ -2200,21 +2200,10 @@ static int write_dev_supers(struct btrfs_device *device,
2200 bh->b_end_io = btrfs_end_buffer_write_sync; 2200 bh->b_end_io = btrfs_end_buffer_write_sync;
2201 } 2201 }
2202 2202
2203 if (i == last_barrier && do_barriers && device->barriers) { 2203 if (i == last_barrier && do_barriers)
2204 ret = submit_bh(WRITE_BARRIER, bh); 2204 ret = submit_bh(WRITE_FLUSH_FUA, bh);
2205 if (ret == -EOPNOTSUPP) { 2205 else
2206 printk("btrfs: disabling barriers on dev %s\n",
2207 device->name);
2208 set_buffer_uptodate(bh);
2209 device->barriers = 0;
2210 /* one reference for submit_bh */
2211 get_bh(bh);
2212 lock_buffer(bh);
2213 ret = submit_bh(WRITE_SYNC, bh);
2214 }
2215 } else {
2216 ret = submit_bh(WRITE_SYNC, bh); 2206 ret = submit_bh(WRITE_SYNC, bh);
2217 }
2218 2207
2219 if (ret) 2208 if (ret)
2220 errors++; 2209 errors++;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 32d094002a57..0b81ecdb101c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1695,8 +1695,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
1695static void btrfs_issue_discard(struct block_device *bdev, 1695static void btrfs_issue_discard(struct block_device *bdev,
1696 u64 start, u64 len) 1696 u64 start, u64 len)
1697{ 1697{
1698 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 1698 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
1699 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
1700} 1699}
1701 1700
1702static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 1701static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd318ff280b2..e25e46a8b4e2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -398,7 +398,6 @@ static noinline int device_list_add(const char *path,
398 device->work.func = pending_bios_fn; 398 device->work.func = pending_bios_fn;
399 memcpy(device->uuid, disk_super->dev_item.uuid, 399 memcpy(device->uuid, disk_super->dev_item.uuid,
400 BTRFS_UUID_SIZE); 400 BTRFS_UUID_SIZE);
401 device->barriers = 1;
402 spin_lock_init(&device->io_lock); 401 spin_lock_init(&device->io_lock);
403 device->name = kstrdup(path, GFP_NOFS); 402 device->name = kstrdup(path, GFP_NOFS);
404 if (!device->name) { 403 if (!device->name) {
@@ -462,7 +461,6 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
462 device->devid = orig_dev->devid; 461 device->devid = orig_dev->devid;
463 device->work.func = pending_bios_fn; 462 device->work.func = pending_bios_fn;
464 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid)); 463 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
465 device->barriers = 1;
466 spin_lock_init(&device->io_lock); 464 spin_lock_init(&device->io_lock);
467 INIT_LIST_HEAD(&device->dev_list); 465 INIT_LIST_HEAD(&device->dev_list);
468 INIT_LIST_HEAD(&device->dev_alloc_list); 466 INIT_LIST_HEAD(&device->dev_alloc_list);
@@ -1489,7 +1487,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1489 trans = btrfs_start_transaction(root, 0); 1487 trans = btrfs_start_transaction(root, 0);
1490 lock_chunks(root); 1488 lock_chunks(root);
1491 1489
1492 device->barriers = 1;
1493 device->writeable = 1; 1490 device->writeable = 1;
1494 device->work.func = pending_bios_fn; 1491 device->work.func = pending_bios_fn;
1495 generate_random_uuid(device->uuid); 1492 generate_random_uuid(device->uuid);
@@ -3084,7 +3081,6 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3084 return NULL; 3081 return NULL;
3085 list_add(&device->dev_list, 3082 list_add(&device->dev_list,
3086 &fs_devices->devices); 3083 &fs_devices->devices);
3087 device->barriers = 1;
3088 device->dev_root = root->fs_info->dev_root; 3084 device->dev_root = root->fs_info->dev_root;
3089 device->devid = devid; 3085 device->devid = devid;
3090 device->work.func = pending_bios_fn; 3086 device->work.func = pending_bios_fn;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 31b0fabdd2ea..2b638b6e4eea 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -42,7 +42,6 @@ struct btrfs_device {
42 int running_pending; 42 int running_pending;
43 u64 generation; 43 u64 generation;
44 44
45 int barriers;
46 int writeable; 45 int writeable;
47 int in_fs_metadata; 46 int in_fs_metadata;
48 47
diff --git a/fs/buffer.c b/fs/buffer.c
index 3e7dca279d1c..7f0b9b083f77 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -156,7 +156,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
156 if (uptodate) { 156 if (uptodate) {
157 set_buffer_uptodate(bh); 157 set_buffer_uptodate(bh);
158 } else { 158 } else {
159 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) { 159 if (!quiet_error(bh)) {
160 buffer_io_error(bh); 160 buffer_io_error(bh);
161 printk(KERN_WARNING "lost page write due to " 161 printk(KERN_WARNING "lost page write due to "
162 "I/O error on %s\n", 162 "I/O error on %s\n",
@@ -2891,7 +2891,6 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
2891 2891
2892 if (err == -EOPNOTSUPP) { 2892 if (err == -EOPNOTSUPP) {
2893 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 2893 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2894 set_bit(BH_Eopnotsupp, &bh->b_state);
2895 } 2894 }
2896 2895
2897 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) 2896 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
@@ -3031,10 +3030,6 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3031 bh->b_end_io = end_buffer_write_sync; 3030 bh->b_end_io = end_buffer_write_sync;
3032 ret = submit_bh(rw, bh); 3031 ret = submit_bh(rw, bh);
3033 wait_on_buffer(bh); 3032 wait_on_buffer(bh);
3034 if (buffer_eopnotsupp(bh)) {
3035 clear_buffer_eopnotsupp(bh);
3036 ret = -EOPNOTSUPP;
3037 }
3038 if (!ret && !buffer_uptodate(bh)) 3033 if (!ret && !buffer_uptodate(bh))
3039 ret = -EIO; 3034 ret = -EIO;
3040 } else { 3035 } else {
diff --git a/fs/cifs/README b/fs/cifs/README
index 7099a526f775..ee68d1036544 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -527,6 +527,11 @@ A partial list of the supported mount options follows:
527 SFU does). In the future the bottom 9 bits of the 527 SFU does). In the future the bottom 9 bits of the
528 mode also will be emulated using queries of the security 528 mode also will be emulated using queries of the security
529 descriptor (ACL). 529 descriptor (ACL).
530 mfsymlinks Enable support for Minshall+French symlinks
531 (see http://wiki.samba.org/index.php/UNIX_Extensions#Minshall.2BFrench_symlinks)
532 This option is ignored when specified together with the
533 'sfu' option. Minshall+French symlinks are used even if
534 the server supports the CIFS Unix Extensions.
530 sign Must use packet signing (helps avoid unwanted data modification 535 sign Must use packet signing (helps avoid unwanted data modification
531 by intermediate systems in the route). Note that signing 536 by intermediate systems in the route). Note that signing
532 does not work with lanman or plaintext authentication. 537 does not work with lanman or plaintext authentication.
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index eb1ba493489f..103ab8b605b0 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -148,7 +148,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
148 seq_printf(m, "Servers:"); 148 seq_printf(m, "Servers:");
149 149
150 i = 0; 150 i = 0;
151 read_lock(&cifs_tcp_ses_lock); 151 spin_lock(&cifs_tcp_ses_lock);
152 list_for_each(tmp1, &cifs_tcp_ses_list) { 152 list_for_each(tmp1, &cifs_tcp_ses_list) {
153 server = list_entry(tmp1, struct TCP_Server_Info, 153 server = list_entry(tmp1, struct TCP_Server_Info,
154 tcp_ses_list); 154 tcp_ses_list);
@@ -230,7 +230,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
230 spin_unlock(&GlobalMid_Lock); 230 spin_unlock(&GlobalMid_Lock);
231 } 231 }
232 } 232 }
233 read_unlock(&cifs_tcp_ses_lock); 233 spin_unlock(&cifs_tcp_ses_lock);
234 seq_putc(m, '\n'); 234 seq_putc(m, '\n');
235 235
236 /* BB add code to dump additional info such as TCP session info now */ 236 /* BB add code to dump additional info such as TCP session info now */
@@ -270,7 +270,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
270 atomic_set(&totBufAllocCount, 0); 270 atomic_set(&totBufAllocCount, 0);
271 atomic_set(&totSmBufAllocCount, 0); 271 atomic_set(&totSmBufAllocCount, 0);
272#endif /* CONFIG_CIFS_STATS2 */ 272#endif /* CONFIG_CIFS_STATS2 */
273 read_lock(&cifs_tcp_ses_lock); 273 spin_lock(&cifs_tcp_ses_lock);
274 list_for_each(tmp1, &cifs_tcp_ses_list) { 274 list_for_each(tmp1, &cifs_tcp_ses_list) {
275 server = list_entry(tmp1, struct TCP_Server_Info, 275 server = list_entry(tmp1, struct TCP_Server_Info,
276 tcp_ses_list); 276 tcp_ses_list);
@@ -303,7 +303,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
303 } 303 }
304 } 304 }
305 } 305 }
306 read_unlock(&cifs_tcp_ses_lock); 306 spin_unlock(&cifs_tcp_ses_lock);
307 } 307 }
308 308
309 return count; 309 return count;
@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
343 GlobalCurrentXid, GlobalMaxActiveXid); 343 GlobalCurrentXid, GlobalMaxActiveXid);
344 344
345 i = 0; 345 i = 0;
346 read_lock(&cifs_tcp_ses_lock); 346 spin_lock(&cifs_tcp_ses_lock);
347 list_for_each(tmp1, &cifs_tcp_ses_list) { 347 list_for_each(tmp1, &cifs_tcp_ses_list) {
348 server = list_entry(tmp1, struct TCP_Server_Info, 348 server = list_entry(tmp1, struct TCP_Server_Info,
349 tcp_ses_list); 349 tcp_ses_list);
@@ -397,7 +397,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
397 } 397 }
398 } 398 }
399 } 399 }
400 read_unlock(&cifs_tcp_ses_lock); 400 spin_unlock(&cifs_tcp_ses_lock);
401 401
402 seq_putc(m, '\n'); 402 seq_putc(m, '\n');
403 return 0; 403 return 0;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index aa316891ac0c..8942b28cf807 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -34,7 +34,7 @@ void cifs_dump_mids(struct TCP_Server_Info *);
34extern int traceSMB; /* flag which enables the function below */ 34extern int traceSMB; /* flag which enables the function below */
35void dump_smb(struct smb_hdr *, int); 35void dump_smb(struct smb_hdr *, int);
36#define CIFS_INFO 0x01 36#define CIFS_INFO 0x01
37#define CIFS_RC 0x02 37#define CIFS_RC 0x02
38#define CIFS_TIMER 0x04 38#define CIFS_TIMER 0x04
39 39
40/* 40/*
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index d6ced7aa23cf..c68a056f27fd 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -44,8 +44,7 @@ static void cifs_dfs_expire_automounts(struct work_struct *work)
44void cifs_dfs_release_automount_timer(void) 44void cifs_dfs_release_automount_timer(void)
45{ 45{
46 BUG_ON(!list_empty(&cifs_dfs_automount_list)); 46 BUG_ON(!list_empty(&cifs_dfs_automount_list));
47 cancel_delayed_work(&cifs_dfs_automount_task); 47 cancel_delayed_work_sync(&cifs_dfs_automount_task);
48 flush_scheduled_work();
49} 48}
50 49
51/** 50/**
@@ -306,6 +305,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
306 int xid, i; 305 int xid, i;
307 int rc = 0; 306 int rc = 0;
308 struct vfsmount *mnt = ERR_PTR(-ENOENT); 307 struct vfsmount *mnt = ERR_PTR(-ENOENT);
308 struct tcon_link *tlink;
309 309
310 cFYI(1, "in %s", __func__); 310 cFYI(1, "in %s", __func__);
311 BUG_ON(IS_ROOT(dentry)); 311 BUG_ON(IS_ROOT(dentry));
@@ -315,14 +315,6 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
315 dput(nd->path.dentry); 315 dput(nd->path.dentry);
316 nd->path.dentry = dget(dentry); 316 nd->path.dentry = dget(dentry);
317 317
318 cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
319 ses = cifs_sb->tcon->ses;
320
321 if (!ses) {
322 rc = -EINVAL;
323 goto out_err;
324 }
325
326 /* 318 /*
327 * The MSDFS spec states that paths in DFS referral requests and 319 * The MSDFS spec states that paths in DFS referral requests and
328 * responses must be prefixed by a single '\' character instead of 320 * responses must be prefixed by a single '\' character instead of
@@ -335,10 +327,20 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
335 goto out_err; 327 goto out_err;
336 } 328 }
337 329
338 rc = get_dfs_path(xid, ses , full_path + 1, cifs_sb->local_nls, 330 cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
331 tlink = cifs_sb_tlink(cifs_sb);
332 if (IS_ERR(tlink)) {
333 rc = PTR_ERR(tlink);
334 goto out_err;
335 }
336 ses = tlink_tcon(tlink)->ses;
337
338 rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
339 &num_referrals, &referrals, 339 &num_referrals, &referrals,
340 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 340 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
341 341
342 cifs_put_tlink(tlink);
343
342 for (i = 0; i < num_referrals; i++) { 344 for (i = 0; i < num_referrals; i++) {
343 int len; 345 int len;
344 dump_referral(referrals+i); 346 dump_referral(referrals+i);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9e771450c3b8..525ba59a4105 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,6 +15,8 @@
15 * the GNU Lesser General Public License for more details. 15 * the GNU Lesser General Public License for more details.
16 * 16 *
17 */ 17 */
18#include <linux/radix-tree.h>
19
18#ifndef _CIFS_FS_SB_H 20#ifndef _CIFS_FS_SB_H
19#define _CIFS_FS_SB_H 21#define _CIFS_FS_SB_H
20 22
@@ -36,23 +38,28 @@
36#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */ 38#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */
37#define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/ 39#define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/
38#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */ 40#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */
41#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
39 43
40struct cifs_sb_info { 44struct cifs_sb_info {
41 struct cifsTconInfo *tcon; /* primary mount */ 45 struct radix_tree_root tlink_tree;
42 struct list_head nested_tcon_q; 46#define CIFS_TLINK_MASTER_TAG 0 /* is "master" (mount) tcon */
47 spinlock_t tlink_tree_lock;
43 struct nls_table *local_nls; 48 struct nls_table *local_nls;
44 unsigned int rsize; 49 unsigned int rsize;
45 unsigned int wsize; 50 unsigned int wsize;
51 atomic_t active;
46 uid_t mnt_uid; 52 uid_t mnt_uid;
47 gid_t mnt_gid; 53 gid_t mnt_gid;
48 mode_t mnt_file_mode; 54 mode_t mnt_file_mode;
49 mode_t mnt_dir_mode; 55 mode_t mnt_dir_mode;
50 int mnt_cifs_flags; 56 unsigned int mnt_cifs_flags;
51 int prepathlen; 57 int prepathlen;
52 char *prepath; /* relative path under the share to mount to */ 58 char *prepath; /* relative path under the share to mount to */
53#ifdef CONFIG_CIFS_DFS_UPCALL 59#ifdef CONFIG_CIFS_DFS_UPCALL
54 char *mountdata; /* mount options received at mount time */ 60 char *mountdata; /* mount options received at mount time */
55#endif 61#endif
56 struct backing_dev_info bdi; 62 struct backing_dev_info bdi;
63 struct delayed_work prune_tlinks;
57}; 64};
58#endif /* _CIFS_FS_SB_H */ 65#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 85d7cf7ff2c8..c9b4792ae825 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -557,11 +557,16 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
557{ 557{
558 struct cifs_ntsd *pntsd = NULL; 558 struct cifs_ntsd *pntsd = NULL;
559 int xid, rc; 559 int xid, rc;
560 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
561
562 if (IS_ERR(tlink))
563 return NULL;
560 564
561 xid = GetXid(); 565 xid = GetXid();
562 rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen); 566 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
563 FreeXid(xid); 567 FreeXid(xid);
564 568
569 cifs_put_tlink(tlink);
565 570
566 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen); 571 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
567 return pntsd; 572 return pntsd;
@@ -574,10 +579,16 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
574 int oplock = 0; 579 int oplock = 0;
575 int xid, rc; 580 int xid, rc;
576 __u16 fid; 581 __u16 fid;
582 struct cifsTconInfo *tcon;
583 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
584
585 if (IS_ERR(tlink))
586 return NULL;
577 587
588 tcon = tlink_tcon(tlink);
578 xid = GetXid(); 589 xid = GetXid();
579 590
580 rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, READ_CONTROL, 0, 591 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
581 &fid, &oplock, NULL, cifs_sb->local_nls, 592 &fid, &oplock, NULL, cifs_sb->local_nls,
582 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 593 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
583 if (rc) { 594 if (rc) {
@@ -585,11 +596,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
585 goto out; 596 goto out;
586 } 597 }
587 598
588 rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen); 599 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
589 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen); 600 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
590 601
591 CIFSSMBClose(xid, cifs_sb->tcon, fid); 602 CIFSSMBClose(xid, tcon, fid);
592 out: 603 out:
604 cifs_put_tlink(tlink);
593 FreeXid(xid); 605 FreeXid(xid);
594 return pntsd; 606 return pntsd;
595} 607}
@@ -603,7 +615,7 @@ static struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
603 struct cifsFileInfo *open_file = NULL; 615 struct cifsFileInfo *open_file = NULL;
604 616
605 if (inode) 617 if (inode)
606 open_file = find_readable_file(CIFS_I(inode)); 618 open_file = find_readable_file(CIFS_I(inode), true);
607 if (!open_file) 619 if (!open_file)
608 return get_cifs_acl_by_path(cifs_sb, path, pacllen); 620 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
609 621
@@ -616,10 +628,15 @@ static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
616 struct cifs_ntsd *pnntsd, u32 acllen) 628 struct cifs_ntsd *pnntsd, u32 acllen)
617{ 629{
618 int xid, rc; 630 int xid, rc;
631 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
632
633 if (IS_ERR(tlink))
634 return PTR_ERR(tlink);
619 635
620 xid = GetXid(); 636 xid = GetXid();
621 rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); 637 rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
622 FreeXid(xid); 638 FreeXid(xid);
639 cifs_put_tlink(tlink);
623 640
624 cFYI(DBG2, "SetCIFSACL rc = %d", rc); 641 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
625 return rc; 642 return rc;
@@ -631,10 +648,16 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
631 int oplock = 0; 648 int oplock = 0;
632 int xid, rc; 649 int xid, rc;
633 __u16 fid; 650 __u16 fid;
651 struct cifsTconInfo *tcon;
652 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
634 653
654 if (IS_ERR(tlink))
655 return PTR_ERR(tlink);
656
657 tcon = tlink_tcon(tlink);
635 xid = GetXid(); 658 xid = GetXid();
636 659
637 rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, WRITE_DAC, 0, 660 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
638 &fid, &oplock, NULL, cifs_sb->local_nls, 661 &fid, &oplock, NULL, cifs_sb->local_nls,
639 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 662 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
640 if (rc) { 663 if (rc) {
@@ -642,12 +665,13 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
642 goto out; 665 goto out;
643 } 666 }
644 667
645 rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); 668 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
646 cFYI(DBG2, "SetCIFSACL rc = %d", rc); 669 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
647 670
648 CIFSSMBClose(xid, cifs_sb->tcon, fid); 671 CIFSSMBClose(xid, tcon, fid);
649 out: 672out:
650 FreeXid(xid); 673 FreeXid(xid);
674 cifs_put_tlink(tlink);
651 return rc; 675 return rc;
652} 676}
653 677
@@ -661,7 +685,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
661 685
662 cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); 686 cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
663 687
664 open_file = find_readable_file(CIFS_I(inode)); 688 open_file = find_readable_file(CIFS_I(inode), true);
665 if (!open_file) 689 if (!open_file)
666 return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen); 690 return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
667 691
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 35042d8f7338..7ac0056294cf 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,6 +27,7 @@
27#include "md5.h" 27#include "md5.h"
28#include "cifs_unicode.h" 28#include "cifs_unicode.h"
29#include "cifsproto.h" 29#include "cifsproto.h"
30#include "ntlmssp.h"
30#include <linux/ctype.h> 31#include <linux/ctype.h>
31#include <linux/random.h> 32#include <linux/random.h>
32 33
@@ -42,7 +43,7 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
42 unsigned char *p24); 43 unsigned char *p24);
43 44
44static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, 45static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
45 const struct mac_key *key, char *signature) 46 const struct session_key *key, char *signature)
46{ 47{
47 struct MD5Context context; 48 struct MD5Context context;
48 49
@@ -78,7 +79,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
78 server->sequence_number++; 79 server->sequence_number++;
79 spin_unlock(&GlobalMid_Lock); 80 spin_unlock(&GlobalMid_Lock);
80 81
81 rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key, 82 rc = cifs_calculate_signature(cifs_pdu, &server->session_key,
82 smb_signature); 83 smb_signature);
83 if (rc) 84 if (rc)
84 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 85 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
@@ -89,7 +90,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
89} 90}
90 91
91static int cifs_calc_signature2(const struct kvec *iov, int n_vec, 92static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
92 const struct mac_key *key, char *signature) 93 const struct session_key *key, char *signature)
93{ 94{
94 struct MD5Context context; 95 struct MD5Context context;
95 int i; 96 int i;
@@ -145,7 +146,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
145 server->sequence_number++; 146 server->sequence_number++;
146 spin_unlock(&GlobalMid_Lock); 147 spin_unlock(&GlobalMid_Lock);
147 148
148 rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key, 149 rc = cifs_calc_signature2(iov, n_vec, &server->session_key,
149 smb_signature); 150 smb_signature);
150 if (rc) 151 if (rc)
151 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 152 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
@@ -156,14 +157,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
156} 157}
157 158
158int cifs_verify_signature(struct smb_hdr *cifs_pdu, 159int cifs_verify_signature(struct smb_hdr *cifs_pdu,
159 const struct mac_key *mac_key, 160 const struct session_key *session_key,
160 __u32 expected_sequence_number) 161 __u32 expected_sequence_number)
161{ 162{
162 unsigned int rc; 163 unsigned int rc;
163 char server_response_sig[8]; 164 char server_response_sig[8];
164 char what_we_think_sig_should_be[20]; 165 char what_we_think_sig_should_be[20];
165 166
166 if ((cifs_pdu == NULL) || (mac_key == NULL)) 167 if (cifs_pdu == NULL || session_key == NULL)
167 return -EINVAL; 168 return -EINVAL;
168 169
169 if (cifs_pdu->Command == SMB_COM_NEGOTIATE) 170 if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -192,7 +193,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
192 cpu_to_le32(expected_sequence_number); 193 cpu_to_le32(expected_sequence_number);
193 cifs_pdu->Signature.Sequence.Reserved = 0; 194 cifs_pdu->Signature.Sequence.Reserved = 0;
194 195
195 rc = cifs_calculate_signature(cifs_pdu, mac_key, 196 rc = cifs_calculate_signature(cifs_pdu, session_key,
196 what_we_think_sig_should_be); 197 what_we_think_sig_should_be);
197 198
198 if (rc) 199 if (rc)
@@ -209,7 +210,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
209} 210}
210 211
211/* We fill in key by putting in 40 byte array which was allocated by caller */ 212/* We fill in key by putting in 40 byte array which was allocated by caller */
212int cifs_calculate_mac_key(struct mac_key *key, const char *rn, 213int cifs_calculate_session_key(struct session_key *key, const char *rn,
213 const char *password) 214 const char *password)
214{ 215{
215 char temp_key[16]; 216 char temp_key[16];
@@ -262,6 +263,148 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
262} 263}
263#endif /* CIFS_WEAK_PW_HASH */ 264#endif /* CIFS_WEAK_PW_HASH */
264 265
266/* Build a proper attribute value/target info pairs blob.
267 * Fill in netbios and dns domain name and workstation name
268 * and client time (total five av pairs and + one end of fields indicator.
269 * Allocate domain name which gets freed when session struct is deallocated.
270 */
271static int
272build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
273{
274 unsigned int dlen;
275 unsigned int wlen;
276 unsigned int size = 6 * sizeof(struct ntlmssp2_name);
277 __le64 curtime;
278 char *defdmname = "WORKGROUP";
279 unsigned char *blobptr;
280 struct ntlmssp2_name *attrptr;
281
282 if (!ses->domainName) {
283 ses->domainName = kstrdup(defdmname, GFP_KERNEL);
284 if (!ses->domainName)
285 return -ENOMEM;
286 }
287
288 dlen = strlen(ses->domainName);
289 wlen = strlen(ses->server->hostname);
290
291 /* The length of this blob is a size which is
292 * six times the size of a structure which holds name/size +
293 * two times the unicode length of a domain name +
294 * two times the unicode length of a server name +
295 * size of a timestamp (which is 8 bytes).
296 */
297 ses->tilen = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
298 ses->tiblob = kzalloc(ses->tilen, GFP_KERNEL);
299 if (!ses->tiblob) {
300 ses->tilen = 0;
301 cERROR(1, "Challenge target info allocation failure");
302 return -ENOMEM;
303 }
304
305 blobptr = ses->tiblob;
306 attrptr = (struct ntlmssp2_name *) blobptr;
307
308 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
309 attrptr->length = cpu_to_le16(2 * dlen);
310 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
311 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
312
313 blobptr += 2 * dlen;
314 attrptr = (struct ntlmssp2_name *) blobptr;
315
316 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
317 attrptr->length = cpu_to_le16(2 * wlen);
318 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
319 cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
320
321 blobptr += 2 * wlen;
322 attrptr = (struct ntlmssp2_name *) blobptr;
323
324 attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
325 attrptr->length = cpu_to_le16(2 * dlen);
326 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
327 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
328
329 blobptr += 2 * dlen;
330 attrptr = (struct ntlmssp2_name *) blobptr;
331
332 attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
333 attrptr->length = cpu_to_le16(2 * wlen);
334 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
335 cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
336
337 blobptr += 2 * wlen;
338 attrptr = (struct ntlmssp2_name *) blobptr;
339
340 attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
341 attrptr->length = cpu_to_le16(sizeof(__le64));
342 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
343 curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
344 memcpy(blobptr, &curtime, sizeof(__le64));
345
346 return 0;
347}
348
349/* Server has provided av pairs/target info in the type 2 challenge
350 * packet and we have plucked it and stored within smb session.
351 * We parse that blob here to find netbios domain name to be used
352 * as part of ntlmv2 authentication (in Target String), if not already
353 * specified on the command line.
354 * If this function returns without any error but without fetching
355 * domain name, authentication may fail against some server but
356 * may not fail against other (those who are not very particular
357 * about target string i.e. for some, just user name might suffice.
358 */
359static int
360find_domain_name(struct cifsSesInfo *ses)
361{
362 unsigned int attrsize;
363 unsigned int type;
364 unsigned int onesize = sizeof(struct ntlmssp2_name);
365 unsigned char *blobptr;
366 unsigned char *blobend;
367 struct ntlmssp2_name *attrptr;
368
369 if (!ses->tilen || !ses->tiblob)
370 return 0;
371
372 blobptr = ses->tiblob;
373 blobend = ses->tiblob + ses->tilen;
374
375 while (blobptr + onesize < blobend) {
376 attrptr = (struct ntlmssp2_name *) blobptr;
377 type = le16_to_cpu(attrptr->type);
378 if (type == NTLMSSP_AV_EOL)
379 break;
380 blobptr += 2; /* advance attr type */
381 attrsize = le16_to_cpu(attrptr->length);
382 blobptr += 2; /* advance attr size */
383 if (blobptr + attrsize > blobend)
384 break;
385 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
386 if (!attrsize)
387 break;
388 if (!ses->domainName) {
389 struct nls_table *default_nls;
390 ses->domainName =
391 kmalloc(attrsize + 1, GFP_KERNEL);
392 if (!ses->domainName)
393 return -ENOMEM;
394 default_nls = load_nls_default();
395 cifs_from_ucs2(ses->domainName,
396 (__le16 *)blobptr, attrsize, attrsize,
397 default_nls, false);
398 unload_nls(default_nls);
399 break;
400 }
401 }
402 blobptr += attrsize; /* advance attr value */
403 }
404
405 return 0;
406}
407
265static int calc_ntlmv2_hash(struct cifsSesInfo *ses, 408static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
266 const struct nls_table *nls_cp) 409 const struct nls_table *nls_cp)
267{ 410{
@@ -315,13 +458,14 @@ calc_exit_1:
315calc_exit_2: 458calc_exit_2:
316 /* BB FIXME what about bytes 24 through 40 of the signing key? 459 /* BB FIXME what about bytes 24 through 40 of the signing key?
317 compare with the NTLM example */ 460 compare with the NTLM example */
318 hmac_md5_final(ses->server->ntlmv2_hash, pctxt); 461 hmac_md5_final(ses->ntlmv2_hash, pctxt);
319 462
320 kfree(pctxt); 463 kfree(pctxt);
321 return rc; 464 return rc;
322} 465}
323 466
324void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, 467int
468setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
325 const struct nls_table *nls_cp) 469 const struct nls_table *nls_cp)
326{ 470{
327 int rc; 471 int rc;
@@ -333,25 +477,48 @@ void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
333 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 477 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
334 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); 478 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
335 buf->reserved2 = 0; 479 buf->reserved2 = 0;
336 buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE); 480
337 buf->names[0].length = 0; 481 if (ses->server->secType == RawNTLMSSP) {
338 buf->names[1].type = 0; 482 if (!ses->domainName) {
339 buf->names[1].length = 0; 483 rc = find_domain_name(ses);
484 if (rc) {
485 cERROR(1, "error %d finding domain name", rc);
486 goto setup_ntlmv2_rsp_ret;
487 }
488 }
489 } else {
490 rc = build_avpair_blob(ses, nls_cp);
491 if (rc) {
492 cERROR(1, "error %d building av pair blob", rc);
493 return rc;
494 }
495 }
340 496
341 /* calculate buf->ntlmv2_hash */ 497 /* calculate buf->ntlmv2_hash */
342 rc = calc_ntlmv2_hash(ses, nls_cp); 498 rc = calc_ntlmv2_hash(ses, nls_cp);
343 if (rc) 499 if (rc) {
344 cERROR(1, "could not get v2 hash rc %d", rc); 500 cERROR(1, "could not get v2 hash rc %d", rc);
501 goto setup_ntlmv2_rsp_ret;
502 }
345 CalcNTLMv2_response(ses, resp_buf); 503 CalcNTLMv2_response(ses, resp_buf);
346 504
347 /* now calculate the MAC key for NTLMv2 */ 505 /* now calculate the session key for NTLMv2 */
348 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); 506 hmac_md5_init_limK_to_64(ses->ntlmv2_hash, 16, &context);
349 hmac_md5_update(resp_buf, 16, &context); 507 hmac_md5_update(resp_buf, 16, &context);
350 hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context); 508 hmac_md5_final(ses->auth_key.data.ntlmv2.key, &context);
351 509
352 memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf, 510 memcpy(&ses->auth_key.data.ntlmv2.resp, resp_buf,
353 sizeof(struct ntlmv2_resp)); 511 sizeof(struct ntlmv2_resp));
354 ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp); 512 ses->auth_key.len = 16 + sizeof(struct ntlmv2_resp);
513
514 return 0;
515
516setup_ntlmv2_rsp_ret:
517 kfree(ses->tiblob);
518 ses->tiblob = NULL;
519 ses->tilen = 0;
520
521 return rc;
355} 522}
356 523
357void CalcNTLMv2_response(const struct cifsSesInfo *ses, 524void CalcNTLMv2_response(const struct cifsSesInfo *ses,
@@ -359,12 +526,15 @@ void CalcNTLMv2_response(const struct cifsSesInfo *ses,
359{ 526{
360 struct HMACMD5Context context; 527 struct HMACMD5Context context;
361 /* rest of v2 struct already generated */ 528 /* rest of v2 struct already generated */
362 memcpy(v2_session_response + 8, ses->server->cryptKey, 8); 529 memcpy(v2_session_response + 8, ses->cryptKey, 8);
363 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); 530 hmac_md5_init_limK_to_64(ses->ntlmv2_hash, 16, &context);
364 531
365 hmac_md5_update(v2_session_response+8, 532 hmac_md5_update(v2_session_response+8,
366 sizeof(struct ntlmv2_resp) - 8, &context); 533 sizeof(struct ntlmv2_resp) - 8, &context);
367 534
535 if (ses->tilen)
536 hmac_md5_update(ses->tiblob, ses->tilen, &context);
537
368 hmac_md5_final(v2_session_response, &context); 538 hmac_md5_final(v2_session_response, &context);
369/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */ 539/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
370} 540}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 50208c15309a..34371637f210 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/kthread.h> 36#include <linux/kthread.h>
37#include <linux/freezer.h> 37#include <linux/freezer.h>
38#include <net/ipv6.h>
38#include "cifsfs.h" 39#include "cifsfs.h"
39#include "cifspdu.h" 40#include "cifspdu.h"
40#define DECLARE_GLOBALS_HERE 41#define DECLARE_GLOBALS_HERE
@@ -81,6 +82,24 @@ extern mempool_t *cifs_sm_req_poolp;
81extern mempool_t *cifs_req_poolp; 82extern mempool_t *cifs_req_poolp;
82extern mempool_t *cifs_mid_poolp; 83extern mempool_t *cifs_mid_poolp;
83 84
85void
86cifs_sb_active(struct super_block *sb)
87{
88 struct cifs_sb_info *server = CIFS_SB(sb);
89
90 if (atomic_inc_return(&server->active) == 1)
91 atomic_inc(&sb->s_active);
92}
93
94void
95cifs_sb_deactive(struct super_block *sb)
96{
97 struct cifs_sb_info *server = CIFS_SB(sb);
98
99 if (atomic_dec_and_test(&server->active))
100 deactivate_super(sb);
101}
102
84static int 103static int
85cifs_read_super(struct super_block *sb, void *data, 104cifs_read_super(struct super_block *sb, void *data,
86 const char *devname, int silent) 105 const char *devname, int silent)
@@ -96,6 +115,9 @@ cifs_read_super(struct super_block *sb, void *data,
96 if (cifs_sb == NULL) 115 if (cifs_sb == NULL)
97 return -ENOMEM; 116 return -ENOMEM;
98 117
118 spin_lock_init(&cifs_sb->tlink_tree_lock);
119 INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
120
99 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 121 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
100 if (rc) { 122 if (rc) {
101 kfree(cifs_sb); 123 kfree(cifs_sb);
@@ -135,9 +157,6 @@ cifs_read_super(struct super_block *sb, void *data,
135 sb->s_magic = CIFS_MAGIC_NUMBER; 157 sb->s_magic = CIFS_MAGIC_NUMBER;
136 sb->s_op = &cifs_super_ops; 158 sb->s_op = &cifs_super_ops;
137 sb->s_bdi = &cifs_sb->bdi; 159 sb->s_bdi = &cifs_sb->bdi;
138/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
139 sb->s_blocksize =
140 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
141 sb->s_blocksize = CIFS_MAX_MSGSIZE; 160 sb->s_blocksize = CIFS_MAX_MSGSIZE;
142 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ 161 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
143 inode = cifs_root_iget(sb, ROOT_I); 162 inode = cifs_root_iget(sb, ROOT_I);
@@ -219,7 +238,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
219{ 238{
220 struct super_block *sb = dentry->d_sb; 239 struct super_block *sb = dentry->d_sb;
221 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 240 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
222 struct cifsTconInfo *tcon = cifs_sb->tcon; 241 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
223 int rc = -EOPNOTSUPP; 242 int rc = -EOPNOTSUPP;
224 int xid; 243 int xid;
225 244
@@ -361,14 +380,36 @@ static int
361cifs_show_options(struct seq_file *s, struct vfsmount *m) 380cifs_show_options(struct seq_file *s, struct vfsmount *m)
362{ 381{
363 struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb); 382 struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
364 struct cifsTconInfo *tcon = cifs_sb->tcon; 383 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
384 struct sockaddr *srcaddr;
385 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
365 386
366 seq_printf(s, ",unc=%s", tcon->treeName); 387 seq_printf(s, ",unc=%s", tcon->treeName);
367 if (tcon->ses->userName) 388
389 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
390 seq_printf(s, ",multiuser");
391 else if (tcon->ses->userName)
368 seq_printf(s, ",username=%s", tcon->ses->userName); 392 seq_printf(s, ",username=%s", tcon->ses->userName);
393
369 if (tcon->ses->domainName) 394 if (tcon->ses->domainName)
370 seq_printf(s, ",domain=%s", tcon->ses->domainName); 395 seq_printf(s, ",domain=%s", tcon->ses->domainName);
371 396
397 if (srcaddr->sa_family != AF_UNSPEC) {
398 struct sockaddr_in *saddr4;
399 struct sockaddr_in6 *saddr6;
400 saddr4 = (struct sockaddr_in *)srcaddr;
401 saddr6 = (struct sockaddr_in6 *)srcaddr;
402 if (srcaddr->sa_family == AF_INET6)
403 seq_printf(s, ",srcaddr=%pI6c",
404 &saddr6->sin6_addr);
405 else if (srcaddr->sa_family == AF_INET)
406 seq_printf(s, ",srcaddr=%pI4",
407 &saddr4->sin_addr.s_addr);
408 else
409 seq_printf(s, ",srcaddr=BAD-AF:%i",
410 (int)(srcaddr->sa_family));
411 }
412
372 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); 413 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
373 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 414 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
374 seq_printf(s, ",forceuid"); 415 seq_printf(s, ",forceuid");
@@ -417,6 +458,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
417 seq_printf(s, ",dynperm"); 458 seq_printf(s, ",dynperm");
418 if (m->mnt_sb->s_flags & MS_POSIXACL) 459 if (m->mnt_sb->s_flags & MS_POSIXACL)
419 seq_printf(s, ",acl"); 460 seq_printf(s, ",acl");
461 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
462 seq_printf(s, ",mfsymlinks");
420 463
421 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 464 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
422 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 465 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
@@ -432,20 +475,18 @@ static void cifs_umount_begin(struct super_block *sb)
432 if (cifs_sb == NULL) 475 if (cifs_sb == NULL)
433 return; 476 return;
434 477
435 tcon = cifs_sb->tcon; 478 tcon = cifs_sb_master_tcon(cifs_sb);
436 if (tcon == NULL)
437 return;
438 479
439 read_lock(&cifs_tcp_ses_lock); 480 spin_lock(&cifs_tcp_ses_lock);
440 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) { 481 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
441 /* we have other mounts to same share or we have 482 /* we have other mounts to same share or we have
442 already tried to force umount this and woken up 483 already tried to force umount this and woken up
443 all waiting network requests, nothing to do */ 484 all waiting network requests, nothing to do */
444 read_unlock(&cifs_tcp_ses_lock); 485 spin_unlock(&cifs_tcp_ses_lock);
445 return; 486 return;
446 } else if (tcon->tc_count == 1) 487 } else if (tcon->tc_count == 1)
447 tcon->tidStatus = CifsExiting; 488 tcon->tidStatus = CifsExiting;
448 read_unlock(&cifs_tcp_ses_lock); 489 spin_unlock(&cifs_tcp_ses_lock);
449 490
450 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ 491 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
451 /* cancel_notify_requests(tcon); */ 492 /* cancel_notify_requests(tcon); */
@@ -565,6 +606,7 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
565 /* note that this is called by vfs setlease with lock_flocks held 606 /* note that this is called by vfs setlease with lock_flocks held
566 to protect *lease from going away */ 607 to protect *lease from going away */
567 struct inode *inode = file->f_path.dentry->d_inode; 608 struct inode *inode = file->f_path.dentry->d_inode;
609 struct cifsFileInfo *cfile = file->private_data;
568 610
569 if (!(S_ISREG(inode->i_mode))) 611 if (!(S_ISREG(inode->i_mode)))
570 return -EINVAL; 612 return -EINVAL;
@@ -575,8 +617,8 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
575 ((arg == F_WRLCK) && 617 ((arg == F_WRLCK) &&
576 (CIFS_I(inode)->clientCanCacheAll))) 618 (CIFS_I(inode)->clientCanCacheAll)))
577 return generic_setlease(file, arg, lease); 619 return generic_setlease(file, arg, lease);
578 else if (CIFS_SB(inode->i_sb)->tcon->local_lease && 620 else if (tlink_tcon(cfile->tlink)->local_lease &&
579 !CIFS_I(inode)->clientCanCacheRead) 621 !CIFS_I(inode)->clientCanCacheRead)
580 /* If the server claims to support oplock on this 622 /* If the server claims to support oplock on this
581 file, then we still need to check oplock even 623 file, then we still need to check oplock even
582 if the local_lease mount option is set, but there 624 if the local_lease mount option is set, but there
@@ -895,8 +937,8 @@ init_cifs(void)
895 GlobalTotalActiveXid = 0; 937 GlobalTotalActiveXid = 0;
896 GlobalMaxActiveXid = 0; 938 GlobalMaxActiveXid = 0;
897 memset(Local_System_Name, 0, 15); 939 memset(Local_System_Name, 0, 15);
898 rwlock_init(&GlobalSMBSeslock); 940 spin_lock_init(&cifs_tcp_ses_lock);
899 rwlock_init(&cifs_tcp_ses_lock); 941 spin_lock_init(&cifs_file_list_lock);
900 spin_lock_init(&GlobalMid_Lock); 942 spin_lock_init(&GlobalMid_Lock);
901 943
902 if (cifs_max_pending < 2) { 944 if (cifs_max_pending < 2) {
@@ -909,11 +951,11 @@ init_cifs(void)
909 951
910 rc = cifs_fscache_register(); 952 rc = cifs_fscache_register();
911 if (rc) 953 if (rc)
912 goto out; 954 goto out_clean_proc;
913 955
914 rc = cifs_init_inodecache(); 956 rc = cifs_init_inodecache();
915 if (rc) 957 if (rc)
916 goto out_clean_proc; 958 goto out_unreg_fscache;
917 959
918 rc = cifs_init_mids(); 960 rc = cifs_init_mids();
919 if (rc) 961 if (rc)
@@ -935,19 +977,19 @@ init_cifs(void)
935 return 0; 977 return 0;
936 978
937#ifdef CONFIG_CIFS_UPCALL 979#ifdef CONFIG_CIFS_UPCALL
938 out_unregister_filesystem: 980out_unregister_filesystem:
939 unregister_filesystem(&cifs_fs_type); 981 unregister_filesystem(&cifs_fs_type);
940#endif 982#endif
941 out_destroy_request_bufs: 983out_destroy_request_bufs:
942 cifs_destroy_request_bufs(); 984 cifs_destroy_request_bufs();
943 out_destroy_mids: 985out_destroy_mids:
944 cifs_destroy_mids(); 986 cifs_destroy_mids();
945 out_destroy_inodecache: 987out_destroy_inodecache:
946 cifs_destroy_inodecache(); 988 cifs_destroy_inodecache();
947 out_clean_proc: 989out_unreg_fscache:
948 cifs_proc_clean();
949 cifs_fscache_unregister(); 990 cifs_fscache_unregister();
950 out: 991out_clean_proc:
992 cifs_proc_clean();
951 return rc; 993 return rc;
952} 994}
953 995
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d82f5fb4761e..f35795a16b42 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -42,10 +42,8 @@ extern const struct address_space_operations cifs_addr_ops;
42extern const struct address_space_operations cifs_addr_ops_smallbuf; 42extern const struct address_space_operations cifs_addr_ops_smallbuf;
43 43
44/* Functions related to super block operations */ 44/* Functions related to super block operations */
45/* extern const struct super_operations cifs_super_ops;*/ 45extern void cifs_sb_active(struct super_block *sb);
46extern void cifs_read_inode(struct inode *); 46extern void cifs_sb_deactive(struct super_block *sb);
47/*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */
48/* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */
49 47
50/* Functions related to inodes */ 48/* Functions related to inodes */
51extern const struct inode_operations cifs_dir_inode_ops; 49extern const struct inode_operations cifs_dir_inode_ops;
@@ -104,7 +102,7 @@ extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
104extern int cifs_symlink(struct inode *inode, struct dentry *direntry, 102extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
105 const char *symname); 103 const char *symname);
106extern int cifs_removexattr(struct dentry *, const char *); 104extern int cifs_removexattr(struct dentry *, const char *);
107extern int cifs_setxattr(struct dentry *, const char *, const void *, 105extern int cifs_setxattr(struct dentry *, const char *, const void *,
108 size_t, int); 106 size_t, int);
109extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t); 107extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
110extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 108extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
@@ -114,5 +112,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
114extern const struct export_operations cifs_export_ops; 112extern const struct export_operations cifs_export_ops;
115#endif /* EXPERIMENTAL */ 113#endif /* EXPERIMENTAL */
116 114
117#define CIFS_VERSION "1.65" 115#define CIFS_VERSION "1.67"
118#endif /* _CIFSFS_H */ 116#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0cdfb8c32ac6..3365e77f6f24 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -97,7 +97,7 @@ enum protocolEnum {
97 /* Netbios frames protocol not supported at this time */ 97 /* Netbios frames protocol not supported at this time */
98}; 98};
99 99
100struct mac_key { 100struct session_key {
101 unsigned int len; 101 unsigned int len;
102 union { 102 union {
103 char ntlm[CIFS_SESS_KEY_SIZE + 16]; 103 char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -139,6 +139,7 @@ struct TCP_Server_Info {
139 struct sockaddr_in sockAddr; 139 struct sockaddr_in sockAddr;
140 struct sockaddr_in6 sockAddr6; 140 struct sockaddr_in6 sockAddr6;
141 } addr; 141 } addr;
142 struct sockaddr_storage srcaddr; /* locally bind to this IP */
142 wait_queue_head_t response_q; 143 wait_queue_head_t response_q;
143 wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ 144 wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
144 struct list_head pending_mid_q; 145 struct list_head pending_mid_q;
@@ -178,12 +179,10 @@ struct TCP_Server_Info {
178 int capabilities; /* allow selective disabling of caps by smb sess */ 179 int capabilities; /* allow selective disabling of caps by smb sess */
179 int timeAdj; /* Adjust for difference in server time zone in sec */ 180 int timeAdj; /* Adjust for difference in server time zone in sec */
180 __u16 CurrentMid; /* multiplex id - rotating counter */ 181 __u16 CurrentMid; /* multiplex id - rotating counter */
181 char cryptKey[CIFS_CRYPTO_KEY_SIZE];
182 /* 16th byte of RFC1001 workstation name is always null */ 182 /* 16th byte of RFC1001 workstation name is always null */
183 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 183 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
184 __u32 sequence_number; /* needed for CIFS PDU signature */ 184 __u32 sequence_number; /* needed for CIFS PDU signature */
185 struct mac_key mac_signing_key; 185 struct session_key session_key;
186 char ntlmv2_hash[16];
187 unsigned long lstrp; /* when we got last response from this server */ 186 unsigned long lstrp; /* when we got last response from this server */
188 u16 dialect; /* dialect index that server chose */ 187 u16 dialect; /* dialect index that server chose */
189 /* extended security flavors that server supports */ 188 /* extended security flavors that server supports */
@@ -191,6 +190,7 @@ struct TCP_Server_Info {
191 bool sec_mskerberos; /* supports legacy MS Kerberos */ 190 bool sec_mskerberos; /* supports legacy MS Kerberos */
192 bool sec_kerberosu2u; /* supports U2U Kerberos */ 191 bool sec_kerberosu2u; /* supports U2U Kerberos */
193 bool sec_ntlmssp; /* supports NTLMSSP */ 192 bool sec_ntlmssp; /* supports NTLMSSP */
193 bool session_estab; /* mark when very first sess is established */
194#ifdef CONFIG_CIFS_FSCACHE 194#ifdef CONFIG_CIFS_FSCACHE
195 struct fscache_cookie *fscache; /* client index cache cookie */ 195 struct fscache_cookie *fscache; /* client index cache cookie */
196#endif 196#endif
@@ -222,6 +222,11 @@ struct cifsSesInfo {
222 char userName[MAX_USERNAME_SIZE + 1]; 222 char userName[MAX_USERNAME_SIZE + 1];
223 char *domainName; 223 char *domainName;
224 char *password; 224 char *password;
225 char cryptKey[CIFS_CRYPTO_KEY_SIZE];
226 struct session_key auth_key;
227 char ntlmv2_hash[16];
228 unsigned int tilen; /* length of the target info blob */
229 unsigned char *tiblob; /* target info blob in challenge response */
225 bool need_reconnect:1; /* connection reset, uid now invalid */ 230 bool need_reconnect:1; /* connection reset, uid now invalid */
226}; 231};
227/* no more than one of the following three session flags may be set */ 232/* no more than one of the following three session flags may be set */
@@ -308,6 +313,44 @@ struct cifsTconInfo {
308}; 313};
309 314
310/* 315/*
316 * This is a refcounted and timestamped container for a tcon pointer. The
317 * container holds a tcon reference. It is considered safe to free one of
318 * these when the tl_count goes to 0. The tl_time is the time of the last
319 * "get" on the container.
320 */
321struct tcon_link {
322 unsigned long tl_index;
323 unsigned long tl_flags;
324#define TCON_LINK_MASTER 0
325#define TCON_LINK_PENDING 1
326#define TCON_LINK_IN_TREE 2
327 unsigned long tl_time;
328 atomic_t tl_count;
329 struct cifsTconInfo *tl_tcon;
330};
331
332extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
333
334static inline struct cifsTconInfo *
335tlink_tcon(struct tcon_link *tlink)
336{
337 return tlink->tl_tcon;
338}
339
340extern void cifs_put_tlink(struct tcon_link *tlink);
341
342static inline struct tcon_link *
343cifs_get_tlink(struct tcon_link *tlink)
344{
345 if (tlink && !IS_ERR(tlink))
346 atomic_inc(&tlink->tl_count);
347 return tlink;
348}
349
350/* This function is always expected to succeed */
351extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
352
353/*
311 * This info hangs off the cifsFileInfo structure, pointed to by llist. 354 * This info hangs off the cifsFileInfo structure, pointed to by llist.
312 * This is used to track byte stream locks on the file 355 * This is used to track byte stream locks on the file
313 */ 356 */
@@ -345,12 +388,11 @@ struct cifsFileInfo {
345 __u16 netfid; /* file id from remote */ 388 __u16 netfid; /* file id from remote */
346 /* BB add lock scope info here if needed */ ; 389 /* BB add lock scope info here if needed */ ;
347 /* lock scope id (0 if none) */ 390 /* lock scope id (0 if none) */
348 struct file *pfile; /* needed for writepage */ 391 struct dentry *dentry;
349 struct inode *pInode; /* needed for oplock break */ 392 unsigned int f_flags;
350 struct vfsmount *mnt; 393 struct tcon_link *tlink;
351 struct mutex lock_mutex; 394 struct mutex lock_mutex;
352 struct list_head llist; /* list of byte range locks we have. */ 395 struct list_head llist; /* list of byte range locks we have. */
353 bool closePend:1; /* file is marked to close */
354 bool invalidHandle:1; /* file closed via session abend */ 396 bool invalidHandle:1; /* file closed via session abend */
355 bool oplock_break_cancelled:1; 397 bool oplock_break_cancelled:1;
356 atomic_t count; /* reference count */ 398 atomic_t count; /* reference count */
@@ -365,14 +407,7 @@ static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
365 atomic_inc(&cifs_file->count); 407 atomic_inc(&cifs_file->count);
366} 408}
367 409
368/* Release a reference on the file private data */ 410void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
369static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
370{
371 if (atomic_dec_and_test(&cifs_file->count)) {
372 iput(cifs_file->pInode);
373 kfree(cifs_file);
374 }
375}
376 411
377/* 412/*
378 * One of these for each file inode 413 * One of these for each file inode
@@ -474,16 +509,16 @@ struct oplock_q_entry {
474 509
475/* for pending dnotify requests */ 510/* for pending dnotify requests */
476struct dir_notify_req { 511struct dir_notify_req {
477 struct list_head lhead; 512 struct list_head lhead;
478 __le16 Pid; 513 __le16 Pid;
479 __le16 PidHigh; 514 __le16 PidHigh;
480 __u16 Mid; 515 __u16 Mid;
481 __u16 Tid; 516 __u16 Tid;
482 __u16 Uid; 517 __u16 Uid;
483 __u16 netfid; 518 __u16 netfid;
484 __u32 filter; /* CompletionFilter (for multishot) */ 519 __u32 filter; /* CompletionFilter (for multishot) */
485 int multishot; 520 int multishot;
486 struct file *pfile; 521 struct file *pfile;
487}; 522};
488 523
489struct dfs_info3_param { 524struct dfs_info3_param {
@@ -667,7 +702,7 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
667 * the reference counters for the server, smb session, and tcon. Finally, 702 * the reference counters for the server, smb session, and tcon. Finally,
668 * changes to the tcon->tidStatus should be done while holding this lock. 703 * changes to the tcon->tidStatus should be done while holding this lock.
669 */ 704 */
670GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock; 705GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
671 706
672/* 707/*
673 * This lock protects the cifs_file->llist and cifs_file->flist 708 * This lock protects the cifs_file->llist and cifs_file->flist
@@ -676,7 +711,7 @@ GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
676 * If cifs_tcp_ses_lock and the lock below are both needed to be held, then 711 * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
677 * the cifs_tcp_ses_lock must be grabbed first and released last. 712 * the cifs_tcp_ses_lock must be grabbed first and released last.
678 */ 713 */
679GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; 714GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
680 715
681/* Outstanding dir notify requests */ 716/* Outstanding dir notify requests */
682GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; 717GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 14d036d8db11..b0f4b5656d4c 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -663,7 +663,6 @@ struct ntlmv2_resp {
663 __le64 time; 663 __le64 time;
664 __u64 client_chal; /* random */ 664 __u64 client_chal; /* random */
665 __u32 reserved2; 665 __u32 reserved2;
666 struct ntlmssp2_name names[2];
667 /* array of name entries could follow ending in minimum 4 byte struct */ 666 /* array of name entries could follow ending in minimum 4 byte struct */
668} __attribute__((packed)); 667} __attribute__((packed));
669 668
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1d60c655e3e0..e593c40ba7ba 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,9 +78,9 @@ extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
78extern bool is_valid_oplock_break(struct smb_hdr *smb, 78extern bool is_valid_oplock_break(struct smb_hdr *smb,
79 struct TCP_Server_Info *); 79 struct TCP_Server_Info *);
80extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); 80extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
81extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *); 81extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
82#ifdef CONFIG_CIFS_EXPERIMENTAL 82#ifdef CONFIG_CIFS_EXPERIMENTAL
83extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *); 83extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
84#endif 84#endif
85extern unsigned int smbCalcSize(struct smb_hdr *ptr); 85extern unsigned int smbCalcSize(struct smb_hdr *ptr);
86extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); 86extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
@@ -105,12 +105,12 @@ extern u64 cifs_UnixTimeToNT(struct timespec);
105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
106 int offset); 106 int offset);
107 107
108extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, 108extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
109 __u16 fileHandle, struct file *file, 109 struct file *file, struct tcon_link *tlink,
110 struct vfsmount *mnt, unsigned int oflags); 110 __u32 oplock);
111extern int cifs_posix_open(char *full_path, struct inode **pinode, 111extern int cifs_posix_open(char *full_path, struct inode **pinode,
112 struct super_block *sb, 112 struct super_block *sb,
113 int mode, int oflags, 113 int mode, unsigned int f_flags,
114 __u32 *poplock, __u16 *pnetfid, int xid); 114 __u32 *poplock, __u16 *pnetfid, int xid);
115void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr); 115void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
116extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, 116extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
@@ -362,12 +362,12 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
362extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 362extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
363 __u32 *); 363 __u32 *);
364extern int cifs_verify_signature(struct smb_hdr *, 364extern int cifs_verify_signature(struct smb_hdr *,
365 const struct mac_key *mac_key, 365 const struct session_key *session_key,
366 __u32 expected_sequence_number); 366 __u32 expected_sequence_number);
367extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn, 367extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
368 const char *pass); 368 const char *pass);
369extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *); 369extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
370extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, 370extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
371 const struct nls_table *); 371 const struct nls_table *);
372#ifdef CONFIG_CIFS_WEAK_PW_HASH 372#ifdef CONFIG_CIFS_WEAK_PW_HASH
373extern void calc_lanman_hash(const char *password, const char *cryptkey, 373extern void calc_lanman_hash(const char *password, const char *cryptkey,
@@ -408,4 +408,8 @@ extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
408extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, 408extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
409 const int netfid, __u64 *pExtAttrBits, __u64 *pMask); 409 const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
410extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); 410extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
411extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
412extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
413 const unsigned char *path,
414 struct cifs_sb_info *cifs_sb, int xid);
411#endif /* _CIFSPROTO_H */ 415#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 7e83b356cc9e..e98f1f317b15 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -91,13 +91,13 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
91 struct list_head *tmp1; 91 struct list_head *tmp1;
92 92
93/* list all files open on tree connection and mark them invalid */ 93/* list all files open on tree connection and mark them invalid */
94 write_lock(&GlobalSMBSeslock); 94 spin_lock(&cifs_file_list_lock);
95 list_for_each_safe(tmp, tmp1, &pTcon->openFileList) { 95 list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
96 open_file = list_entry(tmp, struct cifsFileInfo, tlist); 96 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
97 open_file->invalidHandle = true; 97 open_file->invalidHandle = true;
98 open_file->oplock_break_cancelled = true; 98 open_file->oplock_break_cancelled = true;
99 } 99 }
100 write_unlock(&GlobalSMBSeslock); 100 spin_unlock(&cifs_file_list_lock);
101 /* BB Add call to invalidate_inodes(sb) for all superblocks mounted 101 /* BB Add call to invalidate_inodes(sb) for all superblocks mounted
102 to this tcon */ 102 to this tcon */
103} 103}
@@ -503,7 +503,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
503 503
504 if (rsp->EncryptionKeyLength == 504 if (rsp->EncryptionKeyLength ==
505 cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { 505 cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
506 memcpy(server->cryptKey, rsp->EncryptionKey, 506 memcpy(ses->cryptKey, rsp->EncryptionKey,
507 CIFS_CRYPTO_KEY_SIZE); 507 CIFS_CRYPTO_KEY_SIZE);
508 } else if (server->secMode & SECMODE_PW_ENCRYPT) { 508 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
509 rc = -EIO; /* need cryptkey unless plain text */ 509 rc = -EIO; /* need cryptkey unless plain text */
@@ -574,7 +574,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
574 server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); 574 server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
575 server->timeAdj *= 60; 575 server->timeAdj *= 60;
576 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { 576 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
577 memcpy(server->cryptKey, pSMBr->u.EncryptionKey, 577 memcpy(ses->cryptKey, pSMBr->u.EncryptionKey,
578 CIFS_CRYPTO_KEY_SIZE); 578 CIFS_CRYPTO_KEY_SIZE);
579 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) 579 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
580 && (pSMBr->EncryptionKeyLength == 0)) { 580 && (pSMBr->EncryptionKeyLength == 0)) {
@@ -593,9 +593,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
593 rc = -EIO; 593 rc = -EIO;
594 goto neg_err_exit; 594 goto neg_err_exit;
595 } 595 }
596 read_lock(&cifs_tcp_ses_lock); 596 spin_lock(&cifs_tcp_ses_lock);
597 if (server->srv_count > 1) { 597 if (server->srv_count > 1) {
598 read_unlock(&cifs_tcp_ses_lock); 598 spin_unlock(&cifs_tcp_ses_lock);
599 if (memcmp(server->server_GUID, 599 if (memcmp(server->server_GUID,
600 pSMBr->u.extended_response. 600 pSMBr->u.extended_response.
601 GUID, 16) != 0) { 601 GUID, 16) != 0) {
@@ -605,7 +605,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
605 16); 605 16);
606 } 606 }
607 } else { 607 } else {
608 read_unlock(&cifs_tcp_ses_lock); 608 spin_unlock(&cifs_tcp_ses_lock);
609 memcpy(server->server_GUID, 609 memcpy(server->server_GUID,
610 pSMBr->u.extended_response.GUID, 16); 610 pSMBr->u.extended_response.GUID, 16);
611 } 611 }
@@ -620,13 +620,15 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
620 rc = 0; 620 rc = 0;
621 else 621 else
622 rc = -EINVAL; 622 rc = -EINVAL;
623 623 if (server->secType == Kerberos) {
624 if (server->sec_kerberos || server->sec_mskerberos) 624 if (!server->sec_kerberos &&
625 server->secType = Kerberos; 625 !server->sec_mskerberos)
626 else if (server->sec_ntlmssp) 626 rc = -EOPNOTSUPP;
627 server->secType = RawNTLMSSP; 627 } else if (server->secType == RawNTLMSSP) {
628 else 628 if (!server->sec_ntlmssp)
629 rc = -EOPNOTSUPP; 629 rc = -EOPNOTSUPP;
630 } else
631 rc = -EOPNOTSUPP;
630 } 632 }
631 } else 633 } else
632 server->capabilities &= ~CAP_EXTENDED_SECURITY; 634 server->capabilities &= ~CAP_EXTENDED_SECURITY;
diff --git a/fs/cifs/cn_cifs.h b/fs/cifs/cn_cifs.h
deleted file mode 100644
index ea59ccac2eb1..000000000000
--- a/fs/cifs/cn_cifs.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * fs/cifs/cn_cifs.h
3 *
4 * Copyright (c) International Business Machines Corp., 2002
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _CN_CIFS_H
23#define _CN_CIFS_H
24#ifdef CONFIG_CIFS_UPCALL
25#include <linux/types.h>
26#include <linux/connector.h>
27
28struct cifs_upcall {
29 char signature[4]; /* CIFS */
30 enum command {
31 CIFS_GET_IP = 0x00000001, /* get ip address for hostname */
32 CIFS_GET_SECBLOB = 0x00000002, /* get SPNEGO wrapped blob */
33 } command;
34 /* union cifs upcall data follows */
35};
36#endif /* CIFS_UPCALL */
37#endif /* _CN_CIFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 88c84a38bccb..7e73176acb58 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -47,7 +47,6 @@
47#include "ntlmssp.h" 47#include "ntlmssp.h"
48#include "nterr.h" 48#include "nterr.h"
49#include "rfc1002pdu.h" 49#include "rfc1002pdu.h"
50#include "cn_cifs.h"
51#include "fscache.h" 50#include "fscache.h"
52 51
53#define CIFS_PORT 445 52#define CIFS_PORT 445
@@ -100,16 +99,24 @@ struct smb_vol {
100 bool noautotune:1; 99 bool noautotune:1;
101 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ 100 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
102 bool fsc:1; /* enable fscache */ 101 bool fsc:1; /* enable fscache */
102 bool mfsymlinks:1; /* use Minshall+French Symlinks */
103 bool multiuser:1;
103 unsigned int rsize; 104 unsigned int rsize;
104 unsigned int wsize; 105 unsigned int wsize;
105 bool sockopt_tcp_nodelay:1; 106 bool sockopt_tcp_nodelay:1;
106 unsigned short int port; 107 unsigned short int port;
107 char *prepath; 108 char *prepath;
109 struct sockaddr_storage srcaddr; /* allow binding to a local IP */
108 struct nls_table *local_nls; 110 struct nls_table *local_nls;
109}; 111};
110 112
113/* FIXME: should these be tunable? */
114#define TLINK_ERROR_EXPIRE (1 * HZ)
115#define TLINK_IDLE_EXPIRE (600 * HZ)
116
111static int ipv4_connect(struct TCP_Server_Info *server); 117static int ipv4_connect(struct TCP_Server_Info *server);
112static int ipv6_connect(struct TCP_Server_Info *server); 118static int ipv6_connect(struct TCP_Server_Info *server);
119static void cifs_prune_tlinks(struct work_struct *work);
113 120
114/* 121/*
115 * cifs tcp session reconnection 122 * cifs tcp session reconnection
@@ -143,7 +150,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
143 150
144 /* before reconnecting the tcp session, mark the smb session (uid) 151 /* before reconnecting the tcp session, mark the smb session (uid)
145 and the tid bad so they are not used until reconnected */ 152 and the tid bad so they are not used until reconnected */
146 read_lock(&cifs_tcp_ses_lock); 153 spin_lock(&cifs_tcp_ses_lock);
147 list_for_each(tmp, &server->smb_ses_list) { 154 list_for_each(tmp, &server->smb_ses_list) {
148 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 155 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
149 ses->need_reconnect = true; 156 ses->need_reconnect = true;
@@ -153,7 +160,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
153 tcon->need_reconnect = true; 160 tcon->need_reconnect = true;
154 } 161 }
155 } 162 }
156 read_unlock(&cifs_tcp_ses_lock); 163 spin_unlock(&cifs_tcp_ses_lock);
157 /* do not want to be sending data on a socket we are freeing */ 164 /* do not want to be sending data on a socket we are freeing */
158 mutex_lock(&server->srv_mutex); 165 mutex_lock(&server->srv_mutex);
159 if (server->ssocket) { 166 if (server->ssocket) {
@@ -166,6 +173,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
166 sock_release(server->ssocket); 173 sock_release(server->ssocket);
167 server->ssocket = NULL; 174 server->ssocket = NULL;
168 } 175 }
176 server->sequence_number = 0;
177 server->session_estab = false;
169 178
170 spin_lock(&GlobalMid_Lock); 179 spin_lock(&GlobalMid_Lock);
171 list_for_each(tmp, &server->pending_mid_q) { 180 list_for_each(tmp, &server->pending_mid_q) {
@@ -198,7 +207,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
198 spin_lock(&GlobalMid_Lock); 207 spin_lock(&GlobalMid_Lock);
199 if (server->tcpStatus != CifsExiting) 208 if (server->tcpStatus != CifsExiting)
200 server->tcpStatus = CifsGood; 209 server->tcpStatus = CifsGood;
201 server->sequence_number = 0;
202 spin_unlock(&GlobalMid_Lock); 210 spin_unlock(&GlobalMid_Lock);
203 /* atomic_set(&server->inFlight,0);*/ 211 /* atomic_set(&server->inFlight,0);*/
204 wake_up(&server->response_q); 212 wake_up(&server->response_q);
@@ -629,9 +637,9 @@ multi_t2_fnd:
629 } /* end while !EXITING */ 637 } /* end while !EXITING */
630 638
631 /* take it off the list, if it's not already */ 639 /* take it off the list, if it's not already */
632 write_lock(&cifs_tcp_ses_lock); 640 spin_lock(&cifs_tcp_ses_lock);
633 list_del_init(&server->tcp_ses_list); 641 list_del_init(&server->tcp_ses_list);
634 write_unlock(&cifs_tcp_ses_lock); 642 spin_unlock(&cifs_tcp_ses_lock);
635 643
636 spin_lock(&GlobalMid_Lock); 644 spin_lock(&GlobalMid_Lock);
637 server->tcpStatus = CifsExiting; 645 server->tcpStatus = CifsExiting;
@@ -669,7 +677,7 @@ multi_t2_fnd:
669 * BB: we shouldn't have to do any of this. It shouldn't be 677 * BB: we shouldn't have to do any of this. It shouldn't be
670 * possible to exit from the thread with active SMB sessions 678 * possible to exit from the thread with active SMB sessions
671 */ 679 */
672 read_lock(&cifs_tcp_ses_lock); 680 spin_lock(&cifs_tcp_ses_lock);
673 if (list_empty(&server->pending_mid_q)) { 681 if (list_empty(&server->pending_mid_q)) {
674 /* loop through server session structures attached to this and 682 /* loop through server session structures attached to this and
675 mark them dead */ 683 mark them dead */
@@ -679,7 +687,7 @@ multi_t2_fnd:
679 ses->status = CifsExiting; 687 ses->status = CifsExiting;
680 ses->server = NULL; 688 ses->server = NULL;
681 } 689 }
682 read_unlock(&cifs_tcp_ses_lock); 690 spin_unlock(&cifs_tcp_ses_lock);
683 } else { 691 } else {
684 /* although we can not zero the server struct pointer yet, 692 /* although we can not zero the server struct pointer yet,
685 since there are active requests which may depnd on them, 693 since there are active requests which may depnd on them,
@@ -702,7 +710,7 @@ multi_t2_fnd:
702 } 710 }
703 } 711 }
704 spin_unlock(&GlobalMid_Lock); 712 spin_unlock(&GlobalMid_Lock);
705 read_unlock(&cifs_tcp_ses_lock); 713 spin_unlock(&cifs_tcp_ses_lock);
706 /* 1/8th of sec is more than enough time for them to exit */ 714 /* 1/8th of sec is more than enough time for them to exit */
707 msleep(125); 715 msleep(125);
708 } 716 }
@@ -725,12 +733,12 @@ multi_t2_fnd:
725 if a crazy root user tried to kill cifsd 733 if a crazy root user tried to kill cifsd
726 kernel thread explicitly this might happen) */ 734 kernel thread explicitly this might happen) */
727 /* BB: This shouldn't be necessary, see above */ 735 /* BB: This shouldn't be necessary, see above */
728 read_lock(&cifs_tcp_ses_lock); 736 spin_lock(&cifs_tcp_ses_lock);
729 list_for_each(tmp, &server->smb_ses_list) { 737 list_for_each(tmp, &server->smb_ses_list) {
730 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 738 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
731 ses->server = NULL; 739 ses->server = NULL;
732 } 740 }
733 read_unlock(&cifs_tcp_ses_lock); 741 spin_unlock(&cifs_tcp_ses_lock);
734 742
735 kfree(server->hostname); 743 kfree(server->hostname);
736 task_to_wake = xchg(&server->tsk, NULL); 744 task_to_wake = xchg(&server->tsk, NULL);
@@ -1046,6 +1054,22 @@ cifs_parse_mount_options(char *options, const char *devname,
1046 "long\n"); 1054 "long\n");
1047 return 1; 1055 return 1;
1048 } 1056 }
1057 } else if (strnicmp(data, "srcaddr", 7) == 0) {
1058 vol->srcaddr.ss_family = AF_UNSPEC;
1059
1060 if (!value || !*value) {
1061 printk(KERN_WARNING "CIFS: srcaddr value"
1062 " not specified.\n");
1063 return 1; /* needs_arg; */
1064 }
1065 i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
1066 value, strlen(value));
1067 if (i < 0) {
1068 printk(KERN_WARNING "CIFS: Could not parse"
1069 " srcaddr: %s\n",
1070 value);
1071 return 1;
1072 }
1049 } else if (strnicmp(data, "prefixpath", 10) == 0) { 1073 } else if (strnicmp(data, "prefixpath", 10) == 0) {
1050 if (!value || !*value) { 1074 if (!value || !*value) {
1051 printk(KERN_WARNING 1075 printk(KERN_WARNING
@@ -1325,6 +1349,10 @@ cifs_parse_mount_options(char *options, const char *devname,
1325 "/proc/fs/cifs/LookupCacheEnabled to 0\n"); 1349 "/proc/fs/cifs/LookupCacheEnabled to 0\n");
1326 } else if (strnicmp(data, "fsc", 3) == 0) { 1350 } else if (strnicmp(data, "fsc", 3) == 0) {
1327 vol->fsc = true; 1351 vol->fsc = true;
1352 } else if (strnicmp(data, "mfsymlinks", 10) == 0) {
1353 vol->mfsymlinks = true;
1354 } else if (strnicmp(data, "multiuser", 8) == 0) {
1355 vol->multiuser = true;
1328 } else 1356 } else
1329 printk(KERN_WARNING "CIFS: Unknown mount option %s\n", 1357 printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
1330 data); 1358 data);
@@ -1356,6 +1384,13 @@ cifs_parse_mount_options(char *options, const char *devname,
1356 return 1; 1384 return 1;
1357 } 1385 }
1358 } 1386 }
1387
1388 if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) {
1389 cERROR(1, "Multiuser mounts currently require krb5 "
1390 "authentication!");
1391 return 1;
1392 }
1393
1359 if (vol->UNCip == NULL) 1394 if (vol->UNCip == NULL)
1360 vol->UNCip = &vol->UNC[2]; 1395 vol->UNCip = &vol->UNC[2];
1361 1396
@@ -1374,8 +1409,36 @@ cifs_parse_mount_options(char *options, const char *devname,
1374 return 0; 1409 return 0;
1375} 1410}
1376 1411
1412/** Returns true if srcaddr isn't specified and rhs isn't
1413 * specified, or if srcaddr is specified and
1414 * matches the IP address of the rhs argument.
1415 */
1416static bool
1417srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
1418{
1419 switch (srcaddr->sa_family) {
1420 case AF_UNSPEC:
1421 return (rhs->sa_family == AF_UNSPEC);
1422 case AF_INET: {
1423 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1424 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1425 return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1426 }
1427 case AF_INET6: {
1428 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1429 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs;
1430 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
1431 }
1432 default:
1433 WARN_ON(1);
1434 return false; /* don't expect to be here */
1435 }
1436}
1437
1438
1377static bool 1439static bool
1378match_address(struct TCP_Server_Info *server, struct sockaddr *addr) 1440match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
1441 struct sockaddr *srcaddr)
1379{ 1442{
1380 struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; 1443 struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
1381 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; 1444 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
@@ -1402,6 +1465,9 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr)
1402 break; 1465 break;
1403 } 1466 }
1404 1467
1468 if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
1469 return false;
1470
1405 return true; 1471 return true;
1406} 1472}
1407 1473
@@ -1458,29 +1524,21 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
1458{ 1524{
1459 struct TCP_Server_Info *server; 1525 struct TCP_Server_Info *server;
1460 1526
1461 write_lock(&cifs_tcp_ses_lock); 1527 spin_lock(&cifs_tcp_ses_lock);
1462 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 1528 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1463 /* 1529 if (!match_address(server, addr,
1464 * the demux thread can exit on its own while still in CifsNew 1530 (struct sockaddr *)&vol->srcaddr))
1465 * so don't accept any sockets in that state. Since the
1466 * tcpStatus never changes back to CifsNew it's safe to check
1467 * for this without a lock.
1468 */
1469 if (server->tcpStatus == CifsNew)
1470 continue;
1471
1472 if (!match_address(server, addr))
1473 continue; 1531 continue;
1474 1532
1475 if (!match_security(server, vol)) 1533 if (!match_security(server, vol))
1476 continue; 1534 continue;
1477 1535
1478 ++server->srv_count; 1536 ++server->srv_count;
1479 write_unlock(&cifs_tcp_ses_lock); 1537 spin_unlock(&cifs_tcp_ses_lock);
1480 cFYI(1, "Existing tcp session with server found"); 1538 cFYI(1, "Existing tcp session with server found");
1481 return server; 1539 return server;
1482 } 1540 }
1483 write_unlock(&cifs_tcp_ses_lock); 1541 spin_unlock(&cifs_tcp_ses_lock);
1484 return NULL; 1542 return NULL;
1485} 1543}
1486 1544
@@ -1489,14 +1547,14 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
1489{ 1547{
1490 struct task_struct *task; 1548 struct task_struct *task;
1491 1549
1492 write_lock(&cifs_tcp_ses_lock); 1550 spin_lock(&cifs_tcp_ses_lock);
1493 if (--server->srv_count > 0) { 1551 if (--server->srv_count > 0) {
1494 write_unlock(&cifs_tcp_ses_lock); 1552 spin_unlock(&cifs_tcp_ses_lock);
1495 return; 1553 return;
1496 } 1554 }
1497 1555
1498 list_del_init(&server->tcp_ses_list); 1556 list_del_init(&server->tcp_ses_list);
1499 write_unlock(&cifs_tcp_ses_lock); 1557 spin_unlock(&cifs_tcp_ses_lock);
1500 1558
1501 spin_lock(&GlobalMid_Lock); 1559 spin_lock(&GlobalMid_Lock);
1502 server->tcpStatus = CifsExiting; 1560 server->tcpStatus = CifsExiting;
@@ -1574,6 +1632,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1574 volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); 1632 volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1575 memcpy(tcp_ses->server_RFC1001_name, 1633 memcpy(tcp_ses->server_RFC1001_name,
1576 volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); 1634 volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1635 tcp_ses->session_estab = false;
1577 tcp_ses->sequence_number = 0; 1636 tcp_ses->sequence_number = 0;
1578 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); 1637 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1579 INIT_LIST_HEAD(&tcp_ses->smb_ses_list); 1638 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
@@ -1584,6 +1643,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1584 * no need to spinlock this init of tcpStatus or srv_count 1643 * no need to spinlock this init of tcpStatus or srv_count
1585 */ 1644 */
1586 tcp_ses->tcpStatus = CifsNew; 1645 tcp_ses->tcpStatus = CifsNew;
1646 memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
1647 sizeof(tcp_ses->srcaddr));
1587 ++tcp_ses->srv_count; 1648 ++tcp_ses->srv_count;
1588 1649
1589 if (addr.ss_family == AF_INET6) { 1650 if (addr.ss_family == AF_INET6) {
@@ -1618,9 +1679,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1618 } 1679 }
1619 1680
1620 /* thread spawned, put it on the list */ 1681 /* thread spawned, put it on the list */
1621 write_lock(&cifs_tcp_ses_lock); 1682 spin_lock(&cifs_tcp_ses_lock);
1622 list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); 1683 list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
1623 write_unlock(&cifs_tcp_ses_lock); 1684 spin_unlock(&cifs_tcp_ses_lock);
1624 1685
1625 cifs_fscache_get_client_cookie(tcp_ses); 1686 cifs_fscache_get_client_cookie(tcp_ses);
1626 1687
@@ -1642,7 +1703,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
1642{ 1703{
1643 struct cifsSesInfo *ses; 1704 struct cifsSesInfo *ses;
1644 1705
1645 write_lock(&cifs_tcp_ses_lock); 1706 spin_lock(&cifs_tcp_ses_lock);
1646 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1707 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1647 switch (server->secType) { 1708 switch (server->secType) {
1648 case Kerberos: 1709 case Kerberos:
@@ -1662,10 +1723,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
1662 continue; 1723 continue;
1663 } 1724 }
1664 ++ses->ses_count; 1725 ++ses->ses_count;
1665 write_unlock(&cifs_tcp_ses_lock); 1726 spin_unlock(&cifs_tcp_ses_lock);
1666 return ses; 1727 return ses;
1667 } 1728 }
1668 write_unlock(&cifs_tcp_ses_lock); 1729 spin_unlock(&cifs_tcp_ses_lock);
1669 return NULL; 1730 return NULL;
1670} 1731}
1671 1732
@@ -1676,14 +1737,14 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
1676 struct TCP_Server_Info *server = ses->server; 1737 struct TCP_Server_Info *server = ses->server;
1677 1738
1678 cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count); 1739 cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
1679 write_lock(&cifs_tcp_ses_lock); 1740 spin_lock(&cifs_tcp_ses_lock);
1680 if (--ses->ses_count > 0) { 1741 if (--ses->ses_count > 0) {
1681 write_unlock(&cifs_tcp_ses_lock); 1742 spin_unlock(&cifs_tcp_ses_lock);
1682 return; 1743 return;
1683 } 1744 }
1684 1745
1685 list_del_init(&ses->smb_ses_list); 1746 list_del_init(&ses->smb_ses_list);
1686 write_unlock(&cifs_tcp_ses_lock); 1747 spin_unlock(&cifs_tcp_ses_lock);
1687 1748
1688 if (ses->status == CifsGood) { 1749 if (ses->status == CifsGood) {
1689 xid = GetXid(); 1750 xid = GetXid();
@@ -1740,6 +1801,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1740 if (ses == NULL) 1801 if (ses == NULL)
1741 goto get_ses_fail; 1802 goto get_ses_fail;
1742 1803
1804 ses->tilen = 0;
1805 ses->tiblob = NULL;
1743 /* new SMB session uses our server ref */ 1806 /* new SMB session uses our server ref */
1744 ses->server = server; 1807 ses->server = server;
1745 if (server->addr.sockAddr6.sin6_family == AF_INET6) 1808 if (server->addr.sockAddr6.sin6_family == AF_INET6)
@@ -1778,9 +1841,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1778 goto get_ses_fail; 1841 goto get_ses_fail;
1779 1842
1780 /* success, put it on the list */ 1843 /* success, put it on the list */
1781 write_lock(&cifs_tcp_ses_lock); 1844 spin_lock(&cifs_tcp_ses_lock);
1782 list_add(&ses->smb_ses_list, &server->smb_ses_list); 1845 list_add(&ses->smb_ses_list, &server->smb_ses_list);
1783 write_unlock(&cifs_tcp_ses_lock); 1846 spin_unlock(&cifs_tcp_ses_lock);
1784 1847
1785 FreeXid(xid); 1848 FreeXid(xid);
1786 return ses; 1849 return ses;
@@ -1797,7 +1860,7 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
1797 struct list_head *tmp; 1860 struct list_head *tmp;
1798 struct cifsTconInfo *tcon; 1861 struct cifsTconInfo *tcon;
1799 1862
1800 write_lock(&cifs_tcp_ses_lock); 1863 spin_lock(&cifs_tcp_ses_lock);
1801 list_for_each(tmp, &ses->tcon_list) { 1864 list_for_each(tmp, &ses->tcon_list) {
1802 tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); 1865 tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
1803 if (tcon->tidStatus == CifsExiting) 1866 if (tcon->tidStatus == CifsExiting)
@@ -1806,10 +1869,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
1806 continue; 1869 continue;
1807 1870
1808 ++tcon->tc_count; 1871 ++tcon->tc_count;
1809 write_unlock(&cifs_tcp_ses_lock); 1872 spin_unlock(&cifs_tcp_ses_lock);
1810 return tcon; 1873 return tcon;
1811 } 1874 }
1812 write_unlock(&cifs_tcp_ses_lock); 1875 spin_unlock(&cifs_tcp_ses_lock);
1813 return NULL; 1876 return NULL;
1814} 1877}
1815 1878
@@ -1820,14 +1883,14 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
1820 struct cifsSesInfo *ses = tcon->ses; 1883 struct cifsSesInfo *ses = tcon->ses;
1821 1884
1822 cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); 1885 cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
1823 write_lock(&cifs_tcp_ses_lock); 1886 spin_lock(&cifs_tcp_ses_lock);
1824 if (--tcon->tc_count > 0) { 1887 if (--tcon->tc_count > 0) {
1825 write_unlock(&cifs_tcp_ses_lock); 1888 spin_unlock(&cifs_tcp_ses_lock);
1826 return; 1889 return;
1827 } 1890 }
1828 1891
1829 list_del_init(&tcon->tcon_list); 1892 list_del_init(&tcon->tcon_list);
1830 write_unlock(&cifs_tcp_ses_lock); 1893 spin_unlock(&cifs_tcp_ses_lock);
1831 1894
1832 xid = GetXid(); 1895 xid = GetXid();
1833 CIFSSMBTDis(xid, tcon); 1896 CIFSSMBTDis(xid, tcon);
@@ -1900,9 +1963,9 @@ cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
1900 tcon->nocase = volume_info->nocase; 1963 tcon->nocase = volume_info->nocase;
1901 tcon->local_lease = volume_info->local_lease; 1964 tcon->local_lease = volume_info->local_lease;
1902 1965
1903 write_lock(&cifs_tcp_ses_lock); 1966 spin_lock(&cifs_tcp_ses_lock);
1904 list_add(&tcon->tcon_list, &ses->tcon_list); 1967 list_add(&tcon->tcon_list, &ses->tcon_list);
1905 write_unlock(&cifs_tcp_ses_lock); 1968 spin_unlock(&cifs_tcp_ses_lock);
1906 1969
1907 cifs_fscache_get_super_cookie(tcon); 1970 cifs_fscache_get_super_cookie(tcon);
1908 1971
@@ -1913,6 +1976,23 @@ out_fail:
1913 return ERR_PTR(rc); 1976 return ERR_PTR(rc);
1914} 1977}
1915 1978
1979void
1980cifs_put_tlink(struct tcon_link *tlink)
1981{
1982 if (!tlink || IS_ERR(tlink))
1983 return;
1984
1985 if (!atomic_dec_and_test(&tlink->tl_count) ||
1986 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
1987 tlink->tl_time = jiffies;
1988 return;
1989 }
1990
1991 if (!IS_ERR(tlink_tcon(tlink)))
1992 cifs_put_tcon(tlink_tcon(tlink));
1993 kfree(tlink);
1994 return;
1995}
1916 1996
1917int 1997int
1918get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, 1998get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
@@ -1997,6 +2077,33 @@ static void rfc1002mangle(char *target, char *source, unsigned int length)
1997 2077
1998} 2078}
1999 2079
2080static int
2081bind_socket(struct TCP_Server_Info *server)
2082{
2083 int rc = 0;
2084 if (server->srcaddr.ss_family != AF_UNSPEC) {
2085 /* Bind to the specified local IP address */
2086 struct socket *socket = server->ssocket;
2087 rc = socket->ops->bind(socket,
2088 (struct sockaddr *) &server->srcaddr,
2089 sizeof(server->srcaddr));
2090 if (rc < 0) {
2091 struct sockaddr_in *saddr4;
2092 struct sockaddr_in6 *saddr6;
2093 saddr4 = (struct sockaddr_in *)&server->srcaddr;
2094 saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2095 if (saddr6->sin6_family == AF_INET6)
2096 cERROR(1, "cifs: "
2097 "Failed to bind to: %pI6c, error: %d\n",
2098 &saddr6->sin6_addr, rc);
2099 else
2100 cERROR(1, "cifs: "
2101 "Failed to bind to: %pI4, error: %d\n",
2102 &saddr4->sin_addr.s_addr, rc);
2103 }
2104 }
2105 return rc;
2106}
2000 2107
2001static int 2108static int
2002ipv4_connect(struct TCP_Server_Info *server) 2109ipv4_connect(struct TCP_Server_Info *server)
@@ -2022,6 +2129,10 @@ ipv4_connect(struct TCP_Server_Info *server)
2022 cifs_reclassify_socket4(socket); 2129 cifs_reclassify_socket4(socket);
2023 } 2130 }
2024 2131
2132 rc = bind_socket(server);
2133 if (rc < 0)
2134 return rc;
2135
2025 /* user overrode default port */ 2136 /* user overrode default port */
2026 if (server->addr.sockAddr.sin_port) { 2137 if (server->addr.sockAddr.sin_port) {
2027 rc = socket->ops->connect(socket, (struct sockaddr *) 2138 rc = socket->ops->connect(socket, (struct sockaddr *)
@@ -2184,6 +2295,10 @@ ipv6_connect(struct TCP_Server_Info *server)
2184 cifs_reclassify_socket6(socket); 2295 cifs_reclassify_socket6(socket);
2185 } 2296 }
2186 2297
2298 rc = bind_socket(server);
2299 if (rc < 0)
2300 return rc;
2301
2187 /* user overrode default port */ 2302 /* user overrode default port */
2188 if (server->addr.sockAddr6.sin6_port) { 2303 if (server->addr.sockAddr6.sin6_port) {
2189 rc = socket->ops->connect(socket, 2304 rc = socket->ops->connect(socket,
@@ -2383,6 +2498,8 @@ convert_delimiter(char *path, char delim)
2383static void setup_cifs_sb(struct smb_vol *pvolume_info, 2498static void setup_cifs_sb(struct smb_vol *pvolume_info,
2384 struct cifs_sb_info *cifs_sb) 2499 struct cifs_sb_info *cifs_sb)
2385{ 2500{
2501 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
2502
2386 if (pvolume_info->rsize > CIFSMaxBufSize) { 2503 if (pvolume_info->rsize > CIFSMaxBufSize) {
2387 cERROR(1, "rsize %d too large, using MaxBufSize", 2504 cERROR(1, "rsize %d too large, using MaxBufSize",
2388 pvolume_info->rsize); 2505 pvolume_info->rsize);
@@ -2462,10 +2579,21 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2462 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; 2579 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
2463 if (pvolume_info->fsc) 2580 if (pvolume_info->fsc)
2464 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; 2581 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
2582 if (pvolume_info->multiuser)
2583 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
2584 CIFS_MOUNT_NO_PERM);
2465 if (pvolume_info->direct_io) { 2585 if (pvolume_info->direct_io) {
2466 cFYI(1, "mounting share using direct i/o"); 2586 cFYI(1, "mounting share using direct i/o");
2467 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; 2587 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
2468 } 2588 }
2589 if (pvolume_info->mfsymlinks) {
2590 if (pvolume_info->sfu_emul) {
2591 cERROR(1, "mount option mfsymlinks ignored if sfu "
2592 "mount option is used");
2593 } else {
2594 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
2595 }
2596 }
2469 2597
2470 if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) 2598 if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
2471 cERROR(1, "mount option dynperm ignored if cifsacl " 2599 cERROR(1, "mount option dynperm ignored if cifsacl "
@@ -2552,6 +2680,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
2552 struct TCP_Server_Info *srvTcp; 2680 struct TCP_Server_Info *srvTcp;
2553 char *full_path; 2681 char *full_path;
2554 char *mount_data = mount_data_global; 2682 char *mount_data = mount_data_global;
2683 struct tcon_link *tlink;
2555#ifdef CONFIG_CIFS_DFS_UPCALL 2684#ifdef CONFIG_CIFS_DFS_UPCALL
2556 struct dfs_info3_param *referrals = NULL; 2685 struct dfs_info3_param *referrals = NULL;
2557 unsigned int num_referrals = 0; 2686 unsigned int num_referrals = 0;
@@ -2563,6 +2692,7 @@ try_mount_again:
2563 pSesInfo = NULL; 2692 pSesInfo = NULL;
2564 srvTcp = NULL; 2693 srvTcp = NULL;
2565 full_path = NULL; 2694 full_path = NULL;
2695 tlink = NULL;
2566 2696
2567 xid = GetXid(); 2697 xid = GetXid();
2568 2698
@@ -2638,8 +2768,6 @@ try_mount_again:
2638 goto remote_path_check; 2768 goto remote_path_check;
2639 } 2769 }
2640 2770
2641 cifs_sb->tcon = tcon;
2642
2643 /* do not care if following two calls succeed - informational */ 2771 /* do not care if following two calls succeed - informational */
2644 if (!tcon->ipc) { 2772 if (!tcon->ipc) {
2645 CIFSSMBQFSDeviceInfo(xid, tcon); 2773 CIFSSMBQFSDeviceInfo(xid, tcon);
@@ -2748,6 +2876,38 @@ remote_path_check:
2748#endif 2876#endif
2749 } 2877 }
2750 2878
2879 if (rc)
2880 goto mount_fail_check;
2881
2882 /* now, hang the tcon off of the superblock */
2883 tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
2884 if (tlink == NULL) {
2885 rc = -ENOMEM;
2886 goto mount_fail_check;
2887 }
2888
2889 tlink->tl_index = pSesInfo->linux_uid;
2890 tlink->tl_tcon = tcon;
2891 tlink->tl_time = jiffies;
2892 set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
2893 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
2894
2895 rc = radix_tree_preload(GFP_KERNEL);
2896 if (rc == -ENOMEM) {
2897 kfree(tlink);
2898 goto mount_fail_check;
2899 }
2900
2901 spin_lock(&cifs_sb->tlink_tree_lock);
2902 radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
2903 radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
2904 CIFS_TLINK_MASTER_TAG);
2905 spin_unlock(&cifs_sb->tlink_tree_lock);
2906 radix_tree_preload_end();
2907
2908 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
2909 TLINK_IDLE_EXPIRE);
2910
2751mount_fail_check: 2911mount_fail_check:
2752 /* on error free sesinfo and tcon struct if needed */ 2912 /* on error free sesinfo and tcon struct if needed */
2753 if (rc) { 2913 if (rc) {
@@ -2825,14 +2985,13 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
2825#ifdef CONFIG_CIFS_WEAK_PW_HASH 2985#ifdef CONFIG_CIFS_WEAK_PW_HASH
2826 if ((global_secflags & CIFSSEC_MAY_LANMAN) && 2986 if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
2827 (ses->server->secType == LANMAN)) 2987 (ses->server->secType == LANMAN))
2828 calc_lanman_hash(tcon->password, ses->server->cryptKey, 2988 calc_lanman_hash(tcon->password, ses->cryptKey,
2829 ses->server->secMode & 2989 ses->server->secMode &
2830 SECMODE_PW_ENCRYPT ? true : false, 2990 SECMODE_PW_ENCRYPT ? true : false,
2831 bcc_ptr); 2991 bcc_ptr);
2832 else 2992 else
2833#endif /* CIFS_WEAK_PW_HASH */ 2993#endif /* CIFS_WEAK_PW_HASH */
2834 SMBNTencrypt(tcon->password, ses->server->cryptKey, 2994 SMBNTencrypt(tcon->password, ses->cryptKey, bcc_ptr);
2835 bcc_ptr);
2836 2995
2837 bcc_ptr += CIFS_SESS_KEY_SIZE; 2996 bcc_ptr += CIFS_SESS_KEY_SIZE;
2838 if (ses->capabilities & CAP_UNICODE) { 2997 if (ses->capabilities & CAP_UNICODE) {
@@ -2934,19 +3093,39 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
2934int 3093int
2935cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3094cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
2936{ 3095{
2937 int rc = 0; 3096 int i, ret;
2938 char *tmp; 3097 char *tmp;
3098 struct tcon_link *tlink[8];
3099 unsigned long index = 0;
3100
3101 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3102
3103 do {
3104 spin_lock(&cifs_sb->tlink_tree_lock);
3105 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
3106 (void **)tlink, index,
3107 ARRAY_SIZE(tlink));
3108 /* increment index for next pass */
3109 if (ret > 0)
3110 index = tlink[ret - 1]->tl_index + 1;
3111 for (i = 0; i < ret; i++) {
3112 cifs_get_tlink(tlink[i]);
3113 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
3114 radix_tree_delete(&cifs_sb->tlink_tree,
3115 tlink[i]->tl_index);
3116 }
3117 spin_unlock(&cifs_sb->tlink_tree_lock);
2939 3118
2940 if (cifs_sb->tcon) 3119 for (i = 0; i < ret; i++)
2941 cifs_put_tcon(cifs_sb->tcon); 3120 cifs_put_tlink(tlink[i]);
3121 } while (ret != 0);
2942 3122
2943 cifs_sb->tcon = NULL;
2944 tmp = cifs_sb->prepath; 3123 tmp = cifs_sb->prepath;
2945 cifs_sb->prepathlen = 0; 3124 cifs_sb->prepathlen = 0;
2946 cifs_sb->prepath = NULL; 3125 cifs_sb->prepath = NULL;
2947 kfree(tmp); 3126 kfree(tmp);
2948 3127
2949 return rc; 3128 return 0;
2950} 3129}
2951 3130
2952int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) 3131int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
@@ -2997,6 +3176,15 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
2997 if (rc) { 3176 if (rc) {
2998 cERROR(1, "Send error in SessSetup = %d", rc); 3177 cERROR(1, "Send error in SessSetup = %d", rc);
2999 } else { 3178 } else {
3179 mutex_lock(&ses->server->srv_mutex);
3180 if (!server->session_estab) {
3181 memcpy(&server->session_key.data,
3182 &ses->auth_key.data, ses->auth_key.len);
3183 server->session_key.len = ses->auth_key.len;
3184 ses->server->session_estab = true;
3185 }
3186 mutex_unlock(&server->srv_mutex);
3187
3000 cFYI(1, "CIFS Session Established successfully"); 3188 cFYI(1, "CIFS Session Established successfully");
3001 spin_lock(&GlobalMid_Lock); 3189 spin_lock(&GlobalMid_Lock);
3002 ses->status = CifsGood; 3190 ses->status = CifsGood;
@@ -3007,3 +3195,237 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
3007 return rc; 3195 return rc;
3008} 3196}
3009 3197
3198static struct cifsTconInfo *
3199cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3200{
3201 struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb);
3202 struct cifsSesInfo *ses;
3203 struct cifsTconInfo *tcon = NULL;
3204 struct smb_vol *vol_info;
3205 char username[MAX_USERNAME_SIZE + 1];
3206
3207 vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
3208 if (vol_info == NULL) {
3209 tcon = ERR_PTR(-ENOMEM);
3210 goto out;
3211 }
3212
3213 snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
3214 vol_info->username = username;
3215 vol_info->local_nls = cifs_sb->local_nls;
3216 vol_info->linux_uid = fsuid;
3217 vol_info->cred_uid = fsuid;
3218 vol_info->UNC = master_tcon->treeName;
3219 vol_info->retry = master_tcon->retry;
3220 vol_info->nocase = master_tcon->nocase;
3221 vol_info->local_lease = master_tcon->local_lease;
3222 vol_info->no_linux_ext = !master_tcon->unix_ext;
3223
3224 /* FIXME: allow for other secFlg settings */
3225 vol_info->secFlg = CIFSSEC_MUST_KRB5;
3226
3227 /* get a reference for the same TCP session */
3228 spin_lock(&cifs_tcp_ses_lock);
3229 ++master_tcon->ses->server->srv_count;
3230 spin_unlock(&cifs_tcp_ses_lock);
3231
3232 ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
3233 if (IS_ERR(ses)) {
3234 tcon = (struct cifsTconInfo *)ses;
3235 cifs_put_tcp_session(master_tcon->ses->server);
3236 goto out;
3237 }
3238
3239 tcon = cifs_get_tcon(ses, vol_info);
3240 if (IS_ERR(tcon)) {
3241 cifs_put_smb_ses(ses);
3242 goto out;
3243 }
3244
3245 if (ses->capabilities & CAP_UNIX)
3246 reset_cifs_unix_caps(0, tcon, NULL, vol_info);
3247out:
3248 kfree(vol_info);
3249
3250 return tcon;
3251}
3252
3253static struct tcon_link *
3254cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3255{
3256 struct tcon_link *tlink;
3257 unsigned int ret;
3258
3259 spin_lock(&cifs_sb->tlink_tree_lock);
3260 ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
3261 0, 1, CIFS_TLINK_MASTER_TAG);
3262 spin_unlock(&cifs_sb->tlink_tree_lock);
3263
3264 /* the master tcon should always be present */
3265 if (ret == 0)
3266 BUG();
3267
3268 return tlink;
3269}
3270
3271struct cifsTconInfo *
3272cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
3273{
3274 return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
3275}
3276
3277static int
3278cifs_sb_tcon_pending_wait(void *unused)
3279{
3280 schedule();
3281 return signal_pending(current) ? -ERESTARTSYS : 0;
3282}
3283
3284/*
3285 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
3286 * current task.
3287 *
3288 * If the superblock doesn't refer to a multiuser mount, then just return
3289 * the master tcon for the mount.
3290 *
3291 * First, search the radix tree for an existing tcon for this fsuid. If one
3292 * exists, then check to see if it's pending construction. If it is then wait
3293 * for construction to complete. Once it's no longer pending, check to see if
3294 * it failed and either return an error or retry construction, depending on
3295 * the timeout.
3296 *
3297 * If one doesn't exist then insert a new tcon_link struct into the tree and
3298 * try to construct a new one.
3299 */
3300struct tcon_link *
3301cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
3302{
3303 int ret;
3304 unsigned long fsuid = (unsigned long) current_fsuid();
3305 struct tcon_link *tlink, *newtlink;
3306
3307 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3308 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
3309
3310 spin_lock(&cifs_sb->tlink_tree_lock);
3311 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
3312 if (tlink)
3313 cifs_get_tlink(tlink);
3314 spin_unlock(&cifs_sb->tlink_tree_lock);
3315
3316 if (tlink == NULL) {
3317 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3318 if (newtlink == NULL)
3319 return ERR_PTR(-ENOMEM);
3320 newtlink->tl_index = fsuid;
3321 newtlink->tl_tcon = ERR_PTR(-EACCES);
3322 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
3323 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
3324 cifs_get_tlink(newtlink);
3325
3326 ret = radix_tree_preload(GFP_KERNEL);
3327 if (ret != 0) {
3328 kfree(newtlink);
3329 return ERR_PTR(ret);
3330 }
3331
3332 spin_lock(&cifs_sb->tlink_tree_lock);
3333 /* was one inserted after previous search? */
3334 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
3335 if (tlink) {
3336 cifs_get_tlink(tlink);
3337 spin_unlock(&cifs_sb->tlink_tree_lock);
3338 radix_tree_preload_end();
3339 kfree(newtlink);
3340 goto wait_for_construction;
3341 }
3342 ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
3343 spin_unlock(&cifs_sb->tlink_tree_lock);
3344 radix_tree_preload_end();
3345 if (ret) {
3346 kfree(newtlink);
3347 return ERR_PTR(ret);
3348 }
3349 tlink = newtlink;
3350 } else {
3351wait_for_construction:
3352 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
3353 cifs_sb_tcon_pending_wait,
3354 TASK_INTERRUPTIBLE);
3355 if (ret) {
3356 cifs_put_tlink(tlink);
3357 return ERR_PTR(ret);
3358 }
3359
3360 /* if it's good, return it */
3361 if (!IS_ERR(tlink->tl_tcon))
3362 return tlink;
3363
3364 /* return error if we tried this already recently */
3365 if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
3366 cifs_put_tlink(tlink);
3367 return ERR_PTR(-EACCES);
3368 }
3369
3370 if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
3371 goto wait_for_construction;
3372 }
3373
3374 tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
3375 clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
3376 wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
3377
3378 if (IS_ERR(tlink->tl_tcon)) {
3379 cifs_put_tlink(tlink);
3380 return ERR_PTR(-EACCES);
3381 }
3382
3383 return tlink;
3384}
3385
3386/*
3387 * periodic workqueue job that scans tcon_tree for a superblock and closes
3388 * out tcons.
3389 */
3390static void
3391cifs_prune_tlinks(struct work_struct *work)
3392{
3393 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
3394 prune_tlinks.work);
3395 struct tcon_link *tlink[8];
3396 unsigned long now = jiffies;
3397 unsigned long index = 0;
3398 int i, ret;
3399
3400 do {
3401 spin_lock(&cifs_sb->tlink_tree_lock);
3402 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
3403 (void **)tlink, index,
3404 ARRAY_SIZE(tlink));
3405 /* increment index for next pass */
3406 if (ret > 0)
3407 index = tlink[ret - 1]->tl_index + 1;
3408 for (i = 0; i < ret; i++) {
3409 if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
3410 atomic_read(&tlink[i]->tl_count) != 0 ||
3411 time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
3412 now)) {
3413 tlink[i] = NULL;
3414 continue;
3415 }
3416 cifs_get_tlink(tlink[i]);
3417 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
3418 radix_tree_delete(&cifs_sb->tlink_tree,
3419 tlink[i]->tl_index);
3420 }
3421 spin_unlock(&cifs_sb->tlink_tree_lock);
3422
3423 for (i = 0; i < ret; i++) {
3424 if (tlink[i] != NULL)
3425 cifs_put_tlink(tlink[i]);
3426 }
3427 } while (ret != 0);
3428
3429 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
3430 TLINK_IDLE_EXPIRE);
3431}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9ed0751cc12..3840eddbfb7a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -54,18 +54,18 @@ build_path_from_dentry(struct dentry *direntry)
54 int dfsplen; 54 int dfsplen;
55 char *full_path; 55 char *full_path;
56 char dirsep; 56 char dirsep;
57 struct cifs_sb_info *cifs_sb; 57 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
58 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
58 59
59 if (direntry == NULL) 60 if (direntry == NULL)
60 return NULL; /* not much we can do if dentry is freed and 61 return NULL; /* not much we can do if dentry is freed and
61 we need to reopen the file after it was closed implicitly 62 we need to reopen the file after it was closed implicitly
62 when the server crashed */ 63 when the server crashed */
63 64
64 cifs_sb = CIFS_SB(direntry->d_sb);
65 dirsep = CIFS_DIR_SEP(cifs_sb); 65 dirsep = CIFS_DIR_SEP(cifs_sb);
66 pplen = cifs_sb->prepathlen; 66 pplen = cifs_sb->prepathlen;
67 if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS)) 67 if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
68 dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1); 68 dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
69 else 69 else
70 dfsplen = 0; 70 dfsplen = 0;
71cifs_bp_rename_retry: 71cifs_bp_rename_retry:
@@ -117,7 +117,7 @@ cifs_bp_rename_retry:
117 /* BB test paths to Windows with '/' in the midst of prepath */ 117 /* BB test paths to Windows with '/' in the midst of prepath */
118 118
119 if (dfsplen) { 119 if (dfsplen) {
120 strncpy(full_path, cifs_sb->tcon->treeName, dfsplen); 120 strncpy(full_path, tcon->treeName, dfsplen);
121 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { 121 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
122 int i; 122 int i;
123 for (i = 0; i < dfsplen; i++) { 123 for (i = 0; i < dfsplen; i++) {
@@ -130,135 +130,6 @@ cifs_bp_rename_retry:
130 return full_path; 130 return full_path;
131} 131}
132 132
133struct cifsFileInfo *
134cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
135 struct file *file, struct vfsmount *mnt, unsigned int oflags)
136{
137 int oplock = 0;
138 struct cifsFileInfo *pCifsFile;
139 struct cifsInodeInfo *pCifsInode;
140 struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
141
142 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
143 if (pCifsFile == NULL)
144 return pCifsFile;
145
146 if (oplockEnabled)
147 oplock = REQ_OPLOCK;
148
149 pCifsFile->netfid = fileHandle;
150 pCifsFile->pid = current->tgid;
151 pCifsFile->pInode = igrab(newinode);
152 pCifsFile->mnt = mnt;
153 pCifsFile->pfile = file;
154 pCifsFile->invalidHandle = false;
155 pCifsFile->closePend = false;
156 mutex_init(&pCifsFile->fh_mutex);
157 mutex_init(&pCifsFile->lock_mutex);
158 INIT_LIST_HEAD(&pCifsFile->llist);
159 atomic_set(&pCifsFile->count, 1);
160 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
161
162 write_lock(&GlobalSMBSeslock);
163 list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
164 pCifsInode = CIFS_I(newinode);
165 if (pCifsInode) {
166 /* if readable file instance put first in list*/
167 if (oflags & FMODE_READ)
168 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
169 else
170 list_add_tail(&pCifsFile->flist,
171 &pCifsInode->openFileList);
172
173 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
174 pCifsInode->clientCanCacheAll = true;
175 pCifsInode->clientCanCacheRead = true;
176 cFYI(1, "Exclusive Oplock inode %p", newinode);
177 } else if ((oplock & 0xF) == OPLOCK_READ)
178 pCifsInode->clientCanCacheRead = true;
179 }
180 write_unlock(&GlobalSMBSeslock);
181
182 file->private_data = pCifsFile;
183
184 return pCifsFile;
185}
186
187int cifs_posix_open(char *full_path, struct inode **pinode,
188 struct super_block *sb, int mode, int oflags,
189 __u32 *poplock, __u16 *pnetfid, int xid)
190{
191 int rc;
192 FILE_UNIX_BASIC_INFO *presp_data;
193 __u32 posix_flags = 0;
194 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
195 struct cifs_fattr fattr;
196
197 cFYI(1, "posix open %s", full_path);
198
199 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
200 if (presp_data == NULL)
201 return -ENOMEM;
202
203/* So far cifs posix extensions can only map the following flags.
204 There are other valid fmode oflags such as FMODE_LSEEK, FMODE_PREAD, but
205 so far we do not seem to need them, and we can treat them as local only */
206 if ((oflags & (FMODE_READ | FMODE_WRITE)) ==
207 (FMODE_READ | FMODE_WRITE))
208 posix_flags = SMB_O_RDWR;
209 else if (oflags & FMODE_READ)
210 posix_flags = SMB_O_RDONLY;
211 else if (oflags & FMODE_WRITE)
212 posix_flags = SMB_O_WRONLY;
213 if (oflags & O_CREAT)
214 posix_flags |= SMB_O_CREAT;
215 if (oflags & O_EXCL)
216 posix_flags |= SMB_O_EXCL;
217 if (oflags & O_TRUNC)
218 posix_flags |= SMB_O_TRUNC;
219 /* be safe and imply O_SYNC for O_DSYNC */
220 if (oflags & O_DSYNC)
221 posix_flags |= SMB_O_SYNC;
222 if (oflags & O_DIRECTORY)
223 posix_flags |= SMB_O_DIRECTORY;
224 if (oflags & O_NOFOLLOW)
225 posix_flags |= SMB_O_NOFOLLOW;
226 if (oflags & O_DIRECT)
227 posix_flags |= SMB_O_DIRECT;
228
229 mode &= ~current_umask();
230 rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
231 pnetfid, presp_data, poplock, full_path,
232 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
233 CIFS_MOUNT_MAP_SPECIAL_CHR);
234 if (rc)
235 goto posix_open_ret;
236
237 if (presp_data->Type == cpu_to_le32(-1))
238 goto posix_open_ret; /* open ok, caller does qpathinfo */
239
240 if (!pinode)
241 goto posix_open_ret; /* caller does not need info */
242
243 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
244
245 /* get new inode and set it up */
246 if (*pinode == NULL) {
247 cifs_fill_uniqueid(sb, &fattr);
248 *pinode = cifs_iget(sb, &fattr);
249 if (!*pinode) {
250 rc = -ENOMEM;
251 goto posix_open_ret;
252 }
253 } else {
254 cifs_fattr_to_inode(*pinode, &fattr);
255 }
256
257posix_open_ret:
258 kfree(presp_data);
259 return rc;
260}
261
262static void setup_cifs_dentry(struct cifsTconInfo *tcon, 133static void setup_cifs_dentry(struct cifsTconInfo *tcon,
263 struct dentry *direntry, 134 struct dentry *direntry,
264 struct inode *newinode) 135 struct inode *newinode)
@@ -291,6 +162,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
291 int desiredAccess = GENERIC_READ | GENERIC_WRITE; 162 int desiredAccess = GENERIC_READ | GENERIC_WRITE;
292 __u16 fileHandle; 163 __u16 fileHandle;
293 struct cifs_sb_info *cifs_sb; 164 struct cifs_sb_info *cifs_sb;
165 struct tcon_link *tlink;
294 struct cifsTconInfo *tcon; 166 struct cifsTconInfo *tcon;
295 char *full_path = NULL; 167 char *full_path = NULL;
296 FILE_ALL_INFO *buf = NULL; 168 FILE_ALL_INFO *buf = NULL;
@@ -300,21 +172,26 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
300 xid = GetXid(); 172 xid = GetXid();
301 173
302 cifs_sb = CIFS_SB(inode->i_sb); 174 cifs_sb = CIFS_SB(inode->i_sb);
303 tcon = cifs_sb->tcon; 175 tlink = cifs_sb_tlink(cifs_sb);
304 176 if (IS_ERR(tlink)) {
305 full_path = build_path_from_dentry(direntry); 177 FreeXid(xid);
306 if (full_path == NULL) { 178 return PTR_ERR(tlink);
307 rc = -ENOMEM;
308 goto cifs_create_out;
309 } 179 }
180 tcon = tlink_tcon(tlink);
310 181
311 if (oplockEnabled) 182 if (oplockEnabled)
312 oplock = REQ_OPLOCK; 183 oplock = REQ_OPLOCK;
313 184
314 if (nd && (nd->flags & LOOKUP_OPEN)) 185 if (nd && (nd->flags & LOOKUP_OPEN))
315 oflags = nd->intent.open.flags; 186 oflags = nd->intent.open.file->f_flags;
316 else 187 else
317 oflags = FMODE_READ | SMB_O_CREAT; 188 oflags = O_RDONLY | O_CREAT;
189
190 full_path = build_path_from_dentry(direntry);
191 if (full_path == NULL) {
192 rc = -ENOMEM;
193 goto cifs_create_out;
194 }
318 195
319 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && 196 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
320 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 197 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -344,9 +221,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
344 /* if the file is going to stay open, then we 221 /* if the file is going to stay open, then we
345 need to set the desired access properly */ 222 need to set the desired access properly */
346 desiredAccess = 0; 223 desiredAccess = 0;
347 if (oflags & FMODE_READ) 224 if (OPEN_FMODE(oflags) & FMODE_READ)
348 desiredAccess |= GENERIC_READ; /* is this too little? */ 225 desiredAccess |= GENERIC_READ; /* is this too little? */
349 if (oflags & FMODE_WRITE) 226 if (OPEN_FMODE(oflags) & FMODE_WRITE)
350 desiredAccess |= GENERIC_WRITE; 227 desiredAccess |= GENERIC_WRITE;
351 228
352 if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 229 if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
@@ -375,7 +252,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
375 if (!tcon->unix_ext && (mode & S_IWUGO) == 0) 252 if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
376 create_options |= CREATE_OPTION_READONLY; 253 create_options |= CREATE_OPTION_READONLY;
377 254
378 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) 255 if (tcon->ses->capabilities & CAP_NT_SMBS)
379 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, 256 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
380 desiredAccess, create_options, 257 desiredAccess, create_options,
381 &fileHandle, &oplock, buf, cifs_sb->local_nls, 258 &fileHandle, &oplock, buf, cifs_sb->local_nls,
@@ -467,8 +344,7 @@ cifs_create_set_dentry:
467 goto cifs_create_out; 344 goto cifs_create_out;
468 } 345 }
469 346
470 pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp, 347 pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock);
471 nd->path.mnt, oflags);
472 if (pfile_info == NULL) { 348 if (pfile_info == NULL) {
473 fput(filp); 349 fput(filp);
474 CIFSSMBClose(xid, tcon, fileHandle); 350 CIFSSMBClose(xid, tcon, fileHandle);
@@ -481,6 +357,7 @@ cifs_create_set_dentry:
481cifs_create_out: 357cifs_create_out:
482 kfree(buf); 358 kfree(buf);
483 kfree(full_path); 359 kfree(full_path);
360 cifs_put_tlink(tlink);
484 FreeXid(xid); 361 FreeXid(xid);
485 return rc; 362 return rc;
486} 363}
@@ -491,6 +368,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
491 int rc = -EPERM; 368 int rc = -EPERM;
492 int xid; 369 int xid;
493 struct cifs_sb_info *cifs_sb; 370 struct cifs_sb_info *cifs_sb;
371 struct tcon_link *tlink;
494 struct cifsTconInfo *pTcon; 372 struct cifsTconInfo *pTcon;
495 char *full_path = NULL; 373 char *full_path = NULL;
496 struct inode *newinode = NULL; 374 struct inode *newinode = NULL;
@@ -503,10 +381,14 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
503 if (!old_valid_dev(device_number)) 381 if (!old_valid_dev(device_number))
504 return -EINVAL; 382 return -EINVAL;
505 383
506 xid = GetXid();
507
508 cifs_sb = CIFS_SB(inode->i_sb); 384 cifs_sb = CIFS_SB(inode->i_sb);
509 pTcon = cifs_sb->tcon; 385 tlink = cifs_sb_tlink(cifs_sb);
386 if (IS_ERR(tlink))
387 return PTR_ERR(tlink);
388
389 pTcon = tlink_tcon(tlink);
390
391 xid = GetXid();
510 392
511 full_path = build_path_from_dentry(direntry); 393 full_path = build_path_from_dentry(direntry);
512 if (full_path == NULL) { 394 if (full_path == NULL) {
@@ -606,6 +488,7 @@ mknod_out:
606 kfree(full_path); 488 kfree(full_path);
607 kfree(buf); 489 kfree(buf);
608 FreeXid(xid); 490 FreeXid(xid);
491 cifs_put_tlink(tlink);
609 return rc; 492 return rc;
610} 493}
611 494
@@ -619,6 +502,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
619 __u16 fileHandle = 0; 502 __u16 fileHandle = 0;
620 bool posix_open = false; 503 bool posix_open = false;
621 struct cifs_sb_info *cifs_sb; 504 struct cifs_sb_info *cifs_sb;
505 struct tcon_link *tlink;
622 struct cifsTconInfo *pTcon; 506 struct cifsTconInfo *pTcon;
623 struct cifsFileInfo *cfile; 507 struct cifsFileInfo *cfile;
624 struct inode *newInode = NULL; 508 struct inode *newInode = NULL;
@@ -633,7 +517,12 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
633 /* check whether path exists */ 517 /* check whether path exists */
634 518
635 cifs_sb = CIFS_SB(parent_dir_inode->i_sb); 519 cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
636 pTcon = cifs_sb->tcon; 520 tlink = cifs_sb_tlink(cifs_sb);
521 if (IS_ERR(tlink)) {
522 FreeXid(xid);
523 return (struct dentry *)tlink;
524 }
525 pTcon = tlink_tcon(tlink);
637 526
638 /* 527 /*
639 * Don't allow the separator character in a path component. 528 * Don't allow the separator character in a path component.
@@ -644,8 +533,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
644 for (i = 0; i < direntry->d_name.len; i++) 533 for (i = 0; i < direntry->d_name.len; i++)
645 if (direntry->d_name.name[i] == '\\') { 534 if (direntry->d_name.name[i] == '\\') {
646 cFYI(1, "Invalid file name"); 535 cFYI(1, "Invalid file name");
647 FreeXid(xid); 536 rc = -EINVAL;
648 return ERR_PTR(-EINVAL); 537 goto lookup_out;
649 } 538 }
650 } 539 }
651 540
@@ -655,7 +544,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
655 */ 544 */
656 if (nd && (nd->flags & LOOKUP_EXCL)) { 545 if (nd && (nd->flags & LOOKUP_EXCL)) {
657 d_instantiate(direntry, NULL); 546 d_instantiate(direntry, NULL);
658 return NULL; 547 rc = 0;
548 goto lookup_out;
659 } 549 }
660 550
661 /* can not grab the rename sem here since it would 551 /* can not grab the rename sem here since it would
@@ -663,8 +553,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
663 in which we already have the sb rename sem */ 553 in which we already have the sb rename sem */
664 full_path = build_path_from_dentry(direntry); 554 full_path = build_path_from_dentry(direntry);
665 if (full_path == NULL) { 555 if (full_path == NULL) {
666 FreeXid(xid); 556 rc = -ENOMEM;
667 return ERR_PTR(-ENOMEM); 557 goto lookup_out;
668 } 558 }
669 559
670 if (direntry->d_inode != NULL) { 560 if (direntry->d_inode != NULL) {
@@ -687,11 +577,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
687 if (pTcon->unix_ext) { 577 if (pTcon->unix_ext) {
688 if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 578 if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
689 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 579 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
690 (nd->intent.open.flags & O_CREAT)) { 580 (nd->intent.open.file->f_flags & O_CREAT)) {
691 rc = cifs_posix_open(full_path, &newInode, 581 rc = cifs_posix_open(full_path, &newInode,
692 parent_dir_inode->i_sb, 582 parent_dir_inode->i_sb,
693 nd->intent.open.create_mode, 583 nd->intent.open.create_mode,
694 nd->intent.open.flags, &oplock, 584 nd->intent.open.file->f_flags, &oplock,
695 &fileHandle, xid); 585 &fileHandle, xid);
696 /* 586 /*
697 * The check below works around a bug in POSIX 587 * The check below works around a bug in POSIX
@@ -727,9 +617,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
727 goto lookup_out; 617 goto lookup_out;
728 } 618 }
729 619
730 cfile = cifs_new_fileinfo(newInode, fileHandle, filp, 620 cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
731 nd->path.mnt, 621 oplock);
732 nd->intent.open.flags);
733 if (cfile == NULL) { 622 if (cfile == NULL) {
734 fput(filp); 623 fput(filp);
735 CIFSSMBClose(xid, pTcon, fileHandle); 624 CIFSSMBClose(xid, pTcon, fileHandle);
@@ -759,6 +648,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
759 648
760lookup_out: 649lookup_out:
761 kfree(full_path); 650 kfree(full_path);
651 cifs_put_tlink(tlink);
762 FreeXid(xid); 652 FreeXid(xid);
763 return ERR_PTR(rc); 653 return ERR_PTR(rc);
764} 654}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index de748c652d11..8c81e7b14d53 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -60,34 +60,32 @@ static inline int cifs_convert_flags(unsigned int flags)
60 FILE_READ_DATA); 60 FILE_READ_DATA);
61} 61}
62 62
63static inline fmode_t cifs_posix_convert_flags(unsigned int flags) 63static u32 cifs_posix_convert_flags(unsigned int flags)
64{ 64{
65 fmode_t posix_flags = 0; 65 u32 posix_flags = 0;
66 66
67 if ((flags & O_ACCMODE) == O_RDONLY) 67 if ((flags & O_ACCMODE) == O_RDONLY)
68 posix_flags = FMODE_READ; 68 posix_flags = SMB_O_RDONLY;
69 else if ((flags & O_ACCMODE) == O_WRONLY) 69 else if ((flags & O_ACCMODE) == O_WRONLY)
70 posix_flags = FMODE_WRITE; 70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR) { 71 else if ((flags & O_ACCMODE) == O_RDWR)
72 /* GENERIC_ALL is too much permission to request 72 posix_flags = SMB_O_RDWR;
73 can cause unnecessary access denied on create */ 73
74 /* return GENERIC_ALL; */ 74 if (flags & O_CREAT)
75 posix_flags = FMODE_READ | FMODE_WRITE; 75 posix_flags |= SMB_O_CREAT;
76 } 76 if (flags & O_EXCL)
77 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when 77 posix_flags |= SMB_O_EXCL;
78 reopening a file. They had their effect on the original open */ 78 if (flags & O_TRUNC)
79 if (flags & O_APPEND) 79 posix_flags |= SMB_O_TRUNC;
80 posix_flags |= (fmode_t)O_APPEND; 80 /* be safe and imply O_SYNC for O_DSYNC */
81 if (flags & O_DSYNC) 81 if (flags & O_DSYNC)
82 posix_flags |= (fmode_t)O_DSYNC; 82 posix_flags |= SMB_O_SYNC;
83 if (flags & __O_SYNC)
84 posix_flags |= (fmode_t)__O_SYNC;
85 if (flags & O_DIRECTORY) 83 if (flags & O_DIRECTORY)
86 posix_flags |= (fmode_t)O_DIRECTORY; 84 posix_flags |= SMB_O_DIRECTORY;
87 if (flags & O_NOFOLLOW) 85 if (flags & O_NOFOLLOW)
88 posix_flags |= (fmode_t)O_NOFOLLOW; 86 posix_flags |= SMB_O_NOFOLLOW;
89 if (flags & O_DIRECT) 87 if (flags & O_DIRECT)
90 posix_flags |= (fmode_t)O_DIRECT; 88 posix_flags |= SMB_O_DIRECT;
91 89
92 return posix_flags; 90 return posix_flags;
93} 91}
@@ -106,66 +104,8 @@ static inline int cifs_get_disposition(unsigned int flags)
106 return FILE_OPEN; 104 return FILE_OPEN;
107} 105}
108 106
109/* all arguments to this function must be checked for validity in caller */
110static inline int
111cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
112 struct cifsInodeInfo *pCifsInode, __u32 oplock,
113 u16 netfid)
114{
115
116 write_lock(&GlobalSMBSeslock);
117
118 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
119 if (pCifsInode == NULL) {
120 write_unlock(&GlobalSMBSeslock);
121 return -EINVAL;
122 }
123
124 if (pCifsInode->clientCanCacheRead) {
125 /* we have the inode open somewhere else
126 no need to discard cache data */
127 goto psx_client_can_cache;
128 }
129
130 /* BB FIXME need to fix this check to move it earlier into posix_open
131 BB fIX following section BB FIXME */
132
133 /* if not oplocked, invalidate inode pages if mtime or file
134 size changed */
135/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
136 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
137 (file->f_path.dentry->d_inode->i_size ==
138 (loff_t)le64_to_cpu(buf->EndOfFile))) {
139 cFYI(1, "inode unchanged on server");
140 } else {
141 if (file->f_path.dentry->d_inode->i_mapping) {
142 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
143 if (rc != 0)
144 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
145 }
146 cFYI(1, "invalidating remote inode since open detected it "
147 "changed");
148 invalidate_remote_inode(file->f_path.dentry->d_inode);
149 } */
150
151psx_client_can_cache:
152 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
153 pCifsInode->clientCanCacheAll = true;
154 pCifsInode->clientCanCacheRead = true;
155 cFYI(1, "Exclusive Oplock granted on inode %p",
156 file->f_path.dentry->d_inode);
157 } else if ((oplock & 0xF) == OPLOCK_READ)
158 pCifsInode->clientCanCacheRead = true;
159
160 /* will have to change the unlock if we reenable the
161 filemap_fdatawrite (which does not seem necessary */
162 write_unlock(&GlobalSMBSeslock);
163 return 0;
164}
165
166/* all arguments to this function must be checked for validity in caller */
167static inline int cifs_open_inode_helper(struct inode *inode, 107static inline int cifs_open_inode_helper(struct inode *inode,
168 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf, 108 struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
169 char *full_path, int xid) 109 char *full_path, int xid)
170{ 110{
171 struct cifsInodeInfo *pCifsInode = CIFS_I(inode); 111 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
@@ -207,16 +147,175 @@ client_can_cache:
207 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 147 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
208 xid, NULL); 148 xid, NULL);
209 149
210 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { 150 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
211 pCifsInode->clientCanCacheAll = true; 151 pCifsInode->clientCanCacheAll = true;
212 pCifsInode->clientCanCacheRead = true; 152 pCifsInode->clientCanCacheRead = true;
213 cFYI(1, "Exclusive Oplock granted on inode %p", inode); 153 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
214 } else if ((*oplock & 0xF) == OPLOCK_READ) 154 } else if ((oplock & 0xF) == OPLOCK_READ)
215 pCifsInode->clientCanCacheRead = true; 155 pCifsInode->clientCanCacheRead = true;
216 156
217 return rc; 157 return rc;
218} 158}
219 159
160int cifs_posix_open(char *full_path, struct inode **pinode,
161 struct super_block *sb, int mode, unsigned int f_flags,
162 __u32 *poplock, __u16 *pnetfid, int xid)
163{
164 int rc;
165 FILE_UNIX_BASIC_INFO *presp_data;
166 __u32 posix_flags = 0;
167 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
168 struct cifs_fattr fattr;
169 struct tcon_link *tlink;
170 struct cifsTconInfo *tcon;
171
172 cFYI(1, "posix open %s", full_path);
173
174 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
175 if (presp_data == NULL)
176 return -ENOMEM;
177
178 tlink = cifs_sb_tlink(cifs_sb);
179 if (IS_ERR(tlink)) {
180 rc = PTR_ERR(tlink);
181 goto posix_open_ret;
182 }
183
184 tcon = tlink_tcon(tlink);
185 mode &= ~current_umask();
186
187 posix_flags = cifs_posix_convert_flags(f_flags);
188 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
189 poplock, full_path, cifs_sb->local_nls,
190 cifs_sb->mnt_cifs_flags &
191 CIFS_MOUNT_MAP_SPECIAL_CHR);
192 cifs_put_tlink(tlink);
193
194 if (rc)
195 goto posix_open_ret;
196
197 if (presp_data->Type == cpu_to_le32(-1))
198 goto posix_open_ret; /* open ok, caller does qpathinfo */
199
200 if (!pinode)
201 goto posix_open_ret; /* caller does not need info */
202
203 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
204
205 /* get new inode and set it up */
206 if (*pinode == NULL) {
207 cifs_fill_uniqueid(sb, &fattr);
208 *pinode = cifs_iget(sb, &fattr);
209 if (!*pinode) {
210 rc = -ENOMEM;
211 goto posix_open_ret;
212 }
213 } else {
214 cifs_fattr_to_inode(*pinode, &fattr);
215 }
216
217posix_open_ret:
218 kfree(presp_data);
219 return rc;
220}
221
222struct cifsFileInfo *
223cifs_new_fileinfo(__u16 fileHandle, struct file *file,
224 struct tcon_link *tlink, __u32 oplock)
225{
226 struct dentry *dentry = file->f_path.dentry;
227 struct inode *inode = dentry->d_inode;
228 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
229 struct cifsFileInfo *pCifsFile;
230
231 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
232 if (pCifsFile == NULL)
233 return pCifsFile;
234
235 pCifsFile->netfid = fileHandle;
236 pCifsFile->pid = current->tgid;
237 pCifsFile->uid = current_fsuid();
238 pCifsFile->dentry = dget(dentry);
239 pCifsFile->f_flags = file->f_flags;
240 pCifsFile->invalidHandle = false;
241 pCifsFile->tlink = cifs_get_tlink(tlink);
242 mutex_init(&pCifsFile->fh_mutex);
243 mutex_init(&pCifsFile->lock_mutex);
244 INIT_LIST_HEAD(&pCifsFile->llist);
245 atomic_set(&pCifsFile->count, 1);
246 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
247
248 spin_lock(&cifs_file_list_lock);
249 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
250 /* if readable file instance put first in list*/
251 if (file->f_mode & FMODE_READ)
252 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
253 else
254 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
255 spin_unlock(&cifs_file_list_lock);
256
257 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
258 pCifsInode->clientCanCacheAll = true;
259 pCifsInode->clientCanCacheRead = true;
260 cFYI(1, "Exclusive Oplock inode %p", inode);
261 } else if ((oplock & 0xF) == OPLOCK_READ)
262 pCifsInode->clientCanCacheRead = true;
263
264 file->private_data = pCifsFile;
265 return pCifsFile;
266}
267
268/*
269 * Release a reference on the file private data. This may involve closing
270 * the filehandle out on the server.
271 */
272void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
273{
274 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
275 struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
276 struct cifsLockInfo *li, *tmp;
277
278 spin_lock(&cifs_file_list_lock);
279 if (!atomic_dec_and_test(&cifs_file->count)) {
280 spin_unlock(&cifs_file_list_lock);
281 return;
282 }
283
284 /* remove it from the lists */
285 list_del(&cifs_file->flist);
286 list_del(&cifs_file->tlist);
287
288 if (list_empty(&cifsi->openFileList)) {
289 cFYI(1, "closing last open instance for inode %p",
290 cifs_file->dentry->d_inode);
291 cifsi->clientCanCacheRead = false;
292 cifsi->clientCanCacheAll = false;
293 }
294 spin_unlock(&cifs_file_list_lock);
295
296 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
297 int xid, rc;
298
299 xid = GetXid();
300 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
301 FreeXid(xid);
302 }
303
304 /* Delete any outstanding lock records. We'll lose them when the file
305 * is closed anyway.
306 */
307 mutex_lock(&cifs_file->lock_mutex);
308 list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
309 list_del(&li->llist);
310 kfree(li);
311 }
312 mutex_unlock(&cifs_file->lock_mutex);
313
314 cifs_put_tlink(cifs_file->tlink);
315 dput(cifs_file->dentry);
316 kfree(cifs_file);
317}
318
220int cifs_open(struct inode *inode, struct file *file) 319int cifs_open(struct inode *inode, struct file *file)
221{ 320{
222 int rc = -EACCES; 321 int rc = -EACCES;
@@ -224,6 +323,7 @@ int cifs_open(struct inode *inode, struct file *file)
224 __u32 oplock; 323 __u32 oplock;
225 struct cifs_sb_info *cifs_sb; 324 struct cifs_sb_info *cifs_sb;
226 struct cifsTconInfo *tcon; 325 struct cifsTconInfo *tcon;
326 struct tcon_link *tlink;
227 struct cifsFileInfo *pCifsFile = NULL; 327 struct cifsFileInfo *pCifsFile = NULL;
228 struct cifsInodeInfo *pCifsInode; 328 struct cifsInodeInfo *pCifsInode;
229 char *full_path = NULL; 329 char *full_path = NULL;
@@ -235,7 +335,12 @@ int cifs_open(struct inode *inode, struct file *file)
235 xid = GetXid(); 335 xid = GetXid();
236 336
237 cifs_sb = CIFS_SB(inode->i_sb); 337 cifs_sb = CIFS_SB(inode->i_sb);
238 tcon = cifs_sb->tcon; 338 tlink = cifs_sb_tlink(cifs_sb);
339 if (IS_ERR(tlink)) {
340 FreeXid(xid);
341 return PTR_ERR(tlink);
342 }
343 tcon = tlink_tcon(tlink);
239 344
240 pCifsInode = CIFS_I(file->f_path.dentry->d_inode); 345 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
241 346
@@ -257,27 +362,15 @@ int cifs_open(struct inode *inode, struct file *file)
257 (tcon->ses->capabilities & CAP_UNIX) && 362 (tcon->ses->capabilities & CAP_UNIX) &&
258 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 363 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
259 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 364 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
260 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
261 oflags |= SMB_O_CREAT;
262 /* can not refresh inode info since size could be stale */ 365 /* can not refresh inode info since size could be stale */
263 rc = cifs_posix_open(full_path, &inode, inode->i_sb, 366 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
264 cifs_sb->mnt_file_mode /* ignored */, 367 cifs_sb->mnt_file_mode /* ignored */,
265 oflags, &oplock, &netfid, xid); 368 file->f_flags, &oplock, &netfid, xid);
266 if (rc == 0) { 369 if (rc == 0) {
267 cFYI(1, "posix open succeeded"); 370 cFYI(1, "posix open succeeded");
268 /* no need for special case handling of setting mode
269 on read only files needed here */
270 371
271 rc = cifs_posix_open_inode_helper(inode, file, 372 pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
272 pCifsInode, oplock, netfid); 373 oplock);
273 if (rc != 0) {
274 CIFSSMBClose(xid, tcon, netfid);
275 goto out;
276 }
277
278 pCifsFile = cifs_new_fileinfo(inode, netfid, file,
279 file->f_path.mnt,
280 oflags);
281 if (pCifsFile == NULL) { 374 if (pCifsFile == NULL) {
282 CIFSSMBClose(xid, tcon, netfid); 375 CIFSSMBClose(xid, tcon, netfid);
283 rc = -ENOMEM; 376 rc = -ENOMEM;
@@ -345,7 +438,7 @@ int cifs_open(struct inode *inode, struct file *file)
345 goto out; 438 goto out;
346 } 439 }
347 440
348 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) 441 if (tcon->ses->capabilities & CAP_NT_SMBS)
349 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, 442 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
350 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, 443 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
351 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags 444 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
@@ -365,12 +458,11 @@ int cifs_open(struct inode *inode, struct file *file)
365 goto out; 458 goto out;
366 } 459 }
367 460
368 rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid); 461 rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
369 if (rc != 0) 462 if (rc != 0)
370 goto out; 463 goto out;
371 464
372 pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, 465 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
373 file->f_flags);
374 if (pCifsFile == NULL) { 466 if (pCifsFile == NULL) {
375 rc = -ENOMEM; 467 rc = -ENOMEM;
376 goto out; 468 goto out;
@@ -402,6 +494,7 @@ out:
402 kfree(buf); 494 kfree(buf);
403 kfree(full_path); 495 kfree(full_path);
404 FreeXid(xid); 496 FreeXid(xid);
497 cifs_put_tlink(tlink);
405 return rc; 498 return rc;
406} 499}
407 500
@@ -416,14 +509,13 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
416 return rc; 509 return rc;
417} 510}
418 511
419static int cifs_reopen_file(struct file *file, bool can_flush) 512static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
420{ 513{
421 int rc = -EACCES; 514 int rc = -EACCES;
422 int xid; 515 int xid;
423 __u32 oplock; 516 __u32 oplock;
424 struct cifs_sb_info *cifs_sb; 517 struct cifs_sb_info *cifs_sb;
425 struct cifsTconInfo *tcon; 518 struct cifsTconInfo *tcon;
426 struct cifsFileInfo *pCifsFile;
427 struct cifsInodeInfo *pCifsInode; 519 struct cifsInodeInfo *pCifsInode;
428 struct inode *inode; 520 struct inode *inode;
429 char *full_path = NULL; 521 char *full_path = NULL;
@@ -431,11 +523,6 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
431 int disposition = FILE_OPEN; 523 int disposition = FILE_OPEN;
432 __u16 netfid; 524 __u16 netfid;
433 525
434 if (file->private_data)
435 pCifsFile = file->private_data;
436 else
437 return -EBADF;
438
439 xid = GetXid(); 526 xid = GetXid();
440 mutex_lock(&pCifsFile->fh_mutex); 527 mutex_lock(&pCifsFile->fh_mutex);
441 if (!pCifsFile->invalidHandle) { 528 if (!pCifsFile->invalidHandle) {
@@ -445,39 +532,24 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
445 return rc; 532 return rc;
446 } 533 }
447 534
448 if (file->f_path.dentry == NULL) { 535 inode = pCifsFile->dentry->d_inode;
449 cERROR(1, "no valid name if dentry freed");
450 dump_stack();
451 rc = -EBADF;
452 goto reopen_error_exit;
453 }
454
455 inode = file->f_path.dentry->d_inode;
456 if (inode == NULL) {
457 cERROR(1, "inode not valid");
458 dump_stack();
459 rc = -EBADF;
460 goto reopen_error_exit;
461 }
462
463 cifs_sb = CIFS_SB(inode->i_sb); 536 cifs_sb = CIFS_SB(inode->i_sb);
464 tcon = cifs_sb->tcon; 537 tcon = tlink_tcon(pCifsFile->tlink);
465 538
466/* can not grab rename sem here because various ops, including 539/* can not grab rename sem here because various ops, including
467 those that already have the rename sem can end up causing writepage 540 those that already have the rename sem can end up causing writepage
468 to get called and if the server was down that means we end up here, 541 to get called and if the server was down that means we end up here,
469 and we can never tell if the caller already has the rename_sem */ 542 and we can never tell if the caller already has the rename_sem */
470 full_path = build_path_from_dentry(file->f_path.dentry); 543 full_path = build_path_from_dentry(pCifsFile->dentry);
471 if (full_path == NULL) { 544 if (full_path == NULL) {
472 rc = -ENOMEM; 545 rc = -ENOMEM;
473reopen_error_exit:
474 mutex_unlock(&pCifsFile->fh_mutex); 546 mutex_unlock(&pCifsFile->fh_mutex);
475 FreeXid(xid); 547 FreeXid(xid);
476 return rc; 548 return rc;
477 } 549 }
478 550
479 cFYI(1, "inode = 0x%p file flags 0x%x for %s", 551 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
480 inode, file->f_flags, full_path); 552 inode, pCifsFile->f_flags, full_path);
481 553
482 if (oplockEnabled) 554 if (oplockEnabled)
483 oplock = REQ_OPLOCK; 555 oplock = REQ_OPLOCK;
@@ -487,8 +559,14 @@ reopen_error_exit:
487 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && 559 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
488 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 560 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
489 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 561 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
490 int oflags = (int) cifs_posix_convert_flags(file->f_flags); 562
491 /* can not refresh inode info since size could be stale */ 563 /*
564 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
565 * original open. Must mask them off for a reopen.
566 */
567 unsigned int oflags = pCifsFile->f_flags &
568 ~(O_CREAT | O_EXCL | O_TRUNC);
569
492 rc = cifs_posix_open(full_path, NULL, inode->i_sb, 570 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
493 cifs_sb->mnt_file_mode /* ignored */, 571 cifs_sb->mnt_file_mode /* ignored */,
494 oflags, &oplock, &netfid, xid); 572 oflags, &oplock, &netfid, xid);
@@ -500,7 +578,7 @@ reopen_error_exit:
500 in the reconnect path it is important to retry hard */ 578 in the reconnect path it is important to retry hard */
501 } 579 }
502 580
503 desiredAccess = cifs_convert_flags(file->f_flags); 581 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
504 582
505 /* Can not refresh inode by passing in file_info buf to be returned 583 /* Can not refresh inode by passing in file_info buf to be returned
506 by SMBOpen and then calling get_inode_info with returned buf 584 by SMBOpen and then calling get_inode_info with returned buf
@@ -516,49 +594,50 @@ reopen_error_exit:
516 mutex_unlock(&pCifsFile->fh_mutex); 594 mutex_unlock(&pCifsFile->fh_mutex);
517 cFYI(1, "cifs_open returned 0x%x", rc); 595 cFYI(1, "cifs_open returned 0x%x", rc);
518 cFYI(1, "oplock: %d", oplock); 596 cFYI(1, "oplock: %d", oplock);
519 } else { 597 goto reopen_error_exit;
598 }
599
520reopen_success: 600reopen_success:
521 pCifsFile->netfid = netfid; 601 pCifsFile->netfid = netfid;
522 pCifsFile->invalidHandle = false; 602 pCifsFile->invalidHandle = false;
523 mutex_unlock(&pCifsFile->fh_mutex); 603 mutex_unlock(&pCifsFile->fh_mutex);
524 pCifsInode = CIFS_I(inode); 604 pCifsInode = CIFS_I(inode);
525 if (pCifsInode) { 605
526 if (can_flush) { 606 if (can_flush) {
527 rc = filemap_write_and_wait(inode->i_mapping); 607 rc = filemap_write_and_wait(inode->i_mapping);
528 if (rc != 0) 608 if (rc != 0)
529 CIFS_I(inode)->write_behind_rc = rc; 609 CIFS_I(inode)->write_behind_rc = rc;
530 /* temporarily disable caching while we 610
531 go to server to get inode info */ 611 pCifsInode->clientCanCacheAll = false;
532 pCifsInode->clientCanCacheAll = false; 612 pCifsInode->clientCanCacheRead = false;
533 pCifsInode->clientCanCacheRead = false; 613 if (tcon->unix_ext)
534 if (tcon->unix_ext) 614 rc = cifs_get_inode_info_unix(&inode,
535 rc = cifs_get_inode_info_unix(&inode, 615 full_path, inode->i_sb, xid);
536 full_path, inode->i_sb, xid); 616 else
537 else 617 rc = cifs_get_inode_info(&inode,
538 rc = cifs_get_inode_info(&inode, 618 full_path, NULL, inode->i_sb,
539 full_path, NULL, inode->i_sb, 619 xid, NULL);
540 xid, NULL); 620 } /* else we are writing out data to server already
541 } /* else we are writing out data to server already 621 and could deadlock if we tried to flush data, and
542 and could deadlock if we tried to flush data, and 622 since we do not know if we have data that would
543 since we do not know if we have data that would 623 invalidate the current end of file on the server
544 invalidate the current end of file on the server 624 we can not go to the server to get the new inod
545 we can not go to the server to get the new inod 625 info */
546 info */ 626 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
547 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 627 pCifsInode->clientCanCacheAll = true;
548 pCifsInode->clientCanCacheAll = true; 628 pCifsInode->clientCanCacheRead = true;
549 pCifsInode->clientCanCacheRead = true; 629 cFYI(1, "Exclusive Oplock granted on inode %p",
550 cFYI(1, "Exclusive Oplock granted on inode %p", 630 pCifsFile->dentry->d_inode);
551 file->f_path.dentry->d_inode); 631 } else if ((oplock & 0xF) == OPLOCK_READ) {
552 } else if ((oplock & 0xF) == OPLOCK_READ) { 632 pCifsInode->clientCanCacheRead = true;
553 pCifsInode->clientCanCacheRead = true; 633 pCifsInode->clientCanCacheAll = false;
554 pCifsInode->clientCanCacheAll = false; 634 } else {
555 } else { 635 pCifsInode->clientCanCacheRead = false;
556 pCifsInode->clientCanCacheRead = false; 636 pCifsInode->clientCanCacheAll = false;
557 pCifsInode->clientCanCacheAll = false;
558 }
559 cifs_relock_file(pCifsFile);
560 }
561 } 637 }
638 cifs_relock_file(pCifsFile);
639
640reopen_error_exit:
562 kfree(full_path); 641 kfree(full_path);
563 FreeXid(xid); 642 FreeXid(xid);
564 return rc; 643 return rc;
@@ -566,79 +645,11 @@ reopen_success:
566 645
567int cifs_close(struct inode *inode, struct file *file) 646int cifs_close(struct inode *inode, struct file *file)
568{ 647{
569 int rc = 0; 648 cifsFileInfo_put(file->private_data);
570 int xid, timeout; 649 file->private_data = NULL;
571 struct cifs_sb_info *cifs_sb;
572 struct cifsTconInfo *pTcon;
573 struct cifsFileInfo *pSMBFile = file->private_data;
574 650
575 xid = GetXid(); 651 /* return code from the ->release op is always ignored */
576 652 return 0;
577 cifs_sb = CIFS_SB(inode->i_sb);
578 pTcon = cifs_sb->tcon;
579 if (pSMBFile) {
580 struct cifsLockInfo *li, *tmp;
581 write_lock(&GlobalSMBSeslock);
582 pSMBFile->closePend = true;
583 if (pTcon) {
584 /* no sense reconnecting to close a file that is
585 already closed */
586 if (!pTcon->need_reconnect) {
587 write_unlock(&GlobalSMBSeslock);
588 timeout = 2;
589 while ((atomic_read(&pSMBFile->count) != 1)
590 && (timeout <= 2048)) {
591 /* Give write a better chance to get to
592 server ahead of the close. We do not
593 want to add a wait_q here as it would
594 increase the memory utilization as
595 the struct would be in each open file,
596 but this should give enough time to
597 clear the socket */
598 cFYI(DBG2, "close delay, write pending");
599 msleep(timeout);
600 timeout *= 4;
601 }
602 if (!pTcon->need_reconnect &&
603 !pSMBFile->invalidHandle)
604 rc = CIFSSMBClose(xid, pTcon,
605 pSMBFile->netfid);
606 } else
607 write_unlock(&GlobalSMBSeslock);
608 } else
609 write_unlock(&GlobalSMBSeslock);
610
611 /* Delete any outstanding lock records.
612 We'll lose them when the file is closed anyway. */
613 mutex_lock(&pSMBFile->lock_mutex);
614 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
615 list_del(&li->llist);
616 kfree(li);
617 }
618 mutex_unlock(&pSMBFile->lock_mutex);
619
620 write_lock(&GlobalSMBSeslock);
621 list_del(&pSMBFile->flist);
622 list_del(&pSMBFile->tlist);
623 write_unlock(&GlobalSMBSeslock);
624 cifsFileInfo_put(file->private_data);
625 file->private_data = NULL;
626 } else
627 rc = -EBADF;
628
629 read_lock(&GlobalSMBSeslock);
630 if (list_empty(&(CIFS_I(inode)->openFileList))) {
631 cFYI(1, "closing last open instance for inode %p", inode);
632 /* if the file is not open we do not know if we can cache info
633 on this inode, much less write behind and read ahead */
634 CIFS_I(inode)->clientCanCacheRead = false;
635 CIFS_I(inode)->clientCanCacheAll = false;
636 }
637 read_unlock(&GlobalSMBSeslock);
638 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
639 rc = CIFS_I(inode)->write_behind_rc;
640 FreeXid(xid);
641 return rc;
642} 653}
643 654
644int cifs_closedir(struct inode *inode, struct file *file) 655int cifs_closedir(struct inode *inode, struct file *file)
@@ -653,25 +664,21 @@ int cifs_closedir(struct inode *inode, struct file *file)
653 xid = GetXid(); 664 xid = GetXid();
654 665
655 if (pCFileStruct) { 666 if (pCFileStruct) {
656 struct cifsTconInfo *pTcon; 667 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
657 struct cifs_sb_info *cifs_sb =
658 CIFS_SB(file->f_path.dentry->d_sb);
659
660 pTcon = cifs_sb->tcon;
661 668
662 cFYI(1, "Freeing private data in close dir"); 669 cFYI(1, "Freeing private data in close dir");
663 write_lock(&GlobalSMBSeslock); 670 spin_lock(&cifs_file_list_lock);
664 if (!pCFileStruct->srch_inf.endOfSearch && 671 if (!pCFileStruct->srch_inf.endOfSearch &&
665 !pCFileStruct->invalidHandle) { 672 !pCFileStruct->invalidHandle) {
666 pCFileStruct->invalidHandle = true; 673 pCFileStruct->invalidHandle = true;
667 write_unlock(&GlobalSMBSeslock); 674 spin_unlock(&cifs_file_list_lock);
668 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); 675 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
669 cFYI(1, "Closing uncompleted readdir with rc %d", 676 cFYI(1, "Closing uncompleted readdir with rc %d",
670 rc); 677 rc);
671 /* not much we can do if it fails anyway, ignore rc */ 678 /* not much we can do if it fails anyway, ignore rc */
672 rc = 0; 679 rc = 0;
673 } else 680 } else
674 write_unlock(&GlobalSMBSeslock); 681 spin_unlock(&cifs_file_list_lock);
675 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; 682 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
676 if (ptmp) { 683 if (ptmp) {
677 cFYI(1, "closedir free smb buf in srch struct"); 684 cFYI(1, "closedir free smb buf in srch struct");
@@ -681,6 +688,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
681 else 688 else
682 cifs_buf_release(ptmp); 689 cifs_buf_release(ptmp);
683 } 690 }
691 cifs_put_tlink(pCFileStruct->tlink);
684 kfree(file->private_data); 692 kfree(file->private_data);
685 file->private_data = NULL; 693 file->private_data = NULL;
686 } 694 }
@@ -767,7 +775,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
767 cFYI(1, "Unknown type of lock"); 775 cFYI(1, "Unknown type of lock");
768 776
769 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 777 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
770 tcon = cifs_sb->tcon; 778 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
771 779
772 if (file->private_data == NULL) { 780 if (file->private_data == NULL) {
773 rc = -EBADF; 781 rc = -EBADF;
@@ -960,14 +968,14 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
960 968
961 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 969 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
962 970
963 pTcon = cifs_sb->tcon;
964
965 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size, 971 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
966 *poffset, file->f_path.dentry->d_name.name); */ 972 *poffset, file->f_path.dentry->d_name.name); */
967 973
968 if (file->private_data == NULL) 974 if (file->private_data == NULL)
969 return -EBADF; 975 return -EBADF;
976
970 open_file = file->private_data; 977 open_file = file->private_data;
978 pTcon = tlink_tcon(open_file->tlink);
971 979
972 rc = generic_write_checks(file, poffset, &write_size, 0); 980 rc = generic_write_checks(file, poffset, &write_size, 0);
973 if (rc) 981 if (rc)
@@ -988,19 +996,12 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
988 we blocked so return what we managed to write */ 996 we blocked so return what we managed to write */
989 return total_written; 997 return total_written;
990 } 998 }
991 if (open_file->closePend) {
992 FreeXid(xid);
993 if (total_written)
994 return total_written;
995 else
996 return -EBADF;
997 }
998 if (open_file->invalidHandle) { 999 if (open_file->invalidHandle) {
999 /* we could deadlock if we called 1000 /* we could deadlock if we called
1000 filemap_fdatawait from here so tell 1001 filemap_fdatawait from here so tell
1001 reopen_file not to flush data to server 1002 reopen_file not to flush data to server
1002 now */ 1003 now */
1003 rc = cifs_reopen_file(file, false); 1004 rc = cifs_reopen_file(open_file, false);
1004 if (rc != 0) 1005 if (rc != 0)
1005 break; 1006 break;
1006 } 1007 }
@@ -1048,8 +1049,9 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
1048 return total_written; 1049 return total_written;
1049} 1050}
1050 1051
1051static ssize_t cifs_write(struct file *file, const char *write_data, 1052static ssize_t cifs_write(struct cifsFileInfo *open_file,
1052 size_t write_size, loff_t *poffset) 1053 const char *write_data, size_t write_size,
1054 loff_t *poffset)
1053{ 1055{
1054 int rc = 0; 1056 int rc = 0;
1055 unsigned int bytes_written = 0; 1057 unsigned int bytes_written = 0;
@@ -1057,19 +1059,15 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
1057 struct cifs_sb_info *cifs_sb; 1059 struct cifs_sb_info *cifs_sb;
1058 struct cifsTconInfo *pTcon; 1060 struct cifsTconInfo *pTcon;
1059 int xid, long_op; 1061 int xid, long_op;
1060 struct cifsFileInfo *open_file; 1062 struct dentry *dentry = open_file->dentry;
1061 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode); 1063 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1062 1064
1063 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1065 cifs_sb = CIFS_SB(dentry->d_sb);
1064
1065 pTcon = cifs_sb->tcon;
1066 1066
1067 cFYI(1, "write %zd bytes to offset %lld of %s", write_size, 1067 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1068 *poffset, file->f_path.dentry->d_name.name); 1068 *poffset, dentry->d_name.name);
1069 1069
1070 if (file->private_data == NULL) 1070 pTcon = tlink_tcon(open_file->tlink);
1071 return -EBADF;
1072 open_file = file->private_data;
1073 1071
1074 xid = GetXid(); 1072 xid = GetXid();
1075 1073
@@ -1078,28 +1076,12 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
1078 total_written += bytes_written) { 1076 total_written += bytes_written) {
1079 rc = -EAGAIN; 1077 rc = -EAGAIN;
1080 while (rc == -EAGAIN) { 1078 while (rc == -EAGAIN) {
1081 if (file->private_data == NULL) {
1082 /* file has been closed on us */
1083 FreeXid(xid);
1084 /* if we have gotten here we have written some data
1085 and blocked, and the file has been freed on us
1086 while we blocked so return what we managed to
1087 write */
1088 return total_written;
1089 }
1090 if (open_file->closePend) {
1091 FreeXid(xid);
1092 if (total_written)
1093 return total_written;
1094 else
1095 return -EBADF;
1096 }
1097 if (open_file->invalidHandle) { 1079 if (open_file->invalidHandle) {
1098 /* we could deadlock if we called 1080 /* we could deadlock if we called
1099 filemap_fdatawait from here so tell 1081 filemap_fdatawait from here so tell
1100 reopen_file not to flush data to 1082 reopen_file not to flush data to
1101 server now */ 1083 server now */
1102 rc = cifs_reopen_file(file, false); 1084 rc = cifs_reopen_file(open_file, false);
1103 if (rc != 0) 1085 if (rc != 0)
1104 break; 1086 break;
1105 } 1087 }
@@ -1146,43 +1128,41 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
1146 1128
1147 cifs_stats_bytes_written(pTcon, total_written); 1129 cifs_stats_bytes_written(pTcon, total_written);
1148 1130
1149 /* since the write may have blocked check these pointers again */ 1131 if (total_written > 0) {
1150 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { 1132 spin_lock(&dentry->d_inode->i_lock);
1151/*BB We could make this contingent on superblock ATIME flag too */ 1133 if (*poffset > dentry->d_inode->i_size)
1152/* file->f_path.dentry->d_inode->i_ctime = 1134 i_size_write(dentry->d_inode, *poffset);
1153 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ 1135 spin_unlock(&dentry->d_inode->i_lock);
1154 if (total_written > 0) {
1155 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1156 if (*poffset > file->f_path.dentry->d_inode->i_size)
1157 i_size_write(file->f_path.dentry->d_inode,
1158 *poffset);
1159 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1160 }
1161 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1162 } 1136 }
1137 mark_inode_dirty_sync(dentry->d_inode);
1163 FreeXid(xid); 1138 FreeXid(xid);
1164 return total_written; 1139 return total_written;
1165} 1140}
1166 1141
1167#ifdef CONFIG_CIFS_EXPERIMENTAL 1142#ifdef CONFIG_CIFS_EXPERIMENTAL
1168struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode) 1143struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1144 bool fsuid_only)
1169{ 1145{
1170 struct cifsFileInfo *open_file = NULL; 1146 struct cifsFileInfo *open_file = NULL;
1147 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1148
1149 /* only filter by fsuid on multiuser mounts */
1150 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1151 fsuid_only = false;
1171 1152
1172 read_lock(&GlobalSMBSeslock); 1153 spin_lock(&cifs_file_list_lock);
1173 /* we could simply get the first_list_entry since write-only entries 1154 /* we could simply get the first_list_entry since write-only entries
1174 are always at the end of the list but since the first entry might 1155 are always at the end of the list but since the first entry might
1175 have a close pending, we go through the whole list */ 1156 have a close pending, we go through the whole list */
1176 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 1157 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1177 if (open_file->closePend) 1158 if (fsuid_only && open_file->uid != current_fsuid())
1178 continue; 1159 continue;
1179 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) || 1160 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1180 (open_file->pfile->f_flags & O_RDONLY))) {
1181 if (!open_file->invalidHandle) { 1161 if (!open_file->invalidHandle) {
1182 /* found a good file */ 1162 /* found a good file */
1183 /* lock it so it will not be closed on us */ 1163 /* lock it so it will not be closed on us */
1184 cifsFileInfo_get(open_file); 1164 cifsFileInfo_get(open_file);
1185 read_unlock(&GlobalSMBSeslock); 1165 spin_unlock(&cifs_file_list_lock);
1186 return open_file; 1166 return open_file;
1187 } /* else might as well continue, and look for 1167 } /* else might as well continue, and look for
1188 another, or simply have the caller reopen it 1168 another, or simply have the caller reopen it
@@ -1190,14 +1170,16 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1190 } else /* write only file */ 1170 } else /* write only file */
1191 break; /* write only files are last so must be done */ 1171 break; /* write only files are last so must be done */
1192 } 1172 }
1193 read_unlock(&GlobalSMBSeslock); 1173 spin_unlock(&cifs_file_list_lock);
1194 return NULL; 1174 return NULL;
1195} 1175}
1196#endif 1176#endif
1197 1177
1198struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode) 1178struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1179 bool fsuid_only)
1199{ 1180{
1200 struct cifsFileInfo *open_file; 1181 struct cifsFileInfo *open_file;
1182 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1201 bool any_available = false; 1183 bool any_available = false;
1202 int rc; 1184 int rc;
1203 1185
@@ -1211,53 +1193,39 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1211 return NULL; 1193 return NULL;
1212 } 1194 }
1213 1195
1214 read_lock(&GlobalSMBSeslock); 1196 /* only filter by fsuid on multiuser mounts */
1197 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1198 fsuid_only = false;
1199
1200 spin_lock(&cifs_file_list_lock);
1215refind_writable: 1201refind_writable:
1216 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 1202 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1217 if (open_file->closePend || 1203 if (!any_available && open_file->pid != current->tgid)
1218 (!any_available && open_file->pid != current->tgid))
1219 continue; 1204 continue;
1220 1205 if (fsuid_only && open_file->uid != current_fsuid())
1221 if (open_file->pfile && 1206 continue;
1222 ((open_file->pfile->f_flags & O_RDWR) || 1207 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1223 (open_file->pfile->f_flags & O_WRONLY))) {
1224 cifsFileInfo_get(open_file); 1208 cifsFileInfo_get(open_file);
1225 1209
1226 if (!open_file->invalidHandle) { 1210 if (!open_file->invalidHandle) {
1227 /* found a good writable file */ 1211 /* found a good writable file */
1228 read_unlock(&GlobalSMBSeslock); 1212 spin_unlock(&cifs_file_list_lock);
1229 return open_file; 1213 return open_file;
1230 } 1214 }
1231 1215
1232 read_unlock(&GlobalSMBSeslock); 1216 spin_unlock(&cifs_file_list_lock);
1217
1233 /* Had to unlock since following call can block */ 1218 /* Had to unlock since following call can block */
1234 rc = cifs_reopen_file(open_file->pfile, false); 1219 rc = cifs_reopen_file(open_file, false);
1235 if (!rc) { 1220 if (!rc)
1236 if (!open_file->closePend) 1221 return open_file;
1237 return open_file;
1238 else { /* start over in case this was deleted */
1239 /* since the list could be modified */
1240 read_lock(&GlobalSMBSeslock);
1241 cifsFileInfo_put(open_file);
1242 goto refind_writable;
1243 }
1244 }
1245 1222
1246 /* if it fails, try another handle if possible - 1223 /* if it fails, try another handle if possible */
1247 (we can not do this if closePending since
1248 loop could be modified - in which case we
1249 have to start at the beginning of the list
1250 again. Note that it would be bad
1251 to hold up writepages here (rather than
1252 in caller) with continuous retries */
1253 cFYI(1, "wp failed on reopen file"); 1224 cFYI(1, "wp failed on reopen file");
1254 read_lock(&GlobalSMBSeslock);
1255 /* can not use this handle, no write
1256 pending on this one after all */
1257 cifsFileInfo_put(open_file); 1225 cifsFileInfo_put(open_file);
1258 1226
1259 if (open_file->closePend) /* list could have changed */ 1227 spin_lock(&cifs_file_list_lock);
1260 goto refind_writable; 1228
1261 /* else we simply continue to the next entry. Thus 1229 /* else we simply continue to the next entry. Thus
1262 we do not loop on reopen errors. If we 1230 we do not loop on reopen errors. If we
1263 can not reopen the file, for example if we 1231 can not reopen the file, for example if we
@@ -1272,7 +1240,7 @@ refind_writable:
1272 any_available = true; 1240 any_available = true;
1273 goto refind_writable; 1241 goto refind_writable;
1274 } 1242 }
1275 read_unlock(&GlobalSMBSeslock); 1243 spin_unlock(&cifs_file_list_lock);
1276 return NULL; 1244 return NULL;
1277} 1245}
1278 1246
@@ -1284,7 +1252,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1284 int rc = -EFAULT; 1252 int rc = -EFAULT;
1285 int bytes_written = 0; 1253 int bytes_written = 0;
1286 struct cifs_sb_info *cifs_sb; 1254 struct cifs_sb_info *cifs_sb;
1287 struct cifsTconInfo *pTcon;
1288 struct inode *inode; 1255 struct inode *inode;
1289 struct cifsFileInfo *open_file; 1256 struct cifsFileInfo *open_file;
1290 1257
@@ -1293,7 +1260,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1293 1260
1294 inode = page->mapping->host; 1261 inode = page->mapping->host;
1295 cifs_sb = CIFS_SB(inode->i_sb); 1262 cifs_sb = CIFS_SB(inode->i_sb);
1296 pTcon = cifs_sb->tcon;
1297 1263
1298 offset += (loff_t)from; 1264 offset += (loff_t)from;
1299 write_data = kmap(page); 1265 write_data = kmap(page);
@@ -1314,10 +1280,10 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1314 if (mapping->host->i_size - offset < (loff_t)to) 1280 if (mapping->host->i_size - offset < (loff_t)to)
1315 to = (unsigned)(mapping->host->i_size - offset); 1281 to = (unsigned)(mapping->host->i_size - offset);
1316 1282
1317 open_file = find_writable_file(CIFS_I(mapping->host)); 1283 open_file = find_writable_file(CIFS_I(mapping->host), false);
1318 if (open_file) { 1284 if (open_file) {
1319 bytes_written = cifs_write(open_file->pfile, write_data, 1285 bytes_written = cifs_write(open_file, write_data,
1320 to-from, &offset); 1286 to - from, &offset);
1321 cifsFileInfo_put(open_file); 1287 cifsFileInfo_put(open_file);
1322 /* Does mm or vfs already set times? */ 1288 /* Does mm or vfs already set times? */
1323 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); 1289 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
@@ -1352,6 +1318,7 @@ static int cifs_writepages(struct address_space *mapping,
1352 int nr_pages; 1318 int nr_pages;
1353 __u64 offset = 0; 1319 __u64 offset = 0;
1354 struct cifsFileInfo *open_file; 1320 struct cifsFileInfo *open_file;
1321 struct cifsTconInfo *tcon;
1355 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host); 1322 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
1356 struct page *page; 1323 struct page *page;
1357 struct pagevec pvec; 1324 struct pagevec pvec;
@@ -1359,6 +1326,15 @@ static int cifs_writepages(struct address_space *mapping,
1359 int scanned = 0; 1326 int scanned = 0;
1360 int xid, long_op; 1327 int xid, long_op;
1361 1328
1329 /*
1330 * BB: Is this meaningful for a non-block-device file system?
1331 * If it is, we should test it again after we do I/O
1332 */
1333 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1334 wbc->encountered_congestion = 1;
1335 return 0;
1336 }
1337
1362 cifs_sb = CIFS_SB(mapping->host->i_sb); 1338 cifs_sb = CIFS_SB(mapping->host->i_sb);
1363 1339
1364 /* 1340 /*
@@ -1368,27 +1344,29 @@ static int cifs_writepages(struct address_space *mapping,
1368 if (cifs_sb->wsize < PAGE_CACHE_SIZE) 1344 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1369 return generic_writepages(mapping, wbc); 1345 return generic_writepages(mapping, wbc);
1370 1346
1371 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1372 if (cifs_sb->tcon->ses->server->secMode &
1373 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1374 if (!experimEnabled)
1375 return generic_writepages(mapping, wbc);
1376
1377 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL); 1347 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1378 if (iov == NULL) 1348 if (iov == NULL)
1379 return generic_writepages(mapping, wbc); 1349 return generic_writepages(mapping, wbc);
1380 1350
1381
1382 /* 1351 /*
1383 * BB: Is this meaningful for a non-block-device file system? 1352 * if there's no open file, then this is likely to fail too,
1384 * If it is, we should test it again after we do I/O 1353 * but it'll at least handle the return. Maybe it should be
1354 * a BUG() instead?
1385 */ 1355 */
1386 if (wbc->nonblocking && bdi_write_congested(bdi)) { 1356 open_file = find_writable_file(CIFS_I(mapping->host), false);
1387 wbc->encountered_congestion = 1; 1357 if (!open_file) {
1388 kfree(iov); 1358 kfree(iov);
1389 return 0; 1359 return generic_writepages(mapping, wbc);
1390 } 1360 }
1391 1361
1362 tcon = tlink_tcon(open_file->tlink);
1363 if (!experimEnabled && tcon->ses->server->secMode &
1364 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1365 cifsFileInfo_put(open_file);
1366 return generic_writepages(mapping, wbc);
1367 }
1368 cifsFileInfo_put(open_file);
1369
1392 xid = GetXid(); 1370 xid = GetXid();
1393 1371
1394 pagevec_init(&pvec, 0); 1372 pagevec_init(&pvec, 0);
@@ -1492,38 +1470,34 @@ retry:
1492 break; 1470 break;
1493 } 1471 }
1494 if (n_iov) { 1472 if (n_iov) {
1495 /* Search for a writable handle every time we call 1473 open_file = find_writable_file(CIFS_I(mapping->host),
1496 * CIFSSMBWrite2. We can't rely on the last handle 1474 false);
1497 * we used to still be valid
1498 */
1499 open_file = find_writable_file(CIFS_I(mapping->host));
1500 if (!open_file) { 1475 if (!open_file) {
1501 cERROR(1, "No writable handles for inode"); 1476 cERROR(1, "No writable handles for inode");
1502 rc = -EBADF; 1477 rc = -EBADF;
1503 } else { 1478 } else {
1504 long_op = cifs_write_timeout(cifsi, offset); 1479 long_op = cifs_write_timeout(cifsi, offset);
1505 rc = CIFSSMBWrite2(xid, cifs_sb->tcon, 1480 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
1506 open_file->netfid,
1507 bytes_to_write, offset, 1481 bytes_to_write, offset,
1508 &bytes_written, iov, n_iov, 1482 &bytes_written, iov, n_iov,
1509 long_op); 1483 long_op);
1510 cifsFileInfo_put(open_file); 1484 cifsFileInfo_put(open_file);
1511 cifs_update_eof(cifsi, offset, bytes_written); 1485 cifs_update_eof(cifsi, offset, bytes_written);
1486 }
1512 1487
1513 if (rc || bytes_written < bytes_to_write) { 1488 if (rc || bytes_written < bytes_to_write) {
1514 cERROR(1, "Write2 ret %d, wrote %d", 1489 cERROR(1, "Write2 ret %d, wrote %d",
1515 rc, bytes_written); 1490 rc, bytes_written);
1516 /* BB what if continued retry is 1491 /* BB what if continued retry is
1517 requested via mount flags? */ 1492 requested via mount flags? */
1518 if (rc == -ENOSPC) 1493 if (rc == -ENOSPC)
1519 set_bit(AS_ENOSPC, &mapping->flags); 1494 set_bit(AS_ENOSPC, &mapping->flags);
1520 else 1495 else
1521 set_bit(AS_EIO, &mapping->flags); 1496 set_bit(AS_EIO, &mapping->flags);
1522 } else { 1497 } else {
1523 cifs_stats_bytes_written(cifs_sb->tcon, 1498 cifs_stats_bytes_written(tcon, bytes_written);
1524 bytes_written);
1525 }
1526 } 1499 }
1500
1527 for (i = 0; i < n_iov; i++) { 1501 for (i = 0; i < n_iov; i++) {
1528 page = pvec.pages[first + i]; 1502 page = pvec.pages[first + i];
1529 /* Should we also set page error on 1503 /* Should we also set page error on
@@ -1624,7 +1598,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
1624 /* BB check if anything else missing out of ppw 1598 /* BB check if anything else missing out of ppw
1625 such as updating last write time */ 1599 such as updating last write time */
1626 page_data = kmap(page); 1600 page_data = kmap(page);
1627 rc = cifs_write(file, page_data + offset, copied, &pos); 1601 rc = cifs_write(file->private_data, page_data + offset,
1602 copied, &pos);
1628 /* if (rc < 0) should we set writebehind rc? */ 1603 /* if (rc < 0) should we set writebehind rc? */
1629 kunmap(page); 1604 kunmap(page);
1630 1605
@@ -1665,7 +1640,7 @@ int cifs_fsync(struct file *file, int datasync)
1665 if (rc == 0) { 1640 if (rc == 0) {
1666 rc = CIFS_I(inode)->write_behind_rc; 1641 rc = CIFS_I(inode)->write_behind_rc;
1667 CIFS_I(inode)->write_behind_rc = 0; 1642 CIFS_I(inode)->write_behind_rc = 0;
1668 tcon = CIFS_SB(inode->i_sb)->tcon; 1643 tcon = tlink_tcon(smbfile->tlink);
1669 if (!rc && tcon && smbfile && 1644 if (!rc && tcon && smbfile &&
1670 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) 1645 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1671 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); 1646 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
@@ -1750,7 +1725,6 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1750 1725
1751 xid = GetXid(); 1726 xid = GetXid();
1752 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1727 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1753 pTcon = cifs_sb->tcon;
1754 1728
1755 if (file->private_data == NULL) { 1729 if (file->private_data == NULL) {
1756 rc = -EBADF; 1730 rc = -EBADF;
@@ -1758,6 +1732,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1758 return rc; 1732 return rc;
1759 } 1733 }
1760 open_file = file->private_data; 1734 open_file = file->private_data;
1735 pTcon = tlink_tcon(open_file->tlink);
1761 1736
1762 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 1737 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1763 cFYI(1, "attempting read on write only file instance"); 1738 cFYI(1, "attempting read on write only file instance");
@@ -1771,9 +1746,8 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1771 smb_read_data = NULL; 1746 smb_read_data = NULL;
1772 while (rc == -EAGAIN) { 1747 while (rc == -EAGAIN) {
1773 int buf_type = CIFS_NO_BUFFER; 1748 int buf_type = CIFS_NO_BUFFER;
1774 if ((open_file->invalidHandle) && 1749 if (open_file->invalidHandle) {
1775 (!open_file->closePend)) { 1750 rc = cifs_reopen_file(open_file, true);
1776 rc = cifs_reopen_file(file, true);
1777 if (rc != 0) 1751 if (rc != 0)
1778 break; 1752 break;
1779 } 1753 }
@@ -1831,7 +1805,6 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1831 1805
1832 xid = GetXid(); 1806 xid = GetXid();
1833 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1807 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1834 pTcon = cifs_sb->tcon;
1835 1808
1836 if (file->private_data == NULL) { 1809 if (file->private_data == NULL) {
1837 rc = -EBADF; 1810 rc = -EBADF;
@@ -1839,6 +1812,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1839 return rc; 1812 return rc;
1840 } 1813 }
1841 open_file = file->private_data; 1814 open_file = file->private_data;
1815 pTcon = tlink_tcon(open_file->tlink);
1842 1816
1843 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 1817 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1844 cFYI(1, "attempting read on write only file instance"); 1818 cFYI(1, "attempting read on write only file instance");
@@ -1857,9 +1831,8 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1857 } 1831 }
1858 rc = -EAGAIN; 1832 rc = -EAGAIN;
1859 while (rc == -EAGAIN) { 1833 while (rc == -EAGAIN) {
1860 if ((open_file->invalidHandle) && 1834 if (open_file->invalidHandle) {
1861 (!open_file->closePend)) { 1835 rc = cifs_reopen_file(open_file, true);
1862 rc = cifs_reopen_file(file, true);
1863 if (rc != 0) 1836 if (rc != 0)
1864 break; 1837 break;
1865 } 1838 }
@@ -1974,7 +1947,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1974 } 1947 }
1975 open_file = file->private_data; 1948 open_file = file->private_data;
1976 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1949 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1977 pTcon = cifs_sb->tcon; 1950 pTcon = tlink_tcon(open_file->tlink);
1978 1951
1979 /* 1952 /*
1980 * Reads as many pages as possible from fscache. Returns -ENOBUFS 1953 * Reads as many pages as possible from fscache. Returns -ENOBUFS
@@ -2022,9 +1995,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2022 read_size, contig_pages); 1995 read_size, contig_pages);
2023 rc = -EAGAIN; 1996 rc = -EAGAIN;
2024 while (rc == -EAGAIN) { 1997 while (rc == -EAGAIN) {
2025 if ((open_file->invalidHandle) && 1998 if (open_file->invalidHandle) {
2026 (!open_file->closePend)) { 1999 rc = cifs_reopen_file(open_file, true);
2027 rc = cifs_reopen_file(file, true);
2028 if (rc != 0) 2000 if (rc != 0)
2029 break; 2001 break;
2030 } 2002 }
@@ -2173,18 +2145,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2173{ 2145{
2174 struct cifsFileInfo *open_file; 2146 struct cifsFileInfo *open_file;
2175 2147
2176 read_lock(&GlobalSMBSeslock); 2148 spin_lock(&cifs_file_list_lock);
2177 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2149 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2178 if (open_file->closePend) 2150 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2179 continue; 2151 spin_unlock(&cifs_file_list_lock);
2180 if (open_file->pfile &&
2181 ((open_file->pfile->f_flags & O_RDWR) ||
2182 (open_file->pfile->f_flags & O_WRONLY))) {
2183 read_unlock(&GlobalSMBSeslock);
2184 return 1; 2152 return 1;
2185 } 2153 }
2186 } 2154 }
2187 read_unlock(&GlobalSMBSeslock); 2155 spin_unlock(&cifs_file_list_lock);
2188 return 0; 2156 return 0;
2189} 2157}
2190 2158
@@ -2310,9 +2278,8 @@ void cifs_oplock_break(struct work_struct *work)
2310{ 2278{
2311 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, 2279 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2312 oplock_break); 2280 oplock_break);
2313 struct inode *inode = cfile->pInode; 2281 struct inode *inode = cfile->dentry->d_inode;
2314 struct cifsInodeInfo *cinode = CIFS_I(inode); 2282 struct cifsInodeInfo *cinode = CIFS_I(inode);
2315 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2316 int rc, waitrc = 0; 2283 int rc, waitrc = 0;
2317 2284
2318 if (inode && S_ISREG(inode->i_mode)) { 2285 if (inode && S_ISREG(inode->i_mode)) {
@@ -2338,9 +2305,9 @@ void cifs_oplock_break(struct work_struct *work)
2338 * not bother sending an oplock release if session to server still is 2305 * not bother sending an oplock release if session to server still is
2339 * disconnected since oplock already released by the server 2306 * disconnected since oplock already released by the server
2340 */ 2307 */
2341 if (!cfile->closePend && !cfile->oplock_break_cancelled) { 2308 if (!cfile->oplock_break_cancelled) {
2342 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, 2309 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2343 LOCKING_ANDX_OPLOCK_RELEASE, false); 2310 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
2344 cFYI(1, "Oplock release rc = %d", rc); 2311 cFYI(1, "Oplock release rc = %d", rc);
2345 } 2312 }
2346 2313
@@ -2349,22 +2316,22 @@ void cifs_oplock_break(struct work_struct *work)
2349 * finished grabbing reference for us. Make sure it's done by 2316 * finished grabbing reference for us. Make sure it's done by
2350 * waiting for GlobalSMSSeslock. 2317 * waiting for GlobalSMSSeslock.
2351 */ 2318 */
2352 write_lock(&GlobalSMBSeslock); 2319 spin_lock(&cifs_file_list_lock);
2353 write_unlock(&GlobalSMBSeslock); 2320 spin_unlock(&cifs_file_list_lock);
2354 2321
2355 cifs_oplock_break_put(cfile); 2322 cifs_oplock_break_put(cfile);
2356} 2323}
2357 2324
2358void cifs_oplock_break_get(struct cifsFileInfo *cfile) 2325void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2359{ 2326{
2360 mntget(cfile->mnt); 2327 cifs_sb_active(cfile->dentry->d_sb);
2361 cifsFileInfo_get(cfile); 2328 cifsFileInfo_get(cfile);
2362} 2329}
2363 2330
2364void cifs_oplock_break_put(struct cifsFileInfo *cfile) 2331void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2365{ 2332{
2366 mntput(cfile->mnt);
2367 cifsFileInfo_put(cfile); 2333 cifsFileInfo_put(cfile);
2334 cifs_sb_deactive(cfile->dentry->d_sb);
2368} 2335}
2369 2336
2370const struct address_space_operations cifs_addr_ops = { 2337const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 9f3f5c4be161..a2ad94efcfe6 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -62,15 +62,15 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
62{ 62{
63 struct cifsInodeInfo *cifsi = CIFS_I(inode); 63 struct cifsInodeInfo *cifsi = CIFS_I(inode);
64 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 64 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
65 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
65 66
66 if (cifsi->fscache) 67 if (cifsi->fscache)
67 return; 68 return;
68 69
69 cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache, 70 cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
70 &cifs_fscache_inode_object_def, 71 &cifs_fscache_inode_object_def, cifsi);
71 cifsi); 72 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
72 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", 73 cifsi->fscache);
73 cifs_sb->tcon->fscache, cifsi->fscache);
74} 74}
75 75
76void cifs_fscache_release_inode_cookie(struct inode *inode) 76void cifs_fscache_release_inode_cookie(struct inode *inode)
@@ -117,7 +117,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
117 /* retire the current fscache cache and get a new one */ 117 /* retire the current fscache cache and get a new one */
118 fscache_relinquish_cookie(cifsi->fscache, 1); 118 fscache_relinquish_cookie(cifsi->fscache, 1);
119 119
120 cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache, 120 cifsi->fscache = fscache_acquire_cookie(
121 cifs_sb_master_tcon(cifs_sb)->fscache,
121 &cifs_fscache_inode_object_def, 122 &cifs_fscache_inode_object_def,
122 cifsi); 123 cifsi);
123 cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", 124 cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 53cce8cc2224..94979309698a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -52,7 +52,7 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
52 52
53 53
54 /* check if server can support readpages */ 54 /* check if server can support readpages */
55 if (cifs_sb->tcon->ses->server->maxBuf < 55 if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
56 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) 56 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
57 inode->i_data.a_ops = &cifs_addr_ops_smallbuf; 57 inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
58 else 58 else
@@ -288,8 +288,8 @@ int cifs_get_file_info_unix(struct file *filp)
288 struct cifs_fattr fattr; 288 struct cifs_fattr fattr;
289 struct inode *inode = filp->f_path.dentry->d_inode; 289 struct inode *inode = filp->f_path.dentry->d_inode;
290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
291 struct cifsTconInfo *tcon = cifs_sb->tcon;
292 struct cifsFileInfo *cfile = filp->private_data; 291 struct cifsFileInfo *cfile = filp->private_data;
292 struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
293 293
294 xid = GetXid(); 294 xid = GetXid();
295 rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); 295 rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -313,15 +313,21 @@ int cifs_get_inode_info_unix(struct inode **pinode,
313 FILE_UNIX_BASIC_INFO find_data; 313 FILE_UNIX_BASIC_INFO find_data;
314 struct cifs_fattr fattr; 314 struct cifs_fattr fattr;
315 struct cifsTconInfo *tcon; 315 struct cifsTconInfo *tcon;
316 struct tcon_link *tlink;
316 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 317 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
317 318
318 tcon = cifs_sb->tcon;
319 cFYI(1, "Getting info on %s", full_path); 319 cFYI(1, "Getting info on %s", full_path);
320 320
321 tlink = cifs_sb_tlink(cifs_sb);
322 if (IS_ERR(tlink))
323 return PTR_ERR(tlink);
324 tcon = tlink_tcon(tlink);
325
321 /* could have done a find first instead but this returns more info */ 326 /* could have done a find first instead but this returns more info */
322 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, 327 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
323 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 328 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
324 CIFS_MOUNT_MAP_SPECIAL_CHR); 329 CIFS_MOUNT_MAP_SPECIAL_CHR);
330 cifs_put_tlink(tlink);
325 331
326 if (!rc) { 332 if (!rc) {
327 cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb); 333 cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
@@ -332,6 +338,13 @@ int cifs_get_inode_info_unix(struct inode **pinode,
332 return rc; 338 return rc;
333 } 339 }
334 340
341 /* check for Minshall+French symlinks */
342 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
343 int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
344 if (tmprc)
345 cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
346 }
347
335 if (*pinode == NULL) { 348 if (*pinode == NULL) {
336 /* get new inode */ 349 /* get new inode */
337 cifs_fill_uniqueid(sb, &fattr); 350 cifs_fill_uniqueid(sb, &fattr);
@@ -353,7 +366,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
353 int rc; 366 int rc;
354 int oplock = 0; 367 int oplock = 0;
355 __u16 netfid; 368 __u16 netfid;
356 struct cifsTconInfo *pTcon = cifs_sb->tcon; 369 struct tcon_link *tlink;
370 struct cifsTconInfo *tcon;
357 char buf[24]; 371 char buf[24];
358 unsigned int bytes_read; 372 unsigned int bytes_read;
359 char *pbuf; 373 char *pbuf;
@@ -372,7 +386,12 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
372 return -EINVAL; /* EOPNOTSUPP? */ 386 return -EINVAL; /* EOPNOTSUPP? */
373 } 387 }
374 388
375 rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, 389 tlink = cifs_sb_tlink(cifs_sb);
390 if (IS_ERR(tlink))
391 return PTR_ERR(tlink);
392 tcon = tlink_tcon(tlink);
393
394 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, GENERIC_READ,
376 CREATE_NOT_DIR, &netfid, &oplock, NULL, 395 CREATE_NOT_DIR, &netfid, &oplock, NULL,
377 cifs_sb->local_nls, 396 cifs_sb->local_nls,
378 cifs_sb->mnt_cifs_flags & 397 cifs_sb->mnt_cifs_flags &
@@ -380,7 +399,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
380 if (rc == 0) { 399 if (rc == 0) {
381 int buf_type = CIFS_NO_BUFFER; 400 int buf_type = CIFS_NO_BUFFER;
382 /* Read header */ 401 /* Read header */
383 rc = CIFSSMBRead(xid, pTcon, netfid, 402 rc = CIFSSMBRead(xid, tcon, netfid,
384 24 /* length */, 0 /* offset */, 403 24 /* length */, 0 /* offset */,
385 &bytes_read, &pbuf, &buf_type); 404 &bytes_read, &pbuf, &buf_type);
386 if ((rc == 0) && (bytes_read >= 8)) { 405 if ((rc == 0) && (bytes_read >= 8)) {
@@ -422,8 +441,9 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
422 fattr->cf_dtype = DT_REG; 441 fattr->cf_dtype = DT_REG;
423 rc = -EOPNOTSUPP; /* or some unknown SFU type */ 442 rc = -EOPNOTSUPP; /* or some unknown SFU type */
424 } 443 }
425 CIFSSMBClose(xid, pTcon, netfid); 444 CIFSSMBClose(xid, tcon, netfid);
426 } 445 }
446 cifs_put_tlink(tlink);
427 return rc; 447 return rc;
428} 448}
429 449
@@ -441,11 +461,19 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
441 ssize_t rc; 461 ssize_t rc;
442 char ea_value[4]; 462 char ea_value[4];
443 __u32 mode; 463 __u32 mode;
464 struct tcon_link *tlink;
465 struct cifsTconInfo *tcon;
444 466
445 rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS", 467 tlink = cifs_sb_tlink(cifs_sb);
468 if (IS_ERR(tlink))
469 return PTR_ERR(tlink);
470 tcon = tlink_tcon(tlink);
471
472 rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
446 ea_value, 4 /* size of buf */, cifs_sb->local_nls, 473 ea_value, 4 /* size of buf */, cifs_sb->local_nls,
447 cifs_sb->mnt_cifs_flags & 474 cifs_sb->mnt_cifs_flags &
448 CIFS_MOUNT_MAP_SPECIAL_CHR); 475 CIFS_MOUNT_MAP_SPECIAL_CHR);
476 cifs_put_tlink(tlink);
449 if (rc < 0) 477 if (rc < 0)
450 return (int)rc; 478 return (int)rc;
451 else if (rc > 3) { 479 else if (rc > 3) {
@@ -468,6 +496,8 @@ static void
468cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, 496cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
469 struct cifs_sb_info *cifs_sb, bool adjust_tz) 497 struct cifs_sb_info *cifs_sb, bool adjust_tz)
470{ 498{
499 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
500
471 memset(fattr, 0, sizeof(*fattr)); 501 memset(fattr, 0, sizeof(*fattr));
472 fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); 502 fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
473 if (info->DeletePending) 503 if (info->DeletePending)
@@ -482,8 +512,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
482 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); 512 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
483 513
484 if (adjust_tz) { 514 if (adjust_tz) {
485 fattr->cf_ctime.tv_sec += cifs_sb->tcon->ses->server->timeAdj; 515 fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
486 fattr->cf_mtime.tv_sec += cifs_sb->tcon->ses->server->timeAdj; 516 fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
487 } 517 }
488 518
489 fattr->cf_eof = le64_to_cpu(info->EndOfFile); 519 fattr->cf_eof = le64_to_cpu(info->EndOfFile);
@@ -515,8 +545,8 @@ int cifs_get_file_info(struct file *filp)
515 struct cifs_fattr fattr; 545 struct cifs_fattr fattr;
516 struct inode *inode = filp->f_path.dentry->d_inode; 546 struct inode *inode = filp->f_path.dentry->d_inode;
517 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 547 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
518 struct cifsTconInfo *tcon = cifs_sb->tcon;
519 struct cifsFileInfo *cfile = filp->private_data; 548 struct cifsFileInfo *cfile = filp->private_data;
549 struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
520 550
521 xid = GetXid(); 551 xid = GetXid();
522 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); 552 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -554,26 +584,33 @@ int cifs_get_inode_info(struct inode **pinode,
554{ 584{
555 int rc = 0, tmprc; 585 int rc = 0, tmprc;
556 struct cifsTconInfo *pTcon; 586 struct cifsTconInfo *pTcon;
587 struct tcon_link *tlink;
557 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 588 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
558 char *buf = NULL; 589 char *buf = NULL;
559 bool adjustTZ = false; 590 bool adjustTZ = false;
560 struct cifs_fattr fattr; 591 struct cifs_fattr fattr;
561 592
562 pTcon = cifs_sb->tcon; 593 tlink = cifs_sb_tlink(cifs_sb);
594 if (IS_ERR(tlink))
595 return PTR_ERR(tlink);
596 pTcon = tlink_tcon(tlink);
597
563 cFYI(1, "Getting info on %s", full_path); 598 cFYI(1, "Getting info on %s", full_path);
564 599
565 if ((pfindData == NULL) && (*pinode != NULL)) { 600 if ((pfindData == NULL) && (*pinode != NULL)) {
566 if (CIFS_I(*pinode)->clientCanCacheRead) { 601 if (CIFS_I(*pinode)->clientCanCacheRead) {
567 cFYI(1, "No need to revalidate cached inode sizes"); 602 cFYI(1, "No need to revalidate cached inode sizes");
568 return rc; 603 goto cgii_exit;
569 } 604 }
570 } 605 }
571 606
572 /* if file info not passed in then get it from server */ 607 /* if file info not passed in then get it from server */
573 if (pfindData == NULL) { 608 if (pfindData == NULL) {
574 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 609 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
575 if (buf == NULL) 610 if (buf == NULL) {
576 return -ENOMEM; 611 rc = -ENOMEM;
612 goto cgii_exit;
613 }
577 pfindData = (FILE_ALL_INFO *)buf; 614 pfindData = (FILE_ALL_INFO *)buf;
578 615
579 /* could do find first instead but this returns more info */ 616 /* could do find first instead but this returns more info */
@@ -661,6 +698,13 @@ int cifs_get_inode_info(struct inode **pinode,
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 698 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
662 cifs_sfu_mode(&fattr, full_path, cifs_sb, xid); 699 cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
663 700
701 /* check for Minshall+French symlinks */
702 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
703 tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
704 if (tmprc)
705 cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
706 }
707
664 if (!*pinode) { 708 if (!*pinode) {
665 *pinode = cifs_iget(sb, &fattr); 709 *pinode = cifs_iget(sb, &fattr);
666 if (!*pinode) 710 if (!*pinode)
@@ -671,6 +715,7 @@ int cifs_get_inode_info(struct inode **pinode,
671 715
672cgii_exit: 716cgii_exit:
673 kfree(buf); 717 kfree(buf);
718 cifs_put_tlink(tlink);
674 return rc; 719 return rc;
675} 720}
676 721
@@ -683,6 +728,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
683 int pplen = cifs_sb->prepathlen; 728 int pplen = cifs_sb->prepathlen;
684 int dfsplen; 729 int dfsplen;
685 char *full_path = NULL; 730 char *full_path = NULL;
731 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
686 732
687 /* if no prefix path, simply set path to the root of share to "" */ 733 /* if no prefix path, simply set path to the root of share to "" */
688 if (pplen == 0) { 734 if (pplen == 0) {
@@ -692,8 +738,8 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
692 return full_path; 738 return full_path;
693 } 739 }
694 740
695 if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS)) 741 if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
696 dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1); 742 dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
697 else 743 else
698 dfsplen = 0; 744 dfsplen = 0;
699 745
@@ -702,7 +748,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
702 return full_path; 748 return full_path;
703 749
704 if (dfsplen) { 750 if (dfsplen) {
705 strncpy(full_path, cifs_sb->tcon->treeName, dfsplen); 751 strncpy(full_path, tcon->treeName, dfsplen);
706 /* switch slash direction in prepath depending on whether 752 /* switch slash direction in prepath depending on whether
707 * windows or posix style path names 753 * windows or posix style path names
708 */ 754 */
@@ -818,18 +864,18 @@ retry_iget5_locked:
818struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) 864struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
819{ 865{
820 int xid; 866 int xid;
821 struct cifs_sb_info *cifs_sb; 867 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
822 struct inode *inode = NULL; 868 struct inode *inode = NULL;
823 long rc; 869 long rc;
824 char *full_path; 870 char *full_path;
871 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
825 872
826 cifs_sb = CIFS_SB(sb);
827 full_path = cifs_build_path_to_root(cifs_sb); 873 full_path = cifs_build_path_to_root(cifs_sb);
828 if (full_path == NULL) 874 if (full_path == NULL)
829 return ERR_PTR(-ENOMEM); 875 return ERR_PTR(-ENOMEM);
830 876
831 xid = GetXid(); 877 xid = GetXid();
832 if (cifs_sb->tcon->unix_ext) 878 if (tcon->unix_ext)
833 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); 879 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
834 else 880 else
835 rc = cifs_get_inode_info(&inode, full_path, NULL, sb, 881 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
@@ -840,10 +886,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
840 886
841#ifdef CONFIG_CIFS_FSCACHE 887#ifdef CONFIG_CIFS_FSCACHE
842 /* populate tcon->resource_id */ 888 /* populate tcon->resource_id */
843 cifs_sb->tcon->resource_id = CIFS_I(inode)->uniqueid; 889 tcon->resource_id = CIFS_I(inode)->uniqueid;
844#endif 890#endif
845 891
846 if (rc && cifs_sb->tcon->ipc) { 892 if (rc && tcon->ipc) {
847 cFYI(1, "ipc connection - fake read inode"); 893 cFYI(1, "ipc connection - fake read inode");
848 inode->i_mode |= S_IFDIR; 894 inode->i_mode |= S_IFDIR;
849 inode->i_nlink = 2; 895 inode->i_nlink = 2;
@@ -879,7 +925,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
879 struct cifsFileInfo *open_file; 925 struct cifsFileInfo *open_file;
880 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 926 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
881 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 927 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
882 struct cifsTconInfo *pTcon = cifs_sb->tcon; 928 struct tcon_link *tlink = NULL;
929 struct cifsTconInfo *pTcon;
883 FILE_BASIC_INFO info_buf; 930 FILE_BASIC_INFO info_buf;
884 931
885 if (attrs == NULL) 932 if (attrs == NULL)
@@ -918,13 +965,22 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
918 /* 965 /*
919 * If the file is already open for write, just use that fileid 966 * If the file is already open for write, just use that fileid
920 */ 967 */
921 open_file = find_writable_file(cifsInode); 968 open_file = find_writable_file(cifsInode, true);
922 if (open_file) { 969 if (open_file) {
923 netfid = open_file->netfid; 970 netfid = open_file->netfid;
924 netpid = open_file->pid; 971 netpid = open_file->pid;
972 pTcon = tlink_tcon(open_file->tlink);
925 goto set_via_filehandle; 973 goto set_via_filehandle;
926 } 974 }
927 975
976 tlink = cifs_sb_tlink(cifs_sb);
977 if (IS_ERR(tlink)) {
978 rc = PTR_ERR(tlink);
979 tlink = NULL;
980 goto out;
981 }
982 pTcon = tlink_tcon(tlink);
983
928 /* 984 /*
929 * NT4 apparently returns success on this call, but it doesn't 985 * NT4 apparently returns success on this call, but it doesn't
930 * really work. 986 * really work.
@@ -968,6 +1024,8 @@ set_via_filehandle:
968 else 1024 else
969 cifsFileInfo_put(open_file); 1025 cifsFileInfo_put(open_file);
970out: 1026out:
1027 if (tlink != NULL)
1028 cifs_put_tlink(tlink);
971 return rc; 1029 return rc;
972} 1030}
973 1031
@@ -985,10 +1043,16 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
985 struct inode *inode = dentry->d_inode; 1043 struct inode *inode = dentry->d_inode;
986 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1044 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
987 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1045 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
988 struct cifsTconInfo *tcon = cifs_sb->tcon; 1046 struct tcon_link *tlink;
1047 struct cifsTconInfo *tcon;
989 __u32 dosattr, origattr; 1048 __u32 dosattr, origattr;
990 FILE_BASIC_INFO *info_buf = NULL; 1049 FILE_BASIC_INFO *info_buf = NULL;
991 1050
1051 tlink = cifs_sb_tlink(cifs_sb);
1052 if (IS_ERR(tlink))
1053 return PTR_ERR(tlink);
1054 tcon = tlink_tcon(tlink);
1055
992 rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN, 1056 rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
993 DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR, 1057 DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
994 &netfid, &oplock, NULL, cifs_sb->local_nls, 1058 &netfid, &oplock, NULL, cifs_sb->local_nls,
@@ -1057,6 +1121,7 @@ out_close:
1057 CIFSSMBClose(xid, tcon, netfid); 1121 CIFSSMBClose(xid, tcon, netfid);
1058out: 1122out:
1059 kfree(info_buf); 1123 kfree(info_buf);
1124 cifs_put_tlink(tlink);
1060 return rc; 1125 return rc;
1061 1126
1062 /* 1127 /*
@@ -1096,12 +1161,18 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
1096 struct cifsInodeInfo *cifs_inode; 1161 struct cifsInodeInfo *cifs_inode;
1097 struct super_block *sb = dir->i_sb; 1162 struct super_block *sb = dir->i_sb;
1098 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 1163 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
1099 struct cifsTconInfo *tcon = cifs_sb->tcon; 1164 struct tcon_link *tlink;
1165 struct cifsTconInfo *tcon;
1100 struct iattr *attrs = NULL; 1166 struct iattr *attrs = NULL;
1101 __u32 dosattr = 0, origattr = 0; 1167 __u32 dosattr = 0, origattr = 0;
1102 1168
1103 cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry); 1169 cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);
1104 1170
1171 tlink = cifs_sb_tlink(cifs_sb);
1172 if (IS_ERR(tlink))
1173 return PTR_ERR(tlink);
1174 tcon = tlink_tcon(tlink);
1175
1105 xid = GetXid(); 1176 xid = GetXid();
1106 1177
1107 /* Unlink can be called from rename so we can not take the 1178 /* Unlink can be called from rename so we can not take the
@@ -1109,8 +1180,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
1109 full_path = build_path_from_dentry(dentry); 1180 full_path = build_path_from_dentry(dentry);
1110 if (full_path == NULL) { 1181 if (full_path == NULL) {
1111 rc = -ENOMEM; 1182 rc = -ENOMEM;
1112 FreeXid(xid); 1183 goto unlink_out;
1113 return rc;
1114 } 1184 }
1115 1185
1116 if ((tcon->ses->capabilities & CAP_UNIX) && 1186 if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1176,10 +1246,11 @@ out_reval:
1176 dir->i_ctime = dir->i_mtime = current_fs_time(sb); 1246 dir->i_ctime = dir->i_mtime = current_fs_time(sb);
1177 cifs_inode = CIFS_I(dir); 1247 cifs_inode = CIFS_I(dir);
1178 CIFS_I(dir)->time = 0; /* force revalidate of dir as well */ 1248 CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
1179 1249unlink_out:
1180 kfree(full_path); 1250 kfree(full_path);
1181 kfree(attrs); 1251 kfree(attrs);
1182 FreeXid(xid); 1252 FreeXid(xid);
1253 cifs_put_tlink(tlink);
1183 return rc; 1254 return rc;
1184} 1255}
1185 1256
@@ -1188,6 +1259,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1188 int rc = 0, tmprc; 1259 int rc = 0, tmprc;
1189 int xid; 1260 int xid;
1190 struct cifs_sb_info *cifs_sb; 1261 struct cifs_sb_info *cifs_sb;
1262 struct tcon_link *tlink;
1191 struct cifsTconInfo *pTcon; 1263 struct cifsTconInfo *pTcon;
1192 char *full_path = NULL; 1264 char *full_path = NULL;
1193 struct inode *newinode = NULL; 1265 struct inode *newinode = NULL;
@@ -1195,16 +1267,18 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1195 1267
1196 cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode); 1268 cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
1197 1269
1198 xid = GetXid();
1199
1200 cifs_sb = CIFS_SB(inode->i_sb); 1270 cifs_sb = CIFS_SB(inode->i_sb);
1201 pTcon = cifs_sb->tcon; 1271 tlink = cifs_sb_tlink(cifs_sb);
1272 if (IS_ERR(tlink))
1273 return PTR_ERR(tlink);
1274 pTcon = tlink_tcon(tlink);
1275
1276 xid = GetXid();
1202 1277
1203 full_path = build_path_from_dentry(direntry); 1278 full_path = build_path_from_dentry(direntry);
1204 if (full_path == NULL) { 1279 if (full_path == NULL) {
1205 rc = -ENOMEM; 1280 rc = -ENOMEM;
1206 FreeXid(xid); 1281 goto mkdir_out;
1207 return rc;
1208 } 1282 }
1209 1283
1210 if ((pTcon->ses->capabilities & CAP_UNIX) && 1284 if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1362,6 +1436,7 @@ mkdir_get_info:
1362mkdir_out: 1436mkdir_out:
1363 kfree(full_path); 1437 kfree(full_path);
1364 FreeXid(xid); 1438 FreeXid(xid);
1439 cifs_put_tlink(tlink);
1365 return rc; 1440 return rc;
1366} 1441}
1367 1442
@@ -1370,6 +1445,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1370 int rc = 0; 1445 int rc = 0;
1371 int xid; 1446 int xid;
1372 struct cifs_sb_info *cifs_sb; 1447 struct cifs_sb_info *cifs_sb;
1448 struct tcon_link *tlink;
1373 struct cifsTconInfo *pTcon; 1449 struct cifsTconInfo *pTcon;
1374 char *full_path = NULL; 1450 char *full_path = NULL;
1375 struct cifsInodeInfo *cifsInode; 1451 struct cifsInodeInfo *cifsInode;
@@ -1378,18 +1454,23 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1378 1454
1379 xid = GetXid(); 1455 xid = GetXid();
1380 1456
1381 cifs_sb = CIFS_SB(inode->i_sb);
1382 pTcon = cifs_sb->tcon;
1383
1384 full_path = build_path_from_dentry(direntry); 1457 full_path = build_path_from_dentry(direntry);
1385 if (full_path == NULL) { 1458 if (full_path == NULL) {
1386 rc = -ENOMEM; 1459 rc = -ENOMEM;
1387 FreeXid(xid); 1460 goto rmdir_exit;
1388 return rc;
1389 } 1461 }
1390 1462
1463 cifs_sb = CIFS_SB(inode->i_sb);
1464 tlink = cifs_sb_tlink(cifs_sb);
1465 if (IS_ERR(tlink)) {
1466 rc = PTR_ERR(tlink);
1467 goto rmdir_exit;
1468 }
1469 pTcon = tlink_tcon(tlink);
1470
1391 rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, 1471 rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
1392 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 1472 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1473 cifs_put_tlink(tlink);
1393 1474
1394 if (!rc) { 1475 if (!rc) {
1395 drop_nlink(inode); 1476 drop_nlink(inode);
@@ -1410,6 +1491,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1410 direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime = 1491 direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
1411 current_fs_time(inode->i_sb); 1492 current_fs_time(inode->i_sb);
1412 1493
1494rmdir_exit:
1413 kfree(full_path); 1495 kfree(full_path);
1414 FreeXid(xid); 1496 FreeXid(xid);
1415 return rc; 1497 return rc;
@@ -1420,10 +1502,16 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
1420 struct dentry *to_dentry, const char *toPath) 1502 struct dentry *to_dentry, const char *toPath)
1421{ 1503{
1422 struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb); 1504 struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
1423 struct cifsTconInfo *pTcon = cifs_sb->tcon; 1505 struct tcon_link *tlink;
1506 struct cifsTconInfo *pTcon;
1424 __u16 srcfid; 1507 __u16 srcfid;
1425 int oplock, rc; 1508 int oplock, rc;
1426 1509
1510 tlink = cifs_sb_tlink(cifs_sb);
1511 if (IS_ERR(tlink))
1512 return PTR_ERR(tlink);
1513 pTcon = tlink_tcon(tlink);
1514
1427 /* try path-based rename first */ 1515 /* try path-based rename first */
1428 rc = CIFSSMBRename(xid, pTcon, fromPath, toPath, cifs_sb->local_nls, 1516 rc = CIFSSMBRename(xid, pTcon, fromPath, toPath, cifs_sb->local_nls,
1429 cifs_sb->mnt_cifs_flags & 1517 cifs_sb->mnt_cifs_flags &
@@ -1435,11 +1523,11 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
1435 * rename by filehandle to various Windows servers. 1523 * rename by filehandle to various Windows servers.
1436 */ 1524 */
1437 if (rc == 0 || rc != -ETXTBSY) 1525 if (rc == 0 || rc != -ETXTBSY)
1438 return rc; 1526 goto do_rename_exit;
1439 1527
1440 /* open-file renames don't work across directories */ 1528 /* open-file renames don't work across directories */
1441 if (to_dentry->d_parent != from_dentry->d_parent) 1529 if (to_dentry->d_parent != from_dentry->d_parent)
1442 return rc; 1530 goto do_rename_exit;
1443 1531
1444 /* open the file to be renamed -- we need DELETE perms */ 1532 /* open the file to be renamed -- we need DELETE perms */
1445 rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE, 1533 rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
@@ -1455,7 +1543,8 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
1455 1543
1456 CIFSSMBClose(xid, pTcon, srcfid); 1544 CIFSSMBClose(xid, pTcon, srcfid);
1457 } 1545 }
1458 1546do_rename_exit:
1547 cifs_put_tlink(tlink);
1459 return rc; 1548 return rc;
1460} 1549}
1461 1550
@@ -1465,13 +1554,17 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1465 char *fromName = NULL; 1554 char *fromName = NULL;
1466 char *toName = NULL; 1555 char *toName = NULL;
1467 struct cifs_sb_info *cifs_sb; 1556 struct cifs_sb_info *cifs_sb;
1557 struct tcon_link *tlink;
1468 struct cifsTconInfo *tcon; 1558 struct cifsTconInfo *tcon;
1469 FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 1559 FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
1470 FILE_UNIX_BASIC_INFO *info_buf_target; 1560 FILE_UNIX_BASIC_INFO *info_buf_target;
1471 int xid, rc, tmprc; 1561 int xid, rc, tmprc;
1472 1562
1473 cifs_sb = CIFS_SB(source_dir->i_sb); 1563 cifs_sb = CIFS_SB(source_dir->i_sb);
1474 tcon = cifs_sb->tcon; 1564 tlink = cifs_sb_tlink(cifs_sb);
1565 if (IS_ERR(tlink))
1566 return PTR_ERR(tlink);
1567 tcon = tlink_tcon(tlink);
1475 1568
1476 xid = GetXid(); 1569 xid = GetXid();
1477 1570
@@ -1547,6 +1640,7 @@ cifs_rename_exit:
1547 kfree(fromName); 1640 kfree(fromName);
1548 kfree(toName); 1641 kfree(toName);
1549 FreeXid(xid); 1642 FreeXid(xid);
1643 cifs_put_tlink(tlink);
1550 return rc; 1644 return rc;
1551} 1645}
1552 1646
@@ -1599,11 +1693,12 @@ int cifs_revalidate_file(struct file *filp)
1599{ 1693{
1600 int rc = 0; 1694 int rc = 0;
1601 struct inode *inode = filp->f_path.dentry->d_inode; 1695 struct inode *inode = filp->f_path.dentry->d_inode;
1696 struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
1602 1697
1603 if (!cifs_inode_needs_reval(inode)) 1698 if (!cifs_inode_needs_reval(inode))
1604 goto check_inval; 1699 goto check_inval;
1605 1700
1606 if (CIFS_SB(inode->i_sb)->tcon->unix_ext) 1701 if (tlink_tcon(cfile->tlink)->unix_ext)
1607 rc = cifs_get_file_info_unix(filp); 1702 rc = cifs_get_file_info_unix(filp);
1608 else 1703 else
1609 rc = cifs_get_file_info(filp); 1704 rc = cifs_get_file_info(filp);
@@ -1644,7 +1739,7 @@ int cifs_revalidate_dentry(struct dentry *dentry)
1644 "jiffies %ld", full_path, inode, inode->i_count.counter, 1739 "jiffies %ld", full_path, inode, inode->i_count.counter,
1645 dentry, dentry->d_time, jiffies); 1740 dentry, dentry->d_time, jiffies);
1646 1741
1647 if (CIFS_SB(sb)->tcon->unix_ext) 1742 if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
1648 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); 1743 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
1649 else 1744 else
1650 rc = cifs_get_inode_info(&inode, full_path, NULL, sb, 1745 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
@@ -1660,13 +1755,29 @@ check_inval:
1660} 1755}
1661 1756
1662int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1757int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1663 struct kstat *stat) 1758 struct kstat *stat)
1664{ 1759{
1760 struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
1761 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
1665 int err = cifs_revalidate_dentry(dentry); 1762 int err = cifs_revalidate_dentry(dentry);
1763
1666 if (!err) { 1764 if (!err) {
1667 generic_fillattr(dentry->d_inode, stat); 1765 generic_fillattr(dentry->d_inode, stat);
1668 stat->blksize = CIFS_MAX_MSGSIZE; 1766 stat->blksize = CIFS_MAX_MSGSIZE;
1669 stat->ino = CIFS_I(dentry->d_inode)->uniqueid; 1767 stat->ino = CIFS_I(dentry->d_inode)->uniqueid;
1768
1769 /*
1770 * If on a multiuser mount without unix extensions, and the
1771 * admin hasn't overridden them, set the ownership to the
1772 * fsuid/fsgid of the current process.
1773 */
1774 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
1775 !tcon->unix_ext) {
1776 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
1777 stat->uid = current_fsuid();
1778 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
1779 stat->gid = current_fsgid();
1780 }
1670 } 1781 }
1671 return err; 1782 return err;
1672} 1783}
@@ -1708,7 +1819,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1708 struct cifsFileInfo *open_file; 1819 struct cifsFileInfo *open_file;
1709 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1820 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
1710 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1821 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1711 struct cifsTconInfo *pTcon = cifs_sb->tcon; 1822 struct tcon_link *tlink = NULL;
1823 struct cifsTconInfo *pTcon = NULL;
1712 1824
1713 /* 1825 /*
1714 * To avoid spurious oplock breaks from server, in the case of 1826 * To avoid spurious oplock breaks from server, in the case of
@@ -1719,10 +1831,11 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1719 * writebehind data than the SMB timeout for the SetPathInfo 1831 * writebehind data than the SMB timeout for the SetPathInfo
1720 * request would allow 1832 * request would allow
1721 */ 1833 */
1722 open_file = find_writable_file(cifsInode); 1834 open_file = find_writable_file(cifsInode, true);
1723 if (open_file) { 1835 if (open_file) {
1724 __u16 nfid = open_file->netfid; 1836 __u16 nfid = open_file->netfid;
1725 __u32 npid = open_file->pid; 1837 __u32 npid = open_file->pid;
1838 pTcon = tlink_tcon(open_file->tlink);
1726 rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid, 1839 rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
1727 npid, false); 1840 npid, false);
1728 cifsFileInfo_put(open_file); 1841 cifsFileInfo_put(open_file);
@@ -1737,6 +1850,13 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1737 rc = -EINVAL; 1850 rc = -EINVAL;
1738 1851
1739 if (rc != 0) { 1852 if (rc != 0) {
1853 if (pTcon == NULL) {
1854 tlink = cifs_sb_tlink(cifs_sb);
1855 if (IS_ERR(tlink))
1856 return PTR_ERR(tlink);
1857 pTcon = tlink_tcon(tlink);
1858 }
1859
1740 /* Set file size by pathname rather than by handle 1860 /* Set file size by pathname rather than by handle
1741 either because no valid, writeable file handle for 1861 either because no valid, writeable file handle for
1742 it was found or because there was an error setting 1862 it was found or because there was an error setting
@@ -1766,6 +1886,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1766 CIFSSMBClose(xid, pTcon, netfid); 1886 CIFSSMBClose(xid, pTcon, netfid);
1767 } 1887 }
1768 } 1888 }
1889 if (tlink)
1890 cifs_put_tlink(tlink);
1769 } 1891 }
1770 1892
1771 if (rc == 0) { 1893 if (rc == 0) {
@@ -1786,7 +1908,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1786 struct inode *inode = direntry->d_inode; 1908 struct inode *inode = direntry->d_inode;
1787 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1909 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
1788 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1910 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1789 struct cifsTconInfo *pTcon = cifs_sb->tcon; 1911 struct tcon_link *tlink;
1912 struct cifsTconInfo *pTcon;
1790 struct cifs_unix_set_info_args *args = NULL; 1913 struct cifs_unix_set_info_args *args = NULL;
1791 struct cifsFileInfo *open_file; 1914 struct cifsFileInfo *open_file;
1792 1915
@@ -1873,17 +1996,25 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1873 args->ctime = NO_CHANGE_64; 1996 args->ctime = NO_CHANGE_64;
1874 1997
1875 args->device = 0; 1998 args->device = 0;
1876 open_file = find_writable_file(cifsInode); 1999 open_file = find_writable_file(cifsInode, true);
1877 if (open_file) { 2000 if (open_file) {
1878 u16 nfid = open_file->netfid; 2001 u16 nfid = open_file->netfid;
1879 u32 npid = open_file->pid; 2002 u32 npid = open_file->pid;
2003 pTcon = tlink_tcon(open_file->tlink);
1880 rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid); 2004 rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
1881 cifsFileInfo_put(open_file); 2005 cifsFileInfo_put(open_file);
1882 } else { 2006 } else {
2007 tlink = cifs_sb_tlink(cifs_sb);
2008 if (IS_ERR(tlink)) {
2009 rc = PTR_ERR(tlink);
2010 goto out;
2011 }
2012 pTcon = tlink_tcon(tlink);
1883 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, 2013 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
1884 cifs_sb->local_nls, 2014 cifs_sb->local_nls,
1885 cifs_sb->mnt_cifs_flags & 2015 cifs_sb->mnt_cifs_flags &
1886 CIFS_MOUNT_MAP_SPECIAL_CHR); 2016 CIFS_MOUNT_MAP_SPECIAL_CHR);
2017 cifs_put_tlink(tlink);
1887 } 2018 }
1888 2019
1889 if (rc) 2020 if (rc)
@@ -2064,7 +2195,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
2064{ 2195{
2065 struct inode *inode = direntry->d_inode; 2196 struct inode *inode = direntry->d_inode;
2066 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2197 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2067 struct cifsTconInfo *pTcon = cifs_sb->tcon; 2198 struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb);
2068 2199
2069 if (pTcon->unix_ext) 2200 if (pTcon->unix_ext)
2070 return cifs_setattr_unix(direntry, attrs); 2201 return cifs_setattr_unix(direntry, attrs);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 9d38a71c8e14..077bf756f342 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -37,11 +37,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
37 int xid; 37 int xid;
38 struct cifs_sb_info *cifs_sb; 38 struct cifs_sb_info *cifs_sb;
39#ifdef CONFIG_CIFS_POSIX 39#ifdef CONFIG_CIFS_POSIX
40 struct cifsFileInfo *pSMBFile = filep->private_data;
41 struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
40 __u64 ExtAttrBits = 0; 42 __u64 ExtAttrBits = 0;
41 __u64 ExtAttrMask = 0; 43 __u64 ExtAttrMask = 0;
42 __u64 caps; 44 __u64 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
43 struct cifsTconInfo *tcon;
44 struct cifsFileInfo *pSMBFile = filep->private_data;
45#endif /* CONFIG_CIFS_POSIX */ 45#endif /* CONFIG_CIFS_POSIX */
46 46
47 xid = GetXid(); 47 xid = GetXid();
@@ -50,17 +50,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
50 50
51 cifs_sb = CIFS_SB(inode->i_sb); 51 cifs_sb = CIFS_SB(inode->i_sb);
52 52
53#ifdef CONFIG_CIFS_POSIX
54 tcon = cifs_sb->tcon;
55 if (tcon)
56 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
57 else {
58 rc = -EIO;
59 FreeXid(xid);
60 return -EIO;
61 }
62#endif /* CONFIG_CIFS_POSIX */
63
64 switch (command) { 53 switch (command) {
65 case CIFS_IOC_CHECKUMOUNT: 54 case CIFS_IOC_CHECKUMOUNT:
66 cFYI(1, "User unmount attempted"); 55 cFYI(1, "User unmount attempted");
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 473ca8033656..85cdbf831e7b 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -28,6 +28,296 @@
28#include "cifsproto.h" 28#include "cifsproto.h"
29#include "cifs_debug.h" 29#include "cifs_debug.h"
30#include "cifs_fs_sb.h" 30#include "cifs_fs_sb.h"
31#include "md5.h"
32
33#define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
34#define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
35#define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
36#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
37#define CIFS_MF_SYMLINK_FILE_SIZE \
38 (CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN)
39
40#define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n"
41#define CIFS_MF_SYMLINK_MD5_FORMAT \
42 "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n"
43#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) \
44 md5_hash[0], md5_hash[1], md5_hash[2], md5_hash[3], \
45 md5_hash[4], md5_hash[5], md5_hash[6], md5_hash[7], \
46 md5_hash[8], md5_hash[9], md5_hash[10], md5_hash[11],\
47 md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
48
49static int
50CIFSParseMFSymlink(const u8 *buf,
51 unsigned int buf_len,
52 unsigned int *_link_len,
53 char **_link_str)
54{
55 int rc;
56 unsigned int link_len;
57 const char *md5_str1;
58 const char *link_str;
59 struct MD5Context md5_ctx;
60 u8 md5_hash[16];
61 char md5_str2[34];
62
63 if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
64 return -EINVAL;
65
66 md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET];
67 link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET];
68
69 rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len);
70 if (rc != 1)
71 return -EINVAL;
72
73 cifs_MD5_init(&md5_ctx);
74 cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
75 cifs_MD5_final(md5_hash, &md5_ctx);
76
77 snprintf(md5_str2, sizeof(md5_str2),
78 CIFS_MF_SYMLINK_MD5_FORMAT,
79 CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
80
81 if (strncmp(md5_str1, md5_str2, 17) != 0)
82 return -EINVAL;
83
84 if (_link_str) {
85 *_link_str = kstrndup(link_str, link_len, GFP_KERNEL);
86 if (!*_link_str)
87 return -ENOMEM;
88 }
89
90 *_link_len = link_len;
91 return 0;
92}
93
94static int
95CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
96{
97 unsigned int link_len;
98 unsigned int ofs;
99 struct MD5Context md5_ctx;
100 u8 md5_hash[16];
101
102 if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
103 return -EINVAL;
104
105 link_len = strlen(link_str);
106
107 if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
108 return -ENAMETOOLONG;
109
110 cifs_MD5_init(&md5_ctx);
111 cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
112 cifs_MD5_final(md5_hash, &md5_ctx);
113
114 snprintf(buf, buf_len,
115 CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
116 link_len,
117 CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
118
119 ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
120 memcpy(buf + ofs, link_str, link_len);
121
122 ofs += link_len;
123 if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
124 buf[ofs] = '\n';
125 ofs++;
126 }
127
128 while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
129 buf[ofs] = ' ';
130 ofs++;
131 }
132
133 return 0;
134}
135
136static int
137CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
138 const char *fromName, const char *toName,
139 const struct nls_table *nls_codepage, int remap)
140{
141 int rc;
142 int oplock = 0;
143 __u16 netfid = 0;
144 u8 *buf;
145 unsigned int bytes_written = 0;
146
147 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
148 if (!buf)
149 return -ENOMEM;
150
151 rc = CIFSFormatMFSymlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
152 if (rc != 0) {
153 kfree(buf);
154 return rc;
155 }
156
157 rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
158 CREATE_NOT_DIR, &netfid, &oplock, NULL,
159 nls_codepage, remap);
160 if (rc != 0) {
161 kfree(buf);
162 return rc;
163 }
164
165 rc = CIFSSMBWrite(xid, tcon, netfid,
166 CIFS_MF_SYMLINK_FILE_SIZE /* length */,
167 0 /* offset */,
168 &bytes_written, buf, NULL, 0);
169 CIFSSMBClose(xid, tcon, netfid);
170 kfree(buf);
171 if (rc != 0)
172 return rc;
173
174 if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
175 return -EIO;
176
177 return 0;
178}
179
180static int
181CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
182 const unsigned char *searchName, char **symlinkinfo,
183 const struct nls_table *nls_codepage, int remap)
184{
185 int rc;
186 int oplock = 0;
187 __u16 netfid = 0;
188 u8 *buf;
189 char *pbuf;
190 unsigned int bytes_read = 0;
191 int buf_type = CIFS_NO_BUFFER;
192 unsigned int link_len = 0;
193 FILE_ALL_INFO file_info;
194
195 rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
196 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
197 nls_codepage, remap);
198 if (rc != 0)
199 return rc;
200
201 if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
202 CIFSSMBClose(xid, tcon, netfid);
203 /* it's not a symlink */
204 return -EINVAL;
205 }
206
207 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
208 if (!buf)
209 return -ENOMEM;
210 pbuf = buf;
211
212 rc = CIFSSMBRead(xid, tcon, netfid,
213 CIFS_MF_SYMLINK_FILE_SIZE /* length */,
214 0 /* offset */,
215 &bytes_read, &pbuf, &buf_type);
216 CIFSSMBClose(xid, tcon, netfid);
217 if (rc != 0) {
218 kfree(buf);
219 return rc;
220 }
221
222 rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, symlinkinfo);
223 kfree(buf);
224 if (rc != 0)
225 return rc;
226
227 return 0;
228}
229
230bool
231CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
232{
233 if (!(fattr->cf_mode & S_IFREG))
234 /* it's not a symlink */
235 return false;
236
237 if (fattr->cf_eof != CIFS_MF_SYMLINK_FILE_SIZE)
238 /* it's not a symlink */
239 return false;
240
241 return true;
242}
243
244int
245CIFSCheckMFSymlink(struct cifs_fattr *fattr,
246 const unsigned char *path,
247 struct cifs_sb_info *cifs_sb, int xid)
248{
249 int rc;
250 int oplock = 0;
251 __u16 netfid = 0;
252 struct tcon_link *tlink;
253 struct cifsTconInfo *pTcon;
254 u8 *buf;
255 char *pbuf;
256 unsigned int bytes_read = 0;
257 int buf_type = CIFS_NO_BUFFER;
258 unsigned int link_len = 0;
259 FILE_ALL_INFO file_info;
260
261 if (!CIFSCouldBeMFSymlink(fattr))
262 /* it's not a symlink */
263 return 0;
264
265 tlink = cifs_sb_tlink(cifs_sb);
266 if (IS_ERR(tlink))
267 return PTR_ERR(tlink);
268 pTcon = tlink_tcon(tlink);
269
270 rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
271 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
272 cifs_sb->local_nls,
273 cifs_sb->mnt_cifs_flags &
274 CIFS_MOUNT_MAP_SPECIAL_CHR);
275 if (rc != 0)
276 goto out;
277
278 if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
279 CIFSSMBClose(xid, pTcon, netfid);
280 /* it's not a symlink */
281 goto out;
282 }
283
284 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
285 if (!buf) {
286 rc = -ENOMEM;
287 goto out;
288 }
289 pbuf = buf;
290
291 rc = CIFSSMBRead(xid, pTcon, netfid,
292 CIFS_MF_SYMLINK_FILE_SIZE /* length */,
293 0 /* offset */,
294 &bytes_read, &pbuf, &buf_type);
295 CIFSSMBClose(xid, pTcon, netfid);
296 if (rc != 0) {
297 kfree(buf);
298 goto out;
299 }
300
301 rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
302 kfree(buf);
303 if (rc == -EINVAL) {
304 /* it's not a symlink */
305 rc = 0;
306 goto out;
307 }
308
309 if (rc != 0)
310 goto out;
311
312 /* it is a symlink */
313 fattr->cf_eof = link_len;
314 fattr->cf_mode &= ~S_IFMT;
315 fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
316 fattr->cf_dtype = DT_LNK;
317out:
318 cifs_put_tlink(tlink);
319 return rc;
320}
31 321
32int 322int
33cifs_hardlink(struct dentry *old_file, struct inode *inode, 323cifs_hardlink(struct dentry *old_file, struct inode *inode,
@@ -37,17 +327,17 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
37 int xid; 327 int xid;
38 char *fromName = NULL; 328 char *fromName = NULL;
39 char *toName = NULL; 329 char *toName = NULL;
40 struct cifs_sb_info *cifs_sb_target; 330 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
331 struct tcon_link *tlink;
41 struct cifsTconInfo *pTcon; 332 struct cifsTconInfo *pTcon;
42 struct cifsInodeInfo *cifsInode; 333 struct cifsInodeInfo *cifsInode;
43 334
44 xid = GetXid(); 335 tlink = cifs_sb_tlink(cifs_sb);
45 336 if (IS_ERR(tlink))
46 cifs_sb_target = CIFS_SB(inode->i_sb); 337 return PTR_ERR(tlink);
47 pTcon = cifs_sb_target->tcon; 338 pTcon = tlink_tcon(tlink);
48 339
49/* No need to check for cross device links since server will do that 340 xid = GetXid();
50 BB note DFS case in future though (when we may have to check) */
51 341
52 fromName = build_path_from_dentry(old_file); 342 fromName = build_path_from_dentry(old_file);
53 toName = build_path_from_dentry(direntry); 343 toName = build_path_from_dentry(direntry);
@@ -56,16 +346,15 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
56 goto cifs_hl_exit; 346 goto cifs_hl_exit;
57 } 347 }
58 348
59/* if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)*/
60 if (pTcon->unix_ext) 349 if (pTcon->unix_ext)
61 rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName, 350 rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName,
62 cifs_sb_target->local_nls, 351 cifs_sb->local_nls,
63 cifs_sb_target->mnt_cifs_flags & 352 cifs_sb->mnt_cifs_flags &
64 CIFS_MOUNT_MAP_SPECIAL_CHR); 353 CIFS_MOUNT_MAP_SPECIAL_CHR);
65 else { 354 else {
66 rc = CIFSCreateHardLink(xid, pTcon, fromName, toName, 355 rc = CIFSCreateHardLink(xid, pTcon, fromName, toName,
67 cifs_sb_target->local_nls, 356 cifs_sb->local_nls,
68 cifs_sb_target->mnt_cifs_flags & 357 cifs_sb->mnt_cifs_flags &
69 CIFS_MOUNT_MAP_SPECIAL_CHR); 358 CIFS_MOUNT_MAP_SPECIAL_CHR);
70 if ((rc == -EIO) || (rc == -EINVAL)) 359 if ((rc == -EIO) || (rc == -EINVAL))
71 rc = -EOPNOTSUPP; 360 rc = -EOPNOTSUPP;
@@ -101,6 +390,7 @@ cifs_hl_exit:
101 kfree(fromName); 390 kfree(fromName);
102 kfree(toName); 391 kfree(toName);
103 FreeXid(xid); 392 FreeXid(xid);
393 cifs_put_tlink(tlink);
104 return rc; 394 return rc;
105} 395}
106 396
@@ -113,10 +403,19 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
113 char *full_path = NULL; 403 char *full_path = NULL;
114 char *target_path = NULL; 404 char *target_path = NULL;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 405 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
116 struct cifsTconInfo *tcon = cifs_sb->tcon; 406 struct tcon_link *tlink = NULL;
407 struct cifsTconInfo *tcon;
117 408
118 xid = GetXid(); 409 xid = GetXid();
119 410
411 tlink = cifs_sb_tlink(cifs_sb);
412 if (IS_ERR(tlink)) {
413 rc = PTR_ERR(tlink);
414 tlink = NULL;
415 goto out;
416 }
417 tcon = tlink_tcon(tlink);
418
120 /* 419 /*
121 * For now, we just handle symlinks with unix extensions enabled. 420 * For now, we just handle symlinks with unix extensions enabled.
122 * Eventually we should handle NTFS reparse points, and MacOS 421 * Eventually we should handle NTFS reparse points, and MacOS
@@ -130,7 +429,8 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
130 * but there doesn't seem to be any harm in allowing the client to 429 * but there doesn't seem to be any harm in allowing the client to
131 * read them. 430 * read them.
132 */ 431 */
133 if (!(tcon->ses->capabilities & CAP_UNIX)) { 432 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
433 && !(tcon->ses->capabilities & CAP_UNIX)) {
134 rc = -EACCES; 434 rc = -EACCES;
135 goto out; 435 goto out;
136 } 436 }
@@ -141,8 +441,21 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
141 441
142 cFYI(1, "Full path: %s inode = 0x%p", full_path, inode); 442 cFYI(1, "Full path: %s inode = 0x%p", full_path, inode);
143 443
144 rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path, 444 rc = -EACCES;
145 cifs_sb->local_nls); 445 /*
446 * First try Minshall+French Symlinks, if configured
447 * and fallback to UNIX Extensions Symlinks.
448 */
449 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
450 rc = CIFSQueryMFSymLink(xid, tcon, full_path, &target_path,
451 cifs_sb->local_nls,
452 cifs_sb->mnt_cifs_flags &
453 CIFS_MOUNT_MAP_SPECIAL_CHR);
454
455 if ((rc != 0) && (tcon->ses->capabilities & CAP_UNIX))
456 rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
457 cifs_sb->local_nls);
458
146 kfree(full_path); 459 kfree(full_path);
147out: 460out:
148 if (rc != 0) { 461 if (rc != 0) {
@@ -151,6 +464,8 @@ out:
151 } 464 }
152 465
153 FreeXid(xid); 466 FreeXid(xid);
467 if (tlink)
468 cifs_put_tlink(tlink);
154 nd_set_link(nd, target_path); 469 nd_set_link(nd, target_path);
155 return NULL; 470 return NULL;
156} 471}
@@ -160,29 +475,37 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
160{ 475{
161 int rc = -EOPNOTSUPP; 476 int rc = -EOPNOTSUPP;
162 int xid; 477 int xid;
163 struct cifs_sb_info *cifs_sb; 478 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
479 struct tcon_link *tlink;
164 struct cifsTconInfo *pTcon; 480 struct cifsTconInfo *pTcon;
165 char *full_path = NULL; 481 char *full_path = NULL;
166 struct inode *newinode = NULL; 482 struct inode *newinode = NULL;
167 483
168 xid = GetXid(); 484 xid = GetXid();
169 485
170 cifs_sb = CIFS_SB(inode->i_sb); 486 tlink = cifs_sb_tlink(cifs_sb);
171 pTcon = cifs_sb->tcon; 487 if (IS_ERR(tlink)) {
488 rc = PTR_ERR(tlink);
489 goto symlink_exit;
490 }
491 pTcon = tlink_tcon(tlink);
172 492
173 full_path = build_path_from_dentry(direntry); 493 full_path = build_path_from_dentry(direntry);
174
175 if (full_path == NULL) { 494 if (full_path == NULL) {
176 rc = -ENOMEM; 495 rc = -ENOMEM;
177 FreeXid(xid); 496 goto symlink_exit;
178 return rc;
179 } 497 }
180 498
181 cFYI(1, "Full path: %s", full_path); 499 cFYI(1, "Full path: %s", full_path);
182 cFYI(1, "symname is %s", symname); 500 cFYI(1, "symname is %s", symname);
183 501
184 /* BB what if DFS and this volume is on different share? BB */ 502 /* BB what if DFS and this volume is on different share? BB */
185 if (pTcon->unix_ext) 503 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
504 rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
505 cifs_sb->local_nls,
506 cifs_sb->mnt_cifs_flags &
507 CIFS_MOUNT_MAP_SPECIAL_CHR);
508 else if (pTcon->unix_ext)
186 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, 509 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
187 cifs_sb->local_nls); 510 cifs_sb->local_nls);
188 /* else 511 /* else
@@ -208,8 +531,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
208 d_instantiate(direntry, newinode); 531 d_instantiate(direntry, newinode);
209 } 532 }
210 } 533 }
211 534symlink_exit:
212 kfree(full_path); 535 kfree(full_path);
536 cifs_put_tlink(tlink);
213 FreeXid(xid); 537 FreeXid(xid);
214 return rc; 538 return rc;
215} 539}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 3ccadc1326d6..1c681f6a6803 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -347,7 +347,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
347 if (current_fsuid() != treeCon->ses->linux_uid) { 347 if (current_fsuid() != treeCon->ses->linux_uid) {
348 cFYI(1, "Multiuser mode and UID " 348 cFYI(1, "Multiuser mode and UID "
349 "did not match tcon uid"); 349 "did not match tcon uid");
350 read_lock(&cifs_tcp_ses_lock); 350 spin_lock(&cifs_tcp_ses_lock);
351 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { 351 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
352 ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); 352 ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
353 if (ses->linux_uid == current_fsuid()) { 353 if (ses->linux_uid == current_fsuid()) {
@@ -361,7 +361,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
361 } 361 }
362 } 362 }
363 } 363 }
364 read_unlock(&cifs_tcp_ses_lock); 364 spin_unlock(&cifs_tcp_ses_lock);
365 } 365 }
366 } 366 }
367 } 367 }
@@ -551,7 +551,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
551 return false; 551 return false;
552 552
553 /* look up tcon based on tid & uid */ 553 /* look up tcon based on tid & uid */
554 read_lock(&cifs_tcp_ses_lock); 554 spin_lock(&cifs_tcp_ses_lock);
555 list_for_each(tmp, &srv->smb_ses_list) { 555 list_for_each(tmp, &srv->smb_ses_list) {
556 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 556 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
557 list_for_each(tmp1, &ses->tcon_list) { 557 list_for_each(tmp1, &ses->tcon_list) {
@@ -560,25 +560,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
560 continue; 560 continue;
561 561
562 cifs_stats_inc(&tcon->num_oplock_brks); 562 cifs_stats_inc(&tcon->num_oplock_brks);
563 read_lock(&GlobalSMBSeslock); 563 spin_lock(&cifs_file_list_lock);
564 list_for_each(tmp2, &tcon->openFileList) { 564 list_for_each(tmp2, &tcon->openFileList) {
565 netfile = list_entry(tmp2, struct cifsFileInfo, 565 netfile = list_entry(tmp2, struct cifsFileInfo,
566 tlist); 566 tlist);
567 if (pSMB->Fid != netfile->netfid) 567 if (pSMB->Fid != netfile->netfid)
568 continue; 568 continue;
569 569
570 /*
571 * don't do anything if file is about to be
572 * closed anyway.
573 */
574 if (netfile->closePend) {
575 read_unlock(&GlobalSMBSeslock);
576 read_unlock(&cifs_tcp_ses_lock);
577 return true;
578 }
579
580 cFYI(1, "file id match, oplock break"); 570 cFYI(1, "file id match, oplock break");
581 pCifsInode = CIFS_I(netfile->pInode); 571 pCifsInode = CIFS_I(netfile->dentry->d_inode);
582 pCifsInode->clientCanCacheAll = false; 572 pCifsInode->clientCanCacheAll = false;
583 if (pSMB->OplockLevel == 0) 573 if (pSMB->OplockLevel == 0)
584 pCifsInode->clientCanCacheRead = false; 574 pCifsInode->clientCanCacheRead = false;
@@ -594,17 +584,17 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
594 cifs_oplock_break_get(netfile); 584 cifs_oplock_break_get(netfile);
595 netfile->oplock_break_cancelled = false; 585 netfile->oplock_break_cancelled = false;
596 586
597 read_unlock(&GlobalSMBSeslock); 587 spin_unlock(&cifs_file_list_lock);
598 read_unlock(&cifs_tcp_ses_lock); 588 spin_unlock(&cifs_tcp_ses_lock);
599 return true; 589 return true;
600 } 590 }
601 read_unlock(&GlobalSMBSeslock); 591 spin_unlock(&cifs_file_list_lock);
602 read_unlock(&cifs_tcp_ses_lock); 592 spin_unlock(&cifs_tcp_ses_lock);
603 cFYI(1, "No matching file for oplock break"); 593 cFYI(1, "No matching file for oplock break");
604 return true; 594 return true;
605 } 595 }
606 } 596 }
607 read_unlock(&cifs_tcp_ses_lock); 597 spin_unlock(&cifs_tcp_ses_lock);
608 cFYI(1, "Can not process oplock break for non-existent connection"); 598 cFYI(1, "Can not process oplock break for non-existent connection");
609 return true; 599 return true;
610} 600}
@@ -729,6 +719,6 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
729 "properly. Hardlinks will not be recognized on this " 719 "properly. Hardlinks will not be recognized on this "
730 "mount. Consider mounting with the \"noserverino\" " 720 "mount. Consider mounting with the \"noserverino\" "
731 "option to silence this message.", 721 "option to silence this message.",
732 cifs_sb->tcon->treeName); 722 cifs_sb_master_tcon(cifs_sb)->treeName);
733 } 723 }
734} 724}
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 49c9a4e75319..5d52e4a3b1ed 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,6 +61,21 @@
61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
62#define NTLMSSP_NEGOTIATE_56 0x80000000 62#define NTLMSSP_NEGOTIATE_56 0x80000000
63 63
64/* Define AV Pair Field IDs */
65enum av_field_type {
66 NTLMSSP_AV_EOL = 0,
67 NTLMSSP_AV_NB_COMPUTER_NAME,
68 NTLMSSP_AV_NB_DOMAIN_NAME,
69 NTLMSSP_AV_DNS_COMPUTER_NAME,
70 NTLMSSP_AV_DNS_DOMAIN_NAME,
71 NTLMSSP_AV_DNS_TREE_NAME,
72 NTLMSSP_AV_FLAGS,
73 NTLMSSP_AV_TIMESTAMP,
74 NTLMSSP_AV_RESTRICTION,
75 NTLMSSP_AV_TARGET_NAME,
76 NTLMSSP_AV_CHANNEL_BINDINGS
77};
78
64/* Although typedefs are not commonly used for structure definitions */ 79/* Although typedefs are not commonly used for structure definitions */
65/* in the Linux kernel, in this particular case they are useful */ 80/* in the Linux kernel, in this particular case they are useful */
66/* to more closely match the standards document for NTLMSSP from */ 81/* to more closely match the standards document for NTLMSSP from */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index d5e591fab475..ef7bb7b50f58 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -102,7 +102,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
102 return NULL; 102 return NULL;
103 } 103 }
104 104
105 if (CIFS_SB(sb)->tcon->nocase) 105 if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
106 dentry->d_op = &cifs_ci_dentry_ops; 106 dentry->d_op = &cifs_ci_dentry_ops;
107 else 107 else
108 dentry->d_op = &cifs_dentry_ops; 108 dentry->d_op = &cifs_dentry_ops;
@@ -171,7 +171,7 @@ static void
171cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, 171cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
172 struct cifs_sb_info *cifs_sb) 172 struct cifs_sb_info *cifs_sb)
173{ 173{
174 int offset = cifs_sb->tcon->ses->server->timeAdj; 174 int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj;
175 175
176 memset(fattr, 0, sizeof(*fattr)); 176 memset(fattr, 0, sizeof(*fattr));
177 fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate, 177 fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
@@ -199,7 +199,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
199 int len; 199 int len;
200 int oplock = 0; 200 int oplock = 0;
201 int rc; 201 int rc;
202 struct cifsTconInfo *ptcon = cifs_sb->tcon; 202 struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb);
203 char *tmpbuffer; 203 char *tmpbuffer;
204 204
205 rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, 205 rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,34 +223,35 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
223static int initiate_cifs_search(const int xid, struct file *file) 223static int initiate_cifs_search(const int xid, struct file *file)
224{ 224{
225 int rc = 0; 225 int rc = 0;
226 char *full_path; 226 char *full_path = NULL;
227 struct cifsFileInfo *cifsFile; 227 struct cifsFileInfo *cifsFile;
228 struct cifs_sb_info *cifs_sb; 228 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
229 struct tcon_link *tlink;
229 struct cifsTconInfo *pTcon; 230 struct cifsTconInfo *pTcon;
230 231
231 if (file->private_data == NULL) { 232 tlink = cifs_sb_tlink(cifs_sb);
233 if (IS_ERR(tlink))
234 return PTR_ERR(tlink);
235 pTcon = tlink_tcon(tlink);
236
237 if (file->private_data == NULL)
232 file->private_data = 238 file->private_data =
233 kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); 239 kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
240 if (file->private_data == NULL) {
241 rc = -ENOMEM;
242 goto error_exit;
234 } 243 }
235 244
236 if (file->private_data == NULL)
237 return -ENOMEM;
238 cifsFile = file->private_data; 245 cifsFile = file->private_data;
239 cifsFile->invalidHandle = true; 246 cifsFile->invalidHandle = true;
240 cifsFile->srch_inf.endOfSearch = false; 247 cifsFile->srch_inf.endOfSearch = false;
241 248 cifsFile->tlink = cifs_get_tlink(tlink);
242 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
243 if (cifs_sb == NULL)
244 return -EINVAL;
245
246 pTcon = cifs_sb->tcon;
247 if (pTcon == NULL)
248 return -EINVAL;
249 249
250 full_path = build_path_from_dentry(file->f_path.dentry); 250 full_path = build_path_from_dentry(file->f_path.dentry);
251 251 if (full_path == NULL) {
252 if (full_path == NULL) 252 rc = -ENOMEM;
253 return -ENOMEM; 253 goto error_exit;
254 }
254 255
255 cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos); 256 cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos);
256 257
@@ -283,7 +284,9 @@ ffirst_retry:
283 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; 284 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
284 goto ffirst_retry; 285 goto ffirst_retry;
285 } 286 }
287error_exit:
286 kfree(full_path); 288 kfree(full_path);
289 cifs_put_tlink(tlink);
287 return rc; 290 return rc;
288} 291}
289 292
@@ -525,14 +528,14 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
525 (index_to_find < first_entry_in_buffer)) { 528 (index_to_find < first_entry_in_buffer)) {
526 /* close and restart search */ 529 /* close and restart search */
527 cFYI(1, "search backing up - close and restart search"); 530 cFYI(1, "search backing up - close and restart search");
528 write_lock(&GlobalSMBSeslock); 531 spin_lock(&cifs_file_list_lock);
529 if (!cifsFile->srch_inf.endOfSearch && 532 if (!cifsFile->srch_inf.endOfSearch &&
530 !cifsFile->invalidHandle) { 533 !cifsFile->invalidHandle) {
531 cifsFile->invalidHandle = true; 534 cifsFile->invalidHandle = true;
532 write_unlock(&GlobalSMBSeslock); 535 spin_unlock(&cifs_file_list_lock);
533 CIFSFindClose(xid, pTcon, cifsFile->netfid); 536 CIFSFindClose(xid, pTcon, cifsFile->netfid);
534 } else 537 } else
535 write_unlock(&GlobalSMBSeslock); 538 spin_unlock(&cifs_file_list_lock);
536 if (cifsFile->srch_inf.ntwrk_buf_start) { 539 if (cifsFile->srch_inf.ntwrk_buf_start) {
537 cFYI(1, "freeing SMB ff cache buf on search rewind"); 540 cFYI(1, "freeing SMB ff cache buf on search rewind");
538 if (cifsFile->srch_inf.smallBuf) 541 if (cifsFile->srch_inf.smallBuf)
@@ -738,6 +741,15 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
738 cifs_autodisable_serverino(cifs_sb); 741 cifs_autodisable_serverino(cifs_sb);
739 } 742 }
740 743
744 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
745 CIFSCouldBeMFSymlink(&fattr))
746 /*
747 * trying to get the type and mode can be slow,
748 * so just call those regular files for now, and mark
749 * for reval
750 */
751 fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
752
741 ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); 753 ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
742 tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr); 754 tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);
743 755
@@ -777,9 +789,17 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
777 xid = GetXid(); 789 xid = GetXid();
778 790
779 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 791 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
780 pTcon = cifs_sb->tcon; 792
781 if (pTcon == NULL) 793 /*
782 return -EINVAL; 794 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
795 * '..'. Otherwise we won't be able to notify VFS in case of failure.
796 */
797 if (file->private_data == NULL) {
798 rc = initiate_cifs_search(xid, file);
799 cFYI(1, "initiate cifs search rc %d", rc);
800 if (rc)
801 goto rddir2_exit;
802 }
783 803
784 switch ((int) file->f_pos) { 804 switch ((int) file->f_pos) {
785 case 0: 805 case 0:
@@ -805,14 +825,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
805 if after then keep searching till find it */ 825 if after then keep searching till find it */
806 826
807 if (file->private_data == NULL) { 827 if (file->private_data == NULL) {
808 rc = initiate_cifs_search(xid, file);
809 cFYI(1, "initiate cifs search rc %d", rc);
810 if (rc) {
811 FreeXid(xid);
812 return rc;
813 }
814 }
815 if (file->private_data == NULL) {
816 rc = -EINVAL; 828 rc = -EINVAL;
817 FreeXid(xid); 829 FreeXid(xid);
818 return rc; 830 return rc;
@@ -829,6 +841,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
829 CIFSFindClose(xid, pTcon, cifsFile->netfid); 841 CIFSFindClose(xid, pTcon, cifsFile->netfid);
830 } */ 842 } */
831 843
844 pTcon = tlink_tcon(cifsFile->tlink);
832 rc = find_cifs_entry(xid, pTcon, file, 845 rc = find_cifs_entry(xid, pTcon, file,
833 &current_entry, &num_to_fill); 846 &current_entry, &num_to_fill);
834 if (rc) { 847 if (rc) {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 0a57cb7db5dd..2a11efd96592 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -80,7 +80,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
80 if (max_vcs < 2) 80 if (max_vcs < 2)
81 max_vcs = 0xFFFF; 81 max_vcs = 0xFFFF;
82 82
83 write_lock(&cifs_tcp_ses_lock); 83 spin_lock(&cifs_tcp_ses_lock);
84 if ((ses->need_reconnect) && is_first_ses_reconnect(ses)) 84 if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
85 goto get_vc_num_exit; /* vcnum will be zero */ 85 goto get_vc_num_exit; /* vcnum will be zero */
86 for (i = ses->server->srv_count - 1; i < max_vcs; i++) { 86 for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
@@ -112,7 +112,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
112 vcnum = i; 112 vcnum = i;
113 ses->vcnum = vcnum; 113 ses->vcnum = vcnum;
114get_vc_num_exit: 114get_vc_num_exit:
115 write_unlock(&cifs_tcp_ses_lock); 115 spin_unlock(&cifs_tcp_ses_lock);
116 116
117 return cpu_to_le16(vcnum); 117 return cpu_to_le16(vcnum);
118} 118}
@@ -383,6 +383,9 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
384 struct cifsSesInfo *ses) 384 struct cifsSesInfo *ses)
385{ 385{
386 unsigned int tioffset; /* challenge message target info area */
387 unsigned int tilen; /* challenge message target info area length */
388
386 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; 389 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
387 390
388 if (blob_len < sizeof(CHALLENGE_MESSAGE)) { 391 if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -399,12 +402,25 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
399 return -EINVAL; 402 return -EINVAL;
400 } 403 }
401 404
402 memcpy(ses->server->cryptKey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE); 405 memcpy(ses->cryptKey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
403 /* BB we could decode pblob->NegotiateFlags; some may be useful */ 406 /* BB we could decode pblob->NegotiateFlags; some may be useful */
404 /* In particular we can examine sign flags */ 407 /* In particular we can examine sign flags */
405 /* BB spec says that if AvId field of MsvAvTimestamp is populated then 408 /* BB spec says that if AvId field of MsvAvTimestamp is populated then
406 we must set the MIC field of the AUTHENTICATE_MESSAGE */ 409 we must set the MIC field of the AUTHENTICATE_MESSAGE */
407 410
411 tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
412 tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
413 ses->tilen = tilen;
414 if (ses->tilen) {
415 ses->tiblob = kmalloc(tilen, GFP_KERNEL);
416 if (!ses->tiblob) {
417 cERROR(1, "Challenge target info allocation failure");
418 ses->tilen = 0;
419 return -ENOMEM;
420 }
421 memcpy(ses->tiblob, bcc_ptr + tioffset, ses->tilen);
422 }
423
408 return 0; 424 return 0;
409} 425}
410 426
@@ -425,7 +441,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
425 /* BB is NTLMV2 session security format easier to use here? */ 441 /* BB is NTLMV2 session security format easier to use here? */
426 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 442 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
427 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 443 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
428 NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM; 444 NTLMSSP_NEGOTIATE_NTLM;
429 if (ses->server->secMode & 445 if (ses->server->secMode &
430 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 446 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
431 flags |= NTLMSSP_NEGOTIATE_SIGN; 447 flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -448,13 +464,16 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
448 maximum possible size is fixed and small, making this approach cleaner. 464 maximum possible size is fixed and small, making this approach cleaner.
449 This function returns the length of the data in the blob */ 465 This function returns the length of the data in the blob */
450static int build_ntlmssp_auth_blob(unsigned char *pbuffer, 466static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
467 u16 *buflen,
451 struct cifsSesInfo *ses, 468 struct cifsSesInfo *ses,
452 const struct nls_table *nls_cp, bool first) 469 const struct nls_table *nls_cp)
453{ 470{
471 int rc;
472 unsigned int size;
454 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; 473 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
455 __u32 flags; 474 __u32 flags;
456 unsigned char *tmp; 475 unsigned char *tmp;
457 char ntlm_session_key[CIFS_SESS_KEY_SIZE]; 476 struct ntlmv2_resp ntlmv2_response = {};
458 477
459 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 478 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
460 sec_blob->MessageType = NtLmAuthenticate; 479 sec_blob->MessageType = NtLmAuthenticate;
@@ -462,7 +481,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
462 flags = NTLMSSP_NEGOTIATE_56 | 481 flags = NTLMSSP_NEGOTIATE_56 |
463 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | 482 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
464 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 483 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
465 NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM; 484 NTLMSSP_NEGOTIATE_NTLM;
466 if (ses->server->secMode & 485 if (ses->server->secMode &
467 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 486 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
468 flags |= NTLMSSP_NEGOTIATE_SIGN; 487 flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -477,19 +496,26 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
477 sec_blob->LmChallengeResponse.Length = 0; 496 sec_blob->LmChallengeResponse.Length = 0;
478 sec_blob->LmChallengeResponse.MaximumLength = 0; 497 sec_blob->LmChallengeResponse.MaximumLength = 0;
479 498
480 /* calculate session key, BB what about adding similar ntlmv2 path? */
481 SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
482 if (first)
483 cifs_calculate_mac_key(&ses->server->mac_signing_key,
484 ntlm_session_key, ses->password);
485
486 memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
487 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); 499 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
488 sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE); 500 rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
489 sec_blob->NtChallengeResponse.MaximumLength = 501 if (rc) {
490 cpu_to_le16(CIFS_SESS_KEY_SIZE); 502 cERROR(1, "Error %d during NTLMSSP authentication", rc);
503 goto setup_ntlmv2_ret;
504 }
505 size = sizeof(struct ntlmv2_resp);
506 memcpy(tmp, (char *)&ntlmv2_response, size);
507 tmp += size;
508 if (ses->tilen > 0) {
509 memcpy(tmp, ses->tiblob, ses->tilen);
510 tmp += ses->tilen;
511 }
491 512
492 tmp += CIFS_SESS_KEY_SIZE; 513 sec_blob->NtChallengeResponse.Length = cpu_to_le16(size + ses->tilen);
514 sec_blob->NtChallengeResponse.MaximumLength =
515 cpu_to_le16(size + ses->tilen);
516 kfree(ses->tiblob);
517 ses->tiblob = NULL;
518 ses->tilen = 0;
493 519
494 if (ses->domainName == NULL) { 520 if (ses->domainName == NULL) {
495 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 521 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -501,7 +527,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
501 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, 527 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
502 MAX_USERNAME_SIZE, nls_cp); 528 MAX_USERNAME_SIZE, nls_cp);
503 len *= 2; /* unicode is 2 bytes each */ 529 len *= 2; /* unicode is 2 bytes each */
504 len += 2; /* trailing null */
505 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 530 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
506 sec_blob->DomainName.Length = cpu_to_le16(len); 531 sec_blob->DomainName.Length = cpu_to_le16(len);
507 sec_blob->DomainName.MaximumLength = cpu_to_le16(len); 532 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -518,7 +543,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
518 len = cifs_strtoUCS((__le16 *)tmp, ses->userName, 543 len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
519 MAX_USERNAME_SIZE, nls_cp); 544 MAX_USERNAME_SIZE, nls_cp);
520 len *= 2; /* unicode is 2 bytes each */ 545 len *= 2; /* unicode is 2 bytes each */
521 len += 2; /* trailing null */
522 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 546 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
523 sec_blob->UserName.Length = cpu_to_le16(len); 547 sec_blob->UserName.Length = cpu_to_le16(len);
524 sec_blob->UserName.MaximumLength = cpu_to_le16(len); 548 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -533,7 +557,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
533 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); 557 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
534 sec_blob->SessionKey.Length = 0; 558 sec_blob->SessionKey.Length = 0;
535 sec_blob->SessionKey.MaximumLength = 0; 559 sec_blob->SessionKey.MaximumLength = 0;
536 return tmp - pbuffer; 560
561setup_ntlmv2_ret:
562 *buflen = tmp - pbuffer;
563 return rc;
537} 564}
538 565
539 566
@@ -545,19 +572,6 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
545 572
546 return; 573 return;
547} 574}
548
549static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
550 struct cifsSesInfo *ses,
551 const struct nls_table *nls, bool first_time)
552{
553 int bloblen;
554
555 bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
556 first_time);
557 pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
558
559 return bloblen;
560}
561#endif 575#endif
562 576
563int 577int
@@ -579,15 +593,12 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
579 int bytes_remaining; 593 int bytes_remaining;
580 struct key *spnego_key = NULL; 594 struct key *spnego_key = NULL;
581 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ 595 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
582 bool first_time; 596 u16 blob_len;
597 char *ntlmsspblob = NULL;
583 598
584 if (ses == NULL) 599 if (ses == NULL)
585 return -EINVAL; 600 return -EINVAL;
586 601
587 read_lock(&cifs_tcp_ses_lock);
588 first_time = is_first_ses_reconnect(ses);
589 read_unlock(&cifs_tcp_ses_lock);
590
591 type = ses->server->secType; 602 type = ses->server->secType;
592 603
593 cFYI(1, "sess setup type %d", type); 604 cFYI(1, "sess setup type %d", type);
@@ -658,7 +669,7 @@ ssetup_ntlmssp_authenticate:
658 /* BB calculate hash with password */ 669 /* BB calculate hash with password */
659 /* and copy into bcc */ 670 /* and copy into bcc */
660 671
661 calc_lanman_hash(ses->password, ses->server->cryptKey, 672 calc_lanman_hash(ses->password, ses->cryptKey,
662 ses->server->secMode & SECMODE_PW_ENCRYPT ? 673 ses->server->secMode & SECMODE_PW_ENCRYPT ?
663 true : false, lnm_session_key); 674 true : false, lnm_session_key);
664 675
@@ -685,15 +696,11 @@ ssetup_ntlmssp_authenticate:
685 cpu_to_le16(CIFS_SESS_KEY_SIZE); 696 cpu_to_le16(CIFS_SESS_KEY_SIZE);
686 697
687 /* calculate session key */ 698 /* calculate session key */
688 SMBNTencrypt(ses->password, ses->server->cryptKey, 699 SMBNTencrypt(ses->password, ses->cryptKey, ntlm_session_key);
689 ntlm_session_key);
690 700
691 if (first_time) /* should this be moved into common code 701 cifs_calculate_session_key(&ses->auth_key,
692 with similar ntlmv2 path? */ 702 ntlm_session_key, ses->password);
693 cifs_calculate_mac_key(&ses->server->mac_signing_key,
694 ntlm_session_key, ses->password);
695 /* copy session key */ 703 /* copy session key */
696
697 memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE); 704 memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
698 bcc_ptr += CIFS_SESS_KEY_SIZE; 705 bcc_ptr += CIFS_SESS_KEY_SIZE;
699 memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE); 706 memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
@@ -725,16 +732,31 @@ ssetup_ntlmssp_authenticate:
725 pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; 732 pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
726 /* cpu_to_le16(LM2_SESS_KEY_SIZE); */ 733 /* cpu_to_le16(LM2_SESS_KEY_SIZE); */
727 734
728 pSMB->req_no_secext.CaseSensitivePasswordLength =
729 cpu_to_le16(sizeof(struct ntlmv2_resp));
730
731 /* calculate session key */ 735 /* calculate session key */
732 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); 736 rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
733 /* FIXME: calculate MAC key */ 737 if (rc) {
738 cERROR(1, "Error %d during NTLMv2 authentication", rc);
739 kfree(v2_sess_key);
740 goto ssetup_exit;
741 }
734 memcpy(bcc_ptr, (char *)v2_sess_key, 742 memcpy(bcc_ptr, (char *)v2_sess_key,
735 sizeof(struct ntlmv2_resp)); 743 sizeof(struct ntlmv2_resp));
736 bcc_ptr += sizeof(struct ntlmv2_resp); 744 bcc_ptr += sizeof(struct ntlmv2_resp);
737 kfree(v2_sess_key); 745 kfree(v2_sess_key);
746 /* set case sensitive password length after tilen may get
747 * assigned, tilen is 0 otherwise.
748 */
749 pSMB->req_no_secext.CaseSensitivePasswordLength =
750 cpu_to_le16(sizeof(struct ntlmv2_resp) + ses->tilen);
751 if (ses->tilen > 0) {
752 memcpy(bcc_ptr, ses->tiblob, ses->tilen);
753 bcc_ptr += ses->tilen;
754 /* we never did allocate ses->domainName to free */
755 kfree(ses->tiblob);
756 ses->tiblob = NULL;
757 ses->tilen = 0;
758 }
759
738 if (ses->capabilities & CAP_UNICODE) { 760 if (ses->capabilities & CAP_UNICODE) {
739 if (iov[0].iov_len % 2) { 761 if (iov[0].iov_len % 2) {
740 *bcc_ptr = 0; 762 *bcc_ptr = 0;
@@ -765,17 +787,14 @@ ssetup_ntlmssp_authenticate:
765 } 787 }
766 /* bail out if key is too long */ 788 /* bail out if key is too long */
767 if (msg->sesskey_len > 789 if (msg->sesskey_len >
768 sizeof(ses->server->mac_signing_key.data.krb5)) { 790 sizeof(ses->auth_key.data.krb5)) {
769 cERROR(1, "Kerberos signing key too long (%u bytes)", 791 cERROR(1, "Kerberos signing key too long (%u bytes)",
770 msg->sesskey_len); 792 msg->sesskey_len);
771 rc = -EOVERFLOW; 793 rc = -EOVERFLOW;
772 goto ssetup_exit; 794 goto ssetup_exit;
773 } 795 }
774 if (first_time) { 796 ses->auth_key.len = msg->sesskey_len;
775 ses->server->mac_signing_key.len = msg->sesskey_len; 797 memcpy(ses->auth_key.data.krb5, msg->data, msg->sesskey_len);
776 memcpy(ses->server->mac_signing_key.data.krb5,
777 msg->data, msg->sesskey_len);
778 }
779 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; 798 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
780 capabilities |= CAP_EXTENDED_SECURITY; 799 capabilities |= CAP_EXTENDED_SECURITY;
781 pSMB->req.Capabilities = cpu_to_le32(capabilities); 800 pSMB->req.Capabilities = cpu_to_le32(capabilities);
@@ -815,12 +834,30 @@ ssetup_ntlmssp_authenticate:
815 if (phase == NtLmNegotiate) { 834 if (phase == NtLmNegotiate) {
816 setup_ntlmssp_neg_req(pSMB, ses); 835 setup_ntlmssp_neg_req(pSMB, ses);
817 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); 836 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
837 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
818 } else if (phase == NtLmAuthenticate) { 838 } else if (phase == NtLmAuthenticate) {
819 int blob_len; 839 /* 5 is an empirical value, large enought to
820 blob_len = setup_ntlmssp_auth_req(pSMB, ses, 840 * hold authenticate message, max 10 of
821 nls_cp, 841 * av paris, doamin,user,workstation mames,
822 first_time); 842 * flags etc..
843 */
844 ntlmsspblob = kmalloc(
845 5*sizeof(struct _AUTHENTICATE_MESSAGE),
846 GFP_KERNEL);
847 if (!ntlmsspblob) {
848 cERROR(1, "Can't allocate NTLMSSP");
849 rc = -ENOMEM;
850 goto ssetup_exit;
851 }
852
853 rc = build_ntlmssp_auth_blob(ntlmsspblob,
854 &blob_len, ses, nls_cp);
855 if (rc)
856 goto ssetup_exit;
823 iov[1].iov_len = blob_len; 857 iov[1].iov_len = blob_len;
858 iov[1].iov_base = ntlmsspblob;
859 pSMB->req.SecurityBlobLength =
860 cpu_to_le16(blob_len);
824 /* Make sure that we tell the server that we 861 /* Make sure that we tell the server that we
825 are using the uid that it just gave us back 862 are using the uid that it just gave us back
826 on the response (challenge) */ 863 on the response (challenge) */
@@ -830,7 +867,6 @@ ssetup_ntlmssp_authenticate:
830 rc = -ENOSYS; 867 rc = -ENOSYS;
831 goto ssetup_exit; 868 goto ssetup_exit;
832 } 869 }
833 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
834 /* unicode strings must be word aligned */ 870 /* unicode strings must be word aligned */
835 if ((iov[0].iov_len + iov[1].iov_len) % 2) { 871 if ((iov[0].iov_len + iov[1].iov_len) % 2) {
836 *bcc_ptr = 0; 872 *bcc_ptr = 0;
@@ -895,7 +931,6 @@ ssetup_ntlmssp_authenticate:
895 bcc_ptr = pByteArea(smb_buf); 931 bcc_ptr = pByteArea(smb_buf);
896 932
897 if (smb_buf->WordCount == 4) { 933 if (smb_buf->WordCount == 4) {
898 __u16 blob_len;
899 blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); 934 blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
900 if (blob_len > bytes_remaining) { 935 if (blob_len > bytes_remaining) {
901 cERROR(1, "bad security blob length %d", blob_len); 936 cERROR(1, "bad security blob length %d", blob_len);
@@ -931,6 +966,8 @@ ssetup_exit:
931 key_put(spnego_key); 966 key_put(spnego_key);
932 } 967 }
933 kfree(str_area); 968 kfree(str_area);
969 kfree(ntlmsspblob);
970 ntlmsspblob = NULL;
934 if (resp_buf_type == CIFS_SMALL_BUFFER) { 971 if (resp_buf_type == CIFS_SMALL_BUFFER) {
935 cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base); 972 cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base);
936 cifs_small_buf_release(iov[0].iov_base); 973 cifs_small_buf_release(iov[0].iov_base);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 82f78c4d6978..a66c91eb6eb4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
544 SECMODE_SIGN_ENABLED))) { 544 SECMODE_SIGN_ENABLED))) {
545 rc = cifs_verify_signature(midQ->resp_buf, 545 rc = cifs_verify_signature(midQ->resp_buf,
546 &ses->server->mac_signing_key, 546 &ses->server->session_key,
547 midQ->sequence_number+1); 547 midQ->sequence_number+1);
548 if (rc) { 548 if (rc) {
549 cERROR(1, "Unexpected SMB signature"); 549 cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
732 SECMODE_SIGN_ENABLED))) { 732 SECMODE_SIGN_ENABLED))) {
733 rc = cifs_verify_signature(out_buf, 733 rc = cifs_verify_signature(out_buf,
734 &ses->server->mac_signing_key, 734 &ses->server->session_key,
735 midQ->sequence_number+1); 735 midQ->sequence_number+1);
736 if (rc) { 736 if (rc) {
737 cERROR(1, "Unexpected SMB signature"); 737 cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
982 SECMODE_SIGN_ENABLED))) { 982 SECMODE_SIGN_ENABLED))) {
983 rc = cifs_verify_signature(out_buf, 983 rc = cifs_verify_signature(out_buf,
984 &ses->server->mac_signing_key, 984 &ses->server->session_key,
985 midQ->sequence_number+1); 985 midQ->sequence_number+1);
986 if (rc) { 986 if (rc) {
987 cERROR(1, "Unexpected SMB signature"); 987 cERROR(1, "Unexpected SMB signature");
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a1509207bfa6..a264b744bb41 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -47,9 +47,10 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
47#ifdef CONFIG_CIFS_XATTR 47#ifdef CONFIG_CIFS_XATTR
48 int xid; 48 int xid;
49 struct cifs_sb_info *cifs_sb; 49 struct cifs_sb_info *cifs_sb;
50 struct tcon_link *tlink;
50 struct cifsTconInfo *pTcon; 51 struct cifsTconInfo *pTcon;
51 struct super_block *sb; 52 struct super_block *sb;
52 char *full_path; 53 char *full_path = NULL;
53 54
54 if (direntry == NULL) 55 if (direntry == NULL)
55 return -EIO; 56 return -EIO;
@@ -58,16 +59,19 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
58 sb = direntry->d_inode->i_sb; 59 sb = direntry->d_inode->i_sb;
59 if (sb == NULL) 60 if (sb == NULL)
60 return -EIO; 61 return -EIO;
61 xid = GetXid();
62 62
63 cifs_sb = CIFS_SB(sb); 63 cifs_sb = CIFS_SB(sb);
64 pTcon = cifs_sb->tcon; 64 tlink = cifs_sb_tlink(cifs_sb);
65 if (IS_ERR(tlink))
66 return PTR_ERR(tlink);
67 pTcon = tlink_tcon(tlink);
68
69 xid = GetXid();
65 70
66 full_path = build_path_from_dentry(direntry); 71 full_path = build_path_from_dentry(direntry);
67 if (full_path == NULL) { 72 if (full_path == NULL) {
68 rc = -ENOMEM; 73 rc = -ENOMEM;
69 FreeXid(xid); 74 goto remove_ea_exit;
70 return rc;
71 } 75 }
72 if (ea_name == NULL) { 76 if (ea_name == NULL) {
73 cFYI(1, "Null xattr names not supported"); 77 cFYI(1, "Null xattr names not supported");
@@ -91,6 +95,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
91remove_ea_exit: 95remove_ea_exit:
92 kfree(full_path); 96 kfree(full_path);
93 FreeXid(xid); 97 FreeXid(xid);
98 cifs_put_tlink(tlink);
94#endif 99#endif
95 return rc; 100 return rc;
96} 101}
@@ -102,6 +107,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
102#ifdef CONFIG_CIFS_XATTR 107#ifdef CONFIG_CIFS_XATTR
103 int xid; 108 int xid;
104 struct cifs_sb_info *cifs_sb; 109 struct cifs_sb_info *cifs_sb;
110 struct tcon_link *tlink;
105 struct cifsTconInfo *pTcon; 111 struct cifsTconInfo *pTcon;
106 struct super_block *sb; 112 struct super_block *sb;
107 char *full_path; 113 char *full_path;
@@ -113,16 +119,19 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
113 sb = direntry->d_inode->i_sb; 119 sb = direntry->d_inode->i_sb;
114 if (sb == NULL) 120 if (sb == NULL)
115 return -EIO; 121 return -EIO;
116 xid = GetXid();
117 122
118 cifs_sb = CIFS_SB(sb); 123 cifs_sb = CIFS_SB(sb);
119 pTcon = cifs_sb->tcon; 124 tlink = cifs_sb_tlink(cifs_sb);
125 if (IS_ERR(tlink))
126 return PTR_ERR(tlink);
127 pTcon = tlink_tcon(tlink);
128
129 xid = GetXid();
120 130
121 full_path = build_path_from_dentry(direntry); 131 full_path = build_path_from_dentry(direntry);
122 if (full_path == NULL) { 132 if (full_path == NULL) {
123 rc = -ENOMEM; 133 rc = -ENOMEM;
124 FreeXid(xid); 134 goto set_ea_exit;
125 return rc;
126 } 135 }
127 /* return dos attributes as pseudo xattr */ 136 /* return dos attributes as pseudo xattr */
128 /* return alt name if available as pseudo attr */ 137 /* return alt name if available as pseudo attr */
@@ -132,9 +141,8 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
132 returns as xattrs */ 141 returns as xattrs */
133 if (value_size > MAX_EA_VALUE_SIZE) { 142 if (value_size > MAX_EA_VALUE_SIZE) {
134 cFYI(1, "size of EA value too large"); 143 cFYI(1, "size of EA value too large");
135 kfree(full_path); 144 rc = -EOPNOTSUPP;
136 FreeXid(xid); 145 goto set_ea_exit;
137 return -EOPNOTSUPP;
138 } 146 }
139 147
140 if (ea_name == NULL) { 148 if (ea_name == NULL) {
@@ -198,6 +206,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
198set_ea_exit: 206set_ea_exit:
199 kfree(full_path); 207 kfree(full_path);
200 FreeXid(xid); 208 FreeXid(xid);
209 cifs_put_tlink(tlink);
201#endif 210#endif
202 return rc; 211 return rc;
203} 212}
@@ -209,6 +218,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
209#ifdef CONFIG_CIFS_XATTR 218#ifdef CONFIG_CIFS_XATTR
210 int xid; 219 int xid;
211 struct cifs_sb_info *cifs_sb; 220 struct cifs_sb_info *cifs_sb;
221 struct tcon_link *tlink;
212 struct cifsTconInfo *pTcon; 222 struct cifsTconInfo *pTcon;
213 struct super_block *sb; 223 struct super_block *sb;
214 char *full_path; 224 char *full_path;
@@ -221,16 +231,18 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
221 if (sb == NULL) 231 if (sb == NULL)
222 return -EIO; 232 return -EIO;
223 233
224 xid = GetXid();
225
226 cifs_sb = CIFS_SB(sb); 234 cifs_sb = CIFS_SB(sb);
227 pTcon = cifs_sb->tcon; 235 tlink = cifs_sb_tlink(cifs_sb);
236 if (IS_ERR(tlink))
237 return PTR_ERR(tlink);
238 pTcon = tlink_tcon(tlink);
239
240 xid = GetXid();
228 241
229 full_path = build_path_from_dentry(direntry); 242 full_path = build_path_from_dentry(direntry);
230 if (full_path == NULL) { 243 if (full_path == NULL) {
231 rc = -ENOMEM; 244 rc = -ENOMEM;
232 FreeXid(xid); 245 goto get_ea_exit;
233 return rc;
234 } 246 }
235 /* return dos attributes as pseudo xattr */ 247 /* return dos attributes as pseudo xattr */
236 /* return alt name if available as pseudo attr */ 248 /* return alt name if available as pseudo attr */
@@ -323,6 +335,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
323get_ea_exit: 335get_ea_exit:
324 kfree(full_path); 336 kfree(full_path);
325 FreeXid(xid); 337 FreeXid(xid);
338 cifs_put_tlink(tlink);
326#endif 339#endif
327 return rc; 340 return rc;
328} 341}
@@ -333,6 +346,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
333#ifdef CONFIG_CIFS_XATTR 346#ifdef CONFIG_CIFS_XATTR
334 int xid; 347 int xid;
335 struct cifs_sb_info *cifs_sb; 348 struct cifs_sb_info *cifs_sb;
349 struct tcon_link *tlink;
336 struct cifsTconInfo *pTcon; 350 struct cifsTconInfo *pTcon;
337 struct super_block *sb; 351 struct super_block *sb;
338 char *full_path; 352 char *full_path;
@@ -346,18 +360,20 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
346 return -EIO; 360 return -EIO;
347 361
348 cifs_sb = CIFS_SB(sb); 362 cifs_sb = CIFS_SB(sb);
349 pTcon = cifs_sb->tcon;
350
351 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 363 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
352 return -EOPNOTSUPP; 364 return -EOPNOTSUPP;
353 365
366 tlink = cifs_sb_tlink(cifs_sb);
367 if (IS_ERR(tlink))
368 return PTR_ERR(tlink);
369 pTcon = tlink_tcon(tlink);
370
354 xid = GetXid(); 371 xid = GetXid();
355 372
356 full_path = build_path_from_dentry(direntry); 373 full_path = build_path_from_dentry(direntry);
357 if (full_path == NULL) { 374 if (full_path == NULL) {
358 rc = -ENOMEM; 375 rc = -ENOMEM;
359 FreeXid(xid); 376 goto list_ea_exit;
360 return rc;
361 } 377 }
362 /* return dos attributes as pseudo xattr */ 378 /* return dos attributes as pseudo xattr */
363 /* return alt name if available as pseudo attr */ 379 /* return alt name if available as pseudo attr */
@@ -370,8 +386,10 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
370 cifs_sb->mnt_cifs_flags & 386 cifs_sb->mnt_cifs_flags &
371 CIFS_MOUNT_MAP_SPECIAL_CHR); 387 CIFS_MOUNT_MAP_SPECIAL_CHR);
372 388
389list_ea_exit:
373 kfree(full_path); 390 kfree(full_path);
374 FreeXid(xid); 391 FreeXid(xid);
392 cifs_put_tlink(tlink);
375#endif 393#endif
376 return rc; 394 return rc;
377} 395}
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 031dbe3a15ca..64e5f3efdd81 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1846,6 +1846,9 @@ static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1846 struct dlm_lkb *gr; 1846 struct dlm_lkb *gr;
1847 1847
1848 list_for_each_entry(gr, head, lkb_statequeue) { 1848 list_for_each_entry(gr, head, lkb_statequeue) {
1849 /* skip self when sending basts to convertqueue */
1850 if (gr == lkb)
1851 continue;
1849 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { 1852 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
1850 queue_bast(r, gr, lkb->lkb_rqmode); 1853 queue_bast(r, gr, lkb->lkb_rqmode);
1851 gr->lkb_highbast = lkb->lkb_rqmode; 1854 gr->lkb_highbast = lkb->lkb_rqmode;
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index d7e9f74dc3a6..09b13bb34c94 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -90,7 +90,6 @@ int ext3_sync_file(struct file *file, int datasync)
90 * storage 90 * storage
91 */ 91 */
92 if (needs_barrier) 92 if (needs_barrier)
93 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, 93 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
94 BLKDEV_IFL_WAIT);
95 return ret; 94 return ret;
96} 95}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 592adf2e546e..3f3ff5ee8f9d 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -128,10 +128,9 @@ int ext4_sync_file(struct file *file, int datasync)
128 (journal->j_fs_dev != journal->j_dev) && 128 (journal->j_fs_dev != journal->j_dev) &&
129 (journal->j_flags & JBD2_BARRIER)) 129 (journal->j_flags & JBD2_BARRIER))
130 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, 130 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
131 NULL, BLKDEV_IFL_WAIT); 131 NULL);
132 ret = jbd2_log_wait_commit(journal, commit_tid); 132 ret = jbd2_log_wait_commit(journal, commit_tid);
133 } else if (journal->j_flags & JBD2_BARRIER) 133 } else if (journal->j_flags & JBD2_BARRIER)
134 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, 134 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
135 BLKDEV_IFL_WAIT);
136 return ret; 135 return ret;
137} 136}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4b4ad4b7ce57..19aa0d44d822 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2566,7 +2566,7 @@ static inline void ext4_issue_discard(struct super_block *sb,
2566 discard_block = block + ext4_group_first_block_no(sb, block_group); 2566 discard_block = block + ext4_group_first_block_no(sb, block_group);
2567 trace_ext4_discard_blocks(sb, 2567 trace_ext4_discard_blocks(sb,
2568 (unsigned long long) discard_block, count); 2568 (unsigned long long) discard_block, count);
2569 ret = sb_issue_discard(sb, discard_block, count); 2569 ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2570 if (ret == EOPNOTSUPP) { 2570 if (ret == EOPNOTSUPP) {
2571 ext4_warning(sb, "discard not supported, disabling"); 2571 ext4_warning(sb, "discard not supported, disabling");
2572 clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); 2572 clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 81184d3b75a3..b47d2c9f4fa1 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -577,7 +577,8 @@ int fat_free_clusters(struct inode *inode, int cluster)
577 577
578 sb_issue_discard(sb, 578 sb_issue_discard(sb,
579 fat_clus_to_blknr(sbi, first_cl), 579 fat_clus_to_blknr(sbi, first_cl),
580 nr_clus * sbi->sec_per_clus); 580 nr_clus * sbi->sec_per_clus,
581 GFP_NOFS, 0);
581 582
582 first_cl = cluster; 583 first_cl = cluster;
583 } 584 }
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1736f2356388..970e682ea754 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -255,10 +255,7 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
255 255
256 for (i = 0; i < nr_bhs; i++) { 256 for (i = 0; i < nr_bhs; i++) {
257 wait_on_buffer(bhs[i]); 257 wait_on_buffer(bhs[i]);
258 if (buffer_eopnotsupp(bhs[i])) { 258 if (!err && !buffer_uptodate(bhs[i]))
259 clear_buffer_eopnotsupp(bhs[i]);
260 err = -EOPNOTSUPP;
261 } else if (!err && !buffer_uptodate(bhs[i]))
262 err = -EIO; 259 err = -EIO;
263 } 260 }
264 return err; 261 return err;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ac750bd31a6f..eb01f3575e10 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -592,22 +592,13 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
592 lh->lh_hash = cpu_to_be32(hash); 592 lh->lh_hash = cpu_to_be32(hash);
593 593
594 bh->b_end_io = end_buffer_write_sync; 594 bh->b_end_io = end_buffer_write_sync;
595 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
596 goto skip_barrier;
597 get_bh(bh); 595 get_bh(bh);
598 submit_bh(WRITE_BARRIER | REQ_META, bh); 596 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
599 wait_on_buffer(bh);
600 if (buffer_eopnotsupp(bh)) {
601 clear_buffer_eopnotsupp(bh);
602 set_buffer_uptodate(bh);
603 fs_info(sdp, "barrier sync failed - disabling barriers\n");
604 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
605 lock_buffer(bh);
606skip_barrier:
607 get_bh(bh);
608 submit_bh(WRITE_SYNC | REQ_META, bh); 597 submit_bh(WRITE_SYNC | REQ_META, bh);
609 wait_on_buffer(bh); 598 else
610 } 599 submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
600 wait_on_buffer(bh);
601
611 if (!buffer_uptodate(bh)) 602 if (!buffer_uptodate(bh))
612 gfs2_io_error_bh(sdp, bh); 603 gfs2_io_error_bh(sdp, bh);
613 brelse(bh); 604 brelse(bh);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index d7eb1e209aa8..ebef7ab6e17e 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void)
144 144
145 error = -ENOMEM; 145 error = -ENOMEM;
146 gfs_recovery_wq = alloc_workqueue("gfs_recovery", 146 gfs_recovery_wq = alloc_workqueue("gfs_recovery",
147 WQ_RESCUER | WQ_FREEZEABLE, 0); 147 WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
148 if (!gfs_recovery_wq) 148 if (!gfs_recovery_wq)
149 goto fail_wq; 149 goto fail_wq;
150 150
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index fb67f593f408..bef3ab6cf5c1 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -866,8 +866,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
866 if ((start + nr_sects) != blk) { 866 if ((start + nr_sects) != blk) {
867 rv = blkdev_issue_discard(bdev, start, 867 rv = blkdev_issue_discard(bdev, start,
868 nr_sects, GFP_NOFS, 868 nr_sects, GFP_NOFS,
869 BLKDEV_IFL_WAIT | 869 0);
870 BLKDEV_IFL_BARRIER);
871 if (rv) 870 if (rv)
872 goto fail; 871 goto fail;
873 nr_sects = 0; 872 nr_sects = 0;
@@ -881,8 +880,7 @@ start_new_extent:
881 } 880 }
882 } 881 }
883 if (nr_sects) { 882 if (nr_sects) {
884 rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 883 rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
885 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
886 if (rv) 884 if (rv)
887 goto fail; 885 goto fail;
888 } 886 }
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 95d8c11c929e..85a6883c0aca 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -137,34 +137,10 @@ static int journal_write_commit_record(journal_t *journal,
137 JBUFFER_TRACE(descriptor, "write commit block"); 137 JBUFFER_TRACE(descriptor, "write commit block");
138 set_buffer_dirty(bh); 138 set_buffer_dirty(bh);
139 139
140 if (journal->j_flags & JFS_BARRIER) { 140 if (journal->j_flags & JFS_BARRIER)
141 ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER); 141 ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
142 142 else
143 /*
144 * Is it possible for another commit to fail at roughly
145 * the same time as this one? If so, we don't want to
146 * trust the barrier flag in the super, but instead want
147 * to remember if we sent a barrier request
148 */
149 if (ret == -EOPNOTSUPP) {
150 char b[BDEVNAME_SIZE];
151
152 printk(KERN_WARNING
153 "JBD: barrier-based sync failed on %s - "
154 "disabling barriers\n",
155 bdevname(journal->j_dev, b));
156 spin_lock(&journal->j_state_lock);
157 journal->j_flags &= ~JFS_BARRIER;
158 spin_unlock(&journal->j_state_lock);
159
160 /* And try again, without the barrier */
161 set_buffer_uptodate(bh);
162 set_buffer_dirty(bh);
163 ret = sync_dirty_buffer(bh);
164 }
165 } else {
166 ret = sync_dirty_buffer(bh); 143 ret = sync_dirty_buffer(bh);
167 }
168 144
169 put_bh(bh); /* One for getblk() */ 145 put_bh(bh); /* One for getblk() */
170 journal_put_journal_head(descriptor); 146 journal_put_journal_head(descriptor);
@@ -318,7 +294,7 @@ void journal_commit_transaction(journal_t *journal)
318 int first_tag = 0; 294 int first_tag = 0;
319 int tag_flag; 295 int tag_flag;
320 int i; 296 int i;
321 int write_op = WRITE; 297 int write_op = WRITE_SYNC;
322 298
323 /* 299 /*
324 * First job: lock down the current transaction and wait for 300 * First job: lock down the current transaction and wait for
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 5247e7ffdcb4..6571a056e55d 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -532,8 +532,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
532 */ 532 */
533 if ((journal->j_fs_dev != journal->j_dev) && 533 if ((journal->j_fs_dev != journal->j_dev) &&
534 (journal->j_flags & JBD2_BARRIER)) 534 (journal->j_flags & JBD2_BARRIER))
535 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL, 535 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
536 BLKDEV_IFL_WAIT);
537 if (!(journal->j_flags & JBD2_ABORT)) 536 if (!(journal->j_flags & JBD2_ABORT))
538 jbd2_journal_update_superblock(journal, 1); 537 jbd2_journal_update_superblock(journal, 1);
539 return 0; 538 return 0;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7c068c189d80..bc6be8bda1cc 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -134,25 +134,11 @@ static int journal_submit_commit_record(journal_t *journal,
134 134
135 if (journal->j_flags & JBD2_BARRIER && 135 if (journal->j_flags & JBD2_BARRIER &&
136 !JBD2_HAS_INCOMPAT_FEATURE(journal, 136 !JBD2_HAS_INCOMPAT_FEATURE(journal,
137 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { 137 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
138 ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh); 138 ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
139 if (ret == -EOPNOTSUPP) { 139 else
140 printk(KERN_WARNING
141 "JBD2: Disabling barriers on %s, "
142 "not supported by device\n", journal->j_devname);
143 write_lock(&journal->j_state_lock);
144 journal->j_flags &= ~JBD2_BARRIER;
145 write_unlock(&journal->j_state_lock);
146
147 /* And try again, without the barrier */
148 lock_buffer(bh);
149 set_buffer_uptodate(bh);
150 clear_buffer_dirty(bh);
151 ret = submit_bh(WRITE_SYNC_PLUG, bh);
152 }
153 } else {
154 ret = submit_bh(WRITE_SYNC_PLUG, bh); 140 ret = submit_bh(WRITE_SYNC_PLUG, bh);
155 } 141
156 *cbh = bh; 142 *cbh = bh;
157 return ret; 143 return ret;
158} 144}
@@ -166,29 +152,8 @@ static int journal_wait_on_commit_record(journal_t *journal,
166{ 152{
167 int ret = 0; 153 int ret = 0;
168 154
169retry:
170 clear_buffer_dirty(bh); 155 clear_buffer_dirty(bh);
171 wait_on_buffer(bh); 156 wait_on_buffer(bh);
172 if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
173 printk(KERN_WARNING
174 "JBD2: %s: disabling barries on %s - not supported "
175 "by device\n", __func__, journal->j_devname);
176 write_lock(&journal->j_state_lock);
177 journal->j_flags &= ~JBD2_BARRIER;
178 write_unlock(&journal->j_state_lock);
179
180 lock_buffer(bh);
181 clear_buffer_dirty(bh);
182 set_buffer_uptodate(bh);
183 bh->b_end_io = journal_end_buffer_io_sync;
184
185 ret = submit_bh(WRITE_SYNC_PLUG, bh);
186 if (ret) {
187 unlock_buffer(bh);
188 return ret;
189 }
190 goto retry;
191 }
192 157
193 if (unlikely(!buffer_uptodate(bh))) 158 if (unlikely(!buffer_uptodate(bh)))
194 ret = -EIO; 159 ret = -EIO;
@@ -360,7 +325,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
360 int tag_bytes = journal_tag_bytes(journal); 325 int tag_bytes = journal_tag_bytes(journal);
361 struct buffer_head *cbh = NULL; /* For transactional checksums */ 326 struct buffer_head *cbh = NULL; /* For transactional checksums */
362 __u32 crc32_sum = ~0; 327 __u32 crc32_sum = ~0;
363 int write_op = WRITE; 328 int write_op = WRITE_SYNC;
364 329
365 /* 330 /*
366 * First job: lock down the current transaction and wait for 331 * First job: lock down the current transaction and wait for
@@ -701,6 +666,16 @@ start_journal_io:
701 } 666 }
702 } 667 }
703 668
669 err = journal_finish_inode_data_buffers(journal, commit_transaction);
670 if (err) {
671 printk(KERN_WARNING
672 "JBD2: Detected IO errors while flushing file data "
673 "on %s\n", journal->j_devname);
674 if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
675 jbd2_journal_abort(journal, err);
676 err = 0;
677 }
678
704 /* 679 /*
705 * If the journal is not located on the file system device, 680 * If the journal is not located on the file system device,
706 * then we must flush the file system device before we issue 681 * then we must flush the file system device before we issue
@@ -709,8 +684,7 @@ start_journal_io:
709 if (commit_transaction->t_flushed_data_blocks && 684 if (commit_transaction->t_flushed_data_blocks &&
710 (journal->j_fs_dev != journal->j_dev) && 685 (journal->j_fs_dev != journal->j_dev) &&
711 (journal->j_flags & JBD2_BARRIER)) 686 (journal->j_flags & JBD2_BARRIER))
712 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL, 687 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
713 BLKDEV_IFL_WAIT);
714 688
715 /* Done it all: now write the commit record asynchronously. */ 689 /* Done it all: now write the commit record asynchronously. */
716 if (JBD2_HAS_INCOMPAT_FEATURE(journal, 690 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -719,19 +693,6 @@ start_journal_io:
719 &cbh, crc32_sum); 693 &cbh, crc32_sum);
720 if (err) 694 if (err)
721 __jbd2_journal_abort_hard(journal); 695 __jbd2_journal_abort_hard(journal);
722 if (journal->j_flags & JBD2_BARRIER)
723 blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
724 BLKDEV_IFL_WAIT);
725 }
726
727 err = journal_finish_inode_data_buffers(journal, commit_transaction);
728 if (err) {
729 printk(KERN_WARNING
730 "JBD2: Detected IO errors while flushing file data "
731 "on %s\n", journal->j_devname);
732 if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
733 jbd2_journal_abort(journal, err);
734 err = 0;
735 } 696 }
736 697
737 /* Lo and behold: we have just managed to send a transaction to 698 /* Lo and behold: we have just managed to send a transaction to
@@ -845,6 +806,11 @@ wait_for_iobuf:
845 } 806 }
846 if (!err && !is_journal_aborted(journal)) 807 if (!err && !is_journal_aborted(journal))
847 err = journal_wait_on_commit_record(journal, cbh); 808 err = journal_wait_on_commit_record(journal, cbh);
809 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
810 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
811 journal->j_flags & JBD2_BARRIER) {
812 blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
813 }
848 814
849 if (err) 815 if (err)
850 jbd2_journal_abort(journal, err); 816 jbd2_journal_abort(journal, err);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 9f4913f78408..f3b75206e956 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -177,17 +177,9 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
177 177
178 retry: 178 retry:
179 set_buffer_dirty(nilfs->ns_sbh[0]); 179 set_buffer_dirty(nilfs->ns_sbh[0]);
180
181 if (nilfs_test_opt(sbi, BARRIER)) { 180 if (nilfs_test_opt(sbi, BARRIER)) {
182 err = __sync_dirty_buffer(nilfs->ns_sbh[0], 181 err = __sync_dirty_buffer(nilfs->ns_sbh[0],
183 WRITE_SYNC | WRITE_BARRIER); 182 WRITE_SYNC | WRITE_FLUSH_FUA);
184 if (err == -EOPNOTSUPP) {
185 nilfs_warning(sbi->s_super, __func__,
186 "barrier-based sync failed. "
187 "disabling barriers\n");
188 nilfs_clear_opt(sbi, BARRIER);
189 goto retry;
190 }
191 } else { 183 } else {
192 err = sync_dirty_buffer(nilfs->ns_sbh[0]); 184 err = sync_dirty_buffer(nilfs->ns_sbh[0]);
193 } 185 }
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ba7c10c917fc..d27715103376 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -775,9 +775,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
775 ret = blkdev_issue_discard(nilfs->ns_bdev, 775 ret = blkdev_issue_discard(nilfs->ns_bdev,
776 start * sects_per_block, 776 start * sects_per_block,
777 nblocks * sects_per_block, 777 nblocks * sects_per_block,
778 GFP_NOFS, 778 GFP_NOFS, 0);
779 BLKDEV_IFL_WAIT |
780 BLKDEV_IFL_BARRIER);
781 if (ret < 0) 779 if (ret < 0)
782 return ret; 780 return ret;
783 nblocks = 0; 781 nblocks = 0;
@@ -787,8 +785,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
787 ret = blkdev_issue_discard(nilfs->ns_bdev, 785 ret = blkdev_issue_discard(nilfs->ns_bdev,
788 start * sects_per_block, 786 start * sects_per_block,
789 nblocks * sects_per_block, 787 nblocks * sects_per_block,
790 GFP_NOFS, 788 GFP_NOFS, 0);
791 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
792 return ret; 789 return ret;
793} 790}
794 791
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9e8cc4346b76..1ca6867935bb 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -187,8 +187,7 @@ static int ocfs2_sync_file(struct file *file, int datasync)
187 * platter 187 * platter
188 */ 188 */
189 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) 189 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
190 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, 190 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
191 NULL, BLKDEV_IFL_WAIT);
192 goto bail; 191 goto bail;
193 } 192 }
194 193
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 79fbf3f390f0..b81bfc016a05 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -352,6 +352,7 @@ static void part_release(struct device *dev)
352{ 352{
353 struct hd_struct *p = dev_to_part(dev); 353 struct hd_struct *p = dev_to_part(dev);
354 free_part_stats(p); 354 free_part_stats(p);
355 free_part_info(p);
355 kfree(p); 356 kfree(p);
356} 357}
357 358
@@ -364,17 +365,25 @@ struct device_type part_type = {
364static void delete_partition_rcu_cb(struct rcu_head *head) 365static void delete_partition_rcu_cb(struct rcu_head *head)
365{ 366{
366 struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); 367 struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
368 struct gendisk *disk = part_to_disk(part);
369 struct request_queue *q = disk->queue;
370 unsigned long flags;
367 371
368 part->start_sect = 0; 372 part->start_sect = 0;
369 part->nr_sects = 0; 373 part->nr_sects = 0;
370 part_stat_set_all(part, 0); 374 part_stat_set_all(part, 0);
371 put_device(part_to_dev(part)); 375 put_device(part_to_dev(part));
376
377 spin_lock_irqsave(q->queue_lock, flags);
378 elv_quiesce_end(q);
379 spin_unlock_irqrestore(q->queue_lock, flags);
372} 380}
373 381
374void delete_partition(struct gendisk *disk, int partno) 382void delete_partition(struct gendisk *disk, int partno)
375{ 383{
376 struct disk_part_tbl *ptbl = disk->part_tbl; 384 struct disk_part_tbl *ptbl = disk->part_tbl;
377 struct hd_struct *part; 385 struct hd_struct *part;
386 struct request_queue *q = disk->queue;
378 387
379 if (partno >= ptbl->len) 388 if (partno >= ptbl->len)
380 return; 389 return;
@@ -389,6 +398,10 @@ void delete_partition(struct gendisk *disk, int partno)
389 kobject_put(part->holder_dir); 398 kobject_put(part->holder_dir);
390 device_del(part_to_dev(part)); 399 device_del(part_to_dev(part));
391 400
401 spin_lock_irq(q->queue_lock);
402 elv_quiesce_start(q);
403 spin_unlock_irq(q->queue_lock);
404
392 call_rcu(&part->rcu_head, delete_partition_rcu_cb); 405 call_rcu(&part->rcu_head, delete_partition_rcu_cb);
393} 406}
394 407
@@ -401,7 +414,8 @@ static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
401 whole_disk_show, NULL); 414 whole_disk_show, NULL);
402 415
403struct hd_struct *add_partition(struct gendisk *disk, int partno, 416struct hd_struct *add_partition(struct gendisk *disk, int partno,
404 sector_t start, sector_t len, int flags) 417 sector_t start, sector_t len, int flags,
418 struct partition_meta_info *info)
405{ 419{
406 struct hd_struct *p; 420 struct hd_struct *p;
407 dev_t devt = MKDEV(0, 0); 421 dev_t devt = MKDEV(0, 0);
@@ -438,6 +452,14 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
438 p->partno = partno; 452 p->partno = partno;
439 p->policy = get_disk_ro(disk); 453 p->policy = get_disk_ro(disk);
440 454
455 if (info) {
456 struct partition_meta_info *pinfo = alloc_part_info(disk);
457 if (!pinfo)
458 goto out_free_stats;
459 memcpy(pinfo, info, sizeof(*info));
460 p->info = pinfo;
461 }
462
441 dname = dev_name(ddev); 463 dname = dev_name(ddev);
442 if (isdigit(dname[strlen(dname) - 1])) 464 if (isdigit(dname[strlen(dname) - 1]))
443 dev_set_name(pdev, "%sp%d", dname, partno); 465 dev_set_name(pdev, "%sp%d", dname, partno);
@@ -451,7 +473,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
451 473
452 err = blk_alloc_devt(p, &devt); 474 err = blk_alloc_devt(p, &devt);
453 if (err) 475 if (err)
454 goto out_free_stats; 476 goto out_free_info;
455 pdev->devt = devt; 477 pdev->devt = devt;
456 478
457 /* delay uevent until 'holders' subdir is created */ 479 /* delay uevent until 'holders' subdir is created */
@@ -481,6 +503,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
481 503
482 return p; 504 return p;
483 505
506out_free_info:
507 free_part_info(p);
484out_free_stats: 508out_free_stats:
485 free_part_stats(p); 509 free_part_stats(p);
486out_free: 510out_free:
@@ -513,14 +537,14 @@ void register_disk(struct gendisk *disk)
513 537
514 if (device_add(ddev)) 538 if (device_add(ddev))
515 return; 539 return;
516#ifndef CONFIG_SYSFS_DEPRECATED 540 if (!sysfs_deprecated) {
517 err = sysfs_create_link(block_depr, &ddev->kobj, 541 err = sysfs_create_link(block_depr, &ddev->kobj,
518 kobject_name(&ddev->kobj)); 542 kobject_name(&ddev->kobj));
519 if (err) { 543 if (err) {
520 device_del(ddev); 544 device_del(ddev);
521 return; 545 return;
546 }
522 } 547 }
523#endif
524 disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); 548 disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
525 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); 549 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
526 550
@@ -642,6 +666,7 @@ rescan:
642 /* add partitions */ 666 /* add partitions */
643 for (p = 1; p < state->limit; p++) { 667 for (p = 1; p < state->limit; p++) {
644 sector_t size, from; 668 sector_t size, from;
669 struct partition_meta_info *info = NULL;
645 670
646 size = state->parts[p].size; 671 size = state->parts[p].size;
647 if (!size) 672 if (!size)
@@ -675,8 +700,12 @@ rescan:
675 size = get_capacity(disk) - from; 700 size = get_capacity(disk) - from;
676 } 701 }
677 } 702 }
703
704 if (state->parts[p].has_info)
705 info = &state->parts[p].info;
678 part = add_partition(disk, p, from, size, 706 part = add_partition(disk, p, from, size,
679 state->parts[p].flags); 707 state->parts[p].flags,
708 &state->parts[p].info);
680 if (IS_ERR(part)) { 709 if (IS_ERR(part)) {
681 printk(KERN_ERR " %s: p%d could not be added: %ld\n", 710 printk(KERN_ERR " %s: p%d could not be added: %ld\n",
682 disk->disk_name, p, -PTR_ERR(part)); 711 disk->disk_name, p, -PTR_ERR(part));
@@ -737,8 +766,7 @@ void del_gendisk(struct gendisk *disk)
737 kobject_put(disk->part0.holder_dir); 766 kobject_put(disk->part0.holder_dir);
738 kobject_put(disk->slave_dir); 767 kobject_put(disk->slave_dir);
739 disk->driverfs_dev = NULL; 768 disk->driverfs_dev = NULL;
740#ifndef CONFIG_SYSFS_DEPRECATED 769 if (!sysfs_deprecated)
741 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); 770 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
742#endif
743 device_del(disk_to_dev(disk)); 771 device_del(disk_to_dev(disk));
744} 772}
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
index 8e4e103ba216..d68bf4dc3bc2 100644
--- a/fs/partitions/check.h
+++ b/fs/partitions/check.h
@@ -1,5 +1,6 @@
1#include <linux/pagemap.h> 1#include <linux/pagemap.h>
2#include <linux/blkdev.h> 2#include <linux/blkdev.h>
3#include <linux/genhd.h>
3 4
4/* 5/*
5 * add_gd_partition adds a partitions details to the devices partition 6 * add_gd_partition adds a partitions details to the devices partition
@@ -12,6 +13,8 @@ struct parsed_partitions {
12 sector_t from; 13 sector_t from;
13 sector_t size; 14 sector_t size;
14 int flags; 15 int flags;
16 bool has_info;
17 struct partition_meta_info info;
15 } parts[DISK_MAX_PARTS]; 18 } parts[DISK_MAX_PARTS];
16 int next; 19 int next;
17 int limit; 20 int limit;
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index dbb44d4bb8a7..ac0ccb5026a2 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -94,6 +94,7 @@
94 * 94 *
95 ************************************************************/ 95 ************************************************************/
96#include <linux/crc32.h> 96#include <linux/crc32.h>
97#include <linux/ctype.h>
97#include <linux/math64.h> 98#include <linux/math64.h>
98#include <linux/slab.h> 99#include <linux/slab.h>
99#include "check.h" 100#include "check.h"
@@ -604,6 +605,7 @@ int efi_partition(struct parsed_partitions *state)
604 gpt_entry *ptes = NULL; 605 gpt_entry *ptes = NULL;
605 u32 i; 606 u32 i;
606 unsigned ssz = bdev_logical_block_size(state->bdev) / 512; 607 unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
608 u8 unparsed_guid[37];
607 609
608 if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) { 610 if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
609 kfree(gpt); 611 kfree(gpt);
@@ -614,6 +616,9 @@ int efi_partition(struct parsed_partitions *state)
614 pr_debug("GUID Partition Table is valid! Yea!\n"); 616 pr_debug("GUID Partition Table is valid! Yea!\n");
615 617
616 for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { 618 for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
619 struct partition_meta_info *info;
620 unsigned label_count = 0;
621 unsigned label_max;
617 u64 start = le64_to_cpu(ptes[i].starting_lba); 622 u64 start = le64_to_cpu(ptes[i].starting_lba);
618 u64 size = le64_to_cpu(ptes[i].ending_lba) - 623 u64 size = le64_to_cpu(ptes[i].ending_lba) -
619 le64_to_cpu(ptes[i].starting_lba) + 1ULL; 624 le64_to_cpu(ptes[i].starting_lba) + 1ULL;
@@ -627,6 +632,26 @@ int efi_partition(struct parsed_partitions *state)
627 if (!efi_guidcmp(ptes[i].partition_type_guid, 632 if (!efi_guidcmp(ptes[i].partition_type_guid,
628 PARTITION_LINUX_RAID_GUID)) 633 PARTITION_LINUX_RAID_GUID))
629 state->parts[i + 1].flags = ADDPART_FLAG_RAID; 634 state->parts[i + 1].flags = ADDPART_FLAG_RAID;
635
636 info = &state->parts[i + 1].info;
637 /* Instead of doing a manual swap to big endian, reuse the
638 * common ASCII hex format as the interim.
639 */
640 efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
641 part_pack_uuid(unparsed_guid, info->uuid);
642
643 /* Naively convert UTF16-LE to 7 bits. */
644 label_max = min(sizeof(info->volname) - 1,
645 sizeof(ptes[i].partition_name));
646 info->volname[label_max] = 0;
647 while (label_count < label_max) {
648 u8 c = ptes[i].partition_name[label_count] & 0xff;
649 if (c && !isprint(c))
650 c = '!';
651 info->volname[label_count] = c;
652 label_count++;
653 }
654 state->parts[i + 1].has_info = true;
630 } 655 }
631 kfree(ptes); 656 kfree(ptes);
632 kfree(gpt); 657 kfree(gpt);
diff --git a/fs/pipe.c b/fs/pipe.c
index 279eef96c51c..37eb1ebeaa90 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
382 error = ops->confirm(pipe, buf); 382 error = ops->confirm(pipe, buf);
383 if (error) { 383 if (error) {
384 if (!ret) 384 if (!ret)
385 error = ret; 385 ret = error;
386 break; 386 break;
387 } 387 }
388 388
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 83adcc869437..dc44f94022f1 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -12,7 +12,10 @@
12#include <linux/proc_fs.h> 12#include <linux/proc_fs.h>
13#include <linux/stat.h> 13#include <linux/stat.h>
14#include <linux/tty.h> 14#include <linux/tty.h>
15#include <linux/tty_driver.h>
16#include <linux/console.h>
15#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/fdtable.h>
16#include <linux/bitops.h> 19#include <linux/bitops.h>
17 20
18/* 21/*
@@ -137,6 +140,160 @@ static const struct file_operations proc_tty_drivers_operations = {
137}; 140};
138 141
139/* 142/*
143 * The device ID of file descriptor 0 of the current reading
144 * task if a character device...
145 */
146static dev_t current_dev;
147
148/*
149 * This is the handler for /proc/tty/consoles
150 */
151static int show_console_dev(struct seq_file *m, void *v)
152{
153 const struct tty_driver *driver;
154 struct console *con;
155 int index, len;
156 char flags[10];
157 dev_t dev;
158
159 if (v == SEQ_START_TOKEN)
160 return 0;
161 con = (struct console *)v;
162 if (!con)
163 return 0;
164 driver = con->device(con, &index);
165 if (!driver)
166 return 0;
167 dev = MKDEV(driver->major, driver->minor_start) + index;
168
169 index = 0;
170 if (con->flags & CON_ENABLED)
171 flags[index++] = 'E';
172 if (con->flags & CON_CONSDEV)
173 flags[index++] = 'C';
174 if (con->flags & CON_BOOT)
175 flags[index++] = 'B';
176 if (con->flags & CON_PRINTBUFFER)
177 flags[index++] = 'p';
178 if (con->flags & CON_BRL)
179 flags[index++] = 'b';
180 if (con->flags & CON_ANYTIME)
181 flags[index++] = 'a';
182 if (current_dev == dev)
183 flags[index++] = '*';
184 flags[index] = 0;
185
186 seq_printf(m, "%s%d%n", con->name, con->index, &len);
187 len = 21 - len;
188 if (len < 1)
189 len = 1;
190 seq_printf(m, "%*c", len, ' ');
191 seq_printf(m, "%c%c%c (%s)%n", con->read ? 'R' : '-',
192 con->write ? 'W' : '-', con->unblank ? 'U' : '-',
193 flags, &len);
194 len = 13 - len;
195 if (len < 1)
196 len = 1;
197 seq_printf(m, "%*c%4d:%d\n", len, ' ', MAJOR(dev), MINOR(dev));
198
199 return 0;
200}
201
202/* iterator for consoles */
203static void *c_start(struct seq_file *m, loff_t *pos)
204{
205 struct console *con;
206 loff_t off = 0;
207
208 if (*pos == 0)
209 return SEQ_START_TOKEN;
210
211 acquire_console_sem();
212 for (con = console_drivers; con; con = con->next) {
213 if (!con->device)
214 continue;
215 if (++off == *pos)
216 break;
217 }
218 release_console_sem();
219
220 return con;
221}
222
223static void *c_next(struct seq_file *m, void *v, loff_t *pos)
224{
225 struct console *con;
226
227 acquire_console_sem();
228 if (v == SEQ_START_TOKEN)
229 con = console_drivers;
230 else
231 con = ((struct console *)v)->next;
232 for (; con; con = con->next) {
233 if (!con->device)
234 continue;
235 ++*pos;
236 break;
237 }
238 release_console_sem();
239
240 return con;
241}
242
243static void c_stop(struct seq_file *m, void *v)
244{
245}
246
247static const struct seq_operations tty_consoles_op = {
248 .start = c_start,
249 .next = c_next,
250 .stop = c_stop,
251 .show = show_console_dev
252};
253
254/*
255 * Used for open /proc/tty/consoles. Before this detect
256 * the device ID of file descriptor 0 of the current
257 * reading task if a character device...
258 */
259static int tty_consoles_open(struct inode *inode, struct file *file)
260{
261 struct files_struct *curfiles;
262
263 current_dev = 0;
264 curfiles = get_files_struct(current);
265 if (curfiles) {
266 const struct file *curfp;
267 spin_lock(&curfiles->file_lock);
268 curfp = fcheck_files(curfiles, 0);
269 if (curfp && curfp->private_data) {
270 const struct inode *inode;
271 dget(curfp->f_dentry);
272 inode = curfp->f_dentry->d_inode;
273 if (S_ISCHR(inode->i_mode)) {
274 struct tty_struct *tty;
275 tty = (struct tty_struct *)curfp->private_data;
276 if (tty && tty->magic == TTY_MAGIC) {
277 tty = tty_pair_get_tty(tty);
278 current_dev = tty_devnum(tty);
279 }
280 }
281 dput(curfp->f_dentry);
282 }
283 spin_unlock(&curfiles->file_lock);
284 put_files_struct(curfiles);
285 }
286 return seq_open(file, &tty_consoles_op);
287}
288
289static const struct file_operations proc_tty_consoles_operations = {
290 .open = tty_consoles_open,
291 .read = seq_read,
292 .llseek = seq_lseek,
293 .release = seq_release,
294};
295
296/*
140 * This function is called by tty_register_driver() to handle 297 * This function is called by tty_register_driver() to handle
141 * registering the driver's /proc handler into /proc/tty/driver/<foo> 298 * registering the driver's /proc handler into /proc/tty/driver/<foo>
142 */ 299 */
@@ -186,4 +343,5 @@ void __init proc_tty_init(void)
186 proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL); 343 proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
187 proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops); 344 proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
188 proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations); 345 proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
346 proc_create("tty/consoles", 0, NULL, &proc_tty_consoles_operations);
189} 347}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 6846371498b6..91f080cc76c8 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -152,8 +152,7 @@ static int reiserfs_sync_file(struct file *filp, int datasync)
152 barrier_done = reiserfs_commit_for_inode(inode); 152 barrier_done = reiserfs_commit_for_inode(inode);
153 reiserfs_write_unlock(inode->i_sb); 153 reiserfs_write_unlock(inode->i_sb);
154 if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) 154 if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
155 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, 155 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
156 BLKDEV_IFL_WAIT);
157 if (barrier_done < 0) 156 if (barrier_done < 0)
158 return barrier_done; 157 return barrier_done;
159 return (err < 0) ? -EIO : 0; 158 return (err < 0) ? -EIO : 0;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 812e2c05aa29..076c8b194682 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -138,13 +138,6 @@ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
138 return 0; 138 return 0;
139} 139}
140 140
141static void disable_barrier(struct super_block *s)
142{
143 REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
144 printk("reiserfs: disabling flush barriers on %s\n",
145 reiserfs_bdevname(s));
146}
147
148static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block 141static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
149 *sb) 142 *sb)
150{ 143{
@@ -677,30 +670,6 @@ static void submit_ordered_buffer(struct buffer_head *bh)
677 submit_bh(WRITE, bh); 670 submit_bh(WRITE, bh);
678} 671}
679 672
680static int submit_barrier_buffer(struct buffer_head *bh)
681{
682 get_bh(bh);
683 bh->b_end_io = reiserfs_end_ordered_io;
684 clear_buffer_dirty(bh);
685 if (!buffer_uptodate(bh))
686 BUG();
687 return submit_bh(WRITE_BARRIER, bh);
688}
689
690static void check_barrier_completion(struct super_block *s,
691 struct buffer_head *bh)
692{
693 if (buffer_eopnotsupp(bh)) {
694 clear_buffer_eopnotsupp(bh);
695 disable_barrier(s);
696 set_buffer_uptodate(bh);
697 set_buffer_dirty(bh);
698 reiserfs_write_unlock(s);
699 sync_dirty_buffer(bh);
700 reiserfs_write_lock(s);
701 }
702}
703
704#define CHUNK_SIZE 32 673#define CHUNK_SIZE 32
705struct buffer_chunk { 674struct buffer_chunk {
706 struct buffer_head *bh[CHUNK_SIZE]; 675 struct buffer_head *bh[CHUNK_SIZE];
@@ -1009,7 +978,6 @@ static int flush_commit_list(struct super_block *s,
1009 struct buffer_head *tbh = NULL; 978 struct buffer_head *tbh = NULL;
1010 unsigned int trans_id = jl->j_trans_id; 979 unsigned int trans_id = jl->j_trans_id;
1011 struct reiserfs_journal *journal = SB_JOURNAL(s); 980 struct reiserfs_journal *journal = SB_JOURNAL(s);
1012 int barrier = 0;
1013 int retval = 0; 981 int retval = 0;
1014 int write_len; 982 int write_len;
1015 983
@@ -1094,24 +1062,6 @@ static int flush_commit_list(struct super_block *s,
1094 } 1062 }
1095 atomic_dec(&journal->j_async_throttle); 1063 atomic_dec(&journal->j_async_throttle);
1096 1064
1097 /* We're skipping the commit if there's an error */
1098 if (retval || reiserfs_is_journal_aborted(journal))
1099 barrier = 0;
1100
1101 /* wait on everything written so far before writing the commit
1102 * if we are in barrier mode, send the commit down now
1103 */
1104 barrier = reiserfs_barrier_flush(s);
1105 if (barrier) {
1106 int ret;
1107 lock_buffer(jl->j_commit_bh);
1108 ret = submit_barrier_buffer(jl->j_commit_bh);
1109 if (ret == -EOPNOTSUPP) {
1110 set_buffer_uptodate(jl->j_commit_bh);
1111 disable_barrier(s);
1112 barrier = 0;
1113 }
1114 }
1115 for (i = 0; i < (jl->j_len + 1); i++) { 1065 for (i = 0; i < (jl->j_len + 1); i++) {
1116 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + 1066 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1117 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); 1067 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
@@ -1143,27 +1093,22 @@ static int flush_commit_list(struct super_block *s,
1143 1093
1144 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1); 1094 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
1145 1095
1146 if (!barrier) { 1096 /* If there was a write error in the journal - we can't commit
1147 /* If there was a write error in the journal - we can't commit 1097 * this transaction - it will be invalid and, if successful,
1148 * this transaction - it will be invalid and, if successful, 1098 * will just end up propagating the write error out to
1149 * will just end up propagating the write error out to 1099 * the file system. */
1150 * the file system. */ 1100 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
1151 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) { 1101 if (buffer_dirty(jl->j_commit_bh))
1152 if (buffer_dirty(jl->j_commit_bh)) 1102 BUG();
1153 BUG(); 1103 mark_buffer_dirty(jl->j_commit_bh) ;
1154 mark_buffer_dirty(jl->j_commit_bh) ;
1155 reiserfs_write_unlock(s);
1156 sync_dirty_buffer(jl->j_commit_bh) ;
1157 reiserfs_write_lock(s);
1158 }
1159 } else {
1160 reiserfs_write_unlock(s); 1104 reiserfs_write_unlock(s);
1161 wait_on_buffer(jl->j_commit_bh); 1105 if (reiserfs_barrier_flush(s))
1106 __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
1107 else
1108 sync_dirty_buffer(jl->j_commit_bh);
1162 reiserfs_write_lock(s); 1109 reiserfs_write_lock(s);
1163 } 1110 }
1164 1111
1165 check_barrier_completion(s, jl->j_commit_bh);
1166
1167 /* If there was a write error in the journal - we can't commit this 1112 /* If there was a write error in the journal - we can't commit this
1168 * transaction - it will be invalid and, if successful, will just end 1113 * transaction - it will be invalid and, if successful, will just end
1169 * up propagating the write error out to the filesystem. */ 1114 * up propagating the write error out to the filesystem. */
@@ -1319,26 +1264,15 @@ static int _update_journal_header_block(struct super_block *sb,
1319 jh->j_first_unflushed_offset = cpu_to_le32(offset); 1264 jh->j_first_unflushed_offset = cpu_to_le32(offset);
1320 jh->j_mount_id = cpu_to_le32(journal->j_mount_id); 1265 jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1321 1266
1322 if (reiserfs_barrier_flush(sb)) { 1267 set_buffer_dirty(journal->j_header_bh);
1323 int ret; 1268 reiserfs_write_unlock(sb);
1324 lock_buffer(journal->j_header_bh); 1269
1325 ret = submit_barrier_buffer(journal->j_header_bh); 1270 if (reiserfs_barrier_flush(sb))
1326 if (ret == -EOPNOTSUPP) { 1271 __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
1327 set_buffer_uptodate(journal->j_header_bh); 1272 else
1328 disable_barrier(sb);
1329 goto sync;
1330 }
1331 reiserfs_write_unlock(sb);
1332 wait_on_buffer(journal->j_header_bh);
1333 reiserfs_write_lock(sb);
1334 check_barrier_completion(sb, journal->j_header_bh);
1335 } else {
1336 sync:
1337 set_buffer_dirty(journal->j_header_bh);
1338 reiserfs_write_unlock(sb);
1339 sync_dirty_buffer(journal->j_header_bh); 1273 sync_dirty_buffer(journal->j_header_bh);
1340 reiserfs_write_lock(sb); 1274
1341 } 1275 reiserfs_write_lock(sb);
1342 if (!buffer_uptodate(journal->j_header_bh)) { 1276 if (!buffer_uptodate(journal->j_header_bh)) {
1343 reiserfs_warning(sb, "journal-837", 1277 reiserfs_warning(sb, "journal-837",
1344 "IO error during journal replay"); 1278 "IO error during journal replay");
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 4e321f7353fa..a4759833d62d 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -179,30 +179,14 @@ static void bin_vma_open(struct vm_area_struct *vma)
179 struct bin_buffer *bb = file->private_data; 179 struct bin_buffer *bb = file->private_data;
180 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 180 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
181 181
182 if (!bb->vm_ops || !bb->vm_ops->open) 182 if (!bb->vm_ops)
183 return;
184
185 if (!sysfs_get_active(attr_sd))
186 return;
187
188 bb->vm_ops->open(vma);
189
190 sysfs_put_active(attr_sd);
191}
192
193static void bin_vma_close(struct vm_area_struct *vma)
194{
195 struct file *file = vma->vm_file;
196 struct bin_buffer *bb = file->private_data;
197 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
198
199 if (!bb->vm_ops || !bb->vm_ops->close)
200 return; 183 return;
201 184
202 if (!sysfs_get_active(attr_sd)) 185 if (!sysfs_get_active(attr_sd))
203 return; 186 return;
204 187
205 bb->vm_ops->close(vma); 188 if (bb->vm_ops->open)
189 bb->vm_ops->open(vma);
206 190
207 sysfs_put_active(attr_sd); 191 sysfs_put_active(attr_sd);
208} 192}
@@ -214,13 +198,15 @@ static int bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
214 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 198 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
215 int ret; 199 int ret;
216 200
217 if (!bb->vm_ops || !bb->vm_ops->fault) 201 if (!bb->vm_ops)
218 return VM_FAULT_SIGBUS; 202 return VM_FAULT_SIGBUS;
219 203
220 if (!sysfs_get_active(attr_sd)) 204 if (!sysfs_get_active(attr_sd))
221 return VM_FAULT_SIGBUS; 205 return VM_FAULT_SIGBUS;
222 206
223 ret = bb->vm_ops->fault(vma, vmf); 207 ret = VM_FAULT_SIGBUS;
208 if (bb->vm_ops->fault)
209 ret = bb->vm_ops->fault(vma, vmf);
224 210
225 sysfs_put_active(attr_sd); 211 sysfs_put_active(attr_sd);
226 return ret; 212 return ret;
@@ -236,13 +222,12 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
236 if (!bb->vm_ops) 222 if (!bb->vm_ops)
237 return VM_FAULT_SIGBUS; 223 return VM_FAULT_SIGBUS;
238 224
239 if (!bb->vm_ops->page_mkwrite)
240 return 0;
241
242 if (!sysfs_get_active(attr_sd)) 225 if (!sysfs_get_active(attr_sd))
243 return VM_FAULT_SIGBUS; 226 return VM_FAULT_SIGBUS;
244 227
245 ret = bb->vm_ops->page_mkwrite(vma, vmf); 228 ret = 0;
229 if (bb->vm_ops->page_mkwrite)
230 ret = bb->vm_ops->page_mkwrite(vma, vmf);
246 231
247 sysfs_put_active(attr_sd); 232 sysfs_put_active(attr_sd);
248 return ret; 233 return ret;
@@ -256,13 +241,15 @@ static int bin_access(struct vm_area_struct *vma, unsigned long addr,
256 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 241 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
257 int ret; 242 int ret;
258 243
259 if (!bb->vm_ops || !bb->vm_ops->access) 244 if (!bb->vm_ops)
260 return -EINVAL; 245 return -EINVAL;
261 246
262 if (!sysfs_get_active(attr_sd)) 247 if (!sysfs_get_active(attr_sd))
263 return -EINVAL; 248 return -EINVAL;
264 249
265 ret = bb->vm_ops->access(vma, addr, buf, len, write); 250 ret = -EINVAL;
251 if (bb->vm_ops->access)
252 ret = bb->vm_ops->access(vma, addr, buf, len, write);
266 253
267 sysfs_put_active(attr_sd); 254 sysfs_put_active(attr_sd);
268 return ret; 255 return ret;
@@ -276,13 +263,15 @@ static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
276 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 263 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
277 int ret; 264 int ret;
278 265
279 if (!bb->vm_ops || !bb->vm_ops->set_policy) 266 if (!bb->vm_ops)
280 return 0; 267 return 0;
281 268
282 if (!sysfs_get_active(attr_sd)) 269 if (!sysfs_get_active(attr_sd))
283 return -EINVAL; 270 return -EINVAL;
284 271
285 ret = bb->vm_ops->set_policy(vma, new); 272 ret = 0;
273 if (bb->vm_ops->set_policy)
274 ret = bb->vm_ops->set_policy(vma, new);
286 275
287 sysfs_put_active(attr_sd); 276 sysfs_put_active(attr_sd);
288 return ret; 277 return ret;
@@ -296,13 +285,15 @@ static struct mempolicy *bin_get_policy(struct vm_area_struct *vma,
296 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 285 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
297 struct mempolicy *pol; 286 struct mempolicy *pol;
298 287
299 if (!bb->vm_ops || !bb->vm_ops->get_policy) 288 if (!bb->vm_ops)
300 return vma->vm_policy; 289 return vma->vm_policy;
301 290
302 if (!sysfs_get_active(attr_sd)) 291 if (!sysfs_get_active(attr_sd))
303 return vma->vm_policy; 292 return vma->vm_policy;
304 293
305 pol = bb->vm_ops->get_policy(vma, addr); 294 pol = vma->vm_policy;
295 if (bb->vm_ops->get_policy)
296 pol = bb->vm_ops->get_policy(vma, addr);
306 297
307 sysfs_put_active(attr_sd); 298 sysfs_put_active(attr_sd);
308 return pol; 299 return pol;
@@ -316,13 +307,15 @@ static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
316 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; 307 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
317 int ret; 308 int ret;
318 309
319 if (!bb->vm_ops || !bb->vm_ops->migrate) 310 if (!bb->vm_ops)
320 return 0; 311 return 0;
321 312
322 if (!sysfs_get_active(attr_sd)) 313 if (!sysfs_get_active(attr_sd))
323 return 0; 314 return 0;
324 315
325 ret = bb->vm_ops->migrate(vma, from, to, flags); 316 ret = 0;
317 if (bb->vm_ops->migrate)
318 ret = bb->vm_ops->migrate(vma, from, to, flags);
326 319
327 sysfs_put_active(attr_sd); 320 sysfs_put_active(attr_sd);
328 return ret; 321 return ret;
@@ -331,7 +324,6 @@ static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
331 324
332static const struct vm_operations_struct bin_vm_ops = { 325static const struct vm_operations_struct bin_vm_ops = {
333 .open = bin_vma_open, 326 .open = bin_vma_open,
334 .close = bin_vma_close,
335 .fault = bin_fault, 327 .fault = bin_fault,
336 .page_mkwrite = bin_page_mkwrite, 328 .page_mkwrite = bin_page_mkwrite,
337 .access = bin_access, 329 .access = bin_access,
@@ -377,6 +369,14 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
377 if (bb->mmapped && bb->vm_ops != vma->vm_ops) 369 if (bb->mmapped && bb->vm_ops != vma->vm_ops)
378 goto out_put; 370 goto out_put;
379 371
372 /*
373 * It is not possible to successfully wrap close.
374 * So error if someone is trying to use close.
375 */
376 rc = -EINVAL;
377 if (vma->vm_ops && vma->vm_ops->close)
378 goto out_put;
379
380 rc = 0; 380 rc = 0;
381 bb->mmapped = 1; 381 bb->mmapped = 1;
382 bb->vm_ops = vma->vm_ops; 382 bb->vm_ops = vma->vm_ops;
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 37fa7ed062d8..02429d81ca33 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -63,7 +63,9 @@ static int do_commit(struct ubifs_info *c)
63 struct ubifs_lp_stats lst; 63 struct ubifs_lp_stats lst;
64 64
65 dbg_cmt("start"); 65 dbg_cmt("start");
66 if (c->ro_media) { 66 ubifs_assert(!c->ro_media && !c->ro_mount);
67
68 if (c->ro_error) {
67 err = -EROFS; 69 err = -EROFS;
68 goto out_up; 70 goto out_up;
69 } 71 }
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index c6c553fd0b3d..0bee4dbffc31 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2239,6 +2239,162 @@ out_free:
2239 return err; 2239 return err;
2240} 2240}
2241 2241
2242/**
2243 * dbg_check_data_nodes_order - check that list of data nodes is sorted.
2244 * @c: UBIFS file-system description object
2245 * @head: the list of nodes ('struct ubifs_scan_node' objects)
2246 *
2247 * This function returns zero if the list of data nodes is sorted correctly,
2248 * and %-EINVAL if not.
2249 */
2250int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
2251{
2252 struct list_head *cur;
2253 struct ubifs_scan_node *sa, *sb;
2254
2255 if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
2256 return 0;
2257
2258 for (cur = head->next; cur->next != head; cur = cur->next) {
2259 ino_t inuma, inumb;
2260 uint32_t blka, blkb;
2261
2262 cond_resched();
2263 sa = container_of(cur, struct ubifs_scan_node, list);
2264 sb = container_of(cur->next, struct ubifs_scan_node, list);
2265
2266 if (sa->type != UBIFS_DATA_NODE) {
2267 ubifs_err("bad node type %d", sa->type);
2268 dbg_dump_node(c, sa->node);
2269 return -EINVAL;
2270 }
2271 if (sb->type != UBIFS_DATA_NODE) {
2272 ubifs_err("bad node type %d", sb->type);
2273 dbg_dump_node(c, sb->node);
2274 return -EINVAL;
2275 }
2276
2277 inuma = key_inum(c, &sa->key);
2278 inumb = key_inum(c, &sb->key);
2279
2280 if (inuma < inumb)
2281 continue;
2282 if (inuma > inumb) {
2283 ubifs_err("larger inum %lu goes before inum %lu",
2284 (unsigned long)inuma, (unsigned long)inumb);
2285 goto error_dump;
2286 }
2287
2288 blka = key_block(c, &sa->key);
2289 blkb = key_block(c, &sb->key);
2290
2291 if (blka > blkb) {
2292 ubifs_err("larger block %u goes before %u", blka, blkb);
2293 goto error_dump;
2294 }
2295 if (blka == blkb) {
2296 ubifs_err("two data nodes for the same block");
2297 goto error_dump;
2298 }
2299 }
2300
2301 return 0;
2302
2303error_dump:
2304 dbg_dump_node(c, sa->node);
2305 dbg_dump_node(c, sb->node);
2306 return -EINVAL;
2307}
2308
2309/**
 2310 * dbg_check_nondata_nodes_order - check that list of non-data nodes is sorted.
2311 * @c: UBIFS file-system description object
2312 * @head: the list of nodes ('struct ubifs_scan_node' objects)
2313 *
2314 * This function returns zero if the list of non-data nodes is sorted correctly,
2315 * and %-EINVAL if not.
2316 */
2317int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
2318{
2319 struct list_head *cur;
2320 struct ubifs_scan_node *sa, *sb;
2321
2322 if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
2323 return 0;
2324
2325 for (cur = head->next; cur->next != head; cur = cur->next) {
2326 ino_t inuma, inumb;
2327 uint32_t hasha, hashb;
2328
2329 cond_resched();
2330 sa = container_of(cur, struct ubifs_scan_node, list);
2331 sb = container_of(cur->next, struct ubifs_scan_node, list);
2332
2333 if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
2334 sa->type != UBIFS_XENT_NODE) {
2335 ubifs_err("bad node type %d", sa->type);
2336 dbg_dump_node(c, sa->node);
2337 return -EINVAL;
2338 }
 2339 if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
 2340 sb->type != UBIFS_XENT_NODE) {
2341 ubifs_err("bad node type %d", sb->type);
2342 dbg_dump_node(c, sb->node);
2343 return -EINVAL;
2344 }
2345
2346 if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
2347 ubifs_err("non-inode node goes before inode node");
2348 goto error_dump;
2349 }
2350
2351 if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
2352 continue;
2353
2354 if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
2355 /* Inode nodes are sorted in descending size order */
2356 if (sa->len < sb->len) {
2357 ubifs_err("smaller inode node goes first");
2358 goto error_dump;
2359 }
2360 continue;
2361 }
2362
2363 /*
2364 * This is either a dentry or xentry, which should be sorted in
2365 * ascending (parent ino, hash) order.
2366 */
2367 inuma = key_inum(c, &sa->key);
2368 inumb = key_inum(c, &sb->key);
2369
2370 if (inuma < inumb)
2371 continue;
2372 if (inuma > inumb) {
2373 ubifs_err("larger inum %lu goes before inum %lu",
2374 (unsigned long)inuma, (unsigned long)inumb);
2375 goto error_dump;
2376 }
2377
2378 hasha = key_block(c, &sa->key);
2379 hashb = key_block(c, &sb->key);
2380
2381 if (hasha > hashb) {
2382 ubifs_err("larger hash %u goes before %u", hasha, hashb);
2383 goto error_dump;
2384 }
2385 }
2386
2387 return 0;
2388
2389error_dump:
2390 ubifs_msg("dumping first node");
2391 dbg_dump_node(c, sa->node);
2392 ubifs_msg("dumping second node");
2393 dbg_dump_node(c, sb->node);
2394 return -EINVAL;
 2395 /* not reached */
2396}
2397
2242static int invocation_cnt; 2398static int invocation_cnt;
2243 2399
2244int dbg_force_in_the_gaps(void) 2400int dbg_force_in_the_gaps(void)
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 29d960101ea6..69ebe4729151 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -324,6 +324,8 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
324 int row, int col); 324 int row, int col);
325int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, 325int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
326 loff_t size); 326 loff_t size);
327int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
328int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
327 329
328/* Force the use of in-the-gaps method for testing */ 330/* Force the use of in-the-gaps method for testing */
329 331
@@ -465,6 +467,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
465#define dbg_check_lprops(c) 0 467#define dbg_check_lprops(c) 0
466#define dbg_check_lpt_nodes(c, cnode, row, col) 0 468#define dbg_check_lpt_nodes(c, cnode, row, col) 0
467#define dbg_check_inode_size(c, inode, size) 0 469#define dbg_check_inode_size(c, inode, size) 0
470#define dbg_check_data_nodes_order(c, head) 0
471#define dbg_check_nondata_nodes_order(c, head) 0
468#define dbg_force_in_the_gaps_enabled 0 472#define dbg_force_in_the_gaps_enabled 0
469#define dbg_force_in_the_gaps() 0 473#define dbg_force_in_the_gaps() 0
470#define dbg_failure_mode 0 474#define dbg_failure_mode 0
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 03ae894c45de..d77db7e36484 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -433,8 +433,9 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
433 struct page *page; 433 struct page *page;
434 434
435 ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); 435 ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
436 ubifs_assert(!c->ro_media && !c->ro_mount);
436 437
437 if (unlikely(c->ro_media)) 438 if (unlikely(c->ro_error))
438 return -EROFS; 439 return -EROFS;
439 440
440 /* Try out the fast-path part first */ 441 /* Try out the fast-path part first */
@@ -1439,9 +1440,9 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vm
1439 1440
1440 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, 1441 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
1441 i_size_read(inode)); 1442 i_size_read(inode));
1442 ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY)); 1443 ubifs_assert(!c->ro_media && !c->ro_mount);
1443 1444
1444 if (unlikely(c->ro_media)) 1445 if (unlikely(c->ro_error))
1445 return VM_FAULT_SIGBUS; /* -EROFS */ 1446 return VM_FAULT_SIGBUS; /* -EROFS */
1446 1447
1447 /* 1448 /*
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 918d1582ca05..151f10882820 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -125,10 +125,16 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
125 struct ubifs_scan_node *sa, *sb; 125 struct ubifs_scan_node *sa, *sb;
126 126
127 cond_resched(); 127 cond_resched();
128 if (a == b)
129 return 0;
130
128 sa = list_entry(a, struct ubifs_scan_node, list); 131 sa = list_entry(a, struct ubifs_scan_node, list);
129 sb = list_entry(b, struct ubifs_scan_node, list); 132 sb = list_entry(b, struct ubifs_scan_node, list);
133
130 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY); 134 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
131 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY); 135 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
136 ubifs_assert(sa->type == UBIFS_DATA_NODE);
137 ubifs_assert(sb->type == UBIFS_DATA_NODE);
132 138
133 inuma = key_inum(c, &sa->key); 139 inuma = key_inum(c, &sa->key);
134 inumb = key_inum(c, &sb->key); 140 inumb = key_inum(c, &sb->key);
@@ -157,28 +163,40 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
157 */ 163 */
158int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) 164int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
159{ 165{
160 int typea, typeb;
161 ino_t inuma, inumb; 166 ino_t inuma, inumb;
162 struct ubifs_info *c = priv; 167 struct ubifs_info *c = priv;
163 struct ubifs_scan_node *sa, *sb; 168 struct ubifs_scan_node *sa, *sb;
164 169
165 cond_resched(); 170 cond_resched();
171 if (a == b)
172 return 0;
173
166 sa = list_entry(a, struct ubifs_scan_node, list); 174 sa = list_entry(a, struct ubifs_scan_node, list);
167 sb = list_entry(b, struct ubifs_scan_node, list); 175 sb = list_entry(b, struct ubifs_scan_node, list);
168 typea = key_type(c, &sa->key); 176
169 typeb = key_type(c, &sb->key); 177 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
170 ubifs_assert(typea != UBIFS_DATA_KEY && typeb != UBIFS_DATA_KEY); 178 key_type(c, &sb->key) != UBIFS_DATA_KEY);
179 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
180 sb->type != UBIFS_DATA_NODE);
171 181
172 /* Inodes go before directory entries */ 182 /* Inodes go before directory entries */
173 if (typea == UBIFS_INO_KEY) { 183 if (sa->type == UBIFS_INO_NODE) {
174 if (typeb == UBIFS_INO_KEY) 184 if (sb->type == UBIFS_INO_NODE)
175 return sb->len - sa->len; 185 return sb->len - sa->len;
176 return -1; 186 return -1;
177 } 187 }
178 if (typeb == UBIFS_INO_KEY) 188 if (sb->type == UBIFS_INO_NODE)
179 return 1; 189 return 1;
180 190
181 ubifs_assert(typea == UBIFS_DENT_KEY && typeb == UBIFS_DENT_KEY); 191 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
192 key_type(c, &sa->key) == UBIFS_XENT_KEY);
193 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
194 key_type(c, &sb->key) == UBIFS_XENT_KEY);
195 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
196 sa->type == UBIFS_XENT_NODE);
197 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
198 sb->type == UBIFS_XENT_NODE);
199
182 inuma = key_inum(c, &sa->key); 200 inuma = key_inum(c, &sa->key);
183 inumb = key_inum(c, &sb->key); 201 inumb = key_inum(c, &sb->key);
184 202
@@ -224,17 +242,33 @@ int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
224static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb, 242static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
225 struct list_head *nondata, int *min) 243 struct list_head *nondata, int *min)
226{ 244{
245 int err;
227 struct ubifs_scan_node *snod, *tmp; 246 struct ubifs_scan_node *snod, *tmp;
228 247
229 *min = INT_MAX; 248 *min = INT_MAX;
230 249
231 /* Separate data nodes and non-data nodes */ 250 /* Separate data nodes and non-data nodes */
232 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) { 251 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
233 int err; 252 ubifs_assert(snod->type == UBIFS_INO_NODE ||
253 snod->type == UBIFS_DATA_NODE ||
254 snod->type == UBIFS_DENT_NODE ||
255 snod->type == UBIFS_XENT_NODE ||
256 snod->type == UBIFS_TRUN_NODE);
257
258 if (snod->type != UBIFS_INO_NODE &&
259 snod->type != UBIFS_DATA_NODE &&
260 snod->type != UBIFS_DENT_NODE &&
261 snod->type != UBIFS_XENT_NODE) {
262 /* Probably truncation node, zap it */
263 list_del(&snod->list);
264 kfree(snod);
265 continue;
266 }
234 267
235 ubifs_assert(snod->type != UBIFS_IDX_NODE); 268 ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
236 ubifs_assert(snod->type != UBIFS_REF_NODE); 269 key_type(c, &snod->key) == UBIFS_INO_KEY ||
237 ubifs_assert(snod->type != UBIFS_CS_NODE); 270 key_type(c, &snod->key) == UBIFS_DENT_KEY ||
271 key_type(c, &snod->key) == UBIFS_XENT_KEY);
238 272
239 err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum, 273 err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
240 snod->offs, 0); 274 snod->offs, 0);
@@ -258,6 +292,13 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
258 /* Sort data and non-data nodes */ 292 /* Sort data and non-data nodes */
259 list_sort(c, &sleb->nodes, &data_nodes_cmp); 293 list_sort(c, &sleb->nodes, &data_nodes_cmp);
260 list_sort(c, nondata, &nondata_nodes_cmp); 294 list_sort(c, nondata, &nondata_nodes_cmp);
295
296 err = dbg_check_data_nodes_order(c, &sleb->nodes);
297 if (err)
298 return err;
299 err = dbg_check_nondata_nodes_order(c, nondata);
300 if (err)
301 return err;
261 return 0; 302 return 0;
262} 303}
263 304
@@ -575,13 +616,14 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
575 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; 616 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
576 617
577 ubifs_assert_cmt_locked(c); 618 ubifs_assert_cmt_locked(c);
619 ubifs_assert(!c->ro_media && !c->ro_mount);
578 620
579 if (ubifs_gc_should_commit(c)) 621 if (ubifs_gc_should_commit(c))
580 return -EAGAIN; 622 return -EAGAIN;
581 623
582 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); 624 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
583 625
584 if (c->ro_media) { 626 if (c->ro_error) {
585 ret = -EROFS; 627 ret = -EROFS;
586 goto out_unlock; 628 goto out_unlock;
587 } 629 }
@@ -677,14 +719,12 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
677 719
678 ret = ubifs_garbage_collect_leb(c, &lp); 720 ret = ubifs_garbage_collect_leb(c, &lp);
679 if (ret < 0) { 721 if (ret < 0) {
680 if (ret == -EAGAIN || ret == -ENOSPC) { 722 if (ret == -EAGAIN) {
681 /* 723 /*
 682 * These codes are not errors, so we have to 724 * This is not an error, so we have to return the
683 * return the LEB to lprops. But if the 725 * LEB to lprops. But if 'ubifs_return_leb()'
684 * 'ubifs_return_leb()' function fails, its 726 * fails, its failure code is propagated to the
685 * failure code is propagated to the caller 727 * caller instead of the original '-EAGAIN'.
686 * instead of the original '-EAGAIN' or
687 * '-ENOSPC'.
688 */ 728 */
689 err = ubifs_return_leb(c, lp.lnum); 729 err = ubifs_return_leb(c, lp.lnum);
690 if (err) 730 if (err)
@@ -774,8 +814,8 @@ out_unlock:
774out: 814out:
775 ubifs_assert(ret < 0); 815 ubifs_assert(ret < 0);
776 ubifs_assert(ret != -ENOSPC && ret != -EAGAIN); 816 ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
777 ubifs_ro_mode(c, ret);
778 ubifs_wbuf_sync_nolock(wbuf); 817 ubifs_wbuf_sync_nolock(wbuf);
818 ubifs_ro_mode(c, ret);
779 mutex_unlock(&wbuf->io_mutex); 819 mutex_unlock(&wbuf->io_mutex);
780 ubifs_return_leb(c, lp.lnum); 820 ubifs_return_leb(c, lp.lnum);
781 return ret; 821 return ret;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bcf5a16f30bb..d82173182eeb 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -61,8 +61,8 @@
61 */ 61 */
62void ubifs_ro_mode(struct ubifs_info *c, int err) 62void ubifs_ro_mode(struct ubifs_info *c, int err)
63{ 63{
64 if (!c->ro_media) { 64 if (!c->ro_error) {
65 c->ro_media = 1; 65 c->ro_error = 1;
66 c->no_chk_data_crc = 0; 66 c->no_chk_data_crc = 0;
67 c->vfs_sb->s_flags |= MS_RDONLY; 67 c->vfs_sb->s_flags |= MS_RDONLY;
68 ubifs_warn("switched to read-only mode, error %d", err); 68 ubifs_warn("switched to read-only mode, error %d", err);
@@ -356,11 +356,11 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
356 356
357 dbg_io("LEB %d:%d, %d bytes, jhead %s", 357 dbg_io("LEB %d:%d, %d bytes, jhead %s",
358 wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead)); 358 wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
359 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
360 ubifs_assert(!(wbuf->avail & 7)); 359 ubifs_assert(!(wbuf->avail & 7));
361 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); 360 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
361 ubifs_assert(!c->ro_media && !c->ro_mount);
362 362
363 if (c->ro_media) 363 if (c->ro_error)
364 return -EROFS; 364 return -EROFS;
365 365
366 ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail); 366 ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
@@ -440,11 +440,12 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
440{ 440{
441 int err, i; 441 int err, i;
442 442
443 ubifs_assert(!c->ro_media && !c->ro_mount);
443 if (!c->need_wbuf_sync) 444 if (!c->need_wbuf_sync)
444 return 0; 445 return 0;
445 c->need_wbuf_sync = 0; 446 c->need_wbuf_sync = 0;
446 447
447 if (c->ro_media) { 448 if (c->ro_error) {
448 err = -EROFS; 449 err = -EROFS;
449 goto out_timers; 450 goto out_timers;
450 } 451 }
@@ -519,6 +520,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
519 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); 520 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
520 ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size); 521 ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
521 ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); 522 ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
523 ubifs_assert(!c->ro_media && !c->ro_mount);
522 524
523 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { 525 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
524 err = -ENOSPC; 526 err = -ENOSPC;
@@ -527,7 +529,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
527 529
528 cancel_wbuf_timer_nolock(wbuf); 530 cancel_wbuf_timer_nolock(wbuf);
529 531
530 if (c->ro_media) 532 if (c->ro_error)
531 return -EROFS; 533 return -EROFS;
532 534
533 if (aligned_len <= wbuf->avail) { 535 if (aligned_len <= wbuf->avail) {
@@ -663,8 +665,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
663 buf_len); 665 buf_len);
664 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 666 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
665 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); 667 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
668 ubifs_assert(!c->ro_media && !c->ro_mount);
666 669
667 if (c->ro_media) 670 if (c->ro_error)
668 return -EROFS; 671 return -EROFS;
669 672
670 ubifs_prepare_node(c, buf, len, 1); 673 ubifs_prepare_node(c, buf, len, 1);
@@ -815,7 +818,8 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
815 return 0; 818 return 0;
816 819
817out: 820out:
818 ubifs_err("bad node at LEB %d:%d", lnum, offs); 821 ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
822 ubi_is_mapped(c->ubi, lnum));
819 dbg_dump_node(c, buf); 823 dbg_dump_node(c, buf);
820 dbg_dump_stack(); 824 dbg_dump_stack();
821 return -EINVAL; 825 return -EINVAL;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index d321baeca68d..914f1bd89e57 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -122,11 +122,12 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
122 * better to try to allocate space at the ends of eraseblocks. This is 122 * better to try to allocate space at the ends of eraseblocks. This is
123 * what the squeeze parameter does. 123 * what the squeeze parameter does.
124 */ 124 */
125 ubifs_assert(!c->ro_media && !c->ro_mount);
125 squeeze = (jhead == BASEHD); 126 squeeze = (jhead == BASEHD);
126again: 127again:
127 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); 128 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
128 129
129 if (c->ro_media) { 130 if (c->ro_error) {
130 err = -EROFS; 131 err = -EROFS;
131 goto out_unlock; 132 goto out_unlock;
132 } 133 }
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 0f530c684f0b..92a8491a8f8c 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -306,6 +306,20 @@ static inline void trun_key_init(const struct ubifs_info *c,
306} 306}
307 307
308/** 308/**
309 * invalid_key_init - initialize invalid node key.
310 * @c: UBIFS file-system description object
311 * @key: key to initialize
312 *
313 * This is a helper function which marks a @key object as invalid.
314 */
315static inline void invalid_key_init(const struct ubifs_info *c,
316 union ubifs_key *key)
317{
318 key->u32[0] = 0xDEADBEAF;
319 key->u32[1] = UBIFS_INVALID_KEY;
320}
321
322/**
309 * key_type - get key type. 323 * key_type - get key type.
310 * @c: UBIFS file-system description object 324 * @c: UBIFS file-system description object
311 * @key: key to get type of 325 * @key: key to get type of
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index c345e125f42c..4d0cb1241460 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -159,7 +159,7 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
159 jhead = &c->jheads[bud->jhead]; 159 jhead = &c->jheads[bud->jhead];
160 list_add_tail(&bud->list, &jhead->buds_list); 160 list_add_tail(&bud->list, &jhead->buds_list);
161 } else 161 } else
162 ubifs_assert(c->replaying && (c->vfs_sb->s_flags & MS_RDONLY)); 162 ubifs_assert(c->replaying && c->ro_mount);
163 163
164 /* 164 /*
165 * Note, although this is a new bud, we anyway account this space now, 165 * Note, although this is a new bud, we anyway account this space now,
@@ -223,8 +223,8 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
223 } 223 }
224 224
225 mutex_lock(&c->log_mutex); 225 mutex_lock(&c->log_mutex);
226 226 ubifs_assert(!c->ro_media && !c->ro_mount);
227 if (c->ro_media) { 227 if (c->ro_error) {
228 err = -EROFS; 228 err = -EROFS;
229 goto out_unlock; 229 goto out_unlock;
230 } 230 }
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 0084a33c4c69..72775d35b99e 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1363,6 +1363,7 @@ static int read_lsave(struct ubifs_info *c)
1363 goto out; 1363 goto out;
1364 for (i = 0; i < c->lsave_cnt; i++) { 1364 for (i = 0; i < c->lsave_cnt; i++) {
1365 int lnum = c->lsave[i]; 1365 int lnum = c->lsave[i];
1366 struct ubifs_lprops *lprops;
1366 1367
1367 /* 1368 /*
1368 * Due to automatic resizing, the values in the lsave table 1369 * Due to automatic resizing, the values in the lsave table
@@ -1370,7 +1371,11 @@ static int read_lsave(struct ubifs_info *c)
1370 */ 1371 */
1371 if (lnum >= c->leb_cnt) 1372 if (lnum >= c->leb_cnt)
1372 continue; 1373 continue;
1373 ubifs_lpt_lookup(c, lnum); 1374 lprops = ubifs_lpt_lookup(c, lnum);
1375 if (IS_ERR(lprops)) {
1376 err = PTR_ERR(lprops);
1377 goto out;
1378 }
1374 } 1379 }
1375out: 1380out:
1376 vfree(buf); 1381 vfree(buf);
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index d12535b7fc78..5c90dec5db0b 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -705,6 +705,9 @@ static int make_tree_dirty(struct ubifs_info *c)
705 struct ubifs_pnode *pnode; 705 struct ubifs_pnode *pnode;
706 706
707 pnode = pnode_lookup(c, 0); 707 pnode = pnode_lookup(c, 0);
708 if (IS_ERR(pnode))
709 return PTR_ERR(pnode);
710
708 while (pnode) { 711 while (pnode) {
709 do_make_pnode_dirty(c, pnode); 712 do_make_pnode_dirty(c, pnode);
710 pnode = next_pnode_to_dirty(c, pnode); 713 pnode = next_pnode_to_dirty(c, pnode);
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 28beaeedadc0..21f47afdacff 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -361,7 +361,8 @@ int ubifs_write_master(struct ubifs_info *c)
361{ 361{
362 int err, lnum, offs, len; 362 int err, lnum, offs, len;
363 363
364 if (c->ro_media) 364 ubifs_assert(!c->ro_media && !c->ro_mount);
365 if (c->ro_error)
365 return -EROFS; 366 return -EROFS;
366 367
367 lnum = UBIFS_MST_LNUM; 368 lnum = UBIFS_MST_LNUM;
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 4fa81d867e41..c3de04dc952a 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -132,7 +132,8 @@ static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum)
132{ 132{
133 int err; 133 int err;
134 134
135 if (c->ro_media) 135 ubifs_assert(!c->ro_media && !c->ro_mount);
136 if (c->ro_error)
136 return -EROFS; 137 return -EROFS;
137 err = ubi_leb_unmap(c->ubi, lnum); 138 err = ubi_leb_unmap(c->ubi, lnum);
138 if (err) { 139 if (err) {
@@ -159,7 +160,8 @@ static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum,
159{ 160{
160 int err; 161 int err;
161 162
162 if (c->ro_media) 163 ubifs_assert(!c->ro_media && !c->ro_mount);
164 if (c->ro_error)
163 return -EROFS; 165 return -EROFS;
164 err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); 166 err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
165 if (err) { 167 if (err) {
@@ -186,7 +188,8 @@ static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum,
186{ 188{
187 int err; 189 int err;
188 190
189 if (c->ro_media) 191 ubifs_assert(!c->ro_media && !c->ro_mount);
192 if (c->ro_error)
190 return -EROFS; 193 return -EROFS;
191 err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); 194 err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
192 if (err) { 195 if (err) {
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index daae9e1f5382..77e9b874b6c2 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -292,7 +292,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
292 292
293 memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ); 293 memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
294 294
295 if ((c->vfs_sb->s_flags & MS_RDONLY)) { 295 if (c->ro_mount) {
296 /* Read-only mode. Keep a copy for switching to rw mode */ 296 /* Read-only mode. Keep a copy for switching to rw mode */
297 c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL); 297 c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
298 if (!c->rcvrd_mst_node) { 298 if (!c->rcvrd_mst_node) {
@@ -469,7 +469,7 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
469 endpt = snod->offs + snod->len; 469 endpt = snod->offs + snod->len;
470 } 470 }
471 471
472 if ((c->vfs_sb->s_flags & MS_RDONLY) && !c->remounting_rw) { 472 if (c->ro_mount && !c->remounting_rw) {
473 /* Add to recovery list */ 473 /* Add to recovery list */
474 struct ubifs_unclean_leb *ucleb; 474 struct ubifs_unclean_leb *ucleb;
475 475
@@ -772,7 +772,8 @@ out_free:
772 * @sbuf: LEB-sized buffer to use 772 * @sbuf: LEB-sized buffer to use
773 * 773 *
774 * This function does a scan of a LEB, but caters for errors that might have 774 * This function does a scan of a LEB, but caters for errors that might have
775 * been caused by the unclean unmount from which we are attempting to recover. 775 * been caused by unclean reboots from which we are attempting to recover
776 * (assume that only the last log LEB can be corrupted by an unclean reboot).
776 * 777 *
777 * This function returns %0 on success and a negative error code on failure. 778 * This function returns %0 on success and a negative error code on failure.
778 */ 779 */
@@ -883,7 +884,7 @@ int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
883{ 884{
884 int err; 885 int err;
885 886
886 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY) || c->remounting_rw); 887 ubifs_assert(!c->ro_mount || c->remounting_rw);
887 888
888 dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs); 889 dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
889 err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf); 890 err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
@@ -1461,7 +1462,7 @@ int ubifs_recover_size(struct ubifs_info *c)
1461 } 1462 }
1462 } 1463 }
1463 if (e->exists && e->i_size < e->d_size) { 1464 if (e->exists && e->i_size < e->d_size) {
1464 if (!e->inode && (c->vfs_sb->s_flags & MS_RDONLY)) { 1465 if (!e->inode && c->ro_mount) {
1465 /* Fix the inode size and pin it in memory */ 1466 /* Fix the inode size and pin it in memory */
1466 struct inode *inode; 1467 struct inode *inode;
1467 1468
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 5c2d6d759a3e..eed0fcff8d73 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -627,8 +627,7 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
627 ubifs_assert(sleb->endpt - offs >= used); 627 ubifs_assert(sleb->endpt - offs >= used);
628 ubifs_assert(sleb->endpt % c->min_io_size == 0); 628 ubifs_assert(sleb->endpt % c->min_io_size == 0);
629 629
630 if (sleb->endpt + c->min_io_size <= c->leb_size && 630 if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount)
631 !(c->vfs_sb->s_flags & MS_RDONLY))
632 err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum, 631 err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum,
633 sleb->endpt, UBI_SHORTTERM); 632 sleb->endpt, UBI_SHORTTERM);
634 633
@@ -840,6 +839,11 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
840 if (IS_ERR(sleb)) { 839 if (IS_ERR(sleb)) {
841 if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery) 840 if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
842 return PTR_ERR(sleb); 841 return PTR_ERR(sleb);
842 /*
843 * Note, the below function will recover this log LEB only if
844 * it is the last, because unclean reboots can possibly corrupt
845 * only the tail of the log.
846 */
843 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); 847 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
844 if (IS_ERR(sleb)) 848 if (IS_ERR(sleb))
845 return PTR_ERR(sleb); 849 return PTR_ERR(sleb);
@@ -851,7 +855,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
851 } 855 }
852 856
853 node = sleb->buf; 857 node = sleb->buf;
854
855 snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list); 858 snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
856 if (c->cs_sqnum == 0) { 859 if (c->cs_sqnum == 0) {
857 /* 860 /*
@@ -898,7 +901,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
898 } 901 }
899 902
900 list_for_each_entry(snod, &sleb->nodes, list) { 903 list_for_each_entry(snod, &sleb->nodes, list) {
901
902 cond_resched(); 904 cond_resched();
903 905
904 if (snod->sqnum >= SQNUM_WATERMARK) { 906 if (snod->sqnum >= SQNUM_WATERMARK) {
@@ -1011,7 +1013,6 @@ out:
1011int ubifs_replay_journal(struct ubifs_info *c) 1013int ubifs_replay_journal(struct ubifs_info *c)
1012{ 1014{
1013 int err, i, lnum, offs, free; 1015 int err, i, lnum, offs, free;
1014 void *sbuf = NULL;
1015 1016
1016 BUILD_BUG_ON(UBIFS_TRUN_KEY > 5); 1017 BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);
1017 1018
@@ -1026,14 +1027,8 @@ int ubifs_replay_journal(struct ubifs_info *c)
1026 return -EINVAL; 1027 return -EINVAL;
1027 } 1028 }
1028 1029
1029 sbuf = vmalloc(c->leb_size);
1030 if (!sbuf)
1031 return -ENOMEM;
1032
1033 dbg_mnt("start replaying the journal"); 1030 dbg_mnt("start replaying the journal");
1034
1035 c->replaying = 1; 1031 c->replaying = 1;
1036
1037 lnum = c->ltail_lnum = c->lhead_lnum; 1032 lnum = c->ltail_lnum = c->lhead_lnum;
1038 offs = c->lhead_offs; 1033 offs = c->lhead_offs;
1039 1034
@@ -1046,7 +1041,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
1046 lnum = UBIFS_LOG_LNUM; 1041 lnum = UBIFS_LOG_LNUM;
1047 offs = 0; 1042 offs = 0;
1048 } 1043 }
1049 err = replay_log_leb(c, lnum, offs, sbuf); 1044 err = replay_log_leb(c, lnum, offs, c->sbuf);
1050 if (err == 1) 1045 if (err == 1)
1051 /* We hit the end of the log */ 1046 /* We hit the end of the log */
1052 break; 1047 break;
@@ -1079,7 +1074,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
1079out: 1074out:
1080 destroy_replay_tree(c); 1075 destroy_replay_tree(c);
1081 destroy_bud_list(c); 1076 destroy_bud_list(c);
1082 vfree(sbuf);
1083 c->replaying = 0; 1077 c->replaying = 0;
1084 return err; 1078 return err;
1085} 1079}
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 96cb62c8a9dd..bf31b4729e51 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -542,11 +542,8 @@ int ubifs_read_superblock(struct ubifs_info *c)
542 * due to the unavailability of time-travelling equipment. 542 * due to the unavailability of time-travelling equipment.
543 */ 543 */
544 if (c->fmt_version > UBIFS_FORMAT_VERSION) { 544 if (c->fmt_version > UBIFS_FORMAT_VERSION) {
545 struct super_block *sb = c->vfs_sb; 545 ubifs_assert(!c->ro_media || c->ro_mount);
546 int mounting_ro = sb->s_flags & MS_RDONLY; 546 if (!c->ro_mount ||
547
548 ubifs_assert(!c->ro_media || mounting_ro);
549 if (!mounting_ro ||
550 c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) { 547 c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
551 ubifs_err("on-flash format version is w%d/r%d, but " 548 ubifs_err("on-flash format version is w%d/r%d, but "
552 "software only supports up to version " 549 "software only supports up to version "
@@ -624,7 +621,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
624 c->old_leb_cnt = c->leb_cnt; 621 c->old_leb_cnt = c->leb_cnt;
625 if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) { 622 if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
626 c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size); 623 c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
627 if (c->vfs_sb->s_flags & MS_RDONLY) 624 if (c->ro_mount)
628 dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs", 625 dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
629 c->old_leb_cnt, c->leb_cnt); 626 c->old_leb_cnt, c->leb_cnt);
630 else { 627 else {
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 96c525384191..3e1ee57dbeaa 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -197,7 +197,7 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
197 struct ubifs_ino_node *ino = buf; 197 struct ubifs_ino_node *ino = buf;
198 struct ubifs_scan_node *snod; 198 struct ubifs_scan_node *snod;
199 199
200 snod = kzalloc(sizeof(struct ubifs_scan_node), GFP_NOFS); 200 snod = kmalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
201 if (!snod) 201 if (!snod)
202 return -ENOMEM; 202 return -ENOMEM;
203 203
@@ -212,13 +212,15 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
212 case UBIFS_DENT_NODE: 212 case UBIFS_DENT_NODE:
213 case UBIFS_XENT_NODE: 213 case UBIFS_XENT_NODE:
214 case UBIFS_DATA_NODE: 214 case UBIFS_DATA_NODE:
215 case UBIFS_TRUN_NODE:
216 /* 215 /*
217 * The key is in the same place in all keyed 216 * The key is in the same place in all keyed
218 * nodes. 217 * nodes.
219 */ 218 */
220 key_read(c, &ino->key, &snod->key); 219 key_read(c, &ino->key, &snod->key);
221 break; 220 break;
221 default:
222 invalid_key_init(c, &snod->key);
223 break;
222 } 224 }
223 list_add_tail(&snod->list, &sleb->nodes); 225 list_add_tail(&snod->list, &sleb->nodes);
224 sleb->nodes_cnt += 1; 226 sleb->nodes_cnt += 1;
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 0b201114a5ad..46961c003236 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -250,7 +250,7 @@ static int kick_a_thread(void)
250 dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt); 250 dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt);
251 251
252 if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN || 252 if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN ||
253 c->ro_media) { 253 c->ro_mount || c->ro_error) {
254 mutex_unlock(&c->umount_mutex); 254 mutex_unlock(&c->umount_mutex);
255 continue; 255 continue;
256 } 256 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index cd5900b85d38..9a47c9f0ad07 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1137,11 +1137,11 @@ static int check_free_space(struct ubifs_info *c)
1137 */ 1137 */
1138static int mount_ubifs(struct ubifs_info *c) 1138static int mount_ubifs(struct ubifs_info *c)
1139{ 1139{
1140 struct super_block *sb = c->vfs_sb; 1140 int err;
1141 int err, mounted_read_only = (sb->s_flags & MS_RDONLY);
1142 long long x; 1141 long long x;
1143 size_t sz; 1142 size_t sz;
1144 1143
1144 c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
1145 err = init_constants_early(c); 1145 err = init_constants_early(c);
1146 if (err) 1146 if (err)
1147 return err; 1147 return err;
@@ -1154,7 +1154,7 @@ static int mount_ubifs(struct ubifs_info *c)
1154 if (err) 1154 if (err)
1155 goto out_free; 1155 goto out_free;
1156 1156
1157 if (c->empty && (mounted_read_only || c->ro_media)) { 1157 if (c->empty && (c->ro_mount || c->ro_media)) {
1158 /* 1158 /*
1159 * This UBI volume is empty, and read-only, or the file system 1159 * This UBI volume is empty, and read-only, or the file system
1160 * is mounted read-only - we cannot format it. 1160 * is mounted read-only - we cannot format it.
@@ -1165,7 +1165,7 @@ static int mount_ubifs(struct ubifs_info *c)
1165 goto out_free; 1165 goto out_free;
1166 } 1166 }
1167 1167
1168 if (c->ro_media && !mounted_read_only) { 1168 if (c->ro_media && !c->ro_mount) {
1169 ubifs_err("cannot mount read-write - read-only media"); 1169 ubifs_err("cannot mount read-write - read-only media");
1170 err = -EROFS; 1170 err = -EROFS;
1171 goto out_free; 1171 goto out_free;
@@ -1185,7 +1185,7 @@ static int mount_ubifs(struct ubifs_info *c)
1185 if (!c->sbuf) 1185 if (!c->sbuf)
1186 goto out_free; 1186 goto out_free;
1187 1187
1188 if (!mounted_read_only) { 1188 if (!c->ro_mount) {
1189 c->ileb_buf = vmalloc(c->leb_size); 1189 c->ileb_buf = vmalloc(c->leb_size);
1190 if (!c->ileb_buf) 1190 if (!c->ileb_buf)
1191 goto out_free; 1191 goto out_free;
@@ -1228,7 +1228,7 @@ static int mount_ubifs(struct ubifs_info *c)
1228 } 1228 }
1229 1229
1230 sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); 1230 sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
1231 if (!mounted_read_only) { 1231 if (!c->ro_mount) {
1232 err = alloc_wbufs(c); 1232 err = alloc_wbufs(c);
1233 if (err) 1233 if (err)
1234 goto out_cbuf; 1234 goto out_cbuf;
@@ -1254,12 +1254,12 @@ static int mount_ubifs(struct ubifs_info *c)
1254 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { 1254 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
1255 ubifs_msg("recovery needed"); 1255 ubifs_msg("recovery needed");
1256 c->need_recovery = 1; 1256 c->need_recovery = 1;
1257 if (!mounted_read_only) { 1257 if (!c->ro_mount) {
1258 err = ubifs_recover_inl_heads(c, c->sbuf); 1258 err = ubifs_recover_inl_heads(c, c->sbuf);
1259 if (err) 1259 if (err)
1260 goto out_master; 1260 goto out_master;
1261 } 1261 }
1262 } else if (!mounted_read_only) { 1262 } else if (!c->ro_mount) {
1263 /* 1263 /*
1264 * Set the "dirty" flag so that if we reboot uncleanly we 1264 * Set the "dirty" flag so that if we reboot uncleanly we
1265 * will notice this immediately on the next mount. 1265 * will notice this immediately on the next mount.
@@ -1270,7 +1270,7 @@ static int mount_ubifs(struct ubifs_info *c)
1270 goto out_master; 1270 goto out_master;
1271 } 1271 }
1272 1272
1273 err = ubifs_lpt_init(c, 1, !mounted_read_only); 1273 err = ubifs_lpt_init(c, 1, !c->ro_mount);
1274 if (err) 1274 if (err)
1275 goto out_lpt; 1275 goto out_lpt;
1276 1276
@@ -1285,11 +1285,11 @@ static int mount_ubifs(struct ubifs_info *c)
1285 /* Calculate 'min_idx_lebs' after journal replay */ 1285 /* Calculate 'min_idx_lebs' after journal replay */
1286 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 1286 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
1287 1287
1288 err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only); 1288 err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
1289 if (err) 1289 if (err)
1290 goto out_orphans; 1290 goto out_orphans;
1291 1291
1292 if (!mounted_read_only) { 1292 if (!c->ro_mount) {
1293 int lnum; 1293 int lnum;
1294 1294
1295 err = check_free_space(c); 1295 err = check_free_space(c);
@@ -1351,7 +1351,7 @@ static int mount_ubifs(struct ubifs_info *c)
1351 spin_unlock(&ubifs_infos_lock); 1351 spin_unlock(&ubifs_infos_lock);
1352 1352
1353 if (c->need_recovery) { 1353 if (c->need_recovery) {
1354 if (mounted_read_only) 1354 if (c->ro_mount)
1355 ubifs_msg("recovery deferred"); 1355 ubifs_msg("recovery deferred");
1356 else { 1356 else {
1357 c->need_recovery = 0; 1357 c->need_recovery = 0;
@@ -1378,7 +1378,7 @@ static int mount_ubifs(struct ubifs_info *c)
1378 1378
1379 ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"", 1379 ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
1380 c->vi.ubi_num, c->vi.vol_id, c->vi.name); 1380 c->vi.ubi_num, c->vi.vol_id, c->vi.name);
1381 if (mounted_read_only) 1381 if (c->ro_mount)
1382 ubifs_msg("mounted read-only"); 1382 ubifs_msg("mounted read-only");
1383 x = (long long)c->main_lebs * c->leb_size; 1383 x = (long long)c->main_lebs * c->leb_size;
1384 ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d " 1384 ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d "
@@ -1640,7 +1640,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1640 } 1640 }
1641 1641
1642 dbg_gen("re-mounted read-write"); 1642 dbg_gen("re-mounted read-write");
1643 c->vfs_sb->s_flags &= ~MS_RDONLY; 1643 c->ro_mount = 0;
1644 c->remounting_rw = 0; 1644 c->remounting_rw = 0;
1645 c->always_chk_crc = 0; 1645 c->always_chk_crc = 0;
1646 err = dbg_check_space_info(c); 1646 err = dbg_check_space_info(c);
@@ -1676,7 +1676,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
1676 int i, err; 1676 int i, err;
1677 1677
1678 ubifs_assert(!c->need_recovery); 1678 ubifs_assert(!c->need_recovery);
1679 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); 1679 ubifs_assert(!c->ro_mount);
1680 1680
1681 mutex_lock(&c->umount_mutex); 1681 mutex_lock(&c->umount_mutex);
1682 if (c->bgt) { 1682 if (c->bgt) {
@@ -1686,10 +1686,8 @@ static void ubifs_remount_ro(struct ubifs_info *c)
1686 1686
1687 dbg_save_space_info(c); 1687 dbg_save_space_info(c);
1688 1688
1689 for (i = 0; i < c->jhead_cnt; i++) { 1689 for (i = 0; i < c->jhead_cnt; i++)
1690 ubifs_wbuf_sync(&c->jheads[i].wbuf); 1690 ubifs_wbuf_sync(&c->jheads[i].wbuf);
1691 hrtimer_cancel(&c->jheads[i].wbuf.timer);
1692 }
1693 1691
1694 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); 1692 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
1695 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); 1693 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1704,6 +1702,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
1704 vfree(c->ileb_buf); 1702 vfree(c->ileb_buf);
1705 c->ileb_buf = NULL; 1703 c->ileb_buf = NULL;
1706 ubifs_lpt_free(c, 1); 1704 ubifs_lpt_free(c, 1);
1705 c->ro_mount = 1;
1707 err = dbg_check_space_info(c); 1706 err = dbg_check_space_info(c);
1708 if (err) 1707 if (err)
1709 ubifs_ro_mode(c, err); 1708 ubifs_ro_mode(c, err);
@@ -1735,7 +1734,7 @@ static void ubifs_put_super(struct super_block *sb)
1735 * the mutex is locked. 1734 * the mutex is locked.
1736 */ 1735 */
1737 mutex_lock(&c->umount_mutex); 1736 mutex_lock(&c->umount_mutex);
1738 if (!(c->vfs_sb->s_flags & MS_RDONLY)) { 1737 if (!c->ro_mount) {
1739 /* 1738 /*
1740 * First of all kill the background thread to make sure it does 1739 * First of all kill the background thread to make sure it does
1741 * not interfere with un-mounting and freeing resources. 1740 * not interfere with un-mounting and freeing resources.
@@ -1745,23 +1744,22 @@ static void ubifs_put_super(struct super_block *sb)
1745 c->bgt = NULL; 1744 c->bgt = NULL;
1746 } 1745 }
1747 1746
1748 /* Synchronize write-buffers */
1749 if (c->jheads)
1750 for (i = 0; i < c->jhead_cnt; i++)
1751 ubifs_wbuf_sync(&c->jheads[i].wbuf);
1752
1753 /* 1747 /*
1754 * On fatal errors c->ro_media is set to 1, in which case we do 1748 * On fatal errors c->ro_error is set to 1, in which case we do
1755 * not write the master node. 1749 * not write the master node.
1756 */ 1750 */
1757 if (!c->ro_media) { 1751 if (!c->ro_error) {
1752 int err;
1753
1754 /* Synchronize write-buffers */
1755 for (i = 0; i < c->jhead_cnt; i++)
1756 ubifs_wbuf_sync(&c->jheads[i].wbuf);
1757
1758 /* 1758 /*
1759 * We are being cleanly unmounted which means the 1759 * We are being cleanly unmounted which means the
1760 * orphans were killed - indicate this in the master 1760 * orphans were killed - indicate this in the master
1761 * node. Also save the reserved GC LEB number. 1761 * node. Also save the reserved GC LEB number.
1762 */ 1762 */
1763 int err;
1764
1765 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); 1763 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
1766 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); 1764 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
1767 c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); 1765 c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
@@ -1774,6 +1772,10 @@ static void ubifs_put_super(struct super_block *sb)
1774 */ 1772 */
1775 ubifs_err("failed to write master node, " 1773 ubifs_err("failed to write master node, "
1776 "error %d", err); 1774 "error %d", err);
1775 } else {
1776 for (i = 0; i < c->jhead_cnt; i++)
1777 /* Make sure write-buffer timers are canceled */
1778 hrtimer_cancel(&c->jheads[i].wbuf.timer);
1777 } 1779 }
1778 } 1780 }
1779 1781
@@ -1797,17 +1799,21 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
1797 return err; 1799 return err;
1798 } 1800 }
1799 1801
1800 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { 1802 if (c->ro_mount && !(*flags & MS_RDONLY)) {
1803 if (c->ro_error) {
1804 ubifs_msg("cannot re-mount R/W due to prior errors");
1805 return -EROFS;
1806 }
1801 if (c->ro_media) { 1807 if (c->ro_media) {
1802 ubifs_msg("cannot re-mount due to prior errors"); 1808 ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
1803 return -EROFS; 1809 return -EROFS;
1804 } 1810 }
1805 err = ubifs_remount_rw(c); 1811 err = ubifs_remount_rw(c);
1806 if (err) 1812 if (err)
1807 return err; 1813 return err;
1808 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) { 1814 } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
1809 if (c->ro_media) { 1815 if (c->ro_error) {
1810 ubifs_msg("cannot re-mount due to prior errors"); 1816 ubifs_msg("cannot re-mount R/O due to prior errors");
1811 return -EROFS; 1817 return -EROFS;
1812 } 1818 }
1813 ubifs_remount_ro(c); 1819 ubifs_remount_ro(c);
@@ -2049,8 +2055,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
2049 */ 2055 */
2050 ubi = open_ubi(name, UBI_READONLY); 2056 ubi = open_ubi(name, UBI_READONLY);
2051 if (IS_ERR(ubi)) { 2057 if (IS_ERR(ubi)) {
2052 ubifs_err("cannot open \"%s\", error %d", 2058 dbg_err("cannot open \"%s\", error %d",
2053 name, (int)PTR_ERR(ubi)); 2059 name, (int)PTR_ERR(ubi));
2054 return PTR_ERR(ubi); 2060 return PTR_ERR(ubi);
2055 } 2061 }
2056 ubi_get_volume_info(ubi, &vi); 2062 ubi_get_volume_info(ubi, &vi);
@@ -2064,9 +2070,11 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
2064 } 2070 }
2065 2071
2066 if (sb->s_root) { 2072 if (sb->s_root) {
2073 struct ubifs_info *c1 = sb->s_fs_info;
2074
2067 /* A new mount point for already mounted UBIFS */ 2075 /* A new mount point for already mounted UBIFS */
2068 dbg_gen("this ubi volume is already mounted"); 2076 dbg_gen("this ubi volume is already mounted");
2069 if ((flags ^ sb->s_flags) & MS_RDONLY) { 2077 if (!!(flags & MS_RDONLY) != c1->ro_mount) {
2070 err = -EBUSY; 2078 err = -EBUSY;
2071 goto out_deact; 2079 goto out_deact;
2072 } 2080 }
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 2194915220e5..ad9cf0133622 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1177,6 +1177,7 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1177 unsigned long time = get_seconds(); 1177 unsigned long time = get_seconds();
1178 1178
1179 dbg_tnc("search key %s", DBGKEY(key)); 1179 dbg_tnc("search key %s", DBGKEY(key));
1180 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
1180 1181
1181 znode = c->zroot.znode; 1182 znode = c->zroot.znode;
1182 if (unlikely(!znode)) { 1183 if (unlikely(!znode)) {
@@ -2966,7 +2967,7 @@ static struct ubifs_znode *right_znode(struct ubifs_info *c,
2966 * 2967 *
2967 * This function searches an indexing node by its first key @key and its 2968 * This function searches an indexing node by its first key @key and its
2968 * address @lnum:@offs. It looks up the indexing tree by pulling all indexing 2969 * address @lnum:@offs. It looks up the indexing tree by pulling all indexing
2969 * nodes it traverses to TNC. This function is called fro indexing nodes which 2970 * nodes it traverses to TNC. This function is called for indexing nodes which
2970 * were found on the media by scanning, for example when garbage-collecting or 2971 * were found on the media by scanning, for example when garbage-collecting or
2971 * when doing in-the-gaps commit. This means that the indexing node which is 2972 * when doing in-the-gaps commit. This means that the indexing node which is
2972 * looked for does not have to have exactly the same leftmost key @key, because 2973 * looked for does not have to have exactly the same leftmost key @key, because
@@ -2988,6 +2989,8 @@ static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
2988 struct ubifs_znode *znode, *zn; 2989 struct ubifs_znode *znode, *zn;
2989 int n, nn; 2990 int n, nn;
2990 2991
2992 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
2993
2991 /* 2994 /*
2992 * The arguments have probably been read off flash, so don't assume 2995 * The arguments have probably been read off flash, so don't assume
2993 * they are valid. 2996 * they are valid.
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 0c9876b396dd..381d6b207a52 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -119,8 +119,12 @@
119 * in TNC. However, when replaying, it is handy to introduce fake "truncation" 119 * in TNC. However, when replaying, it is handy to introduce fake "truncation"
120 * keys for truncation nodes because the code becomes simpler. So we define 120 * keys for truncation nodes because the code becomes simpler. So we define
121 * %UBIFS_TRUN_KEY type. 121 * %UBIFS_TRUN_KEY type.
122 *
 123 * But otherwise, out of the journal replay scope, the truncation keys are
124 * invalid.
122 */ 125 */
123#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT 126#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT
127#define UBIFS_INVALID_KEY UBIFS_KEY_TYPES_CNT
124 128
125/* 129/*
126 * How much a directory entry/extended attribute entry adds to the parent/host 130 * How much a directory entry/extended attribute entry adds to the parent/host
@@ -1028,6 +1032,8 @@ struct ubifs_debug_info;
1028 * @max_leb_cnt: maximum count of logical eraseblocks 1032 * @max_leb_cnt: maximum count of logical eraseblocks
1029 * @old_leb_cnt: count of logical eraseblocks before re-size 1033 * @old_leb_cnt: count of logical eraseblocks before re-size
1030 * @ro_media: the underlying UBI volume is read-only 1034 * @ro_media: the underlying UBI volume is read-only
1035 * @ro_mount: the file-system was mounted as read-only
1036 * @ro_error: UBIFS switched to R/O mode because an error happened
1031 * 1037 *
1032 * @dirty_pg_cnt: number of dirty pages (not used) 1038 * @dirty_pg_cnt: number of dirty pages (not used)
1033 * @dirty_zn_cnt: number of dirty znodes 1039 * @dirty_zn_cnt: number of dirty znodes
@@ -1168,11 +1174,14 @@ struct ubifs_debug_info;
1168 * @replay_sqnum: sequence number of node currently being replayed 1174 * @replay_sqnum: sequence number of node currently being replayed
1169 * @need_recovery: file-system needs recovery 1175 * @need_recovery: file-system needs recovery
1170 * @replaying: set to %1 during journal replay 1176 * @replaying: set to %1 during journal replay
1171 * @unclean_leb_list: LEBs to recover when mounting ro to rw 1177 * @unclean_leb_list: LEBs to recover when re-mounting R/O mounted FS to R/W
1172 * @rcvrd_mst_node: recovered master node to write when mounting ro to rw 1178 * mode
1179 * @rcvrd_mst_node: recovered master node to write when re-mounting R/O mounted
1180 * FS to R/W mode
1173 * @size_tree: inode size information for recovery 1181 * @size_tree: inode size information for recovery
1174 * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY) 1182 * @remounting_rw: set while re-mounting from R/O mode to R/W mode
1175 * @always_chk_crc: always check CRCs (while mounting and remounting rw) 1183 * @always_chk_crc: always check CRCs (while mounting and remounting to R/W
1184 * mode)
1176 * @mount_opts: UBIFS-specific mount options 1185 * @mount_opts: UBIFS-specific mount options
1177 * 1186 *
1178 * @dbg: debugging-related information 1187 * @dbg: debugging-related information
@@ -1268,7 +1277,9 @@ struct ubifs_info {
1268 int leb_cnt; 1277 int leb_cnt;
1269 int max_leb_cnt; 1278 int max_leb_cnt;
1270 int old_leb_cnt; 1279 int old_leb_cnt;
1271 int ro_media; 1280 unsigned int ro_media:1;
1281 unsigned int ro_mount:1;
1282 unsigned int ro_error:1;
1272 1283
1273 atomic_long_t dirty_pg_cnt; 1284 atomic_long_t dirty_pg_cnt;
1274 atomic_long_t dirty_zn_cnt; 1285 atomic_long_t dirty_zn_cnt;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 286e36e21dae..ba5312802aa9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -188,8 +188,8 @@ _xfs_buf_initialize(
188 atomic_set(&bp->b_hold, 1); 188 atomic_set(&bp->b_hold, 1);
189 init_completion(&bp->b_iowait); 189 init_completion(&bp->b_iowait);
190 INIT_LIST_HEAD(&bp->b_list); 190 INIT_LIST_HEAD(&bp->b_list);
191 INIT_LIST_HEAD(&bp->b_hash_list); 191 RB_CLEAR_NODE(&bp->b_rbnode);
192 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ 192 sema_init(&bp->b_sema, 0); /* held, no waiters */
193 XB_SET_OWNER(bp); 193 XB_SET_OWNER(bp);
194 bp->b_target = target; 194 bp->b_target = target;
195 bp->b_file_offset = range_base; 195 bp->b_file_offset = range_base;
@@ -262,8 +262,6 @@ xfs_buf_free(
262{ 262{
263 trace_xfs_buf_free(bp, _RET_IP_); 263 trace_xfs_buf_free(bp, _RET_IP_);
264 264
265 ASSERT(list_empty(&bp->b_hash_list));
266
267 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { 265 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
268 uint i; 266 uint i;
269 267
@@ -422,8 +420,10 @@ _xfs_buf_find(
422{ 420{
423 xfs_off_t range_base; 421 xfs_off_t range_base;
424 size_t range_length; 422 size_t range_length;
425 xfs_bufhash_t *hash; 423 struct xfs_perag *pag;
426 xfs_buf_t *bp, *n; 424 struct rb_node **rbp;
425 struct rb_node *parent;
426 xfs_buf_t *bp;
427 427
428 range_base = (ioff << BBSHIFT); 428 range_base = (ioff << BBSHIFT);
429 range_length = (isize << BBSHIFT); 429 range_length = (isize << BBSHIFT);
@@ -432,14 +432,37 @@ _xfs_buf_find(
432 ASSERT(!(range_length < (1 << btp->bt_sshift))); 432 ASSERT(!(range_length < (1 << btp->bt_sshift)));
433 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); 433 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
434 434
435 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; 435 /* get tree root */
436 436 pag = xfs_perag_get(btp->bt_mount,
437 spin_lock(&hash->bh_lock); 437 xfs_daddr_to_agno(btp->bt_mount, ioff));
438 438
439 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { 439 /* walk tree */
440 ASSERT(btp == bp->b_target); 440 spin_lock(&pag->pag_buf_lock);
441 if (bp->b_file_offset == range_base && 441 rbp = &pag->pag_buf_tree.rb_node;
442 bp->b_buffer_length == range_length) { 442 parent = NULL;
443 bp = NULL;
444 while (*rbp) {
445 parent = *rbp;
446 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
447
448 if (range_base < bp->b_file_offset)
449 rbp = &(*rbp)->rb_left;
450 else if (range_base > bp->b_file_offset)
451 rbp = &(*rbp)->rb_right;
452 else {
453 /*
454 * found a block offset match. If the range doesn't
455 * match, the only way this is allowed is if the buffer
456 * in the cache is stale and the transaction that made
457 * it stale has not yet committed. i.e. we are
458 * reallocating a busy extent. Skip this buffer and
459 * continue searching to the right for an exact match.
460 */
461 if (bp->b_buffer_length != range_length) {
462 ASSERT(bp->b_flags & XBF_STALE);
463 rbp = &(*rbp)->rb_right;
464 continue;
465 }
443 atomic_inc(&bp->b_hold); 466 atomic_inc(&bp->b_hold);
444 goto found; 467 goto found;
445 } 468 }
@@ -449,17 +472,21 @@ _xfs_buf_find(
449 if (new_bp) { 472 if (new_bp) {
450 _xfs_buf_initialize(new_bp, btp, range_base, 473 _xfs_buf_initialize(new_bp, btp, range_base,
451 range_length, flags); 474 range_length, flags);
452 new_bp->b_hash = hash; 475 rb_link_node(&new_bp->b_rbnode, parent, rbp);
453 list_add(&new_bp->b_hash_list, &hash->bh_list); 476 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
477 /* the buffer keeps the perag reference until it is freed */
478 new_bp->b_pag = pag;
479 spin_unlock(&pag->pag_buf_lock);
454 } else { 480 } else {
455 XFS_STATS_INC(xb_miss_locked); 481 XFS_STATS_INC(xb_miss_locked);
482 spin_unlock(&pag->pag_buf_lock);
483 xfs_perag_put(pag);
456 } 484 }
457
458 spin_unlock(&hash->bh_lock);
459 return new_bp; 485 return new_bp;
460 486
461found: 487found:
462 spin_unlock(&hash->bh_lock); 488 spin_unlock(&pag->pag_buf_lock);
489 xfs_perag_put(pag);
463 490
464 /* Attempt to get the semaphore without sleeping, 491 /* Attempt to get the semaphore without sleeping,
465 * if this does not work then we need to drop the 492 * if this does not work then we need to drop the
@@ -625,8 +652,7 @@ void
625xfs_buf_readahead( 652xfs_buf_readahead(
626 xfs_buftarg_t *target, 653 xfs_buftarg_t *target,
627 xfs_off_t ioff, 654 xfs_off_t ioff,
628 size_t isize, 655 size_t isize)
629 xfs_buf_flags_t flags)
630{ 656{
631 struct backing_dev_info *bdi; 657 struct backing_dev_info *bdi;
632 658
@@ -634,8 +660,42 @@ xfs_buf_readahead(
634 if (bdi_read_congested(bdi)) 660 if (bdi_read_congested(bdi))
635 return; 661 return;
636 662
637 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); 663 xfs_buf_read(target, ioff, isize,
638 xfs_buf_read(target, ioff, isize, flags); 664 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
665}
666
667/*
668 * Read an uncached buffer from disk. Allocates and returns a locked
669 * buffer containing the disk contents or nothing.
670 */
671struct xfs_buf *
672xfs_buf_read_uncached(
673 struct xfs_mount *mp,
674 struct xfs_buftarg *target,
675 xfs_daddr_t daddr,
676 size_t length,
677 int flags)
678{
679 xfs_buf_t *bp;
680 int error;
681
682 bp = xfs_buf_get_uncached(target, length, flags);
683 if (!bp)
684 return NULL;
685
686 /* set up the buffer for a read IO */
687 xfs_buf_lock(bp);
688 XFS_BUF_SET_ADDR(bp, daddr);
689 XFS_BUF_READ(bp);
690 XFS_BUF_BUSY(bp);
691
692 xfsbdstrat(mp, bp);
693 error = xfs_buf_iowait(bp);
694 if (error || bp->b_error) {
695 xfs_buf_relse(bp);
696 return NULL;
697 }
698 return bp;
639} 699}
640 700
641xfs_buf_t * 701xfs_buf_t *
@@ -707,9 +767,10 @@ xfs_buf_associate_memory(
707} 767}
708 768
709xfs_buf_t * 769xfs_buf_t *
710xfs_buf_get_noaddr( 770xfs_buf_get_uncached(
771 struct xfs_buftarg *target,
711 size_t len, 772 size_t len,
712 xfs_buftarg_t *target) 773 int flags)
713{ 774{
714 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT; 775 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
715 int error, i; 776 int error, i;
@@ -725,7 +786,7 @@ xfs_buf_get_noaddr(
725 goto fail_free_buf; 786 goto fail_free_buf;
726 787
727 for (i = 0; i < page_count; i++) { 788 for (i = 0; i < page_count; i++) {
728 bp->b_pages[i] = alloc_page(GFP_KERNEL); 789 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
729 if (!bp->b_pages[i]) 790 if (!bp->b_pages[i])
730 goto fail_free_mem; 791 goto fail_free_mem;
731 } 792 }
@@ -740,7 +801,7 @@ xfs_buf_get_noaddr(
740 801
741 xfs_buf_unlock(bp); 802 xfs_buf_unlock(bp);
742 803
743 trace_xfs_buf_get_noaddr(bp, _RET_IP_); 804 trace_xfs_buf_get_uncached(bp, _RET_IP_);
744 return bp; 805 return bp;
745 806
746 fail_free_mem: 807 fail_free_mem:
@@ -774,29 +835,30 @@ void
774xfs_buf_rele( 835xfs_buf_rele(
775 xfs_buf_t *bp) 836 xfs_buf_t *bp)
776{ 837{
777 xfs_bufhash_t *hash = bp->b_hash; 838 struct xfs_perag *pag = bp->b_pag;
778 839
779 trace_xfs_buf_rele(bp, _RET_IP_); 840 trace_xfs_buf_rele(bp, _RET_IP_);
780 841
781 if (unlikely(!hash)) { 842 if (!pag) {
782 ASSERT(!bp->b_relse); 843 ASSERT(!bp->b_relse);
844 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
783 if (atomic_dec_and_test(&bp->b_hold)) 845 if (atomic_dec_and_test(&bp->b_hold))
784 xfs_buf_free(bp); 846 xfs_buf_free(bp);
785 return; 847 return;
786 } 848 }
787 849
850 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
788 ASSERT(atomic_read(&bp->b_hold) > 0); 851 ASSERT(atomic_read(&bp->b_hold) > 0);
789 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { 852 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
790 if (bp->b_relse) { 853 if (bp->b_relse) {
791 atomic_inc(&bp->b_hold); 854 atomic_inc(&bp->b_hold);
792 spin_unlock(&hash->bh_lock); 855 spin_unlock(&pag->pag_buf_lock);
793 (*(bp->b_relse)) (bp); 856 bp->b_relse(bp);
794 } else if (bp->b_flags & XBF_FS_MANAGED) {
795 spin_unlock(&hash->bh_lock);
796 } else { 857 } else {
797 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); 858 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
798 list_del_init(&bp->b_hash_list); 859 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
799 spin_unlock(&hash->bh_lock); 860 spin_unlock(&pag->pag_buf_lock);
861 xfs_perag_put(pag);
800 xfs_buf_free(bp); 862 xfs_buf_free(bp);
801 } 863 }
802 } 864 }
@@ -859,7 +921,7 @@ xfs_buf_lock(
859 trace_xfs_buf_lock(bp, _RET_IP_); 921 trace_xfs_buf_lock(bp, _RET_IP_);
860 922
861 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 923 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
862 xfs_log_force(bp->b_mount, 0); 924 xfs_log_force(bp->b_target->bt_mount, 0);
863 if (atomic_read(&bp->b_io_remaining)) 925 if (atomic_read(&bp->b_io_remaining))
864 blk_run_address_space(bp->b_target->bt_mapping); 926 blk_run_address_space(bp->b_target->bt_mapping);
865 down(&bp->b_sema); 927 down(&bp->b_sema);
@@ -924,19 +986,7 @@ xfs_buf_iodone_work(
924 xfs_buf_t *bp = 986 xfs_buf_t *bp =
925 container_of(work, xfs_buf_t, b_iodone_work); 987 container_of(work, xfs_buf_t, b_iodone_work);
926 988
927 /* 989 if (bp->b_iodone)
928 * We can get an EOPNOTSUPP to ordered writes. Here we clear the
929 * ordered flag and reissue them. Because we can't tell the higher
930 * layers directly that they should not issue ordered I/O anymore, they
931 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
932 */
933 if ((bp->b_error == EOPNOTSUPP) &&
934 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
935 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
936 bp->b_flags &= ~XBF_ORDERED;
937 bp->b_flags |= _XFS_BARRIER_FAILED;
938 xfs_buf_iorequest(bp);
939 } else if (bp->b_iodone)
940 (*(bp->b_iodone))(bp); 990 (*(bp->b_iodone))(bp);
941 else if (bp->b_flags & XBF_ASYNC) 991 else if (bp->b_flags & XBF_ASYNC)
942 xfs_buf_relse(bp); 992 xfs_buf_relse(bp);
@@ -982,7 +1032,6 @@ xfs_bwrite(
982{ 1032{
983 int error; 1033 int error;
984 1034
985 bp->b_mount = mp;
986 bp->b_flags |= XBF_WRITE; 1035 bp->b_flags |= XBF_WRITE;
987 bp->b_flags &= ~(XBF_ASYNC | XBF_READ); 1036 bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
988 1037
@@ -1003,8 +1052,6 @@ xfs_bdwrite(
1003{ 1052{
1004 trace_xfs_buf_bdwrite(bp, _RET_IP_); 1053 trace_xfs_buf_bdwrite(bp, _RET_IP_);
1005 1054
1006 bp->b_mount = mp;
1007
1008 bp->b_flags &= ~XBF_READ; 1055 bp->b_flags &= ~XBF_READ;
1009 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); 1056 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1010 1057
@@ -1013,7 +1060,7 @@ xfs_bdwrite(
1013 1060
1014/* 1061/*
1015 * Called when we want to stop a buffer from getting written or read. 1062 * Called when we want to stop a buffer from getting written or read.
1016 * We attach the EIO error, muck with its flags, and call biodone 1063 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1017 * so that the proper iodone callbacks get called. 1064 * so that the proper iodone callbacks get called.
1018 */ 1065 */
1019STATIC int 1066STATIC int
@@ -1030,21 +1077,21 @@ xfs_bioerror(
1030 XFS_BUF_ERROR(bp, EIO); 1077 XFS_BUF_ERROR(bp, EIO);
1031 1078
1032 /* 1079 /*
1033 * We're calling biodone, so delete XBF_DONE flag. 1080 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1034 */ 1081 */
1035 XFS_BUF_UNREAD(bp); 1082 XFS_BUF_UNREAD(bp);
1036 XFS_BUF_UNDELAYWRITE(bp); 1083 XFS_BUF_UNDELAYWRITE(bp);
1037 XFS_BUF_UNDONE(bp); 1084 XFS_BUF_UNDONE(bp);
1038 XFS_BUF_STALE(bp); 1085 XFS_BUF_STALE(bp);
1039 1086
1040 xfs_biodone(bp); 1087 xfs_buf_ioend(bp, 0);
1041 1088
1042 return EIO; 1089 return EIO;
1043} 1090}
1044 1091
1045/* 1092/*
1046 * Same as xfs_bioerror, except that we are releasing the buffer 1093 * Same as xfs_bioerror, except that we are releasing the buffer
1047 * here ourselves, and avoiding the biodone call. 1094 * here ourselves, and avoiding the xfs_buf_ioend call.
1048 * This is meant for userdata errors; metadata bufs come with 1095 * This is meant for userdata errors; metadata bufs come with
1049 * iodone functions attached, so that we can track down errors. 1096 * iodone functions attached, so that we can track down errors.
1050 */ 1097 */
@@ -1093,7 +1140,7 @@ int
1093xfs_bdstrat_cb( 1140xfs_bdstrat_cb(
1094 struct xfs_buf *bp) 1141 struct xfs_buf *bp)
1095{ 1142{
1096 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { 1143 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1097 trace_xfs_bdstrat_shut(bp, _RET_IP_); 1144 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1098 /* 1145 /*
1099 * Metadata write that didn't get logged but 1146 * Metadata write that didn't get logged but
@@ -1195,7 +1242,7 @@ _xfs_buf_ioapply(
1195 1242
1196 if (bp->b_flags & XBF_ORDERED) { 1243 if (bp->b_flags & XBF_ORDERED) {
1197 ASSERT(!(bp->b_flags & XBF_READ)); 1244 ASSERT(!(bp->b_flags & XBF_READ));
1198 rw = WRITE_BARRIER; 1245 rw = WRITE_FLUSH_FUA;
1199 } else if (bp->b_flags & XBF_LOG_BUFFER) { 1246 } else if (bp->b_flags & XBF_LOG_BUFFER) {
1200 ASSERT(!(bp->b_flags & XBF_READ_AHEAD)); 1247 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1201 bp->b_flags &= ~_XBF_RUN_QUEUES; 1248 bp->b_flags &= ~_XBF_RUN_QUEUES;
@@ -1399,62 +1446,24 @@ xfs_buf_iomove(
1399 */ 1446 */
1400void 1447void
1401xfs_wait_buftarg( 1448xfs_wait_buftarg(
1402 xfs_buftarg_t *btp) 1449 struct xfs_buftarg *btp)
1403{
1404 xfs_buf_t *bp, *n;
1405 xfs_bufhash_t *hash;
1406 uint i;
1407
1408 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1409 hash = &btp->bt_hash[i];
1410again:
1411 spin_lock(&hash->bh_lock);
1412 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1413 ASSERT(btp == bp->b_target);
1414 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1415 spin_unlock(&hash->bh_lock);
1416 /*
1417 * Catch superblock reference count leaks
1418 * immediately
1419 */
1420 BUG_ON(bp->b_bn == 0);
1421 delay(100);
1422 goto again;
1423 }
1424 }
1425 spin_unlock(&hash->bh_lock);
1426 }
1427}
1428
1429/*
1430 * Allocate buffer hash table for a given target.
1431 * For devices containing metadata (i.e. not the log/realtime devices)
1432 * we need to allocate a much larger hash table.
1433 */
1434STATIC void
1435xfs_alloc_bufhash(
1436 xfs_buftarg_t *btp,
1437 int external)
1438{ 1450{
1439 unsigned int i; 1451 struct xfs_perag *pag;
1452 uint i;
1440 1453
1441 btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */ 1454 for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
1442 btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * 1455 pag = xfs_perag_get(btp->bt_mount, i);
1443 sizeof(xfs_bufhash_t)); 1456 spin_lock(&pag->pag_buf_lock);
1444 for (i = 0; i < (1 << btp->bt_hashshift); i++) { 1457 while (rb_first(&pag->pag_buf_tree)) {
1445 spin_lock_init(&btp->bt_hash[i].bh_lock); 1458 spin_unlock(&pag->pag_buf_lock);
1446 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list); 1459 delay(100);
1460 spin_lock(&pag->pag_buf_lock);
1461 }
1462 spin_unlock(&pag->pag_buf_lock);
1463 xfs_perag_put(pag);
1447 } 1464 }
1448} 1465}
1449 1466
1450STATIC void
1451xfs_free_bufhash(
1452 xfs_buftarg_t *btp)
1453{
1454 kmem_free_large(btp->bt_hash);
1455 btp->bt_hash = NULL;
1456}
1457
1458/* 1467/*
1459 * buftarg list for delwrite queue processing 1468 * buftarg list for delwrite queue processing
1460 */ 1469 */
@@ -1487,7 +1496,6 @@ xfs_free_buftarg(
1487 xfs_flush_buftarg(btp, 1); 1496 xfs_flush_buftarg(btp, 1);
1488 if (mp->m_flags & XFS_MOUNT_BARRIER) 1497 if (mp->m_flags & XFS_MOUNT_BARRIER)
1489 xfs_blkdev_issue_flush(btp); 1498 xfs_blkdev_issue_flush(btp);
1490 xfs_free_bufhash(btp);
1491 iput(btp->bt_mapping->host); 1499 iput(btp->bt_mapping->host);
1492 1500
1493 /* Unregister the buftarg first so that we don't get a 1501 /* Unregister the buftarg first so that we don't get a
@@ -1609,6 +1617,7 @@ out_error:
1609 1617
1610xfs_buftarg_t * 1618xfs_buftarg_t *
1611xfs_alloc_buftarg( 1619xfs_alloc_buftarg(
1620 struct xfs_mount *mp,
1612 struct block_device *bdev, 1621 struct block_device *bdev,
1613 int external, 1622 int external,
1614 const char *fsname) 1623 const char *fsname)
@@ -1617,6 +1626,7 @@ xfs_alloc_buftarg(
1617 1626
1618 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); 1627 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1619 1628
1629 btp->bt_mount = mp;
1620 btp->bt_dev = bdev->bd_dev; 1630 btp->bt_dev = bdev->bd_dev;
1621 btp->bt_bdev = bdev; 1631 btp->bt_bdev = bdev;
1622 if (xfs_setsize_buftarg_early(btp, bdev)) 1632 if (xfs_setsize_buftarg_early(btp, bdev))
@@ -1625,7 +1635,6 @@ xfs_alloc_buftarg(
1625 goto error; 1635 goto error;
1626 if (xfs_alloc_delwrite_queue(btp, fsname)) 1636 if (xfs_alloc_delwrite_queue(btp, fsname))
1627 goto error; 1637 goto error;
1628 xfs_alloc_bufhash(btp, external);
1629 return btp; 1638 return btp;
1630 1639
1631error: 1640error:
@@ -1916,7 +1925,7 @@ xfs_flush_buftarg(
1916 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 1925 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1917 1926
1918 list_del_init(&bp->b_list); 1927 list_del_init(&bp->b_list);
1919 xfs_iowait(bp); 1928 xfs_buf_iowait(bp);
1920 xfs_buf_relse(bp); 1929 xfs_buf_relse(bp);
1921 } 1930 }
1922 } 1931 }
@@ -1933,7 +1942,7 @@ xfs_buf_init(void)
1933 goto out; 1942 goto out;
1934 1943
1935 xfslogd_workqueue = alloc_workqueue("xfslogd", 1944 xfslogd_workqueue = alloc_workqueue("xfslogd",
1936 WQ_RESCUER | WQ_HIGHPRI, 1); 1945 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1937 if (!xfslogd_workqueue) 1946 if (!xfslogd_workqueue)
1938 goto out_free_buf_zone; 1947 goto out_free_buf_zone;
1939 1948
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 2a05614f0b92..383a3f37cf98 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -51,7 +51,6 @@ typedef enum {
51#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ 51#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
52#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ 52#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
53#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ 53#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
54#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
55#define XBF_ORDERED (1 << 11)/* use ordered writes */ 54#define XBF_ORDERED (1 << 11)/* use ordered writes */
56#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ 55#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
57#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ 56#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
@@ -86,14 +85,6 @@ typedef enum {
86 */ 85 */
87#define _XBF_PAGE_LOCKED (1 << 22) 86#define _XBF_PAGE_LOCKED (1 << 22)
88 87
89/*
90 * If we try a barrier write, but it fails we have to communicate
91 * this to the upper layers. Unfortunately b_error gets overwritten
92 * when the buffer is re-issued so we have to add another flag to
93 * keep this information.
94 */
95#define _XFS_BARRIER_FAILED (1 << 23)
96
97typedef unsigned int xfs_buf_flags_t; 88typedef unsigned int xfs_buf_flags_t;
98 89
99#define XFS_BUF_FLAGS \ 90#define XFS_BUF_FLAGS \
@@ -104,7 +95,6 @@ typedef unsigned int xfs_buf_flags_t;
104 { XBF_DONE, "DONE" }, \ 95 { XBF_DONE, "DONE" }, \
105 { XBF_DELWRI, "DELWRI" }, \ 96 { XBF_DELWRI, "DELWRI" }, \
106 { XBF_STALE, "STALE" }, \ 97 { XBF_STALE, "STALE" }, \
107 { XBF_FS_MANAGED, "FS_MANAGED" }, \
108 { XBF_ORDERED, "ORDERED" }, \ 98 { XBF_ORDERED, "ORDERED" }, \
109 { XBF_READ_AHEAD, "READ_AHEAD" }, \ 99 { XBF_READ_AHEAD, "READ_AHEAD" }, \
110 { XBF_LOCK, "LOCK" }, /* should never be set */\ 100 { XBF_LOCK, "LOCK" }, /* should never be set */\
@@ -114,8 +104,7 @@ typedef unsigned int xfs_buf_flags_t;
114 { _XBF_PAGES, "PAGES" }, \ 104 { _XBF_PAGES, "PAGES" }, \
115 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ 105 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
116 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 106 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
117 { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \ 107 { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }
118 { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
119 108
120 109
121typedef enum { 110typedef enum {
@@ -132,14 +121,11 @@ typedef struct xfs_buftarg {
132 dev_t bt_dev; 121 dev_t bt_dev;
133 struct block_device *bt_bdev; 122 struct block_device *bt_bdev;
134 struct address_space *bt_mapping; 123 struct address_space *bt_mapping;
124 struct xfs_mount *bt_mount;
135 unsigned int bt_bsize; 125 unsigned int bt_bsize;
136 unsigned int bt_sshift; 126 unsigned int bt_sshift;
137 size_t bt_smask; 127 size_t bt_smask;
138 128
139 /* per device buffer hash table */
140 uint bt_hashshift;
141 xfs_bufhash_t *bt_hash;
142
143 /* per device delwri queue */ 129 /* per device delwri queue */
144 struct task_struct *bt_task; 130 struct task_struct *bt_task;
145 struct list_head bt_list; 131 struct list_head bt_list;
@@ -167,34 +153,41 @@ typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
167#define XB_PAGES 2 153#define XB_PAGES 2
168 154
169typedef struct xfs_buf { 155typedef struct xfs_buf {
156 /*
157 * first cacheline holds all the fields needed for an uncontended cache
158 * hit to be fully processed. The semaphore straddles the cacheline
159 * boundary, but the counter and lock sits on the first cacheline,
160 * which is the only bit that is touched if we hit the semaphore
161 * fast-path on locking.
162 */
163 struct rb_node b_rbnode; /* rbtree node */
164 xfs_off_t b_file_offset; /* offset in file */
165 size_t b_buffer_length;/* size of buffer in bytes */
166 atomic_t b_hold; /* reference count */
167 xfs_buf_flags_t b_flags; /* status flags */
170 struct semaphore b_sema; /* semaphore for lockables */ 168 struct semaphore b_sema; /* semaphore for lockables */
171 unsigned long b_queuetime; /* time buffer was queued */ 169
172 atomic_t b_pin_count; /* pin count */
173 wait_queue_head_t b_waiters; /* unpin waiters */ 170 wait_queue_head_t b_waiters; /* unpin waiters */
174 struct list_head b_list; 171 struct list_head b_list;
175 xfs_buf_flags_t b_flags; /* status flags */ 172 struct xfs_perag *b_pag; /* contains rbtree root */
176 struct list_head b_hash_list; /* hash table list */
177 xfs_bufhash_t *b_hash; /* hash table list start */
178 xfs_buftarg_t *b_target; /* buffer target (device) */ 173 xfs_buftarg_t *b_target; /* buffer target (device) */
179 atomic_t b_hold; /* reference count */
180 xfs_daddr_t b_bn; /* block number for I/O */ 174 xfs_daddr_t b_bn; /* block number for I/O */
181 xfs_off_t b_file_offset; /* offset in file */
182 size_t b_buffer_length;/* size of buffer in bytes */
183 size_t b_count_desired;/* desired transfer size */ 175 size_t b_count_desired;/* desired transfer size */
184 void *b_addr; /* virtual address of buffer */ 176 void *b_addr; /* virtual address of buffer */
185 struct work_struct b_iodone_work; 177 struct work_struct b_iodone_work;
186 atomic_t b_io_remaining; /* #outstanding I/O requests */
187 xfs_buf_iodone_t b_iodone; /* I/O completion function */ 178 xfs_buf_iodone_t b_iodone; /* I/O completion function */
188 xfs_buf_relse_t b_relse; /* releasing function */ 179 xfs_buf_relse_t b_relse; /* releasing function */
189 struct completion b_iowait; /* queue for I/O waiters */ 180 struct completion b_iowait; /* queue for I/O waiters */
190 void *b_fspriv; 181 void *b_fspriv;
191 void *b_fspriv2; 182 void *b_fspriv2;
192 struct xfs_mount *b_mount;
193 unsigned short b_error; /* error code on I/O */
194 unsigned int b_page_count; /* size of page array */
195 unsigned int b_offset; /* page offset in first page */
196 struct page **b_pages; /* array of page pointers */ 183 struct page **b_pages; /* array of page pointers */
197 struct page *b_page_array[XB_PAGES]; /* inline pages */ 184 struct page *b_page_array[XB_PAGES]; /* inline pages */
185 unsigned long b_queuetime; /* time buffer was queued */
186 atomic_t b_pin_count; /* pin count */
187 atomic_t b_io_remaining; /* #outstanding I/O requests */
188 unsigned int b_page_count; /* size of page array */
189 unsigned int b_offset; /* page offset in first page */
190 unsigned short b_error; /* error code on I/O */
198#ifdef XFS_BUF_LOCK_TRACKING 191#ifdef XFS_BUF_LOCK_TRACKING
199 int b_last_holder; 192 int b_last_holder;
200#endif 193#endif
@@ -213,11 +206,13 @@ extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
213 xfs_buf_flags_t); 206 xfs_buf_flags_t);
214 207
215extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); 208extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
216extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *); 209extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
217extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); 210extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
218extern void xfs_buf_hold(xfs_buf_t *); 211extern void xfs_buf_hold(xfs_buf_t *);
219extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t, 212extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
220 xfs_buf_flags_t); 213struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
214 struct xfs_buftarg *target,
215 xfs_daddr_t daddr, size_t length, int flags);
221 216
222/* Releasing Buffers */ 217/* Releasing Buffers */
223extern void xfs_buf_free(xfs_buf_t *); 218extern void xfs_buf_free(xfs_buf_t *);
@@ -242,6 +237,8 @@ extern int xfs_buf_iorequest(xfs_buf_t *);
242extern int xfs_buf_iowait(xfs_buf_t *); 237extern int xfs_buf_iowait(xfs_buf_t *);
243extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, 238extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
244 xfs_buf_rw_t); 239 xfs_buf_rw_t);
240#define xfs_buf_zero(bp, off, len) \
241 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
245 242
246static inline int xfs_buf_geterror(xfs_buf_t *bp) 243static inline int xfs_buf_geterror(xfs_buf_t *bp)
247{ 244{
@@ -276,8 +273,6 @@ extern void xfs_buf_terminate(void);
276 XFS_BUF_DONE(bp); \ 273 XFS_BUF_DONE(bp); \
277 } while (0) 274 } while (0)
278 275
279#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
280
281#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) 276#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
282#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) 277#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
283#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) 278#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
@@ -356,25 +351,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
356 xfs_buf_rele(bp); 351 xfs_buf_rele(bp);
357} 352}
358 353
359#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
360
361#define xfs_biomove(bp, off, len, data, rw) \
362 xfs_buf_iomove((bp), (off), (len), (data), \
363 ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
364
365#define xfs_biozero(bp, off, len) \
366 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
367
368#define xfs_iowait(bp) xfs_buf_iowait(bp)
369
370#define xfs_baread(target, rablkno, ralen) \
371 xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
372
373
374/* 354/*
375 * Handling of buftargs. 355 * Handling of buftargs.
376 */ 356 */
377extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *); 357extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
358 struct block_device *, int, const char *);
378extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); 359extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
379extern void xfs_wait_buftarg(xfs_buftarg_t *); 360extern void xfs_wait_buftarg(xfs_buftarg_t *);
380extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); 361extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
deleted file mode 100644
index 55bddf3b6091..000000000000
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_CRED_H__
19#define __XFS_CRED_H__
20
21#include <linux/capability.h>
22
23/*
24 * Credentials
25 */
26typedef const struct cred cred_t;
27
28#endif /* __XFS_CRED_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1f279b012f94..ed88ed16811c 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -32,10 +32,9 @@ xfs_tosspages(
32 xfs_off_t last, 32 xfs_off_t last,
33 int fiopt) 33 int fiopt)
34{ 34{
35 struct address_space *mapping = VFS_I(ip)->i_mapping; 35 /* can't toss partial tail pages, so mask them out */
36 36 last &= ~(PAGE_SIZE - 1);
37 if (mapping->nrpages) 37 truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1);
38 truncate_inode_pages(mapping, first);
39} 38}
40 39
41int 40int
@@ -50,12 +49,11 @@ xfs_flushinval_pages(
50 49
51 trace_xfs_pagecache_inval(ip, first, last); 50 trace_xfs_pagecache_inval(ip, first, last);
52 51
53 if (mapping->nrpages) { 52 xfs_iflags_clear(ip, XFS_ITRUNCATED);
54 xfs_iflags_clear(ip, XFS_ITRUNCATED); 53 ret = filemap_write_and_wait_range(mapping, first,
55 ret = filemap_write_and_wait(mapping); 54 last == -1 ? LLONG_MAX : last);
56 if (!ret) 55 if (!ret)
57 truncate_inode_pages(mapping, first); 56 truncate_inode_pages_range(mapping, first, last);
58 }
59 return -ret; 57 return -ret;
60} 58}
61 59
@@ -71,10 +69,9 @@ xfs_flush_pages(
71 int ret = 0; 69 int ret = 0;
72 int ret2; 70 int ret2;
73 71
74 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 72 xfs_iflags_clear(ip, XFS_ITRUNCATED);
75 xfs_iflags_clear(ip, XFS_ITRUNCATED); 73 ret = -filemap_fdatawrite_range(mapping, first,
76 ret = -filemap_fdatawrite(mapping); 74 last == -1 ? LLONG_MAX : last);
77 }
78 if (flags & XBF_ASYNC) 75 if (flags & XBF_ASYNC)
79 return ret; 76 return ret;
80 ret2 = xfs_wait_on_pages(ip, first, last); 77 ret2 = xfs_wait_on_pages(ip, first, last);
@@ -91,7 +88,9 @@ xfs_wait_on_pages(
91{ 88{
92 struct address_space *mapping = VFS_I(ip)->i_mapping; 89 struct address_space *mapping = VFS_I(ip)->i_mapping;
93 90
94 if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) 91 if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
95 return -filemap_fdatawait(mapping); 92 return -filemap_fdatawait_range(mapping, first,
93 last == -1 ? ip->i_size - 1 : last);
94 }
96 return 0; 95 return 0;
97} 96}
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index 2ae8b1ccb02e..76e81cff70b9 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -16,7 +16,6 @@
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_cred.h"
20#include "xfs_sysctl.h" 19#include "xfs_sysctl.h"
21 20
22/* 21/*
diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h
deleted file mode 100644
index 69f71caf061c..000000000000
--- a/fs/xfs/linux-2.6/xfs_globals.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_GLOBALS_H__
19#define __XFS_GLOBALS_H__
20
21extern uint64_t xfs_panic_mask; /* set to cause more panics */
22
23#endif /* __XFS_GLOBALS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 3b9e626f7cd1..2ea238f6d38e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -790,7 +790,7 @@ xfs_ioc_fsgetxattr(
790 xfs_ilock(ip, XFS_ILOCK_SHARED); 790 xfs_ilock(ip, XFS_ILOCK_SHARED);
791 fa.fsx_xflags = xfs_ip2xflags(ip); 791 fa.fsx_xflags = xfs_ip2xflags(ip);
792 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; 792 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
793 fa.fsx_projid = ip->i_d.di_projid; 793 fa.fsx_projid = xfs_get_projid(ip);
794 794
795 if (attr) { 795 if (attr) {
796 if (ip->i_afp) { 796 if (ip->i_afp) {
@@ -909,10 +909,10 @@ xfs_ioctl_setattr(
909 return XFS_ERROR(EIO); 909 return XFS_ERROR(EIO);
910 910
911 /* 911 /*
912 * Disallow 32bit project ids because on-disk structure 912 * Disallow 32bit project ids when projid32bit feature is not enabled.
913 * is 16bit only.
914 */ 913 */
915 if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1)) 914 if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
915 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
916 return XFS_ERROR(EINVAL); 916 return XFS_ERROR(EINVAL);
917 917
918 /* 918 /*
@@ -961,7 +961,7 @@ xfs_ioctl_setattr(
961 if (mask & FSX_PROJID) { 961 if (mask & FSX_PROJID) {
962 if (XFS_IS_QUOTA_RUNNING(mp) && 962 if (XFS_IS_QUOTA_RUNNING(mp) &&
963 XFS_IS_PQUOTA_ON(mp) && 963 XFS_IS_PQUOTA_ON(mp) &&
964 ip->i_d.di_projid != fa->fsx_projid) { 964 xfs_get_projid(ip) != fa->fsx_projid) {
965 ASSERT(tp); 965 ASSERT(tp);
966 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, 966 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
967 capable(CAP_FOWNER) ? 967 capable(CAP_FOWNER) ?
@@ -1063,12 +1063,12 @@ xfs_ioctl_setattr(
1063 * Change the ownerships and register quota modifications 1063 * Change the ownerships and register quota modifications
1064 * in the transaction. 1064 * in the transaction.
1065 */ 1065 */
1066 if (ip->i_d.di_projid != fa->fsx_projid) { 1066 if (xfs_get_projid(ip) != fa->fsx_projid) {
1067 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { 1067 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1068 olddquot = xfs_qm_vop_chown(tp, ip, 1068 olddquot = xfs_qm_vop_chown(tp, ip,
1069 &ip->i_gdquot, gdqp); 1069 &ip->i_gdquot, gdqp);
1070 } 1070 }
1071 ip->i_d.di_projid = fa->fsx_projid; 1071 xfs_set_projid(ip, fa->fsx_projid);
1072 1072
1073 /* 1073 /*
1074 * We may have to rev the inode as well as 1074 * We may have to rev the inode as well as
@@ -1088,8 +1088,8 @@ xfs_ioctl_setattr(
1088 xfs_diflags_to_linux(ip); 1088 xfs_diflags_to_linux(ip);
1089 } 1089 }
1090 1090
1091 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1091 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1092 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1092 xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1093 1093
1094 XFS_STATS_INC(xs_ig_attrchg); 1094 XFS_STATS_INC(xs_ig_attrchg);
1095 1095
@@ -1301,7 +1301,8 @@ xfs_file_ioctl(
1301 case XFS_IOC_ALLOCSP64: 1301 case XFS_IOC_ALLOCSP64:
1302 case XFS_IOC_FREESP64: 1302 case XFS_IOC_FREESP64:
1303 case XFS_IOC_RESVSP64: 1303 case XFS_IOC_RESVSP64:
1304 case XFS_IOC_UNRESVSP64: { 1304 case XFS_IOC_UNRESVSP64:
1305 case XFS_IOC_ZERO_RANGE: {
1305 xfs_flock64_t bf; 1306 xfs_flock64_t bf;
1306 1307
1307 if (copy_from_user(&bf, arg, sizeof(bf))) 1308 if (copy_from_user(&bf, arg, sizeof(bf)))
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 6c83f7f62dc9..b3486dfa5520 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -164,7 +164,8 @@ xfs_ioctl32_bstat_copyin(
164 get_user(bstat->bs_extsize, &bstat32->bs_extsize) || 164 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
165 get_user(bstat->bs_extents, &bstat32->bs_extents) || 165 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
166 get_user(bstat->bs_gen, &bstat32->bs_gen) || 166 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
167 get_user(bstat->bs_projid, &bstat32->bs_projid) || 167 get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
168 get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
168 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || 169 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
169 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || 170 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
170 get_user(bstat->bs_aextents, &bstat32->bs_aextents)) 171 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
@@ -218,6 +219,7 @@ xfs_bulkstat_one_fmt_compat(
218 put_user(buffer->bs_extents, &p32->bs_extents) || 219 put_user(buffer->bs_extents, &p32->bs_extents) ||
219 put_user(buffer->bs_gen, &p32->bs_gen) || 220 put_user(buffer->bs_gen, &p32->bs_gen) ||
220 put_user(buffer->bs_projid, &p32->bs_projid) || 221 put_user(buffer->bs_projid, &p32->bs_projid) ||
222 put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
221 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || 223 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
222 put_user(buffer->bs_dmstate, &p32->bs_dmstate) || 224 put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
223 put_user(buffer->bs_aextents, &p32->bs_aextents)) 225 put_user(buffer->bs_aextents, &p32->bs_aextents))
@@ -574,6 +576,7 @@ xfs_file_compat_ioctl(
574 case XFS_IOC_FSGEOMETRY_V1: 576 case XFS_IOC_FSGEOMETRY_V1:
575 case XFS_IOC_FSGROWFSDATA: 577 case XFS_IOC_FSGROWFSDATA:
576 case XFS_IOC_FSGROWFSRT: 578 case XFS_IOC_FSGROWFSRT:
579 case XFS_IOC_ZERO_RANGE:
577 return xfs_file_ioctl(filp, cmd, p); 580 return xfs_file_ioctl(filp, cmd, p);
578#else 581#else
579 case XFS_IOC_ALLOCSP_32: 582 case XFS_IOC_ALLOCSP_32:
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h
index 1024c4f8ba0d..08b605792a99 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.h
@@ -65,8 +65,10 @@ typedef struct compat_xfs_bstat {
65 __s32 bs_extsize; /* extent size */ 65 __s32 bs_extsize; /* extent size */
66 __s32 bs_extents; /* number of extents */ 66 __s32 bs_extents; /* number of extents */
67 __u32 bs_gen; /* generation count */ 67 __u32 bs_gen; /* generation count */
68 __u16 bs_projid; /* project id */ 68 __u16 bs_projid_lo; /* lower part of project id */
69 unsigned char bs_pad[14]; /* pad space, unused */ 69#define bs_projid bs_projid_lo /* (previously just bs_projid) */
70 __u16 bs_projid_hi; /* high part of project id */
71 unsigned char bs_pad[12]; /* pad space, unused */
70 __u32 bs_dmevmask; /* DMIG event mask */ 72 __u32 bs_dmevmask; /* DMIG event mask */
71 __u16 bs_dmstate; /* DMIG state info */ 73 __u16 bs_dmstate; /* DMIG state info */
72 __u16 bs_aextents; /* attribute number of extents */ 74 __u16 bs_aextents; /* attribute number of extents */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index b1fc2a6bfe83..ec858e09d546 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -95,41 +95,6 @@ xfs_mark_inode_dirty(
95} 95}
96 96
97/* 97/*
98 * Change the requested timestamp in the given inode.
99 * We don't lock across timestamp updates, and we don't log them but
100 * we do record the fact that there is dirty information in core.
101 */
102void
103xfs_ichgtime(
104 xfs_inode_t *ip,
105 int flags)
106{
107 struct inode *inode = VFS_I(ip);
108 timespec_t tv;
109 int sync_it = 0;
110
111 tv = current_fs_time(inode->i_sb);
112
113 if ((flags & XFS_ICHGTIME_MOD) &&
114 !timespec_equal(&inode->i_mtime, &tv)) {
115 inode->i_mtime = tv;
116 sync_it = 1;
117 }
118 if ((flags & XFS_ICHGTIME_CHG) &&
119 !timespec_equal(&inode->i_ctime, &tv)) {
120 inode->i_ctime = tv;
121 sync_it = 1;
122 }
123
124 /*
125 * Update complete - now make sure everyone knows that the inode
126 * is dirty.
127 */
128 if (sync_it)
129 xfs_mark_inode_dirty_sync(ip);
130}
131
132/*
133 * Hook in SELinux. This is not quite correct yet, what we really need 98 * Hook in SELinux. This is not quite correct yet, what we really need
134 * here (as we do for default ACLs) is a mechanism by which creation of 99 * here (as we do for default ACLs) is a mechanism by which creation of
135 * these attrs can be journalled at inode creation time (along with the 100 * these attrs can be journalled at inode creation time (along with the
@@ -224,7 +189,7 @@ xfs_vn_mknod(
224 } 189 }
225 190
226 xfs_dentry_to_name(&name, dentry); 191 xfs_dentry_to_name(&name, dentry);
227 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); 192 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
228 if (unlikely(error)) 193 if (unlikely(error))
229 goto out_free_acl; 194 goto out_free_acl;
230 195
@@ -397,7 +362,7 @@ xfs_vn_symlink(
397 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); 362 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
398 xfs_dentry_to_name(&name, dentry); 363 xfs_dentry_to_name(&name, dentry);
399 364
400 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL); 365 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
401 if (unlikely(error)) 366 if (unlikely(error))
402 goto out; 367 goto out;
403 368
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 2fa0bd9ebc7f..214ddd71ff79 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -71,6 +71,7 @@
71#include <linux/random.h> 71#include <linux/random.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/writeback.h> 73#include <linux/writeback.h>
74#include <linux/capability.h>
74 75
75#include <asm/page.h> 76#include <asm/page.h>
76#include <asm/div64.h> 77#include <asm/div64.h>
@@ -79,14 +80,12 @@
79#include <asm/byteorder.h> 80#include <asm/byteorder.h>
80#include <asm/unaligned.h> 81#include <asm/unaligned.h>
81 82
82#include <xfs_cred.h>
83#include <xfs_vnode.h> 83#include <xfs_vnode.h>
84#include <xfs_stats.h> 84#include <xfs_stats.h>
85#include <xfs_sysctl.h> 85#include <xfs_sysctl.h>
86#include <xfs_iops.h> 86#include <xfs_iops.h>
87#include <xfs_aops.h> 87#include <xfs_aops.h>
88#include <xfs_super.h> 88#include <xfs_super.h>
89#include <xfs_globals.h>
90#include <xfs_buf.h> 89#include <xfs_buf.h>
91 90
92/* 91/*
@@ -144,7 +143,7 @@
144#define SYNCHRONIZE() barrier() 143#define SYNCHRONIZE() barrier()
145#define __return_address __builtin_return_address(0) 144#define __return_address __builtin_return_address(0)
146 145
147#define dfltprid 0 146#define XFS_PROJID_DEFAULT 0
148#define MAXPATHLEN 1024 147#define MAXPATHLEN 1024
149 148
150#define MIN(a,b) (min(a,b)) 149#define MIN(a,b) (min(a,b))
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index a4e07974955b..ab31ce5aeaf9 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -44,7 +44,6 @@
44#include "xfs_buf_item.h" 44#include "xfs_buf_item.h"
45#include "xfs_utils.h" 45#include "xfs_utils.h"
46#include "xfs_vnodeops.h" 46#include "xfs_vnodeops.h"
47#include "xfs_version.h"
48#include "xfs_log_priv.h" 47#include "xfs_log_priv.h"
49#include "xfs_trans_priv.h" 48#include "xfs_trans_priv.h"
50#include "xfs_filestream.h" 49#include "xfs_filestream.h"
@@ -645,7 +644,7 @@ xfs_barrier_test(
645 XFS_BUF_ORDERED(sbp); 644 XFS_BUF_ORDERED(sbp);
646 645
647 xfsbdstrat(mp, sbp); 646 xfsbdstrat(mp, sbp);
648 error = xfs_iowait(sbp); 647 error = xfs_buf_iowait(sbp);
649 648
650 /* 649 /*
651 * Clear all the flags we set and possible error state in the 650 * Clear all the flags we set and possible error state in the
@@ -693,8 +692,7 @@ void
693xfs_blkdev_issue_flush( 692xfs_blkdev_issue_flush(
694 xfs_buftarg_t *buftarg) 693 xfs_buftarg_t *buftarg)
695{ 694{
696 blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL, 695 blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
697 BLKDEV_IFL_WAIT);
698} 696}
699 697
700STATIC void 698STATIC void
@@ -758,18 +756,20 @@ xfs_open_devices(
758 * Setup xfs_mount buffer target pointers 756 * Setup xfs_mount buffer target pointers
759 */ 757 */
760 error = ENOMEM; 758 error = ENOMEM;
761 mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname); 759 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
762 if (!mp->m_ddev_targp) 760 if (!mp->m_ddev_targp)
763 goto out_close_rtdev; 761 goto out_close_rtdev;
764 762
765 if (rtdev) { 763 if (rtdev) {
766 mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname); 764 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
765 mp->m_fsname);
767 if (!mp->m_rtdev_targp) 766 if (!mp->m_rtdev_targp)
768 goto out_free_ddev_targ; 767 goto out_free_ddev_targ;
769 } 768 }
770 769
771 if (logdev && logdev != ddev) { 770 if (logdev && logdev != ddev) {
772 mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname); 771 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
772 mp->m_fsname);
773 if (!mp->m_logdev_targp) 773 if (!mp->m_logdev_targp)
774 goto out_free_rtdev_targ; 774 goto out_free_rtdev_targ;
775 } else { 775 } else {
@@ -972,12 +972,7 @@ xfs_fs_inode_init_once(
972 972
973/* 973/*
974 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that 974 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
975 * we catch unlogged VFS level updates to the inode. Care must be taken 975 * we catch unlogged VFS level updates to the inode.
976 * here - the transaction code calls mark_inode_dirty_sync() to mark the
977 * VFS inode dirty in a transaction and clears the i_update_core field;
978 * it must clear the field after calling mark_inode_dirty_sync() to
979 * correctly indicate that the dirty state has been propagated into the
980 * inode log item.
981 * 976 *
982 * We need the barrier() to maintain correct ordering between unlogged 977 * We need the barrier() to maintain correct ordering between unlogged
983 * updates and the transaction commit code that clears the i_update_core 978 * updates and the transaction commit code that clears the i_update_core
@@ -1521,8 +1516,9 @@ xfs_fs_fill_super(
1521 if (error) 1516 if (error)
1522 goto out_free_fsname; 1517 goto out_free_fsname;
1523 1518
1524 if (xfs_icsb_init_counters(mp)) 1519 error = xfs_icsb_init_counters(mp);
1525 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 1520 if (error)
1521 goto out_close_devices;
1526 1522
1527 error = xfs_readsb(mp, flags); 1523 error = xfs_readsb(mp, flags);
1528 if (error) 1524 if (error)
@@ -1583,6 +1579,7 @@ xfs_fs_fill_super(
1583 xfs_freesb(mp); 1579 xfs_freesb(mp);
1584 out_destroy_counters: 1580 out_destroy_counters:
1585 xfs_icsb_destroy_counters(mp); 1581 xfs_icsb_destroy_counters(mp);
1582 out_close_devices:
1586 xfs_close_devices(mp); 1583 xfs_close_devices(mp);
1587 out_free_fsname: 1584 out_free_fsname:
1588 xfs_free_fsname(mp); 1585 xfs_free_fsname(mp);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 1ef4a4d2d997..50a3266c999e 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -62,6 +62,7 @@ extern void xfs_qm_exit(void);
62# define XFS_DBG_STRING "no debug" 62# define XFS_DBG_STRING "no debug"
63#endif 63#endif
64 64
65#define XFS_VERSION_STRING "SGI XFS"
65#define XFS_BUILD_OPTIONS XFS_ACL_STRING \ 66#define XFS_BUILD_OPTIONS XFS_ACL_STRING \
66 XFS_SECURITY_STRING \ 67 XFS_SECURITY_STRING \
67 XFS_REALTIME_STRING \ 68 XFS_REALTIME_STRING \
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 81976ffed7d6..37d33254981d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -39,42 +39,39 @@
39#include <linux/kthread.h> 39#include <linux/kthread.h>
40#include <linux/freezer.h> 40#include <linux/freezer.h>
41 41
42/*
43 * The inode lookup is done in batches to keep the amount of lock traffic and
44 * radix tree lookups to a minimum. The batch size is a trade off between
45 * lookup reduction and stack usage. This is in the reclaim path, so we can't
46 * be too greedy.
47 */
48#define XFS_LOOKUP_BATCH 32
42 49
43STATIC xfs_inode_t * 50STATIC int
44xfs_inode_ag_lookup( 51xfs_inode_ag_walk_grab(
45 struct xfs_mount *mp, 52 struct xfs_inode *ip)
46 struct xfs_perag *pag,
47 uint32_t *first_index,
48 int tag)
49{ 53{
50 int nr_found; 54 struct inode *inode = VFS_I(ip);
51 struct xfs_inode *ip;
52 55
53 /* 56 /* nothing to sync during shutdown */
54 * use a gang lookup to find the next inode in the tree 57 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
55 * as the tree is sparse and a gang lookup walks to find 58 return EFSCORRUPTED;
56 * the number of objects requested. 59
57 */ 60 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
58 if (tag == XFS_ICI_NO_TAG) { 61 if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
59 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, 62 return ENOENT;
60 (void **)&ip, *first_index, 1); 63
61 } else { 64 /* If we can't grab the inode, it must on it's way to reclaim. */
62 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, 65 if (!igrab(inode))
63 (void **)&ip, *first_index, 1, tag); 66 return ENOENT;
67
68 if (is_bad_inode(inode)) {
69 IRELE(ip);
70 return ENOENT;
64 } 71 }
65 if (!nr_found)
66 return NULL;
67 72
68 /* 73 /* inode is valid */
69 * Update the index for the next lookup. Catch overflows 74 return 0;
70 * into the next AG range which can occur if we have inodes
71 * in the last block of the AG and we are currently
72 * pointing to the last inode.
73 */
74 *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
75 if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
76 return NULL;
77 return ip;
78} 75}
79 76
80STATIC int 77STATIC int
@@ -83,49 +80,75 @@ xfs_inode_ag_walk(
83 struct xfs_perag *pag, 80 struct xfs_perag *pag,
84 int (*execute)(struct xfs_inode *ip, 81 int (*execute)(struct xfs_inode *ip,
85 struct xfs_perag *pag, int flags), 82 struct xfs_perag *pag, int flags),
86 int flags, 83 int flags)
87 int tag,
88 int exclusive,
89 int *nr_to_scan)
90{ 84{
91 uint32_t first_index; 85 uint32_t first_index;
92 int last_error = 0; 86 int last_error = 0;
93 int skipped; 87 int skipped;
88 int done;
89 int nr_found;
94 90
95restart: 91restart:
92 done = 0;
96 skipped = 0; 93 skipped = 0;
97 first_index = 0; 94 first_index = 0;
95 nr_found = 0;
98 do { 96 do {
97 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
99 int error = 0; 98 int error = 0;
100 xfs_inode_t *ip; 99 int i;
101 100
102 if (exclusive) 101 read_lock(&pag->pag_ici_lock);
103 write_lock(&pag->pag_ici_lock); 102 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
104 else 103 (void **)batch, first_index,
105 read_lock(&pag->pag_ici_lock); 104 XFS_LOOKUP_BATCH);
106 ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag); 105 if (!nr_found) {
107 if (!ip) { 106 read_unlock(&pag->pag_ici_lock);
108 if (exclusive)
109 write_unlock(&pag->pag_ici_lock);
110 else
111 read_unlock(&pag->pag_ici_lock);
112 break; 107 break;
113 } 108 }
114 109
115 /* execute releases pag->pag_ici_lock */ 110 /*
116 error = execute(ip, pag, flags); 111 * Grab the inodes before we drop the lock. if we found
117 if (error == EAGAIN) { 112 * nothing, nr == 0 and the loop will be skipped.
118 skipped++; 113 */
119 continue; 114 for (i = 0; i < nr_found; i++) {
115 struct xfs_inode *ip = batch[i];
116
117 if (done || xfs_inode_ag_walk_grab(ip))
118 batch[i] = NULL;
119
120 /*
121 * Update the index for the next lookup. Catch overflows
122 * into the next AG range which can occur if we have inodes
123 * in the last block of the AG and we are currently
124 * pointing to the last inode.
125 */
126 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
127 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
128 done = 1;
129 }
130
131 /* unlock now we've grabbed the inodes. */
132 read_unlock(&pag->pag_ici_lock);
133
134 for (i = 0; i < nr_found; i++) {
135 if (!batch[i])
136 continue;
137 error = execute(batch[i], pag, flags);
138 IRELE(batch[i]);
139 if (error == EAGAIN) {
140 skipped++;
141 continue;
142 }
143 if (error && last_error != EFSCORRUPTED)
144 last_error = error;
120 } 145 }
121 if (error)
122 last_error = error;
123 146
124 /* bail out if the filesystem is corrupted. */ 147 /* bail out if the filesystem is corrupted. */
125 if (error == EFSCORRUPTED) 148 if (error == EFSCORRUPTED)
126 break; 149 break;
127 150
128 } while ((*nr_to_scan)--); 151 } while (nr_found && !done);
129 152
130 if (skipped) { 153 if (skipped) {
131 delay(1); 154 delay(1);
@@ -134,110 +157,32 @@ restart:
134 return last_error; 157 return last_error;
135} 158}
136 159
137/*
138 * Select the next per-ag structure to iterate during the walk. The reclaim
139 * walk is optimised only to walk AGs with reclaimable inodes in them.
140 */
141static struct xfs_perag *
142xfs_inode_ag_iter_next_pag(
143 struct xfs_mount *mp,
144 xfs_agnumber_t *first,
145 int tag)
146{
147 struct xfs_perag *pag = NULL;
148
149 if (tag == XFS_ICI_RECLAIM_TAG) {
150 int found;
151 int ref;
152
153 spin_lock(&mp->m_perag_lock);
154 found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
155 (void **)&pag, *first, 1, tag);
156 if (found <= 0) {
157 spin_unlock(&mp->m_perag_lock);
158 return NULL;
159 }
160 *first = pag->pag_agno + 1;
161 /* open coded pag reference increment */
162 ref = atomic_inc_return(&pag->pag_ref);
163 spin_unlock(&mp->m_perag_lock);
164 trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
165 } else {
166 pag = xfs_perag_get(mp, *first);
167 (*first)++;
168 }
169 return pag;
170}
171
172int 160int
173xfs_inode_ag_iterator( 161xfs_inode_ag_iterator(
174 struct xfs_mount *mp, 162 struct xfs_mount *mp,
175 int (*execute)(struct xfs_inode *ip, 163 int (*execute)(struct xfs_inode *ip,
176 struct xfs_perag *pag, int flags), 164 struct xfs_perag *pag, int flags),
177 int flags, 165 int flags)
178 int tag,
179 int exclusive,
180 int *nr_to_scan)
181{ 166{
182 struct xfs_perag *pag; 167 struct xfs_perag *pag;
183 int error = 0; 168 int error = 0;
184 int last_error = 0; 169 int last_error = 0;
185 xfs_agnumber_t ag; 170 xfs_agnumber_t ag;
186 int nr;
187 171
188 nr = nr_to_scan ? *nr_to_scan : INT_MAX;
189 ag = 0; 172 ag = 0;
190 while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) { 173 while ((pag = xfs_perag_get(mp, ag))) {
191 error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, 174 ag = pag->pag_agno + 1;
192 exclusive, &nr); 175 error = xfs_inode_ag_walk(mp, pag, execute, flags);
193 xfs_perag_put(pag); 176 xfs_perag_put(pag);
194 if (error) { 177 if (error) {
195 last_error = error; 178 last_error = error;
196 if (error == EFSCORRUPTED) 179 if (error == EFSCORRUPTED)
197 break; 180 break;
198 } 181 }
199 if (nr <= 0)
200 break;
201 } 182 }
202 if (nr_to_scan)
203 *nr_to_scan = nr;
204 return XFS_ERROR(last_error); 183 return XFS_ERROR(last_error);
205} 184}
206 185
207/* must be called with pag_ici_lock held and releases it */
208int
209xfs_sync_inode_valid(
210 struct xfs_inode *ip,
211 struct xfs_perag *pag)
212{
213 struct inode *inode = VFS_I(ip);
214 int error = EFSCORRUPTED;
215
216 /* nothing to sync during shutdown */
217 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
218 goto out_unlock;
219
220 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
221 error = ENOENT;
222 if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
223 goto out_unlock;
224
225 /* If we can't grab the inode, it must on it's way to reclaim. */
226 if (!igrab(inode))
227 goto out_unlock;
228
229 if (is_bad_inode(inode)) {
230 IRELE(ip);
231 goto out_unlock;
232 }
233
234 /* inode is valid */
235 error = 0;
236out_unlock:
237 read_unlock(&pag->pag_ici_lock);
238 return error;
239}
240
241STATIC int 186STATIC int
242xfs_sync_inode_data( 187xfs_sync_inode_data(
243 struct xfs_inode *ip, 188 struct xfs_inode *ip,
@@ -248,10 +193,6 @@ xfs_sync_inode_data(
248 struct address_space *mapping = inode->i_mapping; 193 struct address_space *mapping = inode->i_mapping;
249 int error = 0; 194 int error = 0;
250 195
251 error = xfs_sync_inode_valid(ip, pag);
252 if (error)
253 return error;
254
255 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 196 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
256 goto out_wait; 197 goto out_wait;
257 198
@@ -268,7 +209,6 @@ xfs_sync_inode_data(
268 out_wait: 209 out_wait:
269 if (flags & SYNC_WAIT) 210 if (flags & SYNC_WAIT)
270 xfs_ioend_wait(ip); 211 xfs_ioend_wait(ip);
271 IRELE(ip);
272 return error; 212 return error;
273} 213}
274 214
@@ -280,10 +220,6 @@ xfs_sync_inode_attr(
280{ 220{
281 int error = 0; 221 int error = 0;
282 222
283 error = xfs_sync_inode_valid(ip, pag);
284 if (error)
285 return error;
286
287 xfs_ilock(ip, XFS_ILOCK_SHARED); 223 xfs_ilock(ip, XFS_ILOCK_SHARED);
288 if (xfs_inode_clean(ip)) 224 if (xfs_inode_clean(ip))
289 goto out_unlock; 225 goto out_unlock;
@@ -302,7 +238,6 @@ xfs_sync_inode_attr(
302 238
303 out_unlock: 239 out_unlock:
304 xfs_iunlock(ip, XFS_ILOCK_SHARED); 240 xfs_iunlock(ip, XFS_ILOCK_SHARED);
305 IRELE(ip);
306 return error; 241 return error;
307} 242}
308 243
@@ -318,8 +253,7 @@ xfs_sync_data(
318 253
319 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); 254 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
320 255
321 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, 256 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
322 XFS_ICI_NO_TAG, 0, NULL);
323 if (error) 257 if (error)
324 return XFS_ERROR(error); 258 return XFS_ERROR(error);
325 259
@@ -337,8 +271,7 @@ xfs_sync_attr(
337{ 271{
338 ASSERT((flags & ~SYNC_WAIT) == 0); 272 ASSERT((flags & ~SYNC_WAIT) == 0);
339 273
340 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, 274 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
341 XFS_ICI_NO_TAG, 0, NULL);
342} 275}
343 276
344STATIC int 277STATIC int
@@ -698,6 +631,43 @@ __xfs_inode_clear_reclaim_tag(
698} 631}
699 632
700/* 633/*
634 * Grab the inode for reclaim exclusively.
635 * Return 0 if we grabbed it, non-zero otherwise.
636 */
637STATIC int
638xfs_reclaim_inode_grab(
639 struct xfs_inode *ip,
640 int flags)
641{
642
643 /*
644 * do some unlocked checks first to avoid unnecceary lock traffic.
645 * The first is a flush lock check, the second is a already in reclaim
646 * check. Only do these checks if we are not going to block on locks.
647 */
648 if ((flags & SYNC_TRYLOCK) &&
649 (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
650 return 1;
651 }
652
653 /*
654 * The radix tree lock here protects a thread in xfs_iget from racing
655 * with us starting reclaim on the inode. Once we have the
656 * XFS_IRECLAIM flag set it will not touch us.
657 */
658 spin_lock(&ip->i_flags_lock);
659 ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
660 if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
661 /* ignore as it is already under reclaim */
662 spin_unlock(&ip->i_flags_lock);
663 return 1;
664 }
665 __xfs_iflags_set(ip, XFS_IRECLAIM);
666 spin_unlock(&ip->i_flags_lock);
667 return 0;
668}
669
670/*
701 * Inodes in different states need to be treated differently, and the return 671 * Inodes in different states need to be treated differently, and the return
702 * value of xfs_iflush is not sufficient to get this right. The following table 672 * value of xfs_iflush is not sufficient to get this right. The following table
703 * lists the inode states and the reclaim actions necessary for non-blocking 673 * lists the inode states and the reclaim actions necessary for non-blocking
@@ -755,23 +725,6 @@ xfs_reclaim_inode(
755{ 725{
756 int error = 0; 726 int error = 0;
757 727
758 /*
759 * The radix tree lock here protects a thread in xfs_iget from racing
760 * with us starting reclaim on the inode. Once we have the
761 * XFS_IRECLAIM flag set it will not touch us.
762 */
763 spin_lock(&ip->i_flags_lock);
764 ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
765 if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
766 /* ignore as it is already under reclaim */
767 spin_unlock(&ip->i_flags_lock);
768 write_unlock(&pag->pag_ici_lock);
769 return 0;
770 }
771 __xfs_iflags_set(ip, XFS_IRECLAIM);
772 spin_unlock(&ip->i_flags_lock);
773 write_unlock(&pag->pag_ici_lock);
774
775 xfs_ilock(ip, XFS_ILOCK_EXCL); 728 xfs_ilock(ip, XFS_ILOCK_EXCL);
776 if (!xfs_iflock_nowait(ip)) { 729 if (!xfs_iflock_nowait(ip)) {
777 if (!(sync_mode & SYNC_WAIT)) 730 if (!(sync_mode & SYNC_WAIT))
@@ -868,13 +821,126 @@ reclaim:
868 821
869} 822}
870 823
824/*
825 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
826 * corrupted, we still want to try to reclaim all the inodes. If we don't,
827 * then a shut down during filesystem unmount reclaim walk leak all the
828 * unreclaimed inodes.
829 */
830int
831xfs_reclaim_inodes_ag(
832 struct xfs_mount *mp,
833 int flags,
834 int *nr_to_scan)
835{
836 struct xfs_perag *pag;
837 int error = 0;
838 int last_error = 0;
839 xfs_agnumber_t ag;
840 int trylock = flags & SYNC_TRYLOCK;
841 int skipped;
842
843restart:
844 ag = 0;
845 skipped = 0;
846 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
847 unsigned long first_index = 0;
848 int done = 0;
849 int nr_found = 0;
850
851 ag = pag->pag_agno + 1;
852
853 if (trylock) {
854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
855 skipped++;
856 continue;
857 }
858 first_index = pag->pag_ici_reclaim_cursor;
859 } else
860 mutex_lock(&pag->pag_ici_reclaim_lock);
861
862 do {
863 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
864 int i;
865
866 write_lock(&pag->pag_ici_lock);
867 nr_found = radix_tree_gang_lookup_tag(
868 &pag->pag_ici_root,
869 (void **)batch, first_index,
870 XFS_LOOKUP_BATCH,
871 XFS_ICI_RECLAIM_TAG);
872 if (!nr_found) {
873 write_unlock(&pag->pag_ici_lock);
874 break;
875 }
876
877 /*
878 * Grab the inodes before we drop the lock. if we found
879 * nothing, nr == 0 and the loop will be skipped.
880 */
881 for (i = 0; i < nr_found; i++) {
882 struct xfs_inode *ip = batch[i];
883
884 if (done || xfs_reclaim_inode_grab(ip, flags))
885 batch[i] = NULL;
886
887 /*
888 * Update the index for the next lookup. Catch
889 * overflows into the next AG range which can
890 * occur if we have inodes in the last block of
891 * the AG and we are currently pointing to the
892 * last inode.
893 */
894 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
895 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
896 done = 1;
897 }
898
899 /* unlock now we've grabbed the inodes. */
900 write_unlock(&pag->pag_ici_lock);
901
902 for (i = 0; i < nr_found; i++) {
903 if (!batch[i])
904 continue;
905 error = xfs_reclaim_inode(batch[i], pag, flags);
906 if (error && last_error != EFSCORRUPTED)
907 last_error = error;
908 }
909
910 *nr_to_scan -= XFS_LOOKUP_BATCH;
911
912 } while (nr_found && !done && *nr_to_scan > 0);
913
914 if (trylock && !done)
915 pag->pag_ici_reclaim_cursor = first_index;
916 else
917 pag->pag_ici_reclaim_cursor = 0;
918 mutex_unlock(&pag->pag_ici_reclaim_lock);
919 xfs_perag_put(pag);
920 }
921
922 /*
923 * if we skipped any AG, and we still have scan count remaining, do
924 * another pass this time using blocking reclaim semantics (i.e
925 * waiting on the reclaim locks and ignoring the reclaim cursors). This
926 * ensure that when we get more reclaimers than AGs we block rather
927 * than spin trying to execute reclaim.
928 */
929 if (trylock && skipped && *nr_to_scan > 0) {
930 trylock = 0;
931 goto restart;
932 }
933 return XFS_ERROR(last_error);
934}
935
871int 936int
872xfs_reclaim_inodes( 937xfs_reclaim_inodes(
873 xfs_mount_t *mp, 938 xfs_mount_t *mp,
874 int mode) 939 int mode)
875{ 940{
876 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode, 941 int nr_to_scan = INT_MAX;
877 XFS_ICI_RECLAIM_TAG, 1, NULL); 942
943 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
878} 944}
879 945
880/* 946/*
@@ -896,17 +962,16 @@ xfs_reclaim_inode_shrink(
896 if (!(gfp_mask & __GFP_FS)) 962 if (!(gfp_mask & __GFP_FS))
897 return -1; 963 return -1;
898 964
899 xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, 965 xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
900 XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); 966 /* terminate if we don't exhaust the scan */
901 /* if we don't exhaust the scan, don't bother coming back */
902 if (nr_to_scan > 0) 967 if (nr_to_scan > 0)
903 return -1; 968 return -1;
904 } 969 }
905 970
906 reclaimable = 0; 971 reclaimable = 0;
907 ag = 0; 972 ag = 0;
908 while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, 973 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
909 XFS_ICI_RECLAIM_TAG))) { 974 ag = pag->pag_agno + 1;
910 reclaimable += pag->pag_ici_reclaimable; 975 reclaimable += pag->pag_ici_reclaimable;
911 xfs_perag_put(pag); 976 xfs_perag_put(pag);
912 } 977 }
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index fe78726196f8..32ba6628290c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -47,10 +47,10 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
47void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, 47void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
48 struct xfs_inode *ip); 48 struct xfs_inode *ip);
49 49
50int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); 50int xfs_sync_inode_grab(struct xfs_inode *ip);
51int xfs_inode_ag_iterator(struct xfs_mount *mp, 51int xfs_inode_ag_iterator(struct xfs_mount *mp,
52 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), 52 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
53 int flags, int tag, int write_lock, int *nr_to_scan); 53 int flags);
54 54
55void xfs_inode_shrinker_register(struct xfs_mount *mp); 55void xfs_inode_shrinker_register(struct xfs_mount *mp);
56void xfs_inode_shrinker_unregister(struct xfs_mount *mp); 56void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index be5dffd282a1..acef2e98c594 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -124,7 +124,7 @@ DEFINE_EVENT(xfs_perag_class, name, \
124 unsigned long caller_ip), \ 124 unsigned long caller_ip), \
125 TP_ARGS(mp, agno, refcount, caller_ip)) 125 TP_ARGS(mp, agno, refcount, caller_ip))
126DEFINE_PERAG_REF_EVENT(xfs_perag_get); 126DEFINE_PERAG_REF_EVENT(xfs_perag_get);
127DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim); 127DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
128DEFINE_PERAG_REF_EVENT(xfs_perag_put); 128DEFINE_PERAG_REF_EVENT(xfs_perag_put);
129DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); 129DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
130DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); 130DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
@@ -325,13 +325,12 @@ DEFINE_BUF_EVENT(xfs_buf_lock);
325DEFINE_BUF_EVENT(xfs_buf_lock_done); 325DEFINE_BUF_EVENT(xfs_buf_lock_done);
326DEFINE_BUF_EVENT(xfs_buf_cond_lock); 326DEFINE_BUF_EVENT(xfs_buf_cond_lock);
327DEFINE_BUF_EVENT(xfs_buf_unlock); 327DEFINE_BUF_EVENT(xfs_buf_unlock);
328DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
329DEFINE_BUF_EVENT(xfs_buf_iowait); 328DEFINE_BUF_EVENT(xfs_buf_iowait);
330DEFINE_BUF_EVENT(xfs_buf_iowait_done); 329DEFINE_BUF_EVENT(xfs_buf_iowait_done);
331DEFINE_BUF_EVENT(xfs_buf_delwri_queue); 330DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
332DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); 331DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
333DEFINE_BUF_EVENT(xfs_buf_delwri_split); 332DEFINE_BUF_EVENT(xfs_buf_delwri_split);
334DEFINE_BUF_EVENT(xfs_buf_get_noaddr); 333DEFINE_BUF_EVENT(xfs_buf_get_uncached);
335DEFINE_BUF_EVENT(xfs_bdstrat_shut); 334DEFINE_BUF_EVENT(xfs_bdstrat_shut);
336DEFINE_BUF_EVENT(xfs_buf_item_relse); 335DEFINE_BUF_EVENT(xfs_buf_item_relse);
337DEFINE_BUF_EVENT(xfs_buf_item_iodone); 336DEFINE_BUF_EVENT(xfs_buf_item_iodone);
diff --git a/fs/xfs/linux-2.6/xfs_version.h b/fs/xfs/linux-2.6/xfs_version.h
deleted file mode 100644
index f8d279d7563a..000000000000
--- a/fs/xfs/linux-2.6/xfs_version.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_VERSION_H__
19#define __XFS_VERSION_H__
20
21/*
22 * Dummy file that can contain a timestamp to put into the
23 * XFS init string, to help users keep track of what they're
24 * running
25 */
26
27#define XFS_VERSION_STRING "SGI XFS"
28
29#endif /* __XFS_VERSION_H__ */
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index e1a2f6800e01..faf8e1a83a12 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -463,87 +463,68 @@ xfs_qm_dqtobp(
463 uint flags) 463 uint flags)
464{ 464{
465 xfs_bmbt_irec_t map; 465 xfs_bmbt_irec_t map;
466 int nmaps, error; 466 int nmaps = 1, error;
467 xfs_buf_t *bp; 467 xfs_buf_t *bp;
468 xfs_inode_t *quotip; 468 xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp);
469 xfs_mount_t *mp; 469 xfs_mount_t *mp = dqp->q_mount;
470 xfs_disk_dquot_t *ddq; 470 xfs_disk_dquot_t *ddq;
471 xfs_dqid_t id; 471 xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
472 boolean_t newdquot;
473 xfs_trans_t *tp = (tpp ? *tpp : NULL); 472 xfs_trans_t *tp = (tpp ? *tpp : NULL);
474 473
475 mp = dqp->q_mount; 474 dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
476 id = be32_to_cpu(dqp->q_core.d_id);
477 nmaps = 1;
478 newdquot = B_FALSE;
479 475
480 /* 476 xfs_ilock(quotip, XFS_ILOCK_SHARED);
481 * If we don't know where the dquot lives, find out. 477 if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
482 */
483 if (dqp->q_blkno == (xfs_daddr_t) 0) {
484 /* We use the id as an index */
485 dqp->q_fileoffset = (xfs_fileoff_t)id /
486 mp->m_quotainfo->qi_dqperchunk;
487 nmaps = 1;
488 quotip = XFS_DQ_TO_QIP(dqp);
489 xfs_ilock(quotip, XFS_ILOCK_SHARED);
490 /* 478 /*
491 * Return if this type of quotas is turned off while we didn't 479 * Return if this type of quotas is turned off while we
492 * have an inode lock 480 * didn't have the quota inode lock.
493 */ 481 */
494 if (XFS_IS_THIS_QUOTA_OFF(dqp)) { 482 xfs_iunlock(quotip, XFS_ILOCK_SHARED);
495 xfs_iunlock(quotip, XFS_ILOCK_SHARED); 483 return ESRCH;
496 return (ESRCH); 484 }
497 } 485
486 /*
487 * Find the block map; no allocations yet
488 */
489 error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
490 XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
491 NULL, 0, &map, &nmaps, NULL);
492
493 xfs_iunlock(quotip, XFS_ILOCK_SHARED);
494 if (error)
495 return error;
496
497 ASSERT(nmaps == 1);
498 ASSERT(map.br_blockcount == 1);
499
500 /*
501 * Offset of dquot in the (fixed sized) dquot chunk.
502 */
503 dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
504 sizeof(xfs_dqblk_t);
505
506 ASSERT(map.br_startblock != DELAYSTARTBLOCK);
507 if (map.br_startblock == HOLESTARTBLOCK) {
498 /* 508 /*
499 * Find the block map; no allocations yet 509 * We don't allocate unless we're asked to
500 */ 510 */
501 error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, 511 if (!(flags & XFS_QMOPT_DQALLOC))
502 XFS_DQUOT_CLUSTER_SIZE_FSB, 512 return ENOENT;
503 XFS_BMAPI_METADATA,
504 NULL, 0, &map, &nmaps, NULL);
505 513
506 xfs_iunlock(quotip, XFS_ILOCK_SHARED); 514 ASSERT(tp);
515 error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
516 dqp->q_fileoffset, &bp);
507 if (error) 517 if (error)
508 return (error); 518 return error;
509 ASSERT(nmaps == 1); 519 tp = *tpp;
510 ASSERT(map.br_blockcount == 1); 520 } else {
521 trace_xfs_dqtobp_read(dqp);
511 522
512 /* 523 /*
513 * offset of dquot in the (fixed sized) dquot chunk. 524 * store the blkno etc so that we don't have to do the
525 * mapping all the time
514 */ 526 */
515 dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * 527 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
516 sizeof(xfs_dqblk_t);
517 if (map.br_startblock == HOLESTARTBLOCK) {
518 /*
519 * We don't allocate unless we're asked to
520 */
521 if (!(flags & XFS_QMOPT_DQALLOC))
522 return (ENOENT);
523
524 ASSERT(tp);
525 if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
526 dqp->q_fileoffset, &bp)))
527 return (error);
528 tp = *tpp;
529 newdquot = B_TRUE;
530 } else {
531 /*
532 * store the blkno etc so that we don't have to do the
533 * mapping all the time
534 */
535 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
536 }
537 }
538 ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
539 ASSERT(dqp->q_blkno != HOLESTARTBLOCK);
540
541 /*
542 * Read in the buffer, unless we've just done the allocation
543 * (in which case we already have the buf).
544 */
545 if (!newdquot) {
546 trace_xfs_dqtobp_read(dqp);
547 528
548 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 529 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
549 dqp->q_blkno, 530 dqp->q_blkno,
@@ -552,13 +533,14 @@ xfs_qm_dqtobp(
552 if (error || !bp) 533 if (error || !bp)
553 return XFS_ERROR(error); 534 return XFS_ERROR(error);
554 } 535 }
536
555 ASSERT(XFS_BUF_ISBUSY(bp)); 537 ASSERT(XFS_BUF_ISBUSY(bp));
556 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); 538 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
557 539
558 /* 540 /*
559 * calculate the location of the dquot inside the buffer. 541 * calculate the location of the dquot inside the buffer.
560 */ 542 */
561 ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset); 543 ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
562 544
563 /* 545 /*
564 * A simple sanity check in case we got a corrupted dquot... 546 * A simple sanity check in case we got a corrupted dquot...
@@ -1176,18 +1158,18 @@ xfs_qm_dqflush(
1176 xfs_dquot_t *dqp, 1158 xfs_dquot_t *dqp,
1177 uint flags) 1159 uint flags)
1178{ 1160{
1179 xfs_mount_t *mp; 1161 struct xfs_mount *mp = dqp->q_mount;
1180 xfs_buf_t *bp; 1162 struct xfs_buf *bp;
1181 xfs_disk_dquot_t *ddqp; 1163 struct xfs_disk_dquot *ddqp;
1182 int error; 1164 int error;
1183 1165
1184 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1166 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1185 ASSERT(!completion_done(&dqp->q_flush)); 1167 ASSERT(!completion_done(&dqp->q_flush));
1168
1186 trace_xfs_dqflush(dqp); 1169 trace_xfs_dqflush(dqp);
1187 1170
1188 /* 1171 /*
1189 * If not dirty, or it's pinned and we are not supposed to 1172 * If not dirty, or it's pinned and we are not supposed to block, nada.
1190 * block, nada.
1191 */ 1173 */
1192 if (!XFS_DQ_IS_DIRTY(dqp) || 1174 if (!XFS_DQ_IS_DIRTY(dqp) ||
1193 (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { 1175 (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
@@ -1201,40 +1183,46 @@ xfs_qm_dqflush(
1201 * down forcibly. If that's the case we must not write this dquot 1183 * down forcibly. If that's the case we must not write this dquot
1202 * to disk, because the log record didn't make it to disk! 1184 * to disk, because the log record didn't make it to disk!
1203 */ 1185 */
1204 if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { 1186 if (XFS_FORCED_SHUTDOWN(mp)) {
1205 dqp->dq_flags &= ~(XFS_DQ_DIRTY); 1187 dqp->dq_flags &= ~XFS_DQ_DIRTY;
1206 xfs_dqfunlock(dqp); 1188 xfs_dqfunlock(dqp);
1207 return XFS_ERROR(EIO); 1189 return XFS_ERROR(EIO);
1208 } 1190 }
1209 1191
1210 /* 1192 /*
1211 * Get the buffer containing the on-disk dquot 1193 * Get the buffer containing the on-disk dquot
1212 * We don't need a transaction envelope because we know that the
1213 * the ondisk-dquot has already been allocated for.
1214 */ 1194 */
1215 if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { 1195 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1196 mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1197 if (error) {
1216 ASSERT(error != ENOENT); 1198 ASSERT(error != ENOENT);
1217 /*
1218 * Quotas could have gotten turned off (ESRCH)
1219 */
1220 xfs_dqfunlock(dqp); 1199 xfs_dqfunlock(dqp);
1221 return (error); 1200 return error;
1222 } 1201 }
1223 1202
1224 if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 1203 /*
1225 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { 1204 * Calculate the location of the dquot inside the buffer.
1226 xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE); 1205 */
1206 ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
1207
1208 /*
1209 * A simple sanity check in case we got a corrupted dquot..
1210 */
1211 if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1212 XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
1213 xfs_buf_relse(bp);
1214 xfs_dqfunlock(dqp);
1215 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1227 return XFS_ERROR(EIO); 1216 return XFS_ERROR(EIO);
1228 } 1217 }
1229 1218
1230 /* This is the only portion of data that needs to persist */ 1219 /* This is the only portion of data that needs to persist */
1231 memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); 1220 memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1232 1221
1233 /* 1222 /*
1234 * Clear the dirty field and remember the flush lsn for later use. 1223 * Clear the dirty field and remember the flush lsn for later use.
1235 */ 1224 */
1236 dqp->dq_flags &= ~(XFS_DQ_DIRTY); 1225 dqp->dq_flags &= ~XFS_DQ_DIRTY;
1237 mp = dqp->q_mount;
1238 1226
1239 xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, 1227 xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1240 &dqp->q_logitem.qli_item.li_lsn); 1228 &dqp->q_logitem.qli_item.li_lsn);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 9a92407109a1..f8e854b4fde8 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -55,8 +55,6 @@ uint ndquot;
55kmem_zone_t *qm_dqzone; 55kmem_zone_t *qm_dqzone;
56kmem_zone_t *qm_dqtrxzone; 56kmem_zone_t *qm_dqtrxzone;
57 57
58static cred_t xfs_zerocr;
59
60STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); 58STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
61STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); 59STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
62 60
@@ -837,7 +835,7 @@ xfs_qm_dqattach_locked(
837 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, 835 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
838 flags & XFS_QMOPT_DQALLOC, 836 flags & XFS_QMOPT_DQALLOC,
839 ip->i_udquot, &ip->i_gdquot) : 837 ip->i_udquot, &ip->i_gdquot) :
840 xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, 838 xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
841 flags & XFS_QMOPT_DQALLOC, 839 flags & XFS_QMOPT_DQALLOC,
842 ip->i_udquot, &ip->i_gdquot); 840 ip->i_udquot, &ip->i_gdquot);
843 /* 841 /*
@@ -1199,87 +1197,6 @@ xfs_qm_list_destroy(
1199 mutex_destroy(&(list->qh_lock)); 1197 mutex_destroy(&(list->qh_lock));
1200} 1198}
1201 1199
1202
1203/*
1204 * Stripped down version of dqattach. This doesn't attach, or even look at the
1205 * dquots attached to the inode. The rationale is that there won't be any
1206 * attached at the time this is called from quotacheck.
1207 */
1208STATIC int
1209xfs_qm_dqget_noattach(
1210 xfs_inode_t *ip,
1211 xfs_dquot_t **O_udqpp,
1212 xfs_dquot_t **O_gdqpp)
1213{
1214 int error;
1215 xfs_mount_t *mp;
1216 xfs_dquot_t *udqp, *gdqp;
1217
1218 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1219 mp = ip->i_mount;
1220 udqp = NULL;
1221 gdqp = NULL;
1222
1223 if (XFS_IS_UQUOTA_ON(mp)) {
1224 ASSERT(ip->i_udquot == NULL);
1225 /*
1226 * We want the dquot allocated if it doesn't exist.
1227 */
1228 if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
1229 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
1230 &udqp))) {
1231 /*
1232 * Shouldn't be able to turn off quotas here.
1233 */
1234 ASSERT(error != ESRCH);
1235 ASSERT(error != ENOENT);
1236 return error;
1237 }
1238 ASSERT(udqp);
1239 }
1240
1241 if (XFS_IS_OQUOTA_ON(mp)) {
1242 ASSERT(ip->i_gdquot == NULL);
1243 if (udqp)
1244 xfs_dqunlock(udqp);
1245 error = XFS_IS_GQUOTA_ON(mp) ?
1246 xfs_qm_dqget(mp, ip,
1247 ip->i_d.di_gid, XFS_DQ_GROUP,
1248 XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
1249 &gdqp) :
1250 xfs_qm_dqget(mp, ip,
1251 ip->i_d.di_projid, XFS_DQ_PROJ,
1252 XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
1253 &gdqp);
1254 if (error) {
1255 if (udqp)
1256 xfs_qm_dqrele(udqp);
1257 ASSERT(error != ESRCH);
1258 ASSERT(error != ENOENT);
1259 return error;
1260 }
1261 ASSERT(gdqp);
1262
1263 /* Reacquire the locks in the right order */
1264 if (udqp) {
1265 if (! xfs_qm_dqlock_nowait(udqp)) {
1266 xfs_dqunlock(gdqp);
1267 xfs_dqlock(udqp);
1268 xfs_dqlock(gdqp);
1269 }
1270 }
1271 }
1272
1273 *O_udqpp = udqp;
1274 *O_gdqpp = gdqp;
1275
1276#ifdef QUOTADEBUG
1277 if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
1278 if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
1279#endif
1280 return 0;
1281}
1282
1283/* 1200/*
1284 * Create an inode and return with a reference already taken, but unlocked 1201 * Create an inode and return with a reference already taken, but unlocked
1285 * This is how we create quota inodes 1202 * This is how we create quota inodes
@@ -1305,8 +1222,8 @@ xfs_qm_qino_alloc(
1305 return error; 1222 return error;
1306 } 1223 }
1307 1224
1308 if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 1225 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
1309 &xfs_zerocr, 0, 1, ip, &committed))) { 1226 if (error) {
1310 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | 1227 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
1311 XFS_TRANS_ABORT); 1228 XFS_TRANS_ABORT);
1312 return error; 1229 return error;
@@ -1516,7 +1433,7 @@ xfs_qm_dqiterate(
1516 rablkcnt = map[i+1].br_blockcount; 1433 rablkcnt = map[i+1].br_blockcount;
1517 rablkno = map[i+1].br_startblock; 1434 rablkno = map[i+1].br_startblock;
1518 while (rablkcnt--) { 1435 while (rablkcnt--) {
1519 xfs_baread(mp->m_ddev_targp, 1436 xfs_buf_readahead(mp->m_ddev_targp,
1520 XFS_FSB_TO_DADDR(mp, rablkno), 1437 XFS_FSB_TO_DADDR(mp, rablkno),
1521 mp->m_quotainfo->qi_dqchunklen); 1438 mp->m_quotainfo->qi_dqchunklen);
1522 rablkno++; 1439 rablkno++;
@@ -1546,18 +1463,34 @@ xfs_qm_dqiterate(
1546 1463
1547/* 1464/*
1548 * Called by dqusage_adjust in doing a quotacheck. 1465 * Called by dqusage_adjust in doing a quotacheck.
1549 * Given the inode, and a dquot (either USR or GRP, doesn't matter), 1466 *
1550 * this updates its incore copy as well as the buffer copy. This is 1467 * Given the inode, and a dquot id this updates both the incore dqout as well
1551 * so that once the quotacheck is done, we can just log all the buffers, 1468 * as the buffer copy. This is so that once the quotacheck is done, we can
1552 * as opposed to logging numerous updates to individual dquots. 1469 * just log all the buffers, as opposed to logging numerous updates to
1470 * individual dquots.
1553 */ 1471 */
1554STATIC void 1472STATIC int
1555xfs_qm_quotacheck_dqadjust( 1473xfs_qm_quotacheck_dqadjust(
1556 xfs_dquot_t *dqp, 1474 struct xfs_inode *ip,
1475 xfs_dqid_t id,
1476 uint type,
1557 xfs_qcnt_t nblks, 1477 xfs_qcnt_t nblks,
1558 xfs_qcnt_t rtblks) 1478 xfs_qcnt_t rtblks)
1559{ 1479{
1560 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1480 struct xfs_mount *mp = ip->i_mount;
1481 struct xfs_dquot *dqp;
1482 int error;
1483
1484 error = xfs_qm_dqget(mp, ip, id, type,
1485 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1486 if (error) {
1487 /*
1488 * Shouldn't be able to turn off quotas here.
1489 */
1490 ASSERT(error != ESRCH);
1491 ASSERT(error != ENOENT);
1492 return error;
1493 }
1561 1494
1562 trace_xfs_dqadjust(dqp); 1495 trace_xfs_dqadjust(dqp);
1563 1496
@@ -1582,11 +1515,13 @@ xfs_qm_quotacheck_dqadjust(
1582 * There are no timers for the default values set in the root dquot. 1515 * There are no timers for the default values set in the root dquot.
1583 */ 1516 */
1584 if (dqp->q_core.d_id) { 1517 if (dqp->q_core.d_id) {
1585 xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core); 1518 xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
1586 xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); 1519 xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1587 } 1520 }
1588 1521
1589 dqp->dq_flags |= XFS_DQ_DIRTY; 1522 dqp->dq_flags |= XFS_DQ_DIRTY;
1523 xfs_qm_dqput(dqp);
1524 return 0;
1590} 1525}
1591 1526
1592STATIC int 1527STATIC int
@@ -1629,8 +1564,7 @@ xfs_qm_dqusage_adjust(
1629 int *res) /* result code value */ 1564 int *res) /* result code value */
1630{ 1565{
1631 xfs_inode_t *ip; 1566 xfs_inode_t *ip;
1632 xfs_dquot_t *udqp, *gdqp; 1567 xfs_qcnt_t nblks, rtblks = 0;
1633 xfs_qcnt_t nblks, rtblks;
1634 int error; 1568 int error;
1635 1569
1636 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 1570 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -1650,51 +1584,24 @@ xfs_qm_dqusage_adjust(
1650 * the case in all other instances. It's OK that we do this because 1584 * the case in all other instances. It's OK that we do this because
1651 * quotacheck is done only at mount time. 1585 * quotacheck is done only at mount time.
1652 */ 1586 */
1653 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { 1587 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1588 if (error) {
1654 *res = BULKSTAT_RV_NOTHING; 1589 *res = BULKSTAT_RV_NOTHING;
1655 return error; 1590 return error;
1656 } 1591 }
1657 1592
1658 /* 1593 ASSERT(ip->i_delayed_blks == 0);
1659 * Obtain the locked dquots. In case of an error (eg. allocation
1660 * fails for ENOSPC), we return the negative of the error number
1661 * to bulkstat, so that it can get propagated to quotacheck() and
1662 * making us disable quotas for the file system.
1663 */
1664 if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
1665 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1666 IRELE(ip);
1667 *res = BULKSTAT_RV_GIVEUP;
1668 return error;
1669 }
1670 1594
1671 rtblks = 0; 1595 if (XFS_IS_REALTIME_INODE(ip)) {
1672 if (! XFS_IS_REALTIME_INODE(ip)) {
1673 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
1674 } else {
1675 /* 1596 /*
1676 * Walk thru the extent list and count the realtime blocks. 1597 * Walk thru the extent list and count the realtime blocks.
1677 */ 1598 */
1678 if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { 1599 error = xfs_qm_get_rtblks(ip, &rtblks);
1679 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1600 if (error)
1680 IRELE(ip); 1601 goto error0;
1681 if (udqp)
1682 xfs_qm_dqput(udqp);
1683 if (gdqp)
1684 xfs_qm_dqput(gdqp);
1685 *res = BULKSTAT_RV_GIVEUP;
1686 return error;
1687 }
1688 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1689 } 1602 }
1690 ASSERT(ip->i_delayed_blks == 0);
1691 1603
1692 /* 1604 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1693 * We can't release the inode while holding its dquot locks.
1694 * The inode can go into inactive and might try to acquire the dquotlocks.
1695 * So, just unlock here and do a vn_rele at the end.
1696 */
1697 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1698 1605
1699 /* 1606 /*
1700 * Add the (disk blocks and inode) resources occupied by this 1607 * Add the (disk blocks and inode) resources occupied by this
@@ -1709,26 +1616,36 @@ xfs_qm_dqusage_adjust(
1709 * and quotaoffs don't race. (Quotachecks happen at mount time only). 1616 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1710 */ 1617 */
1711 if (XFS_IS_UQUOTA_ON(mp)) { 1618 if (XFS_IS_UQUOTA_ON(mp)) {
1712 ASSERT(udqp); 1619 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1713 xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); 1620 XFS_DQ_USER, nblks, rtblks);
1714 xfs_qm_dqput(udqp); 1621 if (error)
1622 goto error0;
1715 } 1623 }
1716 if (XFS_IS_OQUOTA_ON(mp)) { 1624
1717 ASSERT(gdqp); 1625 if (XFS_IS_GQUOTA_ON(mp)) {
1718 xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks); 1626 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1719 xfs_qm_dqput(gdqp); 1627 XFS_DQ_GROUP, nblks, rtblks);
1628 if (error)
1629 goto error0;
1720 } 1630 }
1721 /*
1722 * Now release the inode. This will send it to 'inactive', and
1723 * possibly even free blocks.
1724 */
1725 IRELE(ip);
1726 1631
1727 /* 1632 if (XFS_IS_PQUOTA_ON(mp)) {
1728 * Goto next inode. 1633 error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1729 */ 1634 XFS_DQ_PROJ, nblks, rtblks);
1635 if (error)
1636 goto error0;
1637 }
1638
1639 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1640 IRELE(ip);
1730 *res = BULKSTAT_RV_DIDONE; 1641 *res = BULKSTAT_RV_DIDONE;
1731 return 0; 1642 return 0;
1643
1644error0:
1645 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1646 IRELE(ip);
1647 *res = BULKSTAT_RV_GIVEUP;
1648 return error;
1732} 1649}
1733 1650
1734/* 1651/*
@@ -2224,7 +2141,7 @@ xfs_qm_write_sb_changes(
2224 2141
2225 2142
2226/* 2143/*
2227 * Given an inode, a uid and gid (from cred_t) make sure that we have 2144 * Given an inode, a uid, gid and prid make sure that we have
2228 * allocated relevant dquot(s) on disk, and that we won't exceed inode 2145 * allocated relevant dquot(s) on disk, and that we won't exceed inode
2229 * quotas by creating this file. 2146 * quotas by creating this file.
2230 * This also attaches dquot(s) to the given inode after locking it, 2147 * This also attaches dquot(s) to the given inode after locking it,
@@ -2332,7 +2249,7 @@ xfs_qm_vop_dqalloc(
2332 xfs_dqunlock(gq); 2249 xfs_dqunlock(gq);
2333 } 2250 }
2334 } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { 2251 } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
2335 if (ip->i_d.di_projid != prid) { 2252 if (xfs_get_projid(ip) != prid) {
2336 xfs_iunlock(ip, lockflags); 2253 xfs_iunlock(ip, lockflags);
2337 if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, 2254 if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
2338 XFS_DQ_PROJ, 2255 XFS_DQ_PROJ,
@@ -2454,7 +2371,7 @@ xfs_qm_vop_chown_reserve(
2454 } 2371 }
2455 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { 2372 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
2456 if (XFS_IS_PQUOTA_ON(ip->i_mount) && 2373 if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
2457 ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id)) 2374 xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
2458 prjflags = XFS_QMOPT_ENOSPC; 2375 prjflags = XFS_QMOPT_ENOSPC;
2459 2376
2460 if (prjflags || 2377 if (prjflags ||
@@ -2558,7 +2475,7 @@ xfs_qm_vop_create_dqattach(
2558 ip->i_gdquot = gdqp; 2475 ip->i_gdquot = gdqp;
2559 ASSERT(XFS_IS_OQUOTA_ON(mp)); 2476 ASSERT(XFS_IS_OQUOTA_ON(mp));
2560 ASSERT((XFS_IS_GQUOTA_ON(mp) ? 2477 ASSERT((XFS_IS_GQUOTA_ON(mp) ?
2561 ip->i_d.di_gid : ip->i_d.di_projid) == 2478 ip->i_d.di_gid : xfs_get_projid(ip)) ==
2562 be32_to_cpu(gdqp->q_core.d_id)); 2479 be32_to_cpu(gdqp->q_core.d_id));
2563 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2480 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2564 } 2481 }
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index bea02d786c5d..45b5cb1788ab 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -81,7 +81,7 @@ xfs_qm_statvfs(
81 xfs_mount_t *mp = ip->i_mount; 81 xfs_mount_t *mp = ip->i_mount;
82 xfs_dquot_t *dqp; 82 xfs_dquot_t *dqp;
83 83
84 if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) { 84 if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
85 xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); 85 xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
86 xfs_qm_dqput(dqp); 86 xfs_qm_dqput(dqp);
87 } 87 }
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 45e5849df238..bdebc183223e 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -276,7 +276,7 @@ xfs_qm_scall_trunc_qfile(
276 goto out_unlock; 276 goto out_unlock;
277 } 277 }
278 278
279 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 279 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
280 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 280 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
281 281
282out_unlock: 282out_unlock:
@@ -875,21 +875,14 @@ xfs_dqrele_inode(
875 struct xfs_perag *pag, 875 struct xfs_perag *pag,
876 int flags) 876 int flags)
877{ 877{
878 int error;
879
880 /* skip quota inodes */ 878 /* skip quota inodes */
881 if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || 879 if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
882 ip == ip->i_mount->m_quotainfo->qi_gquotaip) { 880 ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
883 ASSERT(ip->i_udquot == NULL); 881 ASSERT(ip->i_udquot == NULL);
884 ASSERT(ip->i_gdquot == NULL); 882 ASSERT(ip->i_gdquot == NULL);
885 read_unlock(&pag->pag_ici_lock);
886 return 0; 883 return 0;
887 } 884 }
888 885
889 error = xfs_sync_inode_valid(ip, pag);
890 if (error)
891 return error;
892
893 xfs_ilock(ip, XFS_ILOCK_EXCL); 886 xfs_ilock(ip, XFS_ILOCK_EXCL);
894 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { 887 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
895 xfs_qm_dqrele(ip->i_udquot); 888 xfs_qm_dqrele(ip->i_udquot);
@@ -900,8 +893,6 @@ xfs_dqrele_inode(
900 ip->i_gdquot = NULL; 893 ip->i_gdquot = NULL;
901 } 894 }
902 xfs_iunlock(ip, XFS_ILOCK_EXCL); 895 xfs_iunlock(ip, XFS_ILOCK_EXCL);
903
904 IRELE(ip);
905 return 0; 896 return 0;
906} 897}
907 898
@@ -918,8 +909,7 @@ xfs_qm_dqrele_all_inodes(
918 uint flags) 909 uint flags)
919{ 910{
920 ASSERT(mp->m_quotainfo); 911 ASSERT(mp->m_quotainfo);
921 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, 912 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
922 XFS_ICI_NO_TAG, 0, NULL);
923} 913}
924 914
925/*------------------------------------------------------------------------*/ 915/*------------------------------------------------------------------------*/
@@ -1175,7 +1165,7 @@ xfs_qm_internalqcheck_adjust(
1175 } 1165 }
1176 xfs_qm_internalqcheck_get_dquots(mp, 1166 xfs_qm_internalqcheck_get_dquots(mp,
1177 (xfs_dqid_t) ip->i_d.di_uid, 1167 (xfs_dqid_t) ip->i_d.di_uid,
1178 (xfs_dqid_t) ip->i_d.di_projid, 1168 (xfs_dqid_t) xfs_get_projid(ip),
1179 (xfs_dqid_t) ip->i_d.di_gid, 1169 (xfs_dqid_t) ip->i_d.di_gid,
1180 &ud, &gd); 1170 &ud, &gd);
1181 if (XFS_IS_UQUOTA_ON(mp)) { 1171 if (XFS_IS_UQUOTA_ON(mp)) {
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 4917d4eed4ed..63c7a1a6c022 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -230,6 +230,15 @@ typedef struct xfs_perag {
230 rwlock_t pag_ici_lock; /* incore inode lock */ 230 rwlock_t pag_ici_lock; /* incore inode lock */
231 struct radix_tree_root pag_ici_root; /* incore inode cache root */ 231 struct radix_tree_root pag_ici_root; /* incore inode cache root */
232 int pag_ici_reclaimable; /* reclaimable inodes */ 232 int pag_ici_reclaimable; /* reclaimable inodes */
233 struct mutex pag_ici_reclaim_lock; /* serialisation point */
234 unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
235
236 /* buffer cache index */
237 spinlock_t pag_buf_lock; /* lock for pag_buf_tree */
238 struct rb_root pag_buf_tree; /* ordered tree of active buffers */
239
240 /* for rcu-safe freeing */
241 struct rcu_head rcu_head;
233#endif 242#endif
234 int pagb_count; /* pagb slots in use */ 243 int pagb_count; /* pagb slots in use */
235} xfs_perag_t; 244} xfs_perag_t;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index af168faccc7a..112abc439ca5 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -675,7 +675,7 @@ xfs_alloc_ag_vextent_near(
675 xfs_agblock_t gtbnoa; /* aligned ... */ 675 xfs_agblock_t gtbnoa; /* aligned ... */
676 xfs_extlen_t gtdiff; /* difference to right side entry */ 676 xfs_extlen_t gtdiff; /* difference to right side entry */
677 xfs_extlen_t gtlen; /* length of right side entry */ 677 xfs_extlen_t gtlen; /* length of right side entry */
678 xfs_extlen_t gtlena; /* aligned ... */ 678 xfs_extlen_t gtlena = 0; /* aligned ... */
679 xfs_agblock_t gtnew; /* useful start bno of right side */ 679 xfs_agblock_t gtnew; /* useful start bno of right side */
680 int error; /* error code */ 680 int error; /* error code */
681 int i; /* result code, temporary */ 681 int i; /* result code, temporary */
@@ -684,7 +684,7 @@ xfs_alloc_ag_vextent_near(
684 xfs_agblock_t ltbnoa; /* aligned ... */ 684 xfs_agblock_t ltbnoa; /* aligned ... */
685 xfs_extlen_t ltdiff; /* difference to left side entry */ 685 xfs_extlen_t ltdiff; /* difference to left side entry */
686 xfs_extlen_t ltlen; /* length of left side entry */ 686 xfs_extlen_t ltlen; /* length of left side entry */
687 xfs_extlen_t ltlena; /* aligned ... */ 687 xfs_extlen_t ltlena = 0; /* aligned ... */
688 xfs_agblock_t ltnew; /* useful start bno of left side */ 688 xfs_agblock_t ltnew; /* useful start bno of left side */
689 xfs_extlen_t rlen; /* length of returned extent */ 689 xfs_extlen_t rlen; /* length of returned extent */
690#if defined(DEBUG) && defined(__KERNEL__) 690#if defined(DEBUG) && defined(__KERNEL__)
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 97f7328967fd..3916925e2584 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -280,38 +280,6 @@ xfs_allocbt_key_diff(
280 return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; 280 return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
281} 281}
282 282
283STATIC int
284xfs_allocbt_kill_root(
285 struct xfs_btree_cur *cur,
286 struct xfs_buf *bp,
287 int level,
288 union xfs_btree_ptr *newroot)
289{
290 int error;
291
292 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
293 XFS_BTREE_STATS_INC(cur, killroot);
294
295 /*
296 * Update the root pointer, decreasing the level by 1 and then
297 * free the old root.
298 */
299 xfs_allocbt_set_root(cur, newroot, -1);
300 error = xfs_allocbt_free_block(cur, bp);
301 if (error) {
302 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
303 return error;
304 }
305
306 XFS_BTREE_STATS_INC(cur, free);
307
308 xfs_btree_setbuf(cur, level, NULL);
309 cur->bc_nlevels--;
310
311 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
312 return 0;
313}
314
315#ifdef DEBUG 283#ifdef DEBUG
316STATIC int 284STATIC int
317xfs_allocbt_keys_inorder( 285xfs_allocbt_keys_inorder(
@@ -423,7 +391,6 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
423 391
424 .dup_cursor = xfs_allocbt_dup_cursor, 392 .dup_cursor = xfs_allocbt_dup_cursor,
425 .set_root = xfs_allocbt_set_root, 393 .set_root = xfs_allocbt_set_root,
426 .kill_root = xfs_allocbt_kill_root,
427 .alloc_block = xfs_allocbt_alloc_block, 394 .alloc_block = xfs_allocbt_alloc_block,
428 .free_block = xfs_allocbt_free_block, 395 .free_block = xfs_allocbt_free_block,
429 .update_lastrec = xfs_allocbt_update_lastrec, 396 .update_lastrec = xfs_allocbt_update_lastrec,
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index c2568242a901..c86375378810 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -355,16 +355,15 @@ xfs_attr_set_int(
355 if (mp->m_flags & XFS_MOUNT_WSYNC) { 355 if (mp->m_flags & XFS_MOUNT_WSYNC) {
356 xfs_trans_set_sync(args.trans); 356 xfs_trans_set_sync(args.trans);
357 } 357 }
358
359 if (!error && (flags & ATTR_KERNOTIME) == 0) {
360 xfs_trans_ichgtime(args.trans, dp,
361 XFS_ICHGTIME_CHG);
362 }
358 err2 = xfs_trans_commit(args.trans, 363 err2 = xfs_trans_commit(args.trans,
359 XFS_TRANS_RELEASE_LOG_RES); 364 XFS_TRANS_RELEASE_LOG_RES);
360 xfs_iunlock(dp, XFS_ILOCK_EXCL); 365 xfs_iunlock(dp, XFS_ILOCK_EXCL);
361 366
362 /*
363 * Hit the inode change time.
364 */
365 if (!error && (flags & ATTR_KERNOTIME) == 0) {
366 xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
367 }
368 return(error == 0 ? err2 : error); 367 return(error == 0 ? err2 : error);
369 } 368 }
370 369
@@ -420,6 +419,9 @@ xfs_attr_set_int(
420 xfs_trans_set_sync(args.trans); 419 xfs_trans_set_sync(args.trans);
421 } 420 }
422 421
422 if ((flags & ATTR_KERNOTIME) == 0)
423 xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
424
423 /* 425 /*
424 * Commit the last in the sequence of transactions. 426 * Commit the last in the sequence of transactions.
425 */ 427 */
@@ -427,13 +429,6 @@ xfs_attr_set_int(
427 error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); 429 error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
428 xfs_iunlock(dp, XFS_ILOCK_EXCL); 430 xfs_iunlock(dp, XFS_ILOCK_EXCL);
429 431
430 /*
431 * Hit the inode change time.
432 */
433 if (!error && (flags & ATTR_KERNOTIME) == 0) {
434 xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
435 }
436
437 return(error); 432 return(error);
438 433
439out: 434out:
@@ -567,6 +562,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
567 xfs_trans_set_sync(args.trans); 562 xfs_trans_set_sync(args.trans);
568 } 563 }
569 564
565 if ((flags & ATTR_KERNOTIME) == 0)
566 xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
567
570 /* 568 /*
571 * Commit the last in the sequence of transactions. 569 * Commit the last in the sequence of transactions.
572 */ 570 */
@@ -574,13 +572,6 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
574 error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); 572 error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
575 xfs_iunlock(dp, XFS_ILOCK_EXCL); 573 xfs_iunlock(dp, XFS_ILOCK_EXCL);
576 574
577 /*
578 * Hit the inode change time.
579 */
580 if (!error && (flags & ATTR_KERNOTIME) == 0) {
581 xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
582 }
583
584 return(error); 575 return(error);
585 576
586out: 577out:
@@ -1995,7 +1986,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
1995 1986
1996 tmp = (valuelen < XFS_BUF_SIZE(bp)) 1987 tmp = (valuelen < XFS_BUF_SIZE(bp))
1997 ? valuelen : XFS_BUF_SIZE(bp); 1988 ? valuelen : XFS_BUF_SIZE(bp);
1998 xfs_biomove(bp, 0, tmp, dst, XBF_READ); 1989 xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ);
1999 xfs_buf_relse(bp); 1990 xfs_buf_relse(bp);
2000 dst += tmp; 1991 dst += tmp;
2001 valuelen -= tmp; 1992 valuelen -= tmp;
@@ -2125,9 +2116,9 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
2125 2116
2126 tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : 2117 tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
2127 XFS_BUF_SIZE(bp); 2118 XFS_BUF_SIZE(bp);
2128 xfs_biomove(bp, 0, tmp, src, XBF_WRITE); 2119 xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE);
2129 if (tmp < XFS_BUF_SIZE(bp)) 2120 if (tmp < XFS_BUF_SIZE(bp))
2130 xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); 2121 xfs_buf_zero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
2131 if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ 2122 if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
2132 return (error); 2123 return (error);
2133 } 2124 }
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index f90dadd5a968..8abd12e32e13 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -614,7 +614,7 @@ xfs_bmap_add_extent(
614 nblks += cur->bc_private.b.allocated; 614 nblks += cur->bc_private.b.allocated;
615 ASSERT(nblks <= da_old); 615 ASSERT(nblks <= da_old);
616 if (nblks < da_old) 616 if (nblks < da_old)
617 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, 617 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
618 (int64_t)(da_old - nblks), rsvd); 618 (int64_t)(da_old - nblks), rsvd);
619 } 619 }
620 /* 620 /*
@@ -1079,7 +1079,8 @@ xfs_bmap_add_extent_delay_real(
1079 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - 1079 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
1080 (cur ? cur->bc_private.b.allocated : 0)); 1080 (cur ? cur->bc_private.b.allocated : 0));
1081 if (diff > 0 && 1081 if (diff > 0 &&
1082 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) { 1082 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
1083 -((int64_t)diff), rsvd)) {
1083 /* 1084 /*
1084 * Ick gross gag me with a spoon. 1085 * Ick gross gag me with a spoon.
1085 */ 1086 */
@@ -1089,16 +1090,18 @@ xfs_bmap_add_extent_delay_real(
1089 temp--; 1090 temp--;
1090 diff--; 1091 diff--;
1091 if (!diff || 1092 if (!diff ||
1092 !xfs_mod_incore_sb(ip->i_mount, 1093 !xfs_icsb_modify_counters(ip->i_mount,
1093 XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) 1094 XFS_SBS_FDBLOCKS,
1095 -((int64_t)diff), rsvd))
1094 break; 1096 break;
1095 } 1097 }
1096 if (temp2) { 1098 if (temp2) {
1097 temp2--; 1099 temp2--;
1098 diff--; 1100 diff--;
1099 if (!diff || 1101 if (!diff ||
1100 !xfs_mod_incore_sb(ip->i_mount, 1102 !xfs_icsb_modify_counters(ip->i_mount,
1101 XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) 1103 XFS_SBS_FDBLOCKS,
1104 -((int64_t)diff), rsvd))
1102 break; 1105 break;
1103 } 1106 }
1104 } 1107 }
@@ -1766,7 +1769,7 @@ xfs_bmap_add_extent_hole_delay(
1766 } 1769 }
1767 if (oldlen != newlen) { 1770 if (oldlen != newlen) {
1768 ASSERT(oldlen > newlen); 1771 ASSERT(oldlen > newlen);
1769 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, 1772 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
1770 (int64_t)(oldlen - newlen), rsvd); 1773 (int64_t)(oldlen - newlen), rsvd);
1771 /* 1774 /*
1772 * Nothing to do for disk quota accounting here. 1775 * Nothing to do for disk quota accounting here.
@@ -3111,9 +3114,10 @@ xfs_bmap_del_extent(
3111 * Nothing to do for disk quota accounting here. 3114 * Nothing to do for disk quota accounting here.
3112 */ 3115 */
3113 ASSERT(da_old >= da_new); 3116 ASSERT(da_old >= da_new);
3114 if (da_old > da_new) 3117 if (da_old > da_new) {
3115 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), 3118 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
3116 rsvd); 3119 (int64_t)(da_old - da_new), rsvd);
3120 }
3117done: 3121done:
3118 *logflagsp = flags; 3122 *logflagsp = flags;
3119 return error; 3123 return error;
@@ -4526,13 +4530,13 @@ xfs_bmapi(
4526 -((int64_t)extsz), (flags & 4530 -((int64_t)extsz), (flags &
4527 XFS_BMAPI_RSVBLOCKS)); 4531 XFS_BMAPI_RSVBLOCKS));
4528 } else { 4532 } else {
4529 error = xfs_mod_incore_sb(mp, 4533 error = xfs_icsb_modify_counters(mp,
4530 XFS_SBS_FDBLOCKS, 4534 XFS_SBS_FDBLOCKS,
4531 -((int64_t)alen), (flags & 4535 -((int64_t)alen), (flags &
4532 XFS_BMAPI_RSVBLOCKS)); 4536 XFS_BMAPI_RSVBLOCKS));
4533 } 4537 }
4534 if (!error) { 4538 if (!error) {
4535 error = xfs_mod_incore_sb(mp, 4539 error = xfs_icsb_modify_counters(mp,
4536 XFS_SBS_FDBLOCKS, 4540 XFS_SBS_FDBLOCKS,
4537 -((int64_t)indlen), (flags & 4541 -((int64_t)indlen), (flags &
4538 XFS_BMAPI_RSVBLOCKS)); 4542 XFS_BMAPI_RSVBLOCKS));
@@ -4542,7 +4546,7 @@ xfs_bmapi(
4542 (int64_t)extsz, (flags & 4546 (int64_t)extsz, (flags &
4543 XFS_BMAPI_RSVBLOCKS)); 4547 XFS_BMAPI_RSVBLOCKS));
4544 else if (error) 4548 else if (error)
4545 xfs_mod_incore_sb(mp, 4549 xfs_icsb_modify_counters(mp,
4546 XFS_SBS_FDBLOCKS, 4550 XFS_SBS_FDBLOCKS,
4547 (int64_t)alen, (flags & 4551 (int64_t)alen, (flags &
4548 XFS_BMAPI_RSVBLOCKS)); 4552 XFS_BMAPI_RSVBLOCKS));
@@ -4744,8 +4748,12 @@ xfs_bmapi(
4744 * Check if writing previously allocated but 4748 * Check if writing previously allocated but
4745 * unwritten extents. 4749 * unwritten extents.
4746 */ 4750 */
4747 if (wr && mval->br_state == XFS_EXT_UNWRITTEN && 4751 if (wr &&
4748 ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) { 4752 ((mval->br_state == XFS_EXT_UNWRITTEN &&
4753 ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) ||
4754 (mval->br_state == XFS_EXT_NORM &&
4755 ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT)) ==
4756 (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT))))) {
4749 /* 4757 /*
4750 * Modify (by adding) the state flag, if writing. 4758 * Modify (by adding) the state flag, if writing.
4751 */ 4759 */
@@ -4757,7 +4765,9 @@ xfs_bmapi(
4757 *firstblock; 4765 *firstblock;
4758 cur->bc_private.b.flist = flist; 4766 cur->bc_private.b.flist = flist;
4759 } 4767 }
4760 mval->br_state = XFS_EXT_NORM; 4768 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4769 ? XFS_EXT_NORM
4770 : XFS_EXT_UNWRITTEN;
4761 error = xfs_bmap_add_extent(ip, lastx, &cur, mval, 4771 error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
4762 firstblock, flist, &tmp_logflags, 4772 firstblock, flist, &tmp_logflags,
4763 whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); 4773 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
@@ -5200,7 +5210,7 @@ xfs_bunmapi(
5200 ip, -((long)del.br_blockcount), 0, 5210 ip, -((long)del.br_blockcount), 0,
5201 XFS_QMOPT_RES_RTBLKS); 5211 XFS_QMOPT_RES_RTBLKS);
5202 } else { 5212 } else {
5203 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, 5213 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
5204 (int64_t)del.br_blockcount, rsvd); 5214 (int64_t)del.br_blockcount, rsvd);
5205 (void)xfs_trans_reserve_quota_nblks(NULL, 5215 (void)xfs_trans_reserve_quota_nblks(NULL,
5206 ip, -((long)del.br_blockcount), 0, 5216 ip, -((long)del.br_blockcount), 0,
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index b13569a6179b..71ec9b6ecdfc 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -74,9 +74,12 @@ typedef struct xfs_bmap_free
74#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ 74#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
75 /* combine contig. space */ 75 /* combine contig. space */
76#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */ 76#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */
77#define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */ 77/*
78 /* need write cache flushing and no */ 78 * unwritten extent conversion - this needs write cache flushing and no additional
79 /* additional allocation alignments */ 79 * allocation alignments. When specified with XFS_BMAPI_PREALLOC it converts
80 * from written to unwritten, otherwise convert from unwritten to written.
81 */
82#define XFS_BMAPI_CONVERT 0x200
80 83
81#define XFS_BMAPI_FLAGS \ 84#define XFS_BMAPI_FLAGS \
82 { XFS_BMAPI_WRITE, "WRITE" }, \ 85 { XFS_BMAPI_WRITE, "WRITE" }, \
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 829af92f0fba..04f9cca8da7e 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -217,7 +217,7 @@ xfs_btree_del_cursor(
217 */ 217 */
218 for (i = 0; i < cur->bc_nlevels; i++) { 218 for (i = 0; i < cur->bc_nlevels; i++) {
219 if (cur->bc_bufs[i]) 219 if (cur->bc_bufs[i])
220 xfs_btree_setbuf(cur, i, NULL); 220 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
221 else if (!error) 221 else if (!error)
222 break; 222 break;
223 } 223 }
@@ -656,7 +656,7 @@ xfs_btree_reada_bufl(
656 656
657 ASSERT(fsbno != NULLFSBLOCK); 657 ASSERT(fsbno != NULLFSBLOCK);
658 d = XFS_FSB_TO_DADDR(mp, fsbno); 658 d = XFS_FSB_TO_DADDR(mp, fsbno);
659 xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); 659 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count);
660} 660}
661 661
662/* 662/*
@@ -676,7 +676,7 @@ xfs_btree_reada_bufs(
676 ASSERT(agno != NULLAGNUMBER); 676 ASSERT(agno != NULLAGNUMBER);
677 ASSERT(agbno != NULLAGBLOCK); 677 ASSERT(agbno != NULLAGBLOCK);
678 d = XFS_AGB_TO_DADDR(mp, agno, agbno); 678 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
679 xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); 679 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count);
680} 680}
681 681
682STATIC int 682STATIC int
@@ -763,22 +763,19 @@ xfs_btree_readahead(
763 * Set the buffer for level "lev" in the cursor to bp, releasing 763 * Set the buffer for level "lev" in the cursor to bp, releasing
764 * any previous buffer. 764 * any previous buffer.
765 */ 765 */
766void 766STATIC void
767xfs_btree_setbuf( 767xfs_btree_setbuf(
768 xfs_btree_cur_t *cur, /* btree cursor */ 768 xfs_btree_cur_t *cur, /* btree cursor */
769 int lev, /* level in btree */ 769 int lev, /* level in btree */
770 xfs_buf_t *bp) /* new buffer to set */ 770 xfs_buf_t *bp) /* new buffer to set */
771{ 771{
772 struct xfs_btree_block *b; /* btree block */ 772 struct xfs_btree_block *b; /* btree block */
773 xfs_buf_t *obp; /* old buffer pointer */
774 773
775 obp = cur->bc_bufs[lev]; 774 if (cur->bc_bufs[lev])
776 if (obp) 775 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
777 xfs_trans_brelse(cur->bc_tp, obp);
778 cur->bc_bufs[lev] = bp; 776 cur->bc_bufs[lev] = bp;
779 cur->bc_ra[lev] = 0; 777 cur->bc_ra[lev] = 0;
780 if (!bp) 778
781 return;
782 b = XFS_BUF_TO_BLOCK(bp); 779 b = XFS_BUF_TO_BLOCK(bp);
783 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { 780 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
784 if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) 781 if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
@@ -3011,6 +3008,43 @@ out0:
3011 return 0; 3008 return 0;
3012} 3009}
3013 3010
3011/*
3012 * Kill the current root node, and replace it with it's only child node.
3013 */
3014STATIC int
3015xfs_btree_kill_root(
3016 struct xfs_btree_cur *cur,
3017 struct xfs_buf *bp,
3018 int level,
3019 union xfs_btree_ptr *newroot)
3020{
3021 int error;
3022
3023 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3024 XFS_BTREE_STATS_INC(cur, killroot);
3025
3026 /*
3027 * Update the root pointer, decreasing the level by 1 and then
3028 * free the old root.
3029 */
3030 cur->bc_ops->set_root(cur, newroot, -1);
3031
3032 error = cur->bc_ops->free_block(cur, bp);
3033 if (error) {
3034 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3035 return error;
3036 }
3037
3038 XFS_BTREE_STATS_INC(cur, free);
3039
3040 cur->bc_bufs[level] = NULL;
3041 cur->bc_ra[level] = 0;
3042 cur->bc_nlevels--;
3043
3044 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3045 return 0;
3046}
3047
3014STATIC int 3048STATIC int
3015xfs_btree_dec_cursor( 3049xfs_btree_dec_cursor(
3016 struct xfs_btree_cur *cur, 3050 struct xfs_btree_cur *cur,
@@ -3195,7 +3229,7 @@ xfs_btree_delrec(
3195 * Make it the new root of the btree. 3229 * Make it the new root of the btree.
3196 */ 3230 */
3197 pp = xfs_btree_ptr_addr(cur, 1, block); 3231 pp = xfs_btree_ptr_addr(cur, 1, block);
3198 error = cur->bc_ops->kill_root(cur, bp, level, pp); 3232 error = xfs_btree_kill_root(cur, bp, level, pp);
3199 if (error) 3233 if (error)
3200 goto error0; 3234 goto error0;
3201 } else if (level > 0) { 3235 } else if (level > 0) {
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7fa07062bdda..82fafc66bd1f 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -152,9 +152,7 @@ struct xfs_btree_ops {
152 152
153 /* update btree root pointer */ 153 /* update btree root pointer */
154 void (*set_root)(struct xfs_btree_cur *cur, 154 void (*set_root)(struct xfs_btree_cur *cur,
155 union xfs_btree_ptr *nptr, int level_change); 155 union xfs_btree_ptr *nptr, int level_change);
156 int (*kill_root)(struct xfs_btree_cur *cur, struct xfs_buf *bp,
157 int level, union xfs_btree_ptr *newroot);
158 156
159 /* block allocation / freeing */ 157 /* block allocation / freeing */
160 int (*alloc_block)(struct xfs_btree_cur *cur, 158 int (*alloc_block)(struct xfs_btree_cur *cur,
@@ -399,16 +397,6 @@ xfs_btree_reada_bufs(
399 xfs_agblock_t agbno, /* allocation group block number */ 397 xfs_agblock_t agbno, /* allocation group block number */
400 xfs_extlen_t count); /* count of filesystem blocks */ 398 xfs_extlen_t count); /* count of filesystem blocks */
401 399
402/*
403 * Set the buffer for level "lev" in the cursor to bp, releasing
404 * any previous buffer.
405 */
406void
407xfs_btree_setbuf(
408 xfs_btree_cur_t *cur, /* btree cursor */
409 int lev, /* level in btree */
410 struct xfs_buf *bp); /* new buffer to set */
411
412 400
413/* 401/*
414 * Common btree core entry points. 402 * Common btree core entry points.
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1b09d7a280df..2686d0d54c5b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -692,8 +692,7 @@ xfs_buf_item_init(
692 * the first. If we do already have one, there is 692 * the first. If we do already have one, there is
693 * nothing to do here so return. 693 * nothing to do here so return.
694 */ 694 */
695 if (bp->b_mount != mp) 695 ASSERT(bp->b_target->bt_mount == mp);
696 bp->b_mount = mp;
697 if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { 696 if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
698 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 697 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
699 if (lip->li_type == XFS_LI_BUF) { 698 if (lip->li_type == XFS_LI_BUF) {
@@ -974,7 +973,7 @@ xfs_buf_iodone_callbacks(
974 xfs_buf_do_callbacks(bp, lip); 973 xfs_buf_do_callbacks(bp, lip);
975 XFS_BUF_SET_FSPRIVATE(bp, NULL); 974 XFS_BUF_SET_FSPRIVATE(bp, NULL);
976 XFS_BUF_CLR_IODONE_FUNC(bp); 975 XFS_BUF_CLR_IODONE_FUNC(bp);
977 xfs_biodone(bp); 976 xfs_buf_ioend(bp, 0);
978 return; 977 return;
979 } 978 }
980 979
@@ -1033,7 +1032,7 @@ xfs_buf_iodone_callbacks(
1033 xfs_buf_do_callbacks(bp, lip); 1032 xfs_buf_do_callbacks(bp, lip);
1034 XFS_BUF_SET_FSPRIVATE(bp, NULL); 1033 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1035 XFS_BUF_CLR_IODONE_FUNC(bp); 1034 XFS_BUF_CLR_IODONE_FUNC(bp);
1036 xfs_biodone(bp); 1035 xfs_buf_ioend(bp, 0);
1037} 1036}
1038 1037
1039/* 1038/*
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 30fa0e206fba..1c00bedb3175 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -2042,7 +2042,7 @@ xfs_da_do_buf(
2042 mappedbno, nmapped, 0, &bp); 2042 mappedbno, nmapped, 0, &bp);
2043 break; 2043 break;
2044 case 3: 2044 case 3:
2045 xfs_baread(mp->m_ddev_targp, mappedbno, nmapped); 2045 xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
2046 error = 0; 2046 error = 0;
2047 bp = NULL; 2047 bp = NULL;
2048 break; 2048 break;
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index e5b153b2e6a3..dffba9ba0db6 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -49,8 +49,9 @@ typedef struct xfs_dinode {
49 __be32 di_uid; /* owner's user id */ 49 __be32 di_uid; /* owner's user id */
50 __be32 di_gid; /* owner's group id */ 50 __be32 di_gid; /* owner's group id */
51 __be32 di_nlink; /* number of links to file */ 51 __be32 di_nlink; /* number of links to file */
52 __be16 di_projid; /* owner's project id */ 52 __be16 di_projid_lo; /* lower part of owner's project id */
53 __u8 di_pad[8]; /* unused, zeroed space */ 53 __be16 di_projid_hi; /* higher part owner's project id */
54 __u8 di_pad[6]; /* unused, zeroed space */
54 __be16 di_flushiter; /* incremented on flush */ 55 __be16 di_flushiter; /* incremented on flush */
55 xfs_timestamp_t di_atime; /* time last accessed */ 56 xfs_timestamp_t di_atime; /* time last accessed */
56 xfs_timestamp_t di_mtime; /* time last modified */ 57 xfs_timestamp_t di_mtime; /* time last modified */
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 504be8640e91..ae891223be90 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -961,7 +961,7 @@ xfs_dir2_leaf_getdents(
961 if (i > ra_current && 961 if (i > ra_current &&
962 map[ra_index].br_blockcount >= 962 map[ra_index].br_blockcount >=
963 mp->m_dirblkfsbs) { 963 mp->m_dirblkfsbs) {
964 xfs_baread(mp->m_ddev_targp, 964 xfs_buf_readahead(mp->m_ddev_targp,
965 XFS_FSB_TO_DADDR(mp, 965 XFS_FSB_TO_DADDR(mp,
966 map[ra_index].br_startblock + 966 map[ra_index].br_startblock +
967 ra_offset), 967 ra_offset),
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 87c2e9d02288..8f6fc1a96386 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -293,9 +293,11 @@ typedef struct xfs_bstat {
293 __s32 bs_extsize; /* extent size */ 293 __s32 bs_extsize; /* extent size */
294 __s32 bs_extents; /* number of extents */ 294 __s32 bs_extents; /* number of extents */
295 __u32 bs_gen; /* generation count */ 295 __u32 bs_gen; /* generation count */
296 __u16 bs_projid; /* project id */ 296 __u16 bs_projid_lo; /* lower part of project id */
297#define bs_projid bs_projid_lo /* (previously just bs_projid) */
297 __u16 bs_forkoff; /* inode fork offset in bytes */ 298 __u16 bs_forkoff; /* inode fork offset in bytes */
298 unsigned char bs_pad[12]; /* pad space, unused */ 299 __u16 bs_projid_hi; /* higher part of project id */
300 unsigned char bs_pad[10]; /* pad space, unused */
299 __u32 bs_dmevmask; /* DMIG event mask */ 301 __u32 bs_dmevmask; /* DMIG event mask */
300 __u16 bs_dmstate; /* DMIG state info */ 302 __u16 bs_dmstate; /* DMIG state info */
301 __u16 bs_aextents; /* attribute number of extents */ 303 __u16 bs_aextents; /* attribute number of extents */
@@ -448,6 +450,7 @@ typedef struct xfs_handle {
448/* XFS_IOC_SETBIOSIZE ---- deprecated 46 */ 450/* XFS_IOC_SETBIOSIZE ---- deprecated 46 */
449/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ 451/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
450#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) 452#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
453#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
451 454
452/* 455/*
453 * ioctl commands that replace IRIX syssgi()'s 456 * ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 43b1d5699335..a7c116e814af 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -144,12 +144,11 @@ xfs_growfs_data_private(
144 if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) 144 if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
145 return error; 145 return error;
146 dpct = pct - mp->m_sb.sb_imax_pct; 146 dpct = pct - mp->m_sb.sb_imax_pct;
147 error = xfs_read_buf(mp, mp->m_ddev_targp, 147 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
148 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), 148 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
149 XFS_FSS_TO_BB(mp, 1), 0, &bp); 149 BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
150 if (error) 150 if (!bp)
151 return error; 151 return EIO;
152 ASSERT(bp);
153 xfs_buf_relse(bp); 152 xfs_buf_relse(bp);
154 153
155 new = nb; /* use new as a temporary here */ 154 new = nb; /* use new as a temporary here */
@@ -597,7 +596,8 @@ out:
597 * the extra reserve blocks from the reserve..... 596 * the extra reserve blocks from the reserve.....
598 */ 597 */
599 int error; 598 int error;
600 error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0); 599 error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
600 fdblks_delta, 0);
601 if (error == ENOSPC) 601 if (error == ENOSPC)
602 goto retry; 602 goto retry;
603 } 603 }
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 5371d2dc360e..0626a32c3447 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -212,7 +212,7 @@ xfs_ialloc_inode_init(
212 * to log a whole cluster of inodes instead of all the 212 * to log a whole cluster of inodes instead of all the
213 * individual transactions causing a lot of log traffic. 213 * individual transactions causing a lot of log traffic.
214 */ 214 */
215 xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog); 215 xfs_buf_zero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
216 for (i = 0; i < ninodes; i++) { 216 for (i = 0; i < ninodes; i++) {
217 int ioffset = i << mp->m_sb.sb_inodelog; 217 int ioffset = i << mp->m_sb.sb_inodelog;
218 uint isize = sizeof(struct xfs_dinode); 218 uint isize = sizeof(struct xfs_dinode);
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index d352862cefa0..16921f55c542 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -183,38 +183,6 @@ xfs_inobt_key_diff(
183 cur->bc_rec.i.ir_startino; 183 cur->bc_rec.i.ir_startino;
184} 184}
185 185
186STATIC int
187xfs_inobt_kill_root(
188 struct xfs_btree_cur *cur,
189 struct xfs_buf *bp,
190 int level,
191 union xfs_btree_ptr *newroot)
192{
193 int error;
194
195 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
196 XFS_BTREE_STATS_INC(cur, killroot);
197
198 /*
199 * Update the root pointer, decreasing the level by 1 and then
200 * free the old root.
201 */
202 xfs_inobt_set_root(cur, newroot, -1);
203 error = xfs_inobt_free_block(cur, bp);
204 if (error) {
205 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
206 return error;
207 }
208
209 XFS_BTREE_STATS_INC(cur, free);
210
211 cur->bc_bufs[level] = NULL;
212 cur->bc_nlevels--;
213
214 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
215 return 0;
216}
217
218#ifdef DEBUG 186#ifdef DEBUG
219STATIC int 187STATIC int
220xfs_inobt_keys_inorder( 188xfs_inobt_keys_inorder(
@@ -309,7 +277,6 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
309 277
310 .dup_cursor = xfs_inobt_dup_cursor, 278 .dup_cursor = xfs_inobt_dup_cursor,
311 .set_root = xfs_inobt_set_root, 279 .set_root = xfs_inobt_set_root,
312 .kill_root = xfs_inobt_kill_root,
313 .alloc_block = xfs_inobt_alloc_block, 280 .alloc_block = xfs_inobt_alloc_block,
314 .free_block = xfs_inobt_free_block, 281 .free_block = xfs_inobt_free_block,
315 .get_minrecs = xfs_inobt_get_minrecs, 282 .get_minrecs = xfs_inobt_get_minrecs,
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b1ecc6f97ade..0cdd26932d8e 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -365,8 +365,8 @@ xfs_iget(
365 xfs_perag_t *pag; 365 xfs_perag_t *pag;
366 xfs_agino_t agino; 366 xfs_agino_t agino;
367 367
368 /* the radix tree exists only in inode capable AGs */ 368 /* reject inode numbers outside existing AGs */
369 if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi) 369 if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
370 return EINVAL; 370 return EINVAL;
371 371
372 /* get the perag structure and ensure that it's inode capable */ 372 /* get the perag structure and ensure that it's inode capable */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 34798f391c49..108c7a085f94 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -660,7 +660,8 @@ xfs_dinode_from_disk(
660 to->di_uid = be32_to_cpu(from->di_uid); 660 to->di_uid = be32_to_cpu(from->di_uid);
661 to->di_gid = be32_to_cpu(from->di_gid); 661 to->di_gid = be32_to_cpu(from->di_gid);
662 to->di_nlink = be32_to_cpu(from->di_nlink); 662 to->di_nlink = be32_to_cpu(from->di_nlink);
663 to->di_projid = be16_to_cpu(from->di_projid); 663 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
664 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
664 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 665 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
665 to->di_flushiter = be16_to_cpu(from->di_flushiter); 666 to->di_flushiter = be16_to_cpu(from->di_flushiter);
666 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec); 667 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
@@ -695,7 +696,8 @@ xfs_dinode_to_disk(
695 to->di_uid = cpu_to_be32(from->di_uid); 696 to->di_uid = cpu_to_be32(from->di_uid);
696 to->di_gid = cpu_to_be32(from->di_gid); 697 to->di_gid = cpu_to_be32(from->di_gid);
697 to->di_nlink = cpu_to_be32(from->di_nlink); 698 to->di_nlink = cpu_to_be32(from->di_nlink);
698 to->di_projid = cpu_to_be16(from->di_projid); 699 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
700 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
699 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 701 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
700 to->di_flushiter = cpu_to_be16(from->di_flushiter); 702 to->di_flushiter = cpu_to_be16(from->di_flushiter);
701 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); 703 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
@@ -874,7 +876,7 @@ xfs_iread(
874 if (ip->i_d.di_version == 1) { 876 if (ip->i_d.di_version == 1) {
875 ip->i_d.di_nlink = ip->i_d.di_onlink; 877 ip->i_d.di_nlink = ip->i_d.di_onlink;
876 ip->i_d.di_onlink = 0; 878 ip->i_d.di_onlink = 0;
877 ip->i_d.di_projid = 0; 879 xfs_set_projid(ip, 0);
878 } 880 }
879 881
880 ip->i_delayed_blks = 0; 882 ip->i_delayed_blks = 0;
@@ -982,8 +984,7 @@ xfs_ialloc(
982 mode_t mode, 984 mode_t mode,
983 xfs_nlink_t nlink, 985 xfs_nlink_t nlink,
984 xfs_dev_t rdev, 986 xfs_dev_t rdev,
985 cred_t *cr, 987 prid_t prid,
986 xfs_prid_t prid,
987 int okalloc, 988 int okalloc,
988 xfs_buf_t **ialloc_context, 989 xfs_buf_t **ialloc_context,
989 boolean_t *call_again, 990 boolean_t *call_again,
@@ -1027,7 +1028,7 @@ xfs_ialloc(
1027 ASSERT(ip->i_d.di_nlink == nlink); 1028 ASSERT(ip->i_d.di_nlink == nlink);
1028 ip->i_d.di_uid = current_fsuid(); 1029 ip->i_d.di_uid = current_fsuid();
1029 ip->i_d.di_gid = current_fsgid(); 1030 ip->i_d.di_gid = current_fsgid();
1030 ip->i_d.di_projid = prid; 1031 xfs_set_projid(ip, prid);
1031 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 1032 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1032 1033
1033 /* 1034 /*
@@ -2725,7 +2726,7 @@ cluster_corrupt_out:
2725 XFS_BUF_UNDONE(bp); 2726 XFS_BUF_UNDONE(bp);
2726 XFS_BUF_STALE(bp); 2727 XFS_BUF_STALE(bp);
2727 XFS_BUF_ERROR(bp,EIO); 2728 XFS_BUF_ERROR(bp,EIO);
2728 xfs_biodone(bp); 2729 xfs_buf_ioend(bp, 0);
2729 } else { 2730 } else {
2730 XFS_BUF_STALE(bp); 2731 XFS_BUF_STALE(bp);
2731 xfs_buf_relse(bp); 2732 xfs_buf_relse(bp);
@@ -3008,7 +3009,7 @@ xfs_iflush_int(
3008 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3009 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3009 memset(&(dip->di_pad[0]), 0, 3010 memset(&(dip->di_pad[0]), 0,
3010 sizeof(dip->di_pad)); 3011 sizeof(dip->di_pad));
3011 ASSERT(ip->i_d.di_projid == 0); 3012 ASSERT(xfs_get_projid(ip) == 0);
3012 } 3013 }
3013 } 3014 }
3014 3015
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0898c5417d12..fac52290de90 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -134,8 +134,9 @@ typedef struct xfs_icdinode {
134 __uint32_t di_uid; /* owner's user id */ 134 __uint32_t di_uid; /* owner's user id */
135 __uint32_t di_gid; /* owner's group id */ 135 __uint32_t di_gid; /* owner's group id */
136 __uint32_t di_nlink; /* number of links to file */ 136 __uint32_t di_nlink; /* number of links to file */
137 __uint16_t di_projid; /* owner's project id */ 137 __uint16_t di_projid_lo; /* lower part of owner's project id */
138 __uint8_t di_pad[8]; /* unused, zeroed space */ 138 __uint16_t di_projid_hi; /* higher part of owner's project id */
139 __uint8_t di_pad[6]; /* unused, zeroed space */
139 __uint16_t di_flushiter; /* incremented on flush */ 140 __uint16_t di_flushiter; /* incremented on flush */
140 xfs_ictimestamp_t di_atime; /* time last accessed */ 141 xfs_ictimestamp_t di_atime; /* time last accessed */
141 xfs_ictimestamp_t di_mtime; /* time last modified */ 142 xfs_ictimestamp_t di_mtime; /* time last modified */
@@ -212,7 +213,6 @@ typedef struct xfs_icdinode {
212#ifdef __KERNEL__ 213#ifdef __KERNEL__
213 214
214struct bhv_desc; 215struct bhv_desc;
215struct cred;
216struct xfs_buf; 216struct xfs_buf;
217struct xfs_bmap_free; 217struct xfs_bmap_free;
218struct xfs_bmbt_irec; 218struct xfs_bmbt_irec;
@@ -335,6 +335,25 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
335} 335}
336 336
337/* 337/*
338 * Project quota id helpers (previously projid was 16bit only
339 * and using two 16bit values to hold new 32bit projid was choosen
340 * to retain compatibility with "old" filesystems).
341 */
342static inline prid_t
343xfs_get_projid(struct xfs_inode *ip)
344{
345 return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
346}
347
348static inline void
349xfs_set_projid(struct xfs_inode *ip,
350 prid_t projid)
351{
352 ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16);
353 ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
354}
355
356/*
338 * Manage the i_flush queue embedded in the inode. This completion 357 * Manage the i_flush queue embedded in the inode. This completion
339 * queue synchronizes processes attempting to flush the in-core 358 * queue synchronizes processes attempting to flush the in-core
340 * inode back to disk. 359 * inode back to disk.
@@ -456,8 +475,8 @@ void xfs_inode_free(struct xfs_inode *ip);
456 * xfs_inode.c prototypes. 475 * xfs_inode.c prototypes.
457 */ 476 */
458int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, 477int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t,
459 xfs_nlink_t, xfs_dev_t, cred_t *, xfs_prid_t, 478 xfs_nlink_t, xfs_dev_t, prid_t, int,
460 int, struct xfs_buf **, boolean_t *, xfs_inode_t **); 479 struct xfs_buf **, boolean_t *, xfs_inode_t **);
461 480
462uint xfs_ip2xflags(struct xfs_inode *); 481uint xfs_ip2xflags(struct xfs_inode *);
463uint xfs_dic2xflags(struct xfs_dinode *); 482uint xfs_dic2xflags(struct xfs_dinode *);
@@ -471,7 +490,6 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
471void xfs_iext_realloc(xfs_inode_t *, int, int); 490void xfs_iext_realloc(xfs_inode_t *, int, int);
472void xfs_iunpin_wait(xfs_inode_t *); 491void xfs_iunpin_wait(xfs_inode_t *);
473int xfs_iflush(xfs_inode_t *, uint); 492int xfs_iflush(xfs_inode_t *, uint);
474void xfs_ichgtime(xfs_inode_t *, int);
475void xfs_lock_inodes(xfs_inode_t **, int, uint); 493void xfs_lock_inodes(xfs_inode_t **, int, uint);
476void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); 494void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
477 495
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index fe00777e2796..c7ac020705df 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -223,15 +223,6 @@ xfs_inode_item_format(
223 nvecs = 1; 223 nvecs = 1;
224 224
225 /* 225 /*
226 * Make sure the linux inode is dirty. We do this before
227 * clearing i_update_core as the VFS will call back into
228 * XFS here and set i_update_core, so we need to dirty the
229 * inode first so that the ordering of i_update_core and
230 * unlogged modifications still works as described below.
231 */
232 xfs_mark_inode_dirty_sync(ip);
233
234 /*
235 * Clear i_update_core if the timestamps (or any other 226 * Clear i_update_core if the timestamps (or any other
236 * non-transactional modification) need flushing/logging 227 * non-transactional modification) need flushing/logging
237 * and we're about to log them with the rest of the core. 228 * and we're about to log them with the rest of the core.
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 7e3626e5925c..dc1882adaf54 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -92,7 +92,8 @@ xfs_bulkstat_one_int(
92 * further change. 92 * further change.
93 */ 93 */
94 buf->bs_nlink = dic->di_nlink; 94 buf->bs_nlink = dic->di_nlink;
95 buf->bs_projid = dic->di_projid; 95 buf->bs_projid_lo = dic->di_projid_lo;
96 buf->bs_projid_hi = dic->di_projid_hi;
96 buf->bs_ino = ino; 97 buf->bs_ino = ino;
97 buf->bs_mode = dic->di_mode; 98 buf->bs_mode = dic->di_mode;
98 buf->bs_uid = dic->di_uid; 99 buf->bs_uid = dic->di_uid;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 33f718f92a48..cee4ab9f8a9e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -917,19 +917,6 @@ xlog_iodone(xfs_buf_t *bp)
917 l = iclog->ic_log; 917 l = iclog->ic_log;
918 918
919 /* 919 /*
920 * If the _XFS_BARRIER_FAILED flag was set by a lower
921 * layer, it means the underlying device no longer supports
922 * barrier I/O. Warn loudly and turn off barriers.
923 */
924 if (bp->b_flags & _XFS_BARRIER_FAILED) {
925 bp->b_flags &= ~_XFS_BARRIER_FAILED;
926 l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER;
927 xfs_fs_cmn_err(CE_WARN, l->l_mp,
928 "xlog_iodone: Barriers are no longer supported"
929 " by device. Disabling barriers\n");
930 }
931
932 /*
933 * Race to shutdown the filesystem if we see an error. 920 * Race to shutdown the filesystem if we see an error.
934 */ 921 */
935 if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp, 922 if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp,
@@ -1131,7 +1118,8 @@ xlog_alloc_log(xfs_mount_t *mp,
1131 iclog->ic_prev = prev_iclog; 1118 iclog->ic_prev = prev_iclog;
1132 prev_iclog = iclog; 1119 prev_iclog = iclog;
1133 1120
1134 bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp); 1121 bp = xfs_buf_get_uncached(mp->m_logdev_targp,
1122 log->l_iclog_size, 0);
1135 if (!bp) 1123 if (!bp)
1136 goto out_free_iclog; 1124 goto out_free_iclog;
1137 if (!XFS_BUF_CPSEMA(bp)) 1125 if (!XFS_BUF_CPSEMA(bp))
@@ -1309,7 +1297,7 @@ xlog_bdstrat(
1309 if (iclog->ic_state & XLOG_STATE_IOERROR) { 1297 if (iclog->ic_state & XLOG_STATE_IOERROR) {
1310 XFS_BUF_ERROR(bp, EIO); 1298 XFS_BUF_ERROR(bp, EIO);
1311 XFS_BUF_STALE(bp); 1299 XFS_BUF_STALE(bp);
1312 xfs_biodone(bp); 1300 xfs_buf_ioend(bp, 0);
1313 /* 1301 /*
1314 * It would seem logical to return EIO here, but we rely on 1302 * It would seem logical to return EIO here, but we rely on
1315 * the log state machine to propagate I/O errors instead of 1303 * the log state machine to propagate I/O errors instead of
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7e206fc1fa36..23d6ceb5e97b 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -146,102 +146,6 @@ xlog_cil_init_post_recovery(
146} 146}
147 147
148/* 148/*
149 * Insert the log item into the CIL and calculate the difference in space
150 * consumed by the item. Add the space to the checkpoint ticket and calculate
151 * if the change requires additional log metadata. If it does, take that space
152 * as well. Remove the amount of space we addded to the checkpoint ticket from
153 * the current transaction ticket so that the accounting works out correctly.
154 *
155 * If this is the first time the item is being placed into the CIL in this
156 * context, pin it so it can't be written to disk until the CIL is flushed to
157 * the iclog and the iclog written to disk.
158 */
159static void
160xlog_cil_insert(
161 struct log *log,
162 struct xlog_ticket *ticket,
163 struct xfs_log_item *item,
164 struct xfs_log_vec *lv)
165{
166 struct xfs_cil *cil = log->l_cilp;
167 struct xfs_log_vec *old = lv->lv_item->li_lv;
168 struct xfs_cil_ctx *ctx = cil->xc_ctx;
169 int len;
170 int diff_iovecs;
171 int iclog_space;
172
173 if (old) {
174 /* existing lv on log item, space used is a delta */
175 ASSERT(!list_empty(&item->li_cil));
176 ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);
177
178 len = lv->lv_buf_len - old->lv_buf_len;
179 diff_iovecs = lv->lv_niovecs - old->lv_niovecs;
180 kmem_free(old->lv_buf);
181 kmem_free(old);
182 } else {
183 /* new lv, must pin the log item */
184 ASSERT(!lv->lv_item->li_lv);
185 ASSERT(list_empty(&item->li_cil));
186
187 len = lv->lv_buf_len;
188 diff_iovecs = lv->lv_niovecs;
189 IOP_PIN(lv->lv_item);
190
191 }
192 len += diff_iovecs * sizeof(xlog_op_header_t);
193
194 /* attach new log vector to log item */
195 lv->lv_item->li_lv = lv;
196
197 spin_lock(&cil->xc_cil_lock);
198 list_move_tail(&item->li_cil, &cil->xc_cil);
199 ctx->nvecs += diff_iovecs;
200
201 /*
202 * If this is the first time the item is being committed to the CIL,
203 * store the sequence number on the log item so we can tell
204 * in future commits whether this is the first checkpoint the item is
205 * being committed into.
206 */
207 if (!item->li_seq)
208 item->li_seq = ctx->sequence;
209
210 /*
211 * Now transfer enough transaction reservation to the context ticket
212 * for the checkpoint. The context ticket is special - the unit
213 * reservation has to grow as well as the current reservation as we
214 * steal from tickets so we can correctly determine the space used
215 * during the transaction commit.
216 */
217 if (ctx->ticket->t_curr_res == 0) {
218 /* first commit in checkpoint, steal the header reservation */
219 ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
220 ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
221 ticket->t_curr_res -= ctx->ticket->t_unit_res;
222 }
223
224 /* do we need space for more log record headers? */
225 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
226 if (len > 0 && (ctx->space_used / iclog_space !=
227 (ctx->space_used + len) / iclog_space)) {
228 int hdrs;
229
230 hdrs = (len + iclog_space - 1) / iclog_space;
231 /* need to take into account split region headers, too */
232 hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
233 ctx->ticket->t_unit_res += hdrs;
234 ctx->ticket->t_curr_res += hdrs;
235 ticket->t_curr_res -= hdrs;
236 ASSERT(ticket->t_curr_res >= len);
237 }
238 ticket->t_curr_res -= len;
239 ctx->space_used += len;
240
241 spin_unlock(&cil->xc_cil_lock);
242}
243
244/*
245 * Format log item into a flat buffers 149 * Format log item into a flat buffers
246 * 150 *
247 * For delayed logging, we need to hold a formatted buffer containing all the 151 * For delayed logging, we need to hold a formatted buffer containing all the
@@ -286,7 +190,7 @@ xlog_cil_format_items(
286 len += lv->lv_iovecp[index].i_len; 190 len += lv->lv_iovecp[index].i_len;
287 191
288 lv->lv_buf_len = len; 192 lv->lv_buf_len = len;
289 lv->lv_buf = kmem_zalloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS); 193 lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
290 ptr = lv->lv_buf; 194 ptr = lv->lv_buf;
291 195
292 for (index = 0; index < lv->lv_niovecs; index++) { 196 for (index = 0; index < lv->lv_niovecs; index++) {
@@ -300,21 +204,136 @@ xlog_cil_format_items(
300 } 204 }
301} 205}
302 206
207/*
208 * Prepare the log item for insertion into the CIL. Calculate the difference in
209 * log space and vectors it will consume, and if it is a new item pin it as
210 * well.
211 */
212STATIC void
213xfs_cil_prepare_item(
214 struct log *log,
215 struct xfs_log_vec *lv,
216 int *len,
217 int *diff_iovecs)
218{
219 struct xfs_log_vec *old = lv->lv_item->li_lv;
220
221 if (old) {
222 /* existing lv on log item, space used is a delta */
223 ASSERT(!list_empty(&lv->lv_item->li_cil));
224 ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);
225
226 *len += lv->lv_buf_len - old->lv_buf_len;
227 *diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
228 kmem_free(old->lv_buf);
229 kmem_free(old);
230 } else {
231 /* new lv, must pin the log item */
232 ASSERT(!lv->lv_item->li_lv);
233 ASSERT(list_empty(&lv->lv_item->li_cil));
234
235 *len += lv->lv_buf_len;
236 *diff_iovecs += lv->lv_niovecs;
237 IOP_PIN(lv->lv_item);
238
239 }
240
241 /* attach new log vector to log item */
242 lv->lv_item->li_lv = lv;
243
244 /*
245 * If this is the first time the item is being committed to the
246 * CIL, store the sequence number on the log item so we can
247 * tell in future commits whether this is the first checkpoint
248 * the item is being committed into.
249 */
250 if (!lv->lv_item->li_seq)
251 lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
252}
253
254/*
255 * Insert the log items into the CIL and calculate the difference in space
256 * consumed by the item. Add the space to the checkpoint ticket and calculate
257 * if the change requires additional log metadata. If it does, take that space
258 * as well. Remove the amount of space we addded to the checkpoint ticket from
259 * the current transaction ticket so that the accounting works out correctly.
260 */
303static void 261static void
304xlog_cil_insert_items( 262xlog_cil_insert_items(
305 struct log *log, 263 struct log *log,
306 struct xfs_log_vec *log_vector, 264 struct xfs_log_vec *log_vector,
307 struct xlog_ticket *ticket, 265 struct xlog_ticket *ticket)
308 xfs_lsn_t *start_lsn)
309{ 266{
310 struct xfs_log_vec *lv; 267 struct xfs_cil *cil = log->l_cilp;
311 268 struct xfs_cil_ctx *ctx = cil->xc_ctx;
312 if (start_lsn) 269 struct xfs_log_vec *lv;
313 *start_lsn = log->l_cilp->xc_ctx->sequence; 270 int len = 0;
271 int diff_iovecs = 0;
272 int iclog_space;
314 273
315 ASSERT(log_vector); 274 ASSERT(log_vector);
275
276 /*
277 * Do all the accounting aggregation and switching of log vectors
278 * around in a separate loop to the insertion of items into the CIL.
279 * Then we can do a separate loop to update the CIL within a single
280 * lock/unlock pair. This reduces the number of round trips on the CIL
281 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
282 * hold time for the transaction commit.
283 *
284 * If this is the first time the item is being placed into the CIL in
285 * this context, pin it so it can't be written to disk until the CIL is
286 * flushed to the iclog and the iclog written to disk.
287 *
288 * We can do this safely because the context can't checkpoint until we
289 * are done so it doesn't matter exactly how we update the CIL.
290 */
291 for (lv = log_vector; lv; lv = lv->lv_next)
292 xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
293
294 /* account for space used by new iovec headers */
295 len += diff_iovecs * sizeof(xlog_op_header_t);
296
297 spin_lock(&cil->xc_cil_lock);
298
299 /* move the items to the tail of the CIL */
316 for (lv = log_vector; lv; lv = lv->lv_next) 300 for (lv = log_vector; lv; lv = lv->lv_next)
317 xlog_cil_insert(log, ticket, lv->lv_item, lv); 301 list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
302
303 ctx->nvecs += diff_iovecs;
304
305 /*
306 * Now transfer enough transaction reservation to the context ticket
307 * for the checkpoint. The context ticket is special - the unit
308 * reservation has to grow as well as the current reservation as we
309 * steal from tickets so we can correctly determine the space used
310 * during the transaction commit.
311 */
312 if (ctx->ticket->t_curr_res == 0) {
313 /* first commit in checkpoint, steal the header reservation */
314 ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
315 ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
316 ticket->t_curr_res -= ctx->ticket->t_unit_res;
317 }
318
319 /* do we need space for more log record headers? */
320 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
321 if (len > 0 && (ctx->space_used / iclog_space !=
322 (ctx->space_used + len) / iclog_space)) {
323 int hdrs;
324
325 hdrs = (len + iclog_space - 1) / iclog_space;
326 /* need to take into account split region headers, too */
327 hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
328 ctx->ticket->t_unit_res += hdrs;
329 ctx->ticket->t_curr_res += hdrs;
330 ticket->t_curr_res -= hdrs;
331 ASSERT(ticket->t_curr_res >= len);
332 }
333 ticket->t_curr_res -= len;
334 ctx->space_used += len;
335
336 spin_unlock(&cil->xc_cil_lock);
318} 337}
319 338
320static void 339static void
@@ -638,7 +657,10 @@ xfs_log_commit_cil(
638 657
639 /* lock out background commit */ 658 /* lock out background commit */
640 down_read(&log->l_cilp->xc_ctx_lock); 659 down_read(&log->l_cilp->xc_ctx_lock);
641 xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn); 660 if (commit_lsn)
661 *commit_lsn = log->l_cilp->xc_ctx->sequence;
662
663 xlog_cil_insert_items(log, log_vector, tp->t_ticket);
642 664
643 /* check we didn't blow the reservation */ 665 /* check we didn't blow the reservation */
644 if (tp->t_ticket->t_curr_res < 0) 666 if (tp->t_ticket->t_curr_res < 0)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6f3f5fa37acf..966d3f97458c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -107,7 +107,8 @@ xlog_get_bp(
107 nbblks += log->l_sectBBsize; 107 nbblks += log->l_sectBBsize;
108 nbblks = round_up(nbblks, log->l_sectBBsize); 108 nbblks = round_up(nbblks, log->l_sectBBsize);
109 109
110 return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); 110 return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
111 BBTOB(nbblks), 0);
111} 112}
112 113
113STATIC void 114STATIC void
@@ -167,7 +168,7 @@ xlog_bread_noalign(
167 XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); 168 XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
168 169
169 xfsbdstrat(log->l_mp, bp); 170 xfsbdstrat(log->l_mp, bp);
170 error = xfs_iowait(bp); 171 error = xfs_buf_iowait(bp);
171 if (error) 172 if (error)
172 xfs_ioerror_alert("xlog_bread", log->l_mp, 173 xfs_ioerror_alert("xlog_bread", log->l_mp,
173 bp, XFS_BUF_ADDR(bp)); 174 bp, XFS_BUF_ADDR(bp));
@@ -321,12 +322,13 @@ xlog_recover_iodone(
321 * this during recovery. One strike! 322 * this during recovery. One strike!
322 */ 323 */
323 xfs_ioerror_alert("xlog_recover_iodone", 324 xfs_ioerror_alert("xlog_recover_iodone",
324 bp->b_mount, bp, XFS_BUF_ADDR(bp)); 325 bp->b_target->bt_mount, bp,
325 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); 326 XFS_BUF_ADDR(bp));
327 xfs_force_shutdown(bp->b_target->bt_mount,
328 SHUTDOWN_META_IO_ERROR);
326 } 329 }
327 bp->b_mount = NULL;
328 XFS_BUF_CLR_IODONE_FUNC(bp); 330 XFS_BUF_CLR_IODONE_FUNC(bp);
329 xfs_biodone(bp); 331 xfs_buf_ioend(bp, 0);
330} 332}
331 333
332/* 334/*
@@ -2275,8 +2277,7 @@ xlog_recover_do_buffer_trans(
2275 XFS_BUF_STALE(bp); 2277 XFS_BUF_STALE(bp);
2276 error = xfs_bwrite(mp, bp); 2278 error = xfs_bwrite(mp, bp);
2277 } else { 2279 } else {
2278 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2280 ASSERT(bp->b_target->bt_mount == mp);
2279 bp->b_mount = mp;
2280 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2281 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2281 xfs_bdwrite(mp, bp); 2282 xfs_bdwrite(mp, bp);
2282 } 2283 }
@@ -2540,8 +2541,7 @@ xlog_recover_do_inode_trans(
2540 } 2541 }
2541 2542
2542write_inode_buffer: 2543write_inode_buffer:
2543 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2544 ASSERT(bp->b_target->bt_mount == mp);
2544 bp->b_mount = mp;
2545 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2545 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2546 xfs_bdwrite(mp, bp); 2546 xfs_bdwrite(mp, bp);
2547error: 2547error:
@@ -2678,8 +2678,7 @@ xlog_recover_do_dquot_trans(
2678 memcpy(ddq, recddq, item->ri_buf[1].i_len); 2678 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2679 2679
2680 ASSERT(dq_f->qlf_size == 2); 2680 ASSERT(dq_f->qlf_size == 2);
2681 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2681 ASSERT(bp->b_target->bt_mount == mp);
2682 bp->b_mount = mp;
2683 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2682 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2684 xfs_bdwrite(mp, bp); 2683 xfs_bdwrite(mp, bp);
2685 2684
@@ -3817,7 +3816,7 @@ xlog_do_recover(
3817 XFS_BUF_READ(bp); 3816 XFS_BUF_READ(bp);
3818 XFS_BUF_UNASYNC(bp); 3817 XFS_BUF_UNASYNC(bp);
3819 xfsbdstrat(log->l_mp, bp); 3818 xfsbdstrat(log->l_mp, bp);
3820 error = xfs_iowait(bp); 3819 error = xfs_buf_iowait(bp);
3821 if (error) { 3820 if (error) {
3822 xfs_ioerror_alert("xlog_do_recover", 3821 xfs_ioerror_alert("xlog_do_recover",
3823 log->l_mp, bp, XFS_BUF_ADDR(bp)); 3822 log->l_mp, bp, XFS_BUF_ADDR(bp));
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index aeb9d72ebf6e..b1498ab5a399 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -52,16 +52,11 @@ STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
52 int); 52 int);
53STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, 53STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
54 int); 54 int);
55STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
56 int64_t, int);
57STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); 55STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
58
59#else 56#else
60 57
61#define xfs_icsb_balance_counter(mp, a, b) do { } while (0) 58#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
62#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) 59#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
63#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
64
65#endif 60#endif
66 61
67static const struct { 62static const struct {
@@ -199,6 +194,8 @@ xfs_uuid_unmount(
199 194
200/* 195/*
201 * Reference counting access wrappers to the perag structures. 196 * Reference counting access wrappers to the perag structures.
197 * Because we never free per-ag structures, the only thing we
198 * have to protect against changes is the tree structure itself.
202 */ 199 */
203struct xfs_perag * 200struct xfs_perag *
204xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) 201xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,19 +203,43 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
206 struct xfs_perag *pag; 203 struct xfs_perag *pag;
207 int ref = 0; 204 int ref = 0;
208 205
209 spin_lock(&mp->m_perag_lock); 206 rcu_read_lock();
210 pag = radix_tree_lookup(&mp->m_perag_tree, agno); 207 pag = radix_tree_lookup(&mp->m_perag_tree, agno);
211 if (pag) { 208 if (pag) {
212 ASSERT(atomic_read(&pag->pag_ref) >= 0); 209 ASSERT(atomic_read(&pag->pag_ref) >= 0);
213 /* catch leaks in the positive direction during testing */
214 ASSERT(atomic_read(&pag->pag_ref) < 1000);
215 ref = atomic_inc_return(&pag->pag_ref); 210 ref = atomic_inc_return(&pag->pag_ref);
216 } 211 }
217 spin_unlock(&mp->m_perag_lock); 212 rcu_read_unlock();
218 trace_xfs_perag_get(mp, agno, ref, _RET_IP_); 213 trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
219 return pag; 214 return pag;
220} 215}
221 216
217/*
218 * search from @first to find the next perag with the given tag set.
219 */
220struct xfs_perag *
221xfs_perag_get_tag(
222 struct xfs_mount *mp,
223 xfs_agnumber_t first,
224 int tag)
225{
226 struct xfs_perag *pag;
227 int found;
228 int ref;
229
230 rcu_read_lock();
231 found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
232 (void **)&pag, first, 1, tag);
233 if (found <= 0) {
234 rcu_read_unlock();
235 return NULL;
236 }
237 ref = atomic_inc_return(&pag->pag_ref);
238 rcu_read_unlock();
239 trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
240 return pag;
241}
242
222void 243void
223xfs_perag_put(struct xfs_perag *pag) 244xfs_perag_put(struct xfs_perag *pag)
224{ 245{
@@ -229,10 +250,18 @@ xfs_perag_put(struct xfs_perag *pag)
229 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); 250 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
230} 251}
231 252
253STATIC void
254__xfs_free_perag(
255 struct rcu_head *head)
256{
257 struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
258
259 ASSERT(atomic_read(&pag->pag_ref) == 0);
260 kmem_free(pag);
261}
262
232/* 263/*
233 * Free up the resources associated with a mount structure. Assume that 264 * Free up the per-ag resources associated with the mount structure.
234 * the structure was initially zeroed, so we can tell which fields got
235 * initialized.
236 */ 265 */
237STATIC void 266STATIC void
238xfs_free_perag( 267xfs_free_perag(
@@ -244,10 +273,9 @@ xfs_free_perag(
244 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 273 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
245 spin_lock(&mp->m_perag_lock); 274 spin_lock(&mp->m_perag_lock);
246 pag = radix_tree_delete(&mp->m_perag_tree, agno); 275 pag = radix_tree_delete(&mp->m_perag_tree, agno);
247 ASSERT(pag);
248 ASSERT(atomic_read(&pag->pag_ref) == 0);
249 spin_unlock(&mp->m_perag_lock); 276 spin_unlock(&mp->m_perag_lock);
250 kmem_free(pag); 277 ASSERT(pag);
278 call_rcu(&pag->rcu_head, __xfs_free_perag);
251 } 279 }
252} 280}
253 281
@@ -444,7 +472,10 @@ xfs_initialize_perag(
444 pag->pag_agno = index; 472 pag->pag_agno = index;
445 pag->pag_mount = mp; 473 pag->pag_mount = mp;
446 rwlock_init(&pag->pag_ici_lock); 474 rwlock_init(&pag->pag_ici_lock);
475 mutex_init(&pag->pag_ici_reclaim_lock);
447 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); 476 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
477 spin_lock_init(&pag->pag_buf_lock);
478 pag->pag_buf_tree = RB_ROOT;
448 479
449 if (radix_tree_preload(GFP_NOFS)) 480 if (radix_tree_preload(GFP_NOFS))
450 goto out_unwind; 481 goto out_unwind;
@@ -639,7 +670,6 @@ int
639xfs_readsb(xfs_mount_t *mp, int flags) 670xfs_readsb(xfs_mount_t *mp, int flags)
640{ 671{
641 unsigned int sector_size; 672 unsigned int sector_size;
642 unsigned int extra_flags;
643 xfs_buf_t *bp; 673 xfs_buf_t *bp;
644 int error; 674 int error;
645 675
@@ -652,28 +682,24 @@ xfs_readsb(xfs_mount_t *mp, int flags)
652 * access to the superblock. 682 * access to the superblock.
653 */ 683 */
654 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); 684 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
655 extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
656 685
657 bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), 686reread:
658 extra_flags); 687 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
659 if (!bp || XFS_BUF_ISERROR(bp)) { 688 XFS_SB_DADDR, sector_size, 0);
660 xfs_fs_mount_cmn_err(flags, "SB read failed"); 689 if (!bp) {
661 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; 690 xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
662 goto fail; 691 return EIO;
663 } 692 }
664 ASSERT(XFS_BUF_ISBUSY(bp));
665 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
666 693
667 /* 694 /*
668 * Initialize the mount structure from the superblock. 695 * Initialize the mount structure from the superblock.
669 * But first do some basic consistency checking. 696 * But first do some basic consistency checking.
670 */ 697 */
671 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); 698 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
672
673 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); 699 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
674 if (error) { 700 if (error) {
675 xfs_fs_mount_cmn_err(flags, "SB validate failed"); 701 xfs_fs_mount_cmn_err(flags, "SB validate failed");
676 goto fail; 702 goto release_buf;
677 } 703 }
678 704
679 /* 705 /*
@@ -684,7 +710,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
684 "device supports only %u byte sectors (not %u)", 710 "device supports only %u byte sectors (not %u)",
685 sector_size, mp->m_sb.sb_sectsize); 711 sector_size, mp->m_sb.sb_sectsize);
686 error = ENOSYS; 712 error = ENOSYS;
687 goto fail; 713 goto release_buf;
688 } 714 }
689 715
690 /* 716 /*
@@ -692,33 +718,20 @@ xfs_readsb(xfs_mount_t *mp, int flags)
692 * re-read the superblock so the buffer is correctly sized. 718 * re-read the superblock so the buffer is correctly sized.
693 */ 719 */
694 if (sector_size < mp->m_sb.sb_sectsize) { 720 if (sector_size < mp->m_sb.sb_sectsize) {
695 XFS_BUF_UNMANAGE(bp);
696 xfs_buf_relse(bp); 721 xfs_buf_relse(bp);
697 sector_size = mp->m_sb.sb_sectsize; 722 sector_size = mp->m_sb.sb_sectsize;
698 bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, 723 goto reread;
699 BTOBB(sector_size), extra_flags);
700 if (!bp || XFS_BUF_ISERROR(bp)) {
701 xfs_fs_mount_cmn_err(flags, "SB re-read failed");
702 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
703 goto fail;
704 }
705 ASSERT(XFS_BUF_ISBUSY(bp));
706 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
707 } 724 }
708 725
709 /* Initialize per-cpu counters */ 726 /* Initialize per-cpu counters */
710 xfs_icsb_reinit_counters(mp); 727 xfs_icsb_reinit_counters(mp);
711 728
712 mp->m_sb_bp = bp; 729 mp->m_sb_bp = bp;
713 xfs_buf_relse(bp); 730 xfs_buf_unlock(bp);
714 ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
715 return 0; 731 return 0;
716 732
717 fail: 733release_buf:
718 if (bp) { 734 xfs_buf_relse(bp);
719 XFS_BUF_UNMANAGE(bp);
720 xfs_buf_relse(bp);
721 }
722 return error; 735 return error;
723} 736}
724 737
@@ -991,42 +1004,35 @@ xfs_check_sizes(xfs_mount_t *mp)
991{ 1004{
992 xfs_buf_t *bp; 1005 xfs_buf_t *bp;
993 xfs_daddr_t d; 1006 xfs_daddr_t d;
994 int error;
995 1007
996 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 1008 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
997 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 1009 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
998 cmn_err(CE_WARN, "XFS: size check 1 failed"); 1010 cmn_err(CE_WARN, "XFS: filesystem size mismatch detected");
999 return XFS_ERROR(EFBIG); 1011 return XFS_ERROR(EFBIG);
1000 } 1012 }
1001 error = xfs_read_buf(mp, mp->m_ddev_targp, 1013 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
1002 d - XFS_FSS_TO_BB(mp, 1), 1014 d - XFS_FSS_TO_BB(mp, 1),
1003 XFS_FSS_TO_BB(mp, 1), 0, &bp); 1015 BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
1004 if (!error) { 1016 if (!bp) {
1005 xfs_buf_relse(bp); 1017 cmn_err(CE_WARN, "XFS: last sector read failed");
1006 } else { 1018 return EIO;
1007 cmn_err(CE_WARN, "XFS: size check 2 failed");
1008 if (error == ENOSPC)
1009 error = XFS_ERROR(EFBIG);
1010 return error;
1011 } 1019 }
1020 xfs_buf_relse(bp);
1012 1021
1013 if (mp->m_logdev_targp != mp->m_ddev_targp) { 1022 if (mp->m_logdev_targp != mp->m_ddev_targp) {
1014 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 1023 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
1015 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 1024 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
1016 cmn_err(CE_WARN, "XFS: size check 3 failed"); 1025 cmn_err(CE_WARN, "XFS: log size mismatch detected");
1017 return XFS_ERROR(EFBIG); 1026 return XFS_ERROR(EFBIG);
1018 } 1027 }
1019 error = xfs_read_buf(mp, mp->m_logdev_targp, 1028 bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
1020 d - XFS_FSB_TO_BB(mp, 1), 1029 d - XFS_FSB_TO_BB(mp, 1),
1021 XFS_FSB_TO_BB(mp, 1), 0, &bp); 1030 XFS_FSB_TO_B(mp, 1), 0);
1022 if (!error) { 1031 if (!bp) {
1023 xfs_buf_relse(bp); 1032 cmn_err(CE_WARN, "XFS: log device read failed");
1024 } else { 1033 return EIO;
1025 cmn_err(CE_WARN, "XFS: size check 3 failed");
1026 if (error == ENOSPC)
1027 error = XFS_ERROR(EFBIG);
1028 return error;
1029 } 1034 }
1035 xfs_buf_relse(bp);
1030 } 1036 }
1031 return 0; 1037 return 0;
1032} 1038}
@@ -1601,7 +1607,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
1601 XFS_BUF_UNASYNC(sbp); 1607 XFS_BUF_UNASYNC(sbp);
1602 ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); 1608 ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
1603 xfsbdstrat(mp, sbp); 1609 xfsbdstrat(mp, sbp);
1604 error = xfs_iowait(sbp); 1610 error = xfs_buf_iowait(sbp);
1605 if (error) 1611 if (error)
1606 xfs_ioerror_alert("xfs_unmountfs_writesb", 1612 xfs_ioerror_alert("xfs_unmountfs_writesb",
1607 mp, sbp, XFS_BUF_ADDR(sbp)); 1613 mp, sbp, XFS_BUF_ADDR(sbp));
@@ -1832,135 +1838,72 @@ xfs_mod_incore_sb_unlocked(
1832 */ 1838 */
1833int 1839int
1834xfs_mod_incore_sb( 1840xfs_mod_incore_sb(
1835 xfs_mount_t *mp, 1841 struct xfs_mount *mp,
1836 xfs_sb_field_t field, 1842 xfs_sb_field_t field,
1837 int64_t delta, 1843 int64_t delta,
1838 int rsvd) 1844 int rsvd)
1839{ 1845{
1840 int status; 1846 int status;
1841 1847
1842 /* check for per-cpu counters */
1843 switch (field) {
1844#ifdef HAVE_PERCPU_SB 1848#ifdef HAVE_PERCPU_SB
1845 case XFS_SBS_ICOUNT: 1849 ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1846 case XFS_SBS_IFREE:
1847 case XFS_SBS_FDBLOCKS:
1848 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1849 status = xfs_icsb_modify_counters(mp, field,
1850 delta, rsvd);
1851 break;
1852 }
1853 /* FALLTHROUGH */
1854#endif 1850#endif
1855 default: 1851 spin_lock(&mp->m_sb_lock);
1856 spin_lock(&mp->m_sb_lock); 1852 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1857 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1853 spin_unlock(&mp->m_sb_lock);
1858 spin_unlock(&mp->m_sb_lock);
1859 break;
1860 }
1861 1854
1862 return status; 1855 return status;
1863} 1856}
1864 1857
1865/* 1858/*
1866 * xfs_mod_incore_sb_batch() is used to change more than one field 1859 * Change more than one field in the in-core superblock structure at a time.
1867 * in the in-core superblock structure at a time. This modification
1868 * is protected by a lock internal to this module. The fields and
1869 * changes to those fields are specified in the array of xfs_mod_sb
1870 * structures passed in.
1871 * 1860 *
1872 * Either all of the specified deltas will be applied or none of 1861 * The fields and changes to those fields are specified in the array of
1873 * them will. If any modified field dips below 0, then all modifications 1862 * xfs_mod_sb structures passed in. Either all of the specified deltas
1874 * will be backed out and EINVAL will be returned. 1863 * will be applied or none of them will. If any modified field dips below 0,
1864 * then all modifications will be backed out and EINVAL will be returned.
1865 *
1866 * Note that this function may not be used for the superblock values that
1867 * are tracked with the in-memory per-cpu counters - a direct call to
1868 * xfs_icsb_modify_counters is required for these.
1875 */ 1869 */
1876int 1870int
1877xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) 1871xfs_mod_incore_sb_batch(
1872 struct xfs_mount *mp,
1873 xfs_mod_sb_t *msb,
1874 uint nmsb,
1875 int rsvd)
1878{ 1876{
1879 int status=0; 1877 xfs_mod_sb_t *msbp = &msb[0];
1880 xfs_mod_sb_t *msbp; 1878 int error = 0;
1881 1879
1882 /* 1880 /*
1883 * Loop through the array of mod structures and apply each 1881 * Loop through the array of mod structures and apply each individually.
1884 * individually. If any fail, then back out all those 1882 * If any fail, then back out all those which have already been applied.
1885 * which have already been applied. Do all of this within 1883 * Do all of this within the scope of the m_sb_lock so that all of the
1886 * the scope of the m_sb_lock so that all of the changes will 1884 * changes will be atomic.
1887 * be atomic.
1888 */ 1885 */
1889 spin_lock(&mp->m_sb_lock); 1886 spin_lock(&mp->m_sb_lock);
1890 msbp = &msb[0];
1891 for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { 1887 for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
1892 /* 1888 ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1893 * Apply the delta at index n. If it fails, break 1889 msbp->msb_field > XFS_SBS_FDBLOCKS);
1894 * from the loop so we'll fall into the undo loop
1895 * below.
1896 */
1897 switch (msbp->msb_field) {
1898#ifdef HAVE_PERCPU_SB
1899 case XFS_SBS_ICOUNT:
1900 case XFS_SBS_IFREE:
1901 case XFS_SBS_FDBLOCKS:
1902 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1903 spin_unlock(&mp->m_sb_lock);
1904 status = xfs_icsb_modify_counters(mp,
1905 msbp->msb_field,
1906 msbp->msb_delta, rsvd);
1907 spin_lock(&mp->m_sb_lock);
1908 break;
1909 }
1910 /* FALLTHROUGH */
1911#endif
1912 default:
1913 status = xfs_mod_incore_sb_unlocked(mp,
1914 msbp->msb_field,
1915 msbp->msb_delta, rsvd);
1916 break;
1917 }
1918 1890
1919 if (status != 0) { 1891 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1920 break; 1892 msbp->msb_delta, rsvd);
1921 } 1893 if (error)
1894 goto unwind;
1922 } 1895 }
1896 spin_unlock(&mp->m_sb_lock);
1897 return 0;
1923 1898
1924 /* 1899unwind:
1925 * If we didn't complete the loop above, then back out 1900 while (--msbp >= msb) {
1926 * any changes made to the superblock. If you add code 1901 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1927 * between the loop above and here, make sure that you 1902 -msbp->msb_delta, rsvd);
1928 * preserve the value of status. Loop back until 1903 ASSERT(error == 0);
1929 * we step below the beginning of the array. Make sure
1930 * we don't touch anything back there.
1931 */
1932 if (status != 0) {
1933 msbp--;
1934 while (msbp >= msb) {
1935 switch (msbp->msb_field) {
1936#ifdef HAVE_PERCPU_SB
1937 case XFS_SBS_ICOUNT:
1938 case XFS_SBS_IFREE:
1939 case XFS_SBS_FDBLOCKS:
1940 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1941 spin_unlock(&mp->m_sb_lock);
1942 status = xfs_icsb_modify_counters(mp,
1943 msbp->msb_field,
1944 -(msbp->msb_delta),
1945 rsvd);
1946 spin_lock(&mp->m_sb_lock);
1947 break;
1948 }
1949 /* FALLTHROUGH */
1950#endif
1951 default:
1952 status = xfs_mod_incore_sb_unlocked(mp,
1953 msbp->msb_field,
1954 -(msbp->msb_delta),
1955 rsvd);
1956 break;
1957 }
1958 ASSERT(status == 0);
1959 msbp--;
1960 }
1961 } 1904 }
1962 spin_unlock(&mp->m_sb_lock); 1905 spin_unlock(&mp->m_sb_lock);
1963 return status; 1906 return error;
1964} 1907}
1965 1908
1966/* 1909/*
@@ -1998,18 +1941,13 @@ xfs_getsb(
1998 */ 1941 */
1999void 1942void
2000xfs_freesb( 1943xfs_freesb(
2001 xfs_mount_t *mp) 1944 struct xfs_mount *mp)
2002{ 1945{
2003 xfs_buf_t *bp; 1946 struct xfs_buf *bp = mp->m_sb_bp;
2004 1947
2005 /* 1948 xfs_buf_lock(bp);
2006 * Use xfs_getsb() so that the buffer will be locked
2007 * when we call xfs_buf_relse().
2008 */
2009 bp = xfs_getsb(mp, 0);
2010 XFS_BUF_UNMANAGE(bp);
2011 xfs_buf_relse(bp);
2012 mp->m_sb_bp = NULL; 1949 mp->m_sb_bp = NULL;
1950 xfs_buf_relse(bp);
2013} 1951}
2014 1952
2015/* 1953/*
@@ -2496,7 +2434,7 @@ xfs_icsb_balance_counter(
2496 spin_unlock(&mp->m_sb_lock); 2434 spin_unlock(&mp->m_sb_lock);
2497} 2435}
2498 2436
2499STATIC int 2437int
2500xfs_icsb_modify_counters( 2438xfs_icsb_modify_counters(
2501 xfs_mount_t *mp, 2439 xfs_mount_t *mp,
2502 xfs_sb_field_t field, 2440 xfs_sb_field_t field,
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 622da2179a57..5861b4980740 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,7 +53,6 @@ typedef struct xfs_trans_reservations {
53 53
54#include "xfs_sync.h" 54#include "xfs_sync.h"
55 55
56struct cred;
57struct log; 56struct log;
58struct xfs_mount_args; 57struct xfs_mount_args;
59struct xfs_inode; 58struct xfs_inode;
@@ -91,6 +90,8 @@ extern void xfs_icsb_reinit_counters(struct xfs_mount *);
91extern void xfs_icsb_destroy_counters(struct xfs_mount *); 90extern void xfs_icsb_destroy_counters(struct xfs_mount *);
92extern void xfs_icsb_sync_counters(struct xfs_mount *, int); 91extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
93extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); 92extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
93extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
94 int64_t, int);
94 95
95#else 96#else
96#define xfs_icsb_init_counters(mp) (0) 97#define xfs_icsb_init_counters(mp) (0)
@@ -98,6 +99,8 @@ extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
98#define xfs_icsb_reinit_counters(mp) do { } while (0) 99#define xfs_icsb_reinit_counters(mp) do { } while (0)
99#define xfs_icsb_sync_counters(mp, flags) do { } while (0) 100#define xfs_icsb_sync_counters(mp, flags) do { } while (0)
100#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) 101#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
102#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \
103 xfs_mod_incore_sb(mp, field, delta, rsvd)
101#endif 104#endif
102 105
103typedef struct xfs_mount { 106typedef struct xfs_mount {
@@ -232,8 +235,6 @@ typedef struct xfs_mount {
232#define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */ 235#define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */
233#define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred 236#define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred
234 * I/O size in stat() */ 237 * I/O size in stat() */
235#define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock
236 counters */
237#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams 238#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
238 allocator */ 239 allocator */
239#define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ 240#define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */
@@ -327,6 +328,8 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
327 * perag get/put wrappers for ref counting 328 * perag get/put wrappers for ref counting
328 */ 329 */
329struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); 330struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
331struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
332 int tag);
330void xfs_perag_put(struct xfs_perag *pag); 333void xfs_perag_put(struct xfs_perag *pag);
331 334
332/* 335/*
diff --git a/fs/xfs/xfs_refcache.h b/fs/xfs/xfs_refcache.h
deleted file mode 100644
index 2dec79edb510..000000000000
--- a/fs/xfs/xfs_refcache.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_REFCACHE_H__
19#define __XFS_REFCACHE_H__
20
21#ifdef HAVE_REFCACHE
22/*
23 * Maximum size (in inodes) for the NFS reference cache
24 */
25#define XFS_REFCACHE_SIZE_MAX 512
26
27struct xfs_inode;
28struct xfs_mount;
29
30extern void xfs_refcache_insert(struct xfs_inode *);
31extern void xfs_refcache_purge_ip(struct xfs_inode *);
32extern void xfs_refcache_purge_mp(struct xfs_mount *);
33extern void xfs_refcache_purge_some(struct xfs_mount *);
34extern void xfs_refcache_resize(int);
35extern void xfs_refcache_destroy(void);
36
37extern void xfs_refcache_iunlock(struct xfs_inode *, uint);
38
39#else
40
41#define xfs_refcache_insert(ip) do { } while (0)
42#define xfs_refcache_purge_ip(ip) do { } while (0)
43#define xfs_refcache_purge_mp(mp) do { } while (0)
44#define xfs_refcache_purge_some(mp) do { } while (0)
45#define xfs_refcache_resize(size) do { } while (0)
46#define xfs_refcache_destroy() do { } while (0)
47
48#define xfs_refcache_iunlock(ip, flags) xfs_iunlock(ip, flags)
49
50#endif
51
52#endif /* __XFS_REFCACHE_H__ */
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 8fca957200df..d2af0a8381a6 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -183,7 +183,7 @@ xfs_rename(
183 * tree quota mechanism would be circumvented. 183 * tree quota mechanism would be circumvented.
184 */ 184 */
185 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 185 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
186 (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { 186 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
187 error = XFS_ERROR(EXDEV); 187 error = XFS_ERROR(EXDEV);
188 goto error_return; 188 goto error_return;
189 } 189 }
@@ -211,7 +211,9 @@ xfs_rename(
211 goto error_return; 211 goto error_return;
212 if (error) 212 if (error)
213 goto abort_return; 213 goto abort_return;
214 xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 214
215 xfs_trans_ichgtime(tp, target_dp,
216 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
215 217
216 if (new_parent && src_is_directory) { 218 if (new_parent && src_is_directory) {
217 error = xfs_bumplink(tp, target_dp); 219 error = xfs_bumplink(tp, target_dp);
@@ -249,7 +251,9 @@ xfs_rename(
249 &first_block, &free_list, spaceres); 251 &first_block, &free_list, spaceres);
250 if (error) 252 if (error)
251 goto abort_return; 253 goto abort_return;
252 xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 254
255 xfs_trans_ichgtime(tp, target_dp,
256 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
253 257
254 /* 258 /*
255 * Decrement the link count on the target since the target 259 * Decrement the link count on the target since the target
@@ -292,7 +296,7 @@ xfs_rename(
292 * inode isn't really being changed, but old unix file systems did 296 * inode isn't really being changed, but old unix file systems did
293 * it and some incremental backup programs won't work without it. 297 * it and some incremental backup programs won't work without it.
294 */ 298 */
295 xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); 299 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
296 300
297 /* 301 /*
298 * Adjust the link count on src_dp. This is necessary when 302 * Adjust the link count on src_dp. This is necessary when
@@ -315,7 +319,7 @@ xfs_rename(
315 if (error) 319 if (error)
316 goto abort_return; 320 goto abort_return;
317 321
318 xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 322 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
319 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); 323 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
320 if (new_parent) 324 if (new_parent)
321 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 325 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 891260fea11e..12a191385310 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -39,6 +39,7 @@
39#include "xfs_trans_space.h" 39#include "xfs_trans_space.h"
40#include "xfs_utils.h" 40#include "xfs_utils.h"
41#include "xfs_trace.h" 41#include "xfs_trace.h"
42#include "xfs_buf.h"
42 43
43 44
44/* 45/*
@@ -1883,13 +1884,13 @@ xfs_growfs_rt(
1883 /* 1884 /*
1884 * Read in the last block of the device, make sure it exists. 1885 * Read in the last block of the device, make sure it exists.
1885 */ 1886 */
1886 error = xfs_read_buf(mp, mp->m_rtdev_targp, 1887 bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
1887 XFS_FSB_TO_BB(mp, nrblocks - 1), 1888 XFS_FSB_TO_BB(mp, nrblocks - 1),
1888 XFS_FSB_TO_BB(mp, 1), 0, &bp); 1889 XFS_FSB_TO_B(mp, 1), 0);
1889 if (error) 1890 if (!bp)
1890 return error; 1891 return EIO;
1891 ASSERT(bp);
1892 xfs_buf_relse(bp); 1892 xfs_buf_relse(bp);
1893
1893 /* 1894 /*
1894 * Calculate new parameters. These are the final values to be reached. 1895 * Calculate new parameters. These are the final values to be reached.
1895 */ 1896 */
@@ -2215,7 +2216,6 @@ xfs_rtmount_init(
2215{ 2216{
2216 xfs_buf_t *bp; /* buffer for last block of subvolume */ 2217 xfs_buf_t *bp; /* buffer for last block of subvolume */
2217 xfs_daddr_t d; /* address of last block of subvolume */ 2218 xfs_daddr_t d; /* address of last block of subvolume */
2218 int error; /* error return value */
2219 xfs_sb_t *sbp; /* filesystem superblock copy in mount */ 2219 xfs_sb_t *sbp; /* filesystem superblock copy in mount */
2220 2220
2221 sbp = &mp->m_sb; 2221 sbp = &mp->m_sb;
@@ -2242,15 +2242,12 @@ xfs_rtmount_init(
2242 (unsigned long long) mp->m_sb.sb_rblocks); 2242 (unsigned long long) mp->m_sb.sb_rblocks);
2243 return XFS_ERROR(EFBIG); 2243 return XFS_ERROR(EFBIG);
2244 } 2244 }
2245 error = xfs_read_buf(mp, mp->m_rtdev_targp, 2245 bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
2246 d - XFS_FSB_TO_BB(mp, 1), 2246 d - XFS_FSB_TO_BB(mp, 1),
2247 XFS_FSB_TO_BB(mp, 1), 0, &bp); 2247 XFS_FSB_TO_B(mp, 1), 0);
2248 if (error) { 2248 if (!bp) {
2249 cmn_err(CE_WARN, 2249 cmn_err(CE_WARN, "XFS: realtime device size check failed");
2250 "XFS: realtime mount -- xfs_read_buf failed, returned %d", error); 2250 return EIO;
2251 if (error == ENOSPC)
2252 return XFS_ERROR(EFBIG);
2253 return error;
2254 } 2251 }
2255 xfs_buf_relse(bp); 2252 xfs_buf_relse(bp);
2256 return 0; 2253 return 0;
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 1b017c657494..1eb2ba586814 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -80,10 +80,12 @@ struct xfs_mount;
80#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 80#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
81#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ 81#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
82#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */ 82#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */
83#define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */
83 84
84#define XFS_SB_VERSION2_OKREALFBITS \ 85#define XFS_SB_VERSION2_OKREALFBITS \
85 (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ 86 (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
86 XFS_SB_VERSION2_ATTR2BIT) 87 XFS_SB_VERSION2_ATTR2BIT | \
88 XFS_SB_VERSION2_PROJID32BIT)
87#define XFS_SB_VERSION2_OKSASHFBITS \ 89#define XFS_SB_VERSION2_OKSASHFBITS \
88 (0) 90 (0)
89#define XFS_SB_VERSION2_OKREALBITS \ 91#define XFS_SB_VERSION2_OKREALBITS \
@@ -495,6 +497,12 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp)
495 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; 497 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
496} 498}
497 499
500static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp)
501{
502 return xfs_sb_version_hasmorebits(sbp) &&
503 (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT);
504}
505
498/* 506/*
499 * end of superblock version macros 507 * end of superblock version macros
500 */ 508 */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 1c47edaea0d2..f6d956b7711e 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -696,7 +696,7 @@ xfs_trans_reserve(
696 * fail if the count would go below zero. 696 * fail if the count would go below zero.
697 */ 697 */
698 if (blocks > 0) { 698 if (blocks > 0) {
699 error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, 699 error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
700 -((int64_t)blocks), rsvd); 700 -((int64_t)blocks), rsvd);
701 if (error != 0) { 701 if (error != 0) {
702 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); 702 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
@@ -767,7 +767,7 @@ undo_log:
767 767
768undo_blocks: 768undo_blocks:
769 if (blocks > 0) { 769 if (blocks > 0) {
770 (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, 770 xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
771 (int64_t)blocks, rsvd); 771 (int64_t)blocks, rsvd);
772 tp->t_blk_res = 0; 772 tp->t_blk_res = 0;
773 } 773 }
@@ -1009,7 +1009,7 @@ void
1009xfs_trans_unreserve_and_mod_sb( 1009xfs_trans_unreserve_and_mod_sb(
1010 xfs_trans_t *tp) 1010 xfs_trans_t *tp)
1011{ 1011{
1012 xfs_mod_sb_t msb[14]; /* If you add cases, add entries */ 1012 xfs_mod_sb_t msb[9]; /* If you add cases, add entries */
1013 xfs_mod_sb_t *msbp; 1013 xfs_mod_sb_t *msbp;
1014 xfs_mount_t *mp = tp->t_mountp; 1014 xfs_mount_t *mp = tp->t_mountp;
1015 /* REFERENCED */ 1015 /* REFERENCED */
@@ -1017,55 +1017,61 @@ xfs_trans_unreserve_and_mod_sb(
1017 int rsvd; 1017 int rsvd;
1018 int64_t blkdelta = 0; 1018 int64_t blkdelta = 0;
1019 int64_t rtxdelta = 0; 1019 int64_t rtxdelta = 0;
1020 int64_t idelta = 0;
1021 int64_t ifreedelta = 0;
1020 1022
1021 msbp = msb; 1023 msbp = msb;
1022 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; 1024 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
1023 1025
1024 /* calculate free blocks delta */ 1026 /* calculate deltas */
1025 if (tp->t_blk_res > 0) 1027 if (tp->t_blk_res > 0)
1026 blkdelta = tp->t_blk_res; 1028 blkdelta = tp->t_blk_res;
1027
1028 if ((tp->t_fdblocks_delta != 0) && 1029 if ((tp->t_fdblocks_delta != 0) &&
1029 (xfs_sb_version_haslazysbcount(&mp->m_sb) || 1030 (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
1030 (tp->t_flags & XFS_TRANS_SB_DIRTY))) 1031 (tp->t_flags & XFS_TRANS_SB_DIRTY)))
1031 blkdelta += tp->t_fdblocks_delta; 1032 blkdelta += tp->t_fdblocks_delta;
1032 1033
1033 if (blkdelta != 0) {
1034 msbp->msb_field = XFS_SBS_FDBLOCKS;
1035 msbp->msb_delta = blkdelta;
1036 msbp++;
1037 }
1038
1039 /* calculate free realtime extents delta */
1040 if (tp->t_rtx_res > 0) 1034 if (tp->t_rtx_res > 0)
1041 rtxdelta = tp->t_rtx_res; 1035 rtxdelta = tp->t_rtx_res;
1042
1043 if ((tp->t_frextents_delta != 0) && 1036 if ((tp->t_frextents_delta != 0) &&
1044 (tp->t_flags & XFS_TRANS_SB_DIRTY)) 1037 (tp->t_flags & XFS_TRANS_SB_DIRTY))
1045 rtxdelta += tp->t_frextents_delta; 1038 rtxdelta += tp->t_frextents_delta;
1046 1039
1040 if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
1041 (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
1042 idelta = tp->t_icount_delta;
1043 ifreedelta = tp->t_ifree_delta;
1044 }
1045
1046 /* apply the per-cpu counters */
1047 if (blkdelta) {
1048 error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
1049 blkdelta, rsvd);
1050 if (error)
1051 goto out;
1052 }
1053
1054 if (idelta) {
1055 error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
1056 idelta, rsvd);
1057 if (error)
1058 goto out_undo_fdblocks;
1059 }
1060
1061 if (ifreedelta) {
1062 error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
1063 ifreedelta, rsvd);
1064 if (error)
1065 goto out_undo_icount;
1066 }
1067
1068 /* apply remaining deltas */
1047 if (rtxdelta != 0) { 1069 if (rtxdelta != 0) {
1048 msbp->msb_field = XFS_SBS_FREXTENTS; 1070 msbp->msb_field = XFS_SBS_FREXTENTS;
1049 msbp->msb_delta = rtxdelta; 1071 msbp->msb_delta = rtxdelta;
1050 msbp++; 1072 msbp++;
1051 } 1073 }
1052 1074
1053 /* apply remaining deltas */
1054
1055 if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
1056 (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
1057 if (tp->t_icount_delta != 0) {
1058 msbp->msb_field = XFS_SBS_ICOUNT;
1059 msbp->msb_delta = tp->t_icount_delta;
1060 msbp++;
1061 }
1062 if (tp->t_ifree_delta != 0) {
1063 msbp->msb_field = XFS_SBS_IFREE;
1064 msbp->msb_delta = tp->t_ifree_delta;
1065 msbp++;
1066 }
1067 }
1068
1069 if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 1075 if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
1070 if (tp->t_dblocks_delta != 0) { 1076 if (tp->t_dblocks_delta != 0) {
1071 msbp->msb_field = XFS_SBS_DBLOCKS; 1077 msbp->msb_field = XFS_SBS_DBLOCKS;
@@ -1115,8 +1121,24 @@ xfs_trans_unreserve_and_mod_sb(
1115 if (msbp > msb) { 1121 if (msbp > msb) {
1116 error = xfs_mod_incore_sb_batch(tp->t_mountp, msb, 1122 error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
1117 (uint)(msbp - msb), rsvd); 1123 (uint)(msbp - msb), rsvd);
1118 ASSERT(error == 0); 1124 if (error)
1125 goto out_undo_ifreecount;
1119 } 1126 }
1127
1128 return;
1129
1130out_undo_ifreecount:
1131 if (ifreedelta)
1132 xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
1133out_undo_icount:
1134 if (idelta)
1135 xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
1136out_undo_fdblocks:
1137 if (blkdelta)
1138 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
1139out:
	1140	ASSERT(error == 0);
1141 return;
1120} 1142}
1121 1143
1122/* 1144/*
@@ -1389,15 +1411,12 @@ xfs_trans_item_committed(
1389 */ 1411 */
1390STATIC void 1412STATIC void
1391xfs_trans_committed( 1413xfs_trans_committed(
1392 struct xfs_trans *tp, 1414 void *arg,
1393 int abortflag) 1415 int abortflag)
1394{ 1416{
1417 struct xfs_trans *tp = arg;
1395 struct xfs_log_item_desc *lidp, *next; 1418 struct xfs_log_item_desc *lidp, *next;
1396 1419
1397 /* Call the transaction's completion callback if there is one. */
1398 if (tp->t_callback != NULL)
1399 tp->t_callback(tp, tp->t_callarg);
1400
1401 list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { 1420 list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
1402 xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); 1421 xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
1403 xfs_trans_free_item_desc(lidp); 1422 xfs_trans_free_item_desc(lidp);
@@ -1525,7 +1544,7 @@ xfs_trans_commit_iclog(
1525 * running in simulation mode (the log is explicitly turned 1544 * running in simulation mode (the log is explicitly turned
1526 * off). 1545 * off).
1527 */ 1546 */
1528 tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed; 1547 tp->t_logcb.cb_func = xfs_trans_committed;
1529 tp->t_logcb.cb_arg = tp; 1548 tp->t_logcb.cb_arg = tp;
1530 1549
1531 /* 1550 /*
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c13c0f97b494..246286b77a86 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -399,8 +399,6 @@ typedef struct xfs_trans {
399 * transaction. */ 399 * transaction. */
400 struct xfs_mount *t_mountp; /* ptr to fs mount struct */ 400 struct xfs_mount *t_mountp; /* ptr to fs mount struct */
401 struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ 401 struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
402 xfs_trans_callback_t t_callback; /* transaction callback */
403 void *t_callarg; /* callback arg */
404 unsigned int t_flags; /* misc flags */ 402 unsigned int t_flags; /* misc flags */
405 int64_t t_icount_delta; /* superblock icount change */ 403 int64_t t_icount_delta; /* superblock icount change */
406 int64_t t_ifree_delta; /* superblock ifree change */ 404 int64_t t_ifree_delta; /* superblock ifree change */
@@ -473,6 +471,7 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
473void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); 471void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
474int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, 472int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
475 xfs_ino_t , uint, uint, struct xfs_inode **); 473 xfs_ino_t , uint, uint, struct xfs_inode **);
474void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
476void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); 475void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
477void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *); 476void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
478void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); 477void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 90af025e6839..c47918c302a5 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -336,7 +336,7 @@ xfs_trans_read_buf(
336 ASSERT(!XFS_BUF_ISASYNC(bp)); 336 ASSERT(!XFS_BUF_ISASYNC(bp));
337 XFS_BUF_READ(bp); 337 XFS_BUF_READ(bp);
338 xfsbdstrat(tp->t_mountp, bp); 338 xfsbdstrat(tp->t_mountp, bp);
339 error = xfs_iowait(bp); 339 error = xfs_buf_iowait(bp);
340 if (error) { 340 if (error) {
341 xfs_ioerror_alert("xfs_trans_read_buf", mp, 341 xfs_ioerror_alert("xfs_trans_read_buf", mp,
342 bp, blkno); 342 bp, blkno);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index cdc53a1050c5..ccb34532768b 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -118,6 +118,36 @@ xfs_trans_ijoin_ref(
118} 118}
119 119
120/* 120/*
121 * Transactional inode timestamp update. Requires the inode to be locked and
122 * joined to the transaction supplied. Relies on the transaction subsystem to
123 * track dirty state and update/writeback the inode accordingly.
124 */
125void
126xfs_trans_ichgtime(
127 struct xfs_trans *tp,
128 struct xfs_inode *ip,
129 int flags)
130{
131 struct inode *inode = VFS_I(ip);
132 timespec_t tv;
133
134 ASSERT(tp);
135 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
136 ASSERT(ip->i_transp == tp);
137
138 tv = current_fs_time(inode->i_sb);
139
140 if ((flags & XFS_ICHGTIME_MOD) &&
141 !timespec_equal(&inode->i_mtime, &tv)) {
142 inode->i_mtime = tv;
143 }
144 if ((flags & XFS_ICHGTIME_CHG) &&
145 !timespec_equal(&inode->i_ctime, &tv)) {
146 inode->i_ctime = tv;
147 }
148}
149
150/*
121 * This is called to mark the fields indicated in fieldmask as needing 151 * This is called to mark the fields indicated in fieldmask as needing
122 * to be logged when the transaction is committed. The inode must 152 * to be logged when the transaction is committed. The inode must
123 * already be associated with the given transaction. 153 * already be associated with the given transaction.
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 320775295e32..26d1867d8156 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -73,8 +73,6 @@ typedef __int32_t xfs_tid_t; /* transaction identifier */
73typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ 73typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
74typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ 74typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */
75 75
76typedef __uint16_t xfs_prid_t; /* prid_t truncated to 16bits in XFS */
77
78typedef __uint32_t xlog_tid_t; /* transaction ID type */ 76typedef __uint32_t xlog_tid_t; /* transaction ID type */
79 77
80/* 78/*
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index b7d5769d2df0..8b32d1a4c5a1 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -56,7 +56,6 @@ xfs_dir_ialloc(
56 mode_t mode, 56 mode_t mode,
57 xfs_nlink_t nlink, 57 xfs_nlink_t nlink,
58 xfs_dev_t rdev, 58 xfs_dev_t rdev,
59 cred_t *credp,
60 prid_t prid, /* project id */ 59 prid_t prid, /* project id */
61 int okalloc, /* ok to allocate new space */ 60 int okalloc, /* ok to allocate new space */
62 xfs_inode_t **ipp, /* pointer to inode; it will be 61 xfs_inode_t **ipp, /* pointer to inode; it will be
@@ -93,7 +92,7 @@ xfs_dir_ialloc(
93 * transaction commit so that no other process can steal 92 * transaction commit so that no other process can steal
94 * the inode(s) that we've just allocated. 93 * the inode(s) that we've just allocated.
95 */ 94 */
96 code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc, 95 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
97 &ialloc_context, &call_again, &ip); 96 &ialloc_context, &call_again, &ip);
98 97
99 /* 98 /*
@@ -197,7 +196,7 @@ xfs_dir_ialloc(
197 * other allocations in this allocation group, 196 * other allocations in this allocation group,
198 * this call should always succeed. 197 * this call should always succeed.
199 */ 198 */
200 code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, 199 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
201 okalloc, &ialloc_context, &call_again, &ip); 200 okalloc, &ialloc_context, &call_again, &ip);
202 201
203 /* 202 /*
@@ -235,7 +234,7 @@ xfs_droplink(
235{ 234{
236 int error; 235 int error;
237 236
238 xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 237 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
239 238
240 ASSERT (ip->i_d.di_nlink > 0); 239 ASSERT (ip->i_d.di_nlink > 0);
241 ip->i_d.di_nlink--; 240 ip->i_d.di_nlink--;
@@ -299,7 +298,7 @@ xfs_bumplink(
299{ 298{
300 if (ip->i_d.di_nlink >= XFS_MAXLINK) 299 if (ip->i_d.di_nlink >= XFS_MAXLINK)
301 return XFS_ERROR(EMLINK); 300 return XFS_ERROR(EMLINK);
302 xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 301 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
303 302
304 ASSERT(ip->i_d.di_nlink > 0); 303 ASSERT(ip->i_d.di_nlink > 0);
305 ip->i_d.di_nlink++; 304 ip->i_d.di_nlink++;
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f55b9678264f..456fca314933 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -19,8 +19,7 @@
19#define __XFS_UTILS_H__ 19#define __XFS_UTILS_H__
20 20
21extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, 21extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
22 xfs_dev_t, cred_t *, prid_t, int, 22 xfs_dev_t, prid_t, int, xfs_inode_t **, int *);
23 xfs_inode_t **, int *);
24extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *); 23extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
25extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *); 24extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
26extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *); 25extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4c7c7bfb2b2f..8e4a63c4151a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -114,7 +114,7 @@ xfs_setattr(
114 */ 114 */
115 ASSERT(udqp == NULL); 115 ASSERT(udqp == NULL);
116 ASSERT(gdqp == NULL); 116 ASSERT(gdqp == NULL);
117 code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid, 117 code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
118 qflags, &udqp, &gdqp); 118 qflags, &udqp, &gdqp);
119 if (code) 119 if (code)
120 return code; 120 return code;
@@ -184,8 +184,11 @@ xfs_setattr(
184 ip->i_size == 0 && ip->i_d.di_nextents == 0) { 184 ip->i_size == 0 && ip->i_d.di_nextents == 0) {
185 xfs_iunlock(ip, XFS_ILOCK_EXCL); 185 xfs_iunlock(ip, XFS_ILOCK_EXCL);
186 lock_flags &= ~XFS_ILOCK_EXCL; 186 lock_flags &= ~XFS_ILOCK_EXCL;
187 if (mask & ATTR_CTIME) 187 if (mask & ATTR_CTIME) {
188 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 188 inode->i_mtime = inode->i_ctime =
189 current_fs_time(inode->i_sb);
190 xfs_mark_inode_dirty_sync(ip);
191 }
189 code = 0; 192 code = 0;
190 goto error_return; 193 goto error_return;
191 } 194 }
@@ -1253,8 +1256,7 @@ xfs_create(
1253 struct xfs_name *name, 1256 struct xfs_name *name,
1254 mode_t mode, 1257 mode_t mode,
1255 xfs_dev_t rdev, 1258 xfs_dev_t rdev,
1256 xfs_inode_t **ipp, 1259 xfs_inode_t **ipp)
1257 cred_t *credp)
1258{ 1260{
1259 int is_dir = S_ISDIR(mode); 1261 int is_dir = S_ISDIR(mode);
1260 struct xfs_mount *mp = dp->i_mount; 1262 struct xfs_mount *mp = dp->i_mount;
@@ -1266,7 +1268,7 @@ xfs_create(
1266 boolean_t unlock_dp_on_error = B_FALSE; 1268 boolean_t unlock_dp_on_error = B_FALSE;
1267 uint cancel_flags; 1269 uint cancel_flags;
1268 int committed; 1270 int committed;
1269 xfs_prid_t prid; 1271 prid_t prid;
1270 struct xfs_dquot *udqp = NULL; 1272 struct xfs_dquot *udqp = NULL;
1271 struct xfs_dquot *gdqp = NULL; 1273 struct xfs_dquot *gdqp = NULL;
1272 uint resblks; 1274 uint resblks;
@@ -1279,9 +1281,9 @@ xfs_create(
1279 return XFS_ERROR(EIO); 1281 return XFS_ERROR(EIO);
1280 1282
1281 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1283 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1282 prid = dp->i_d.di_projid; 1284 prid = xfs_get_projid(dp);
1283 else 1285 else
1284 prid = dfltprid; 1286 prid = XFS_PROJID_DEFAULT;
1285 1287
1286 /* 1288 /*
1287 * Make sure that we have allocated dquot(s) on disk. 1289 * Make sure that we have allocated dquot(s) on disk.
@@ -1360,7 +1362,7 @@ xfs_create(
1360 * entry pointing to them, but a directory also the "." entry 1362 * entry pointing to them, but a directory also the "." entry
1361 * pointing to itself. 1363 * pointing to itself.
1362 */ 1364 */
1363 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp, 1365 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
1364 prid, resblks > 0, &ip, &committed); 1366 prid, resblks > 0, &ip, &committed);
1365 if (error) { 1367 if (error) {
1366 if (error == ENOSPC) 1368 if (error == ENOSPC)
@@ -1391,7 +1393,7 @@ xfs_create(
1391 ASSERT(error != ENOSPC); 1393 ASSERT(error != ENOSPC);
1392 goto out_trans_abort; 1394 goto out_trans_abort;
1393 } 1395 }
1394 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1396 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1395 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1397 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1396 1398
1397 if (is_dir) { 1399 if (is_dir) {
@@ -1742,7 +1744,7 @@ xfs_remove(
1742 ASSERT(error != ENOENT); 1744 ASSERT(error != ENOENT);
1743 goto out_bmap_cancel; 1745 goto out_bmap_cancel;
1744 } 1746 }
1745 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1747 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1746 1748
1747 if (is_dir) { 1749 if (is_dir) {
1748 /* 1750 /*
@@ -1880,7 +1882,7 @@ xfs_link(
1880 * the tree quota mechanism could be circumvented. 1882 * the tree quota mechanism could be circumvented.
1881 */ 1883 */
1882 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 1884 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1883 (tdp->i_d.di_projid != sip->i_d.di_projid))) { 1885 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1884 error = XFS_ERROR(EXDEV); 1886 error = XFS_ERROR(EXDEV);
1885 goto error_return; 1887 goto error_return;
1886 } 1888 }
@@ -1895,7 +1897,7 @@ xfs_link(
1895 &first_block, &free_list, resblks); 1897 &first_block, &free_list, resblks);
1896 if (error) 1898 if (error)
1897 goto abort_return; 1899 goto abort_return;
1898 xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1900 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1899 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); 1901 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1900 1902
1901 error = xfs_bumplink(tp, sip); 1903 error = xfs_bumplink(tp, sip);
@@ -1933,8 +1935,7 @@ xfs_symlink(
1933 struct xfs_name *link_name, 1935 struct xfs_name *link_name,
1934 const char *target_path, 1936 const char *target_path,
1935 mode_t mode, 1937 mode_t mode,
1936 xfs_inode_t **ipp, 1938 xfs_inode_t **ipp)
1937 cred_t *credp)
1938{ 1939{
1939 xfs_mount_t *mp = dp->i_mount; 1940 xfs_mount_t *mp = dp->i_mount;
1940 xfs_trans_t *tp; 1941 xfs_trans_t *tp;
@@ -1955,7 +1956,7 @@ xfs_symlink(
1955 int byte_cnt; 1956 int byte_cnt;
1956 int n; 1957 int n;
1957 xfs_buf_t *bp; 1958 xfs_buf_t *bp;
1958 xfs_prid_t prid; 1959 prid_t prid;
1959 struct xfs_dquot *udqp, *gdqp; 1960 struct xfs_dquot *udqp, *gdqp;
1960 uint resblks; 1961 uint resblks;
1961 1962
@@ -1978,9 +1979,9 @@ xfs_symlink(
1978 1979
1979 udqp = gdqp = NULL; 1980 udqp = gdqp = NULL;
1980 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1981 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1981 prid = dp->i_d.di_projid; 1982 prid = xfs_get_projid(dp);
1982 else 1983 else
1983 prid = (xfs_prid_t)dfltprid; 1984 prid = XFS_PROJID_DEFAULT;
1984 1985
1985 /* 1986 /*
1986 * Make sure that we have allocated dquot(s) on disk. 1987 * Make sure that we have allocated dquot(s) on disk.
@@ -2046,8 +2047,8 @@ xfs_symlink(
2046 /* 2047 /*
2047 * Allocate an inode for the symlink. 2048 * Allocate an inode for the symlink.
2048 */ 2049 */
2049 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 2050 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
2050 1, 0, credp, prid, resblks > 0, &ip, NULL); 2051 prid, resblks > 0, &ip, NULL);
2051 if (error) { 2052 if (error) {
2052 if (error == ENOSPC) 2053 if (error == ENOSPC)
2053 goto error_return; 2054 goto error_return;
@@ -2129,7 +2130,7 @@ xfs_symlink(
2129 &first_block, &free_list, resblks); 2130 &first_block, &free_list, resblks);
2130 if (error) 2131 if (error)
2131 goto error1; 2132 goto error1;
2132 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2133 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2133 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2134 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2134 2135
2135 /* 2136 /*
@@ -2272,7 +2273,7 @@ xfs_alloc_file_space(
2272 count = len; 2273 count = len;
2273 imapp = &imaps[0]; 2274 imapp = &imaps[0];
2274 nimaps = 1; 2275 nimaps = 1;
2275 bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); 2276 bmapi_flag = XFS_BMAPI_WRITE | alloc_type;
2276 startoffset_fsb = XFS_B_TO_FSBT(mp, offset); 2277 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
2277 allocatesize_fsb = XFS_B_TO_FSB(mp, count); 2278 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
2278 2279
@@ -2431,9 +2432,9 @@ xfs_zero_remaining_bytes(
2431 if (endoff > ip->i_size) 2432 if (endoff > ip->i_size)
2432 endoff = ip->i_size; 2433 endoff = ip->i_size;
2433 2434
2434 bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, 2435 bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
2435 XFS_IS_REALTIME_INODE(ip) ? 2436 mp->m_rtdev_targp : mp->m_ddev_targp,
2436 mp->m_rtdev_targp : mp->m_ddev_targp); 2437 mp->m_sb.sb_blocksize, XBF_DONT_BLOCK);
2437 if (!bp) 2438 if (!bp)
2438 return XFS_ERROR(ENOMEM); 2439 return XFS_ERROR(ENOMEM);
2439 2440
@@ -2459,7 +2460,7 @@ xfs_zero_remaining_bytes(
2459 XFS_BUF_READ(bp); 2460 XFS_BUF_READ(bp);
2460 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); 2461 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
2461 xfsbdstrat(mp, bp); 2462 xfsbdstrat(mp, bp);
2462 error = xfs_iowait(bp); 2463 error = xfs_buf_iowait(bp);
2463 if (error) { 2464 if (error) {
2464 xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", 2465 xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
2465 mp, bp, XFS_BUF_ADDR(bp)); 2466 mp, bp, XFS_BUF_ADDR(bp));
@@ -2472,7 +2473,7 @@ xfs_zero_remaining_bytes(
2472 XFS_BUF_UNREAD(bp); 2473 XFS_BUF_UNREAD(bp);
2473 XFS_BUF_WRITE(bp); 2474 XFS_BUF_WRITE(bp);
2474 xfsbdstrat(mp, bp); 2475 xfsbdstrat(mp, bp);
2475 error = xfs_iowait(bp); 2476 error = xfs_buf_iowait(bp);
2476 if (error) { 2477 if (error) {
2477 xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", 2478 xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
2478 mp, bp, XFS_BUF_ADDR(bp)); 2479 mp, bp, XFS_BUF_ADDR(bp));
@@ -2711,6 +2712,7 @@ xfs_change_file_space(
2711 xfs_off_t llen; 2712 xfs_off_t llen;
2712 xfs_trans_t *tp; 2713 xfs_trans_t *tp;
2713 struct iattr iattr; 2714 struct iattr iattr;
2715 int prealloc_type;
2714 2716
2715 if (!S_ISREG(ip->i_d.di_mode)) 2717 if (!S_ISREG(ip->i_d.di_mode))
2716 return XFS_ERROR(EINVAL); 2718 return XFS_ERROR(EINVAL);
@@ -2753,12 +2755,17 @@ xfs_change_file_space(
2753 * size to be changed. 2755 * size to be changed.
2754 */ 2756 */
2755 setprealloc = clrprealloc = 0; 2757 setprealloc = clrprealloc = 0;
2758 prealloc_type = XFS_BMAPI_PREALLOC;
2756 2759
2757 switch (cmd) { 2760 switch (cmd) {
2761 case XFS_IOC_ZERO_RANGE:
2762 prealloc_type |= XFS_BMAPI_CONVERT;
2763 xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0);
2764 /* FALLTHRU */
2758 case XFS_IOC_RESVSP: 2765 case XFS_IOC_RESVSP:
2759 case XFS_IOC_RESVSP64: 2766 case XFS_IOC_RESVSP64:
2760 error = xfs_alloc_file_space(ip, startoffset, bf->l_len, 2767 error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
2761 1, attr_flags); 2768 prealloc_type, attr_flags);
2762 if (error) 2769 if (error)
2763 return error; 2770 return error;
2764 setprealloc = 1; 2771 setprealloc = 1;
@@ -2827,7 +2834,7 @@ xfs_change_file_space(
2827 if (ip->i_d.di_mode & S_IXGRP) 2834 if (ip->i_d.di_mode & S_IXGRP)
2828 ip->i_d.di_mode &= ~S_ISGID; 2835 ip->i_d.di_mode &= ~S_ISGID;
2829 2836
2830 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2837 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2831 } 2838 }
2832 if (setprealloc) 2839 if (setprealloc)
2833 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; 2840 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index d8dfa8d0dadd..f6702927eee4 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -2,7 +2,6 @@
2#define _XFS_VNODEOPS_H 1 2#define _XFS_VNODEOPS_H 1
3 3
4struct attrlist_cursor_kern; 4struct attrlist_cursor_kern;
5struct cred;
6struct file; 5struct file;
7struct iattr; 6struct iattr;
8struct inode; 7struct inode;
@@ -26,7 +25,7 @@ int xfs_inactive(struct xfs_inode *ip);
26int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, 25int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
27 struct xfs_inode **ipp, struct xfs_name *ci_name); 26 struct xfs_inode **ipp, struct xfs_name *ci_name);
28int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, 27int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
29 xfs_dev_t rdev, struct xfs_inode **ipp, cred_t *credp); 28 xfs_dev_t rdev, struct xfs_inode **ipp);
30int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, 29int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
31 struct xfs_inode *ip); 30 struct xfs_inode *ip);
32int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, 31int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
@@ -34,8 +33,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
34int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, 33int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
35 xfs_off_t *offset, filldir_t filldir); 34 xfs_off_t *offset, filldir_t filldir);
36int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, 35int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
37 const char *target_path, mode_t mode, struct xfs_inode **ipp, 36 const char *target_path, mode_t mode, struct xfs_inode **ipp);
38 cred_t *credp);
39int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); 37int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
40int xfs_change_file_space(struct xfs_inode *ip, int cmd, 38int xfs_change_file_space(struct xfs_inode *ip, int cmd,
41 xfs_flock64_t *bf, xfs_off_t offset, int attr_flags); 39 xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
index 8554cb6a81b9..a3216655d657 100644
--- a/include/asm-generic/ioctls.h
+++ b/include/asm-generic/ioctls.h
@@ -62,7 +62,9 @@
62#define TCSETSW2 _IOW('T', 0x2C, struct termios2) 62#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
63#define TCSETSF2 _IOW('T', 0x2D, struct termios2) 63#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
64#define TIOCGRS485 0x542E 64#define TIOCGRS485 0x542E
65#ifndef TIOCSRS485
65#define TIOCSRS485 0x542F 66#define TIOCSRS485 0x542F
67#endif
66#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 68#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
67#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ 69#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
68#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ 70#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 08923b684768..d17784ea37ff 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
55 */ 55 */
56#define per_cpu(var, cpu) \ 56#define per_cpu(var, cpu) \
57 (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) 57 (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
58#define __get_cpu_var(var) \
59 (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
60#define __raw_get_cpu_var(var) \
61 (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
62 58
63#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) 59#ifndef __this_cpu_ptr
64#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) 60#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
61#endif
62#ifdef CONFIG_DEBUG_PREEMPT
63#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
64#else
65#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
66#endif
65 67
68#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
69#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
66 70
67#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA 71#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
68extern void setup_per_cpu_areas(void); 72extern void setup_per_cpu_areas(void);
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
index 8d441064a30d..a10a90791976 100644
--- a/include/linux/altera_uart.h
+++ b/include/linux/altera_uart.h
@@ -5,10 +5,15 @@
5#ifndef __ALTUART_H 5#ifndef __ALTUART_H
6#define __ALTUART_H 6#define __ALTUART_H
7 7
8#include <linux/init.h>
9
8struct altera_uart_platform_uart { 10struct altera_uart_platform_uart {
9 unsigned long mapbase; /* Physical address base */ 11 unsigned long mapbase; /* Physical address base */
10 unsigned int irq; /* Interrupt vector */ 12 unsigned int irq; /* Interrupt vector */
11 unsigned int uartclk; /* UART clock rate */ 13 unsigned int uartclk; /* UART clock rate */
14 unsigned int bus_shift; /* Bus shift (address stride) */
12}; 15};
13 16
17int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp);
18
14#endif /* __ALTUART_H */ 19#endif /* __ALTUART_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5274103434ad..ba679992d39b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -346,8 +346,15 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
346} 346}
347 347
348#else 348#else
349#define bvec_kmap_irq(bvec, flags) (page_address((bvec)->bv_page) + (bvec)->bv_offset) 349static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
350#define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) 350{
351 return page_address(bvec->bv_page) + bvec->bv_offset;
352}
353
354static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
355{
356 *flags = 0;
357}
351#endif 358#endif
352 359
353static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, 360static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
@@ -496,6 +503,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
496#define bip_for_each_vec(bvl, bip, i) \ 503#define bip_for_each_vec(bvl, bip, i) \
497 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) 504 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
498 505
506#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
507 for_each_bio(_bio) \
508 bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
509
499#define bio_integrity(bio) (bio->bi_integrity != NULL) 510#define bio_integrity(bio) (bio->bi_integrity != NULL)
500 511
501extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); 512extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ca83a97c9715..0437ab6bb54c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -97,6 +97,7 @@ struct bio {
97#define BIO_NULL_MAPPED 9 /* contains invalid user pages */ 97#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
98#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ 98#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
99#define BIO_QUIET 11 /* Make BIO Quiet */ 99#define BIO_QUIET 11 /* Make BIO Quiet */
100#define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */
100#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) 101#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
101 102
102/* 103/*
@@ -130,6 +131,8 @@ enum rq_flag_bits {
130 /* bio only flags */ 131 /* bio only flags */
131 __REQ_UNPLUG, /* unplug the immediately after submission */ 132 __REQ_UNPLUG, /* unplug the immediately after submission */
132 __REQ_RAHEAD, /* read ahead, can fail anytime */ 133 __REQ_RAHEAD, /* read ahead, can fail anytime */
134 __REQ_THROTTLED, /* This bio has already been subjected to
135 * throttling rules. Don't do it again. */
133 136
134 /* request only flags */ 137 /* request only flags */
135 __REQ_SORTED, /* elevator knows about this request */ 138 __REQ_SORTED, /* elevator knows about this request */
@@ -143,10 +146,8 @@ enum rq_flag_bits {
143 __REQ_FAILED, /* set if the request failed */ 146 __REQ_FAILED, /* set if the request failed */
144 __REQ_QUIET, /* don't worry about errors */ 147 __REQ_QUIET, /* don't worry about errors */
145 __REQ_PREEMPT, /* set for "ide_preempt" requests */ 148 __REQ_PREEMPT, /* set for "ide_preempt" requests */
146 __REQ_ORDERED_COLOR, /* is before or after barrier */
147 __REQ_ALLOCED, /* request came from our alloc pool */ 149 __REQ_ALLOCED, /* request came from our alloc pool */
148 __REQ_COPY_USER, /* contains copies of user pages */ 150 __REQ_COPY_USER, /* contains copies of user pages */
149 __REQ_INTEGRITY, /* integrity metadata has been remapped */
150 __REQ_FLUSH, /* request for cache flush */ 151 __REQ_FLUSH, /* request for cache flush */
151 __REQ_IO_STAT, /* account I/O stat */ 152 __REQ_IO_STAT, /* account I/O stat */
152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 153 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
@@ -168,10 +169,12 @@ enum rq_flag_bits {
168 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 169 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
169#define REQ_COMMON_MASK \ 170#define REQ_COMMON_MASK \
170 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ 171 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
171 REQ_META| REQ_DISCARD | REQ_NOIDLE) 172 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
173#define REQ_CLONE_MASK REQ_COMMON_MASK
172 174
173#define REQ_UNPLUG (1 << __REQ_UNPLUG) 175#define REQ_UNPLUG (1 << __REQ_UNPLUG)
174#define REQ_RAHEAD (1 << __REQ_RAHEAD) 176#define REQ_RAHEAD (1 << __REQ_RAHEAD)
177#define REQ_THROTTLED (1 << __REQ_THROTTLED)
175 178
176#define REQ_SORTED (1 << __REQ_SORTED) 179#define REQ_SORTED (1 << __REQ_SORTED)
177#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 180#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@@ -184,10 +187,8 @@ enum rq_flag_bits {
184#define REQ_FAILED (1 << __REQ_FAILED) 187#define REQ_FAILED (1 << __REQ_FAILED)
185#define REQ_QUIET (1 << __REQ_QUIET) 188#define REQ_QUIET (1 << __REQ_QUIET)
186#define REQ_PREEMPT (1 << __REQ_PREEMPT) 189#define REQ_PREEMPT (1 << __REQ_PREEMPT)
187#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
188#define REQ_ALLOCED (1 << __REQ_ALLOCED) 190#define REQ_ALLOCED (1 << __REQ_ALLOCED)
189#define REQ_COPY_USER (1 << __REQ_COPY_USER) 191#define REQ_COPY_USER (1 << __REQ_COPY_USER)
190#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
191#define REQ_FLUSH (1 << __REQ_FLUSH) 192#define REQ_FLUSH (1 << __REQ_FLUSH)
192#define REQ_IO_STAT (1 << __REQ_IO_STAT) 193#define REQ_IO_STAT (1 << __REQ_IO_STAT)
193#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 194#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..009b80e49f53 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ struct request {
115 void *elevator_private3; 115 void *elevator_private3;
116 116
117 struct gendisk *rq_disk; 117 struct gendisk *rq_disk;
118 struct hd_struct *part;
118 unsigned long start_time; 119 unsigned long start_time;
119#ifdef CONFIG_BLK_CGROUP 120#ifdef CONFIG_BLK_CGROUP
120 unsigned long long start_time_ns; 121 unsigned long long start_time_ns;
@@ -124,6 +125,9 @@ struct request {
124 * physical address coalescing is performed. 125 * physical address coalescing is performed.
125 */ 126 */
126 unsigned short nr_phys_segments; 127 unsigned short nr_phys_segments;
128#if defined(CONFIG_BLK_DEV_INTEGRITY)
129 unsigned short nr_integrity_segments;
130#endif
127 131
128 unsigned short ioprio; 132 unsigned short ioprio;
129 133
@@ -243,6 +247,7 @@ struct queue_limits {
243 247
244 unsigned short logical_block_size; 248 unsigned short logical_block_size;
245 unsigned short max_segments; 249 unsigned short max_segments;
250 unsigned short max_integrity_segments;
246 251
247 unsigned char misaligned; 252 unsigned char misaligned;
248 unsigned char discard_misaligned; 253 unsigned char discard_misaligned;
@@ -355,18 +360,25 @@ struct request_queue
355 struct blk_trace *blk_trace; 360 struct blk_trace *blk_trace;
356#endif 361#endif
357 /* 362 /*
358 * reserved for flush operations 363 * for flush operations
359 */ 364 */
360 unsigned int ordered, next_ordered, ordseq; 365 unsigned int flush_flags;
361 int orderr, ordcolor; 366 unsigned int flush_seq;
362 struct request pre_flush_rq, bar_rq, post_flush_rq; 367 int flush_err;
363 struct request *orig_bar_rq; 368 struct request flush_rq;
369 struct request *orig_flush_rq;
370 struct list_head pending_flushes;
364 371
365 struct mutex sysfs_lock; 372 struct mutex sysfs_lock;
366 373
367#if defined(CONFIG_BLK_DEV_BSG) 374#if defined(CONFIG_BLK_DEV_BSG)
368 struct bsg_class_device bsg_dev; 375 struct bsg_class_device bsg_dev;
369#endif 376#endif
377
378#ifdef CONFIG_BLK_DEV_THROTTLING
379 /* Throttle data */
380 struct throtl_data *td;
381#endif
370}; 382};
371 383
372#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 384#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
@@ -462,56 +474,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
462 __clear_bit(flag, &q->queue_flags); 474 __clear_bit(flag, &q->queue_flags);
463} 475}
464 476
465enum {
466 /*
467 * Hardbarrier is supported with one of the following methods.
468 *
469 * NONE : hardbarrier unsupported
470 * DRAIN : ordering by draining is enough
471 * DRAIN_FLUSH : ordering by draining w/ pre and post flushes
472 * DRAIN_FUA : ordering by draining w/ pre flush and FUA write
473 * TAG : ordering by tag is enough
474 * TAG_FLUSH : ordering by tag w/ pre and post flushes
475 * TAG_FUA : ordering by tag w/ pre flush and FUA write
476 */
477 QUEUE_ORDERED_BY_DRAIN = 0x01,
478 QUEUE_ORDERED_BY_TAG = 0x02,
479 QUEUE_ORDERED_DO_PREFLUSH = 0x10,
480 QUEUE_ORDERED_DO_BAR = 0x20,
481 QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
482 QUEUE_ORDERED_DO_FUA = 0x80,
483
484 QUEUE_ORDERED_NONE = 0x00,
485
486 QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN |
487 QUEUE_ORDERED_DO_BAR,
488 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
489 QUEUE_ORDERED_DO_PREFLUSH |
490 QUEUE_ORDERED_DO_POSTFLUSH,
491 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
492 QUEUE_ORDERED_DO_PREFLUSH |
493 QUEUE_ORDERED_DO_FUA,
494
495 QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG |
496 QUEUE_ORDERED_DO_BAR,
497 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
498 QUEUE_ORDERED_DO_PREFLUSH |
499 QUEUE_ORDERED_DO_POSTFLUSH,
500 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
501 QUEUE_ORDERED_DO_PREFLUSH |
502 QUEUE_ORDERED_DO_FUA,
503
504 /*
505 * Ordered operation sequence
506 */
507 QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
508 QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
509 QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
510 QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
511 QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
512 QUEUE_ORDSEQ_DONE = 0x20,
513};
514
515#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 477#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
516#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 478#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
517#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 479#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -521,7 +483,6 @@ enum {
521#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 483#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
522#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 484#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
523#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 485#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
524#define blk_queue_flushing(q) ((q)->ordseq)
525#define blk_queue_stackable(q) \ 486#define blk_queue_stackable(q) \
526 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 487 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
527#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 488#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -592,7 +553,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
592 * it already be started by driver. 553 * it already be started by driver.
593 */ 554 */
594#define RQ_NOMERGE_FLAGS \ 555#define RQ_NOMERGE_FLAGS \
595 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 556 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
557 REQ_FLUSH | REQ_FUA)
596#define rq_mergeable(rq) \ 558#define rq_mergeable(rq) \
597 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 559 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
598 (((rq)->cmd_flags & REQ_DISCARD) || \ 560 (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -851,7 +813,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
851extern void blk_queue_max_discard_sectors(struct request_queue *q, 813extern void blk_queue_max_discard_sectors(struct request_queue *q,
852 unsigned int max_discard_sectors); 814 unsigned int max_discard_sectors);
853extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 815extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
854extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); 816extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
855extern void blk_queue_alignment_offset(struct request_queue *q, 817extern void blk_queue_alignment_offset(struct request_queue *q,
856 unsigned int alignment); 818 unsigned int alignment);
857extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); 819extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -881,12 +843,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
881extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 843extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
882extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 844extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
883extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 845extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
846extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
884extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 847extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
885extern int blk_queue_ordered(struct request_queue *, unsigned);
886extern bool blk_do_ordered(struct request_queue *, struct request **);
887extern unsigned blk_ordered_cur_seq(struct request_queue *);
888extern unsigned blk_ordered_req_seq(struct request *);
889extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
890 848
891extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 849extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
892extern void blk_dump_rq_flags(struct request *, char *); 850extern void blk_dump_rq_flags(struct request *, char *);
@@ -919,27 +877,20 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
919 return NULL; 877 return NULL;
920 return bqt->tag_index[tag]; 878 return bqt->tag_index[tag];
921} 879}
922enum{ 880
923 BLKDEV_WAIT, /* wait for completion */ 881#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
924 BLKDEV_BARRIER, /* issue request with barrier */ 882
925 BLKDEV_SECURE, /* secure discard */ 883extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
926};
927#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
928#define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER)
929#define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE)
930extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
931 unsigned long);
932extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 884extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
933 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 885 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
934extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 886extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
935 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 887 sector_t nr_sects, gfp_t gfp_mask);
936static inline int sb_issue_discard(struct super_block *sb, 888static inline int sb_issue_discard(struct super_block *sb, sector_t block,
937 sector_t block, sector_t nr_blocks) 889 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
938{ 890{
939 block <<= (sb->s_blocksize_bits - 9); 891 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
940 nr_blocks <<= (sb->s_blocksize_bits - 9); 892 nr_blocks << (sb->s_blocksize_bits - 9),
941 return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS, 893 gfp_mask, flags);
942 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
943} 894}
944 895
945extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 896extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1004,7 +955,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
1004 return q->limits.physical_block_size; 955 return q->limits.physical_block_size;
1005} 956}
1006 957
1007static inline int bdev_physical_block_size(struct block_device *bdev) 958static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1008{ 959{
1009 return queue_physical_block_size(bdev_get_queue(bdev)); 960 return queue_physical_block_size(bdev_get_queue(bdev));
1010} 961}
@@ -1093,11 +1044,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
1093 return q ? q->dma_alignment : 511; 1044 return q ? q->dma_alignment : 511;
1094} 1045}
1095 1046
1096static inline int blk_rq_aligned(struct request_queue *q, void *addr, 1047static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1097 unsigned int len) 1048 unsigned int len)
1098{ 1049{
1099 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 1050 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1100 return !((unsigned long)addr & alignment) && !(len & alignment); 1051 return !(addr & alignment) && !(len & alignment);
1101} 1052}
1102 1053
1103/* assumes size > 256 */ 1054/* assumes size > 256 */
@@ -1127,6 +1078,7 @@ static inline void put_dev_sector(Sector p)
1127 1078
1128struct work_struct; 1079struct work_struct;
1129int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1080int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1081int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
1130 1082
1131#ifdef CONFIG_BLK_CGROUP 1083#ifdef CONFIG_BLK_CGROUP
1132/* 1084/*
@@ -1170,6 +1122,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
1170} 1122}
1171#endif 1123#endif
1172 1124
1125#ifdef CONFIG_BLK_DEV_THROTTLING
1126extern int blk_throtl_init(struct request_queue *q);
1127extern void blk_throtl_exit(struct request_queue *q);
1128extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
1129extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
1130extern void throtl_shutdown_timer_wq(struct request_queue *q);
1131#else /* CONFIG_BLK_DEV_THROTTLING */
1132static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
1133{
1134 return 0;
1135}
1136
1137static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1138static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
1139static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
1140static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
1141#endif /* CONFIG_BLK_DEV_THROTTLING */
1142
1173#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1143#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1174 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 1144 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1175#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 1145#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1213,8 +1183,13 @@ struct blk_integrity {
1213extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1183extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
1214extern void blk_integrity_unregister(struct gendisk *); 1184extern void blk_integrity_unregister(struct gendisk *);
1215extern int blk_integrity_compare(struct gendisk *, struct gendisk *); 1185extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1216extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); 1186extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1217extern int blk_rq_count_integrity_sg(struct request *); 1187 struct scatterlist *);
1188extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1189extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
1190 struct request *);
1191extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
1192 struct bio *);
1218 1193
1219static inline 1194static inline
1220struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1195struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1210,32 @@ static inline int blk_integrity_rq(struct request *rq)
1235 return bio_integrity(rq->bio); 1210 return bio_integrity(rq->bio);
1236} 1211}
1237 1212
1213static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1214 unsigned int segs)
1215{
1216 q->limits.max_integrity_segments = segs;
1217}
1218
1219static inline unsigned short
1220queue_max_integrity_segments(struct request_queue *q)
1221{
1222 return q->limits.max_integrity_segments;
1223}
1224
1238#else /* CONFIG_BLK_DEV_INTEGRITY */ 1225#else /* CONFIG_BLK_DEV_INTEGRITY */
1239 1226
1240#define blk_integrity_rq(rq) (0) 1227#define blk_integrity_rq(rq) (0)
1241#define blk_rq_count_integrity_sg(a) (0) 1228#define blk_rq_count_integrity_sg(a, b) (0)
1242#define blk_rq_map_integrity_sg(a, b) (0) 1229#define blk_rq_map_integrity_sg(a, b, c) (0)
1243#define bdev_get_integrity(a) (0) 1230#define bdev_get_integrity(a) (0)
1244#define blk_get_integrity(a) (0) 1231#define blk_get_integrity(a) (0)
1245#define blk_integrity_compare(a, b) (0) 1232#define blk_integrity_compare(a, b) (0)
1246#define blk_integrity_register(a, b) (0) 1233#define blk_integrity_register(a, b) (0)
1247#define blk_integrity_unregister(a) do { } while (0); 1234#define blk_integrity_unregister(a) do { } while (0);
1235#define blk_queue_max_integrity_segments(a, b) do { } while (0);
1236#define queue_max_integrity_segments(a) (0)
1237#define blk_integrity_merge_rq(a, b, c) (0)
1238#define blk_integrity_merge_bio(a, b, c) (0)
1248 1239
1249#endif /* CONFIG_BLK_DEV_INTEGRITY */ 1240#endif /* CONFIG_BLK_DEV_INTEGRITY */
1250 1241
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index ec94c12f21da..dd1b25b2641c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -32,7 +32,6 @@ enum bh_state_bits {
32 BH_Delay, /* Buffer is not yet allocated on disk */ 32 BH_Delay, /* Buffer is not yet allocated on disk */
33 BH_Boundary, /* Block is followed by a discontiguity */ 33 BH_Boundary, /* Block is followed by a discontiguity */
34 BH_Write_EIO, /* I/O error on write */ 34 BH_Write_EIO, /* I/O error on write */
35 BH_Eopnotsupp, /* operation not supported (barrier) */
36 BH_Unwritten, /* Buffer is allocated on disk but not written */ 35 BH_Unwritten, /* Buffer is allocated on disk but not written */
37 BH_Quiet, /* Buffer Error Prinks to be quiet */ 36 BH_Quiet, /* Buffer Error Prinks to be quiet */
38 37
@@ -124,7 +123,6 @@ BUFFER_FNS(Async_Write, async_write)
124BUFFER_FNS(Delay, delay) 123BUFFER_FNS(Delay, delay)
125BUFFER_FNS(Boundary, boundary) 124BUFFER_FNS(Boundary, boundary)
126BUFFER_FNS(Write_EIO, write_io_error) 125BUFFER_FNS(Write_EIO, write_io_error)
127BUFFER_FNS(Eopnotsupp, eopnotsupp)
128BUFFER_FNS(Unwritten, unwritten) 126BUFFER_FNS(Unwritten, unwritten)
129 127
130#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) 128#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
diff --git a/include/linux/device.h b/include/linux/device.h
index 516fecacf27b..dd4895313468 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -751,4 +751,11 @@ do { \
751 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) 751 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
752#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ 752#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
753 MODULE_ALIAS("char-major-" __stringify(major) "-*") 753 MODULE_ALIAS("char-major-" __stringify(major) "-*")
754
755#ifdef CONFIG_SYSFS_DEPRECATED
756extern long sysfs_deprecated;
757#else
758#define sysfs_deprecated 0
759#endif
760
754#endif /* _DEVICE_H_ */ 761#endif /* _DEVICE_H_ */
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 0b3518c42356..d4e02f5353a0 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -48,10 +48,10 @@ typedef void dlm_lockspace_t;
48 * 48 *
49 * 0 if lock request was successful 49 * 0 if lock request was successful
50 * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE 50 * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE
51 * -ENOMEM if there is no memory to process request
52 * -EINVAL if there are invalid parameters
53 * -DLM_EUNLOCK if unlock request was successful 51 * -DLM_EUNLOCK if unlock request was successful
54 * -DLM_ECANCEL if a cancel completed successfully 52 * -DLM_ECANCEL if a cancel completed successfully
53 * -EDEADLK if a deadlock was detected
54 * -ETIMEDOUT if the lock request was canceled due to a timeout
55 */ 55 */
56 56
57#define DLM_SBF_DEMOTED 0x01 57#define DLM_SBF_DEMOTED 0x01
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 479ee3a1d901..9b2a0158f399 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,10 +53,10 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.8.1" 56#define REL_VERSION "8.3.9rc2"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 94 59#define PRO_VERSION_MAX 95
60 60
61 61
62enum drbd_io_error_p { 62enum drbd_io_error_p {
@@ -91,6 +91,11 @@ enum drbd_after_sb_p {
91 ASB_VIOLENTLY 91 ASB_VIOLENTLY
92}; 92};
93 93
94enum drbd_on_no_data {
95 OND_IO_ERROR,
96 OND_SUSPEND_IO
97};
98
94/* KEEP the order, do not delete or insert. Only append. */ 99/* KEEP the order, do not delete or insert. Only append. */
95enum drbd_ret_codes { 100enum drbd_ret_codes {
96 ERR_CODE_BASE = 100, 101 ERR_CODE_BASE = 100,
@@ -140,6 +145,7 @@ enum drbd_ret_codes {
140 ERR_CONNECTED = 151, /* DRBD 8.3 only */ 145 ERR_CONNECTED = 151, /* DRBD 8.3 only */
141 ERR_PERM = 152, 146 ERR_PERM = 152,
142 ERR_NEED_APV_93 = 153, 147 ERR_NEED_APV_93 = 153,
148 ERR_STONITH_AND_PROT_A = 154,
143 149
144 /* insert new ones above this line */ 150 /* insert new ones above this line */
145 AFTER_LAST_ERR_CODE 151 AFTER_LAST_ERR_CODE
@@ -226,13 +232,17 @@ union drbd_state {
226 unsigned conn:5 ; /* 17/32 cstates */ 232 unsigned conn:5 ; /* 17/32 cstates */
227 unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ 233 unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
228 unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ 234 unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
229 unsigned susp:1 ; /* 2/2 IO suspended no/yes */ 235 unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */
230 unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ 236 unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
231 unsigned peer_isp:1 ; 237 unsigned peer_isp:1 ;
232 unsigned user_isp:1 ; 238 unsigned user_isp:1 ;
233 unsigned _pad:11; /* 0 unused */ 239 unsigned susp_nod:1 ; /* IO suspended because no data */
240 unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/
241 unsigned _pad:9; /* 0 unused */
234#elif defined(__BIG_ENDIAN_BITFIELD) 242#elif defined(__BIG_ENDIAN_BITFIELD)
235 unsigned _pad:11; /* 0 unused */ 243 unsigned _pad:9;
244 unsigned susp_fen:1 ;
245 unsigned susp_nod:1 ;
236 unsigned user_isp:1 ; 246 unsigned user_isp:1 ;
237 unsigned peer_isp:1 ; 247 unsigned peer_isp:1 ;
238 unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ 248 unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
@@ -312,6 +322,8 @@ enum drbd_timeout_flag {
312 322
313#define DRBD_MAGIC 0x83740267 323#define DRBD_MAGIC 0x83740267
314#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) 324#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC)
325#define DRBD_MAGIC_BIG 0x835a
326#define BE_DRBD_MAGIC_BIG __constant_cpu_to_be16(DRBD_MAGIC_BIG)
315 327
316/* these are of type "int" */ 328/* these are of type "int" */
317#define DRBD_MD_INDEX_INTERNAL -1 329#define DRBD_MD_INDEX_INTERNAL -1
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 440b42e38e89..4ac33f34b77e 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -128,26 +128,31 @@
128#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT 128#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT
129#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT 129#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
130#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT 130#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
131#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
131 132
132#define DRBD_MAX_BIO_BVECS_MIN 0 133#define DRBD_MAX_BIO_BVECS_MIN 0
133#define DRBD_MAX_BIO_BVECS_MAX 128 134#define DRBD_MAX_BIO_BVECS_MAX 128
134#define DRBD_MAX_BIO_BVECS_DEF 0 135#define DRBD_MAX_BIO_BVECS_DEF 0
135 136
136#define DRBD_DP_VOLUME_MIN 4 137#define DRBD_C_PLAN_AHEAD_MIN 0
137#define DRBD_DP_VOLUME_MAX 1048576 138#define DRBD_C_PLAN_AHEAD_MAX 300
138#define DRBD_DP_VOLUME_DEF 16384 139#define DRBD_C_PLAN_AHEAD_DEF 0 /* RS rate controller disabled by default */
139 140
140#define DRBD_DP_INTERVAL_MIN 1 141#define DRBD_C_DELAY_TARGET_MIN 1
141#define DRBD_DP_INTERVAL_MAX 600 142#define DRBD_C_DELAY_TARGET_MAX 100
142#define DRBD_DP_INTERVAL_DEF 5 143#define DRBD_C_DELAY_TARGET_DEF 10
143 144
144#define DRBD_RS_THROTTLE_TH_MIN 1 145#define DRBD_C_FILL_TARGET_MIN 0
145#define DRBD_RS_THROTTLE_TH_MAX 600 146#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
146#define DRBD_RS_THROTTLE_TH_DEF 20 147#define DRBD_C_FILL_TARGET_DEF 0 /* By default disabled -> controlled by delay_target */
147 148
148#define DRBD_RS_HOLD_OFF_TH_MIN 1 149#define DRBD_C_MAX_RATE_MIN 250 /* kByte/sec */
149#define DRBD_RS_HOLD_OFF_TH_MAX 6000 150#define DRBD_C_MAX_RATE_MAX (4 << 20)
150#define DRBD_RS_HOLD_OFF_TH_DEF 100 151#define DRBD_C_MAX_RATE_DEF 102400
152
153#define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */
154#define DRBD_C_MIN_RATE_MAX (4 << 20)
155#define DRBD_C_MIN_RATE_DEF 4096
151 156
152#undef RANGE 157#undef RANGE
153#endif 158#endif
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index 5f042810a56c..ade91107c9a5 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -87,6 +87,12 @@ NL_PACKET(syncer_conf, 8,
87 NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) 87 NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32)
88 NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) 88 NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)
89 NL_BIT( 65, T_MAY_IGNORE, use_rle) 89 NL_BIT( 65, T_MAY_IGNORE, use_rle)
90 NL_INTEGER( 75, T_MAY_IGNORE, on_no_data)
91 NL_INTEGER( 76, T_MAY_IGNORE, c_plan_ahead)
92 NL_INTEGER( 77, T_MAY_IGNORE, c_delay_target)
93 NL_INTEGER( 78, T_MAY_IGNORE, c_fill_target)
94 NL_INTEGER( 79, T_MAY_IGNORE, c_max_rate)
95 NL_INTEGER( 80, T_MAY_IGNORE, c_min_rate)
90) 96)
91 97
92NL_PACKET(invalidate, 9, ) 98NL_PACKET(invalidate, 9, )
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index bef3cda44c4c..a90b3892074a 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -83,7 +83,7 @@ static inline int ddebug_remove_module(const char *mod)
83 83
84#define dynamic_pr_debug(fmt, ...) \ 84#define dynamic_pr_debug(fmt, ...) \
85 do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) 85 do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
86#define dynamic_dev_dbg(dev, format, ...) \ 86#define dynamic_dev_dbg(dev, fmt, ...) \
87 do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) 87 do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
88#endif 88#endif
89 89
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..80a0ece8f7e4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -122,6 +122,8 @@ extern void elv_completed_request(struct request_queue *, struct request *);
122extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 122extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
123extern void elv_put_request(struct request_queue *, struct request *); 123extern void elv_put_request(struct request_queue *, struct request *);
124extern void elv_drain_elevator(struct request_queue *); 124extern void elv_drain_elevator(struct request_queue *);
125extern void elv_quiesce_start(struct request_queue *);
126extern void elv_quiesce_end(struct request_queue *);
125 127
126/* 128/*
127 * io scheduler registration 129 * io scheduler registration
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0a81b87ea158..4f34ff6e5558 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,12 +135,12 @@ struct inodes_stat_t {
135 * immediately after submission. The write equivalent 135 * immediately after submission. The write equivalent
136 * of READ_SYNC. 136 * of READ_SYNC.
137 * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. 137 * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
138 * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all 138 * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
139 * previously submitted writes must be safely on storage 139 * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
140 * before this one is started. Also guarantees that when 140 * non-volatile media on completion.
141 * this write is complete, it itself is also safely on 141 * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
142 * storage. Prevents reordering of writes on both sides 142 * by a cache flush and data is guaranteed to be on
143 * of this IO. 143 * non-volatile media on completion.
144 * 144 *
145 */ 145 */
146#define RW_MASK REQ_WRITE 146#define RW_MASK REQ_WRITE
@@ -156,16 +156,12 @@ struct inodes_stat_t {
156#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) 156#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
157#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) 157#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC)
158#define WRITE_META (WRITE | REQ_META) 158#define WRITE_META (WRITE | REQ_META)
159#define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 159#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
160 REQ_HARDBARRIER) 160 REQ_FLUSH)
161 161#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
162/* 162 REQ_FUA)
163 * These aren't really reads or writes, they pass down information about 163#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
164 * parts of device that are now unused by the file system. 164 REQ_FLUSH | REQ_FUA)
165 */
166#define DISCARD_NOBARRIER (WRITE | REQ_DISCARD)
167#define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER)
168#define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE)
169 165
170#define SEL_IN 1 166#define SEL_IN 1
171#define SEL_OUT 2 167#define SEL_OUT 2
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 28e33fea5107..4eb56ed75fbc 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -58,17 +58,35 @@ enum fsl_usb2_phy_modes {
58 FSL_USB2_PHY_SERIAL, 58 FSL_USB2_PHY_SERIAL,
59}; 59};
60 60
61struct clk;
62struct platform_device;
63
61struct fsl_usb2_platform_data { 64struct fsl_usb2_platform_data {
62 /* board specific information */ 65 /* board specific information */
63 enum fsl_usb2_operating_modes operating_mode; 66 enum fsl_usb2_operating_modes operating_mode;
64 enum fsl_usb2_phy_modes phy_mode; 67 enum fsl_usb2_phy_modes phy_mode;
65 unsigned int port_enables; 68 unsigned int port_enables;
69 unsigned int workaround;
70
71 int (*init)(struct platform_device *);
72 void (*exit)(struct platform_device *);
73 void __iomem *regs; /* ioremap'd register base */
74 struct clk *clk;
75 unsigned big_endian_mmio:1;
76 unsigned big_endian_desc:1;
77 unsigned es:1; /* need USBMODE:ES */
78 unsigned le_setup_buf:1;
79 unsigned have_sysif_regs:1;
80 unsigned invert_drvvbus:1;
81 unsigned invert_pwr_fault:1;
66}; 82};
67 83
68/* Flags in fsl_usb2_mph_platform_data */ 84/* Flags in fsl_usb2_mph_platform_data */
69#define FSL_USB2_PORT0_ENABLED 0x00000001 85#define FSL_USB2_PORT0_ENABLED 0x00000001
70#define FSL_USB2_PORT1_ENABLED 0x00000002 86#define FSL_USB2_PORT1_ENABLED 0x00000002
71 87
88#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0)
89
72struct spi_device; 90struct spi_device;
73 91
74struct fsl_spi_platform_data { 92struct fsl_spi_platform_data {
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index af3f06b41dc1..557c3927e70f 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/slab.h>
15 16
16#ifdef CONFIG_BLOCK 17#ifdef CONFIG_BLOCK
17 18
@@ -86,7 +87,15 @@ struct disk_stats {
86 unsigned long io_ticks; 87 unsigned long io_ticks;
87 unsigned long time_in_queue; 88 unsigned long time_in_queue;
88}; 89};
89 90
91#define PARTITION_META_INFO_VOLNAMELTH 64
92#define PARTITION_META_INFO_UUIDLTH 16
93
94struct partition_meta_info {
95 u8 uuid[PARTITION_META_INFO_UUIDLTH]; /* always big endian */
96 u8 volname[PARTITION_META_INFO_VOLNAMELTH];
97};
98
90struct hd_struct { 99struct hd_struct {
91 sector_t start_sect; 100 sector_t start_sect;
92 sector_t nr_sects; 101 sector_t nr_sects;
@@ -95,6 +104,7 @@ struct hd_struct {
95 struct device __dev; 104 struct device __dev;
96 struct kobject *holder_dir; 105 struct kobject *holder_dir;
97 int policy, partno; 106 int policy, partno;
107 struct partition_meta_info *info;
98#ifdef CONFIG_FAIL_MAKE_REQUEST 108#ifdef CONFIG_FAIL_MAKE_REQUEST
99 int make_it_fail; 109 int make_it_fail;
100#endif 110#endif
@@ -130,6 +140,7 @@ struct disk_part_tbl {
130 struct rcu_head rcu_head; 140 struct rcu_head rcu_head;
131 int len; 141 int len;
132 struct hd_struct __rcu *last_lookup; 142 struct hd_struct __rcu *last_lookup;
143 struct gendisk *disk;
133 struct hd_struct __rcu *part[]; 144 struct hd_struct __rcu *part[];
134}; 145};
135 146
@@ -181,6 +192,30 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
181 return NULL; 192 return NULL;
182} 193}
183 194
195static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
196{
197 int i;
198 for (i = 0; i < 16; ++i) {
199 *to++ = (hex_to_bin(*uuid_str) << 4) |
200 (hex_to_bin(*(uuid_str + 1)));
201 uuid_str += 2;
202 switch (i) {
203 case 3:
204 case 5:
205 case 7:
206 case 9:
207 uuid_str++;
208 continue;
209 }
210 }
211}
212
213static inline char *part_unpack_uuid(const u8 *uuid, char *out)
214{
215 sprintf(out, "%pU", uuid);
216 return out;
217}
218
184static inline int disk_max_parts(struct gendisk *disk) 219static inline int disk_max_parts(struct gendisk *disk)
185{ 220{
186 if (disk->flags & GENHD_FL_EXT_DEVT) 221 if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -342,6 +377,19 @@ static inline int part_in_flight(struct hd_struct *part)
342 return part->in_flight[0] + part->in_flight[1]; 377 return part->in_flight[0] + part->in_flight[1];
343} 378}
344 379
380static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
381{
382 if (disk)
383 return kzalloc_node(sizeof(struct partition_meta_info),
384 GFP_KERNEL, disk->node_id);
385 return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
386}
387
388static inline void free_part_info(struct hd_struct *part)
389{
390 kfree(part->info);
391}
392
345/* block/blk-core.c */ 393/* block/blk-core.c */
346extern void part_round_stats(int cpu, struct hd_struct *part); 394extern void part_round_stats(int cpu, struct hd_struct *part);
347 395
@@ -533,7 +581,9 @@ extern int disk_expand_part_tbl(struct gendisk *disk, int target);
533extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 581extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
534extern struct hd_struct * __must_check add_partition(struct gendisk *disk, 582extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
535 int partno, sector_t start, 583 int partno, sector_t start,
536 sector_t len, int flags); 584 sector_t len, int flags,
585 struct partition_meta_info
586 *info);
537extern void delete_partition(struct gendisk *, int); 587extern void delete_partition(struct gendisk *, int);
538extern void printk_all_partitions(void); 588extern void printk_all_partitions(void);
539 589
diff --git a/include/linux/init.h b/include/linux/init.h
index de994304e0bb..577671c55153 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -46,16 +46,23 @@
46#define __exitdata __section(.exit.data) 46#define __exitdata __section(.exit.data)
47#define __exit_call __used __section(.exitcall.exit) 47#define __exit_call __used __section(.exitcall.exit)
48 48
49/* modpost check for section mismatches during the kernel build. 49/*
50 * modpost check for section mismatches during the kernel build.
50 * A section mismatch happens when there are references from a 51 * A section mismatch happens when there are references from a
51 * code or data section to an init section (both code or data). 52 * code or data section to an init section (both code or data).
52 * The init sections are (for most archs) discarded by the kernel 53 * The init sections are (for most archs) discarded by the kernel
53 * when early init has completed so all such references are potential bugs. 54 * when early init has completed so all such references are potential bugs.
54 * For exit sections the same issue exists. 55 * For exit sections the same issue exists.
56 *
55 * The following markers are used for the cases where the reference to 57 * The following markers are used for the cases where the reference to
56 * the *init / *exit section (code or data) is valid and will teach 58 * the *init / *exit section (code or data) is valid and will teach
57 * modpost not to issue a warning. 59 * modpost not to issue a warning. Intended semantics is that a code or
58 * The markers follow same syntax rules as __init / __initdata. */ 60 * data tagged __ref* can reference code or data from init section without
61 * producing a warning (of course, no warning does not mean code is
62 * correct, so optimally document why the __ref is needed and why it's OK).
63 *
64 * The markers follow same syntax rules as __init / __initdata.
65 */
59#define __ref __section(.ref.text) noinline 66#define __ref __section(.ref.text) noinline
60#define __refdata __section(.ref.data) 67#define __refdata __section(.ref.data)
61#define __refconst __section(.ref.rodata) 68#define __refconst __section(.ref.rodata)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1759ba5adce8..edef168a0406 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -652,6 +652,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
652 _max1 > _max2 ? _max1 : _max2; }) 652 _max1 > _max2 ? _max1 : _max2; })
653 653
654/** 654/**
655 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
656 * @x: value1
657 * @y: value2
658 */
659#define min_not_zero(x, y) ({ \
660 typeof(x) __x = (x); \
661 typeof(y) __y = (y); \
662 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
663
664/**
655 * clamp - return a value clamped to a given range with strict typechecking 665 * clamp - return a value clamped to a given range with strict typechecking
656 * @val: current value 666 * @val: current value
657 * @min: minimum allowable value 667 * @min: minimum allowable value
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 7950a37a7146..8f6d12151048 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -191,6 +191,8 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj)
191} 191}
192 192
193extern struct kobject *kset_find_obj(struct kset *, const char *); 193extern struct kobject *kset_find_obj(struct kset *, const char *);
194extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
195 struct kobject *);
194 196
195/* The global /sys/kernel/ kobject for people to chain off of */ 197/* The global /sys/kernel/ kobject for people to chain off of */
196extern struct kobject *kernel_kobj; 198extern struct kobject *kernel_kobj;
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 85582e1bcee9..06c1fa0a5c7b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -23,6 +23,8 @@
23struct memory_block { 23struct memory_block {
24 unsigned long phys_index; 24 unsigned long phys_index;
25 unsigned long state; 25 unsigned long state;
26 int section_count;
27
26 /* 28 /*
27 * This serializes all state change requests. It isn't 29 * This serializes all state change requests. It isn't
28 * held during creation because the control files are 30 * held during creation because the control files are
@@ -113,6 +115,8 @@ extern int memory_dev_init(void);
113extern int remove_memory_block(unsigned long, struct mem_section *, int); 115extern int remove_memory_block(unsigned long, struct mem_section *, int);
114extern int memory_notify(unsigned long val, void *v); 116extern int memory_notify(unsigned long val, void *v);
115extern int memory_isolate_notify(unsigned long val, void *v); 117extern int memory_isolate_notify(unsigned long val, void *v);
118extern struct memory_block *find_memory_block_hinted(struct mem_section *,
119 struct memory_block *);
116extern struct memory_block *find_memory_block(struct mem_section *); 120extern struct memory_block *find_memory_block(struct mem_section *);
117#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) 121#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
118enum mem_add_context { BOOT, HOTPLUG }; 122enum mem_add_context { BOOT, HOTPLUG };
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
index ef01d6aa5934..8f825756c459 100644
--- a/include/linux/mtio.h
+++ b/include/linux/mtio.h
@@ -63,6 +63,7 @@ struct mtop {
63#define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ 63#define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */
64#define MTSETPART 33 /* Change the active tape partition */ 64#define MTSETPART 33 /* Change the active tape partition */
65#define MTMKPART 34 /* Format the tape with one or two partitions */ 65#define MTMKPART 34 /* Format the tape with one or two partitions */
66#define MTWEOFI 35 /* write an end-of-file record (mark) in immediate mode */
66 67
67/* structure for MTIOCGET - mag tape get status command */ 68/* structure for MTIOCGET - mag tape get status command */
68 69
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index dad30734432a..e4471b27c396 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -743,6 +743,7 @@
743#define PCI_DEVICE_ID_HP_CISSC 0x3230 743#define PCI_DEVICE_ID_HP_CISSC 0x3230
744#define PCI_DEVICE_ID_HP_CISSD 0x3238 744#define PCI_DEVICE_ID_HP_CISSD 0x3238
745#define PCI_DEVICE_ID_HP_CISSE 0x323a 745#define PCI_DEVICE_ID_HP_CISSE 0x323a
746#define PCI_DEVICE_ID_HP_CISSF 0x323b
746#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 747#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
747 748
748#define PCI_VENDOR_ID_PCTECH 0x1042 749#define PCI_VENDOR_ID_PCTECH 0x1042
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 0eb50832aa00..5095b834a6fb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,10 +48,8 @@
48 preempt_enable(); \ 48 preempt_enable(); \
49} while (0) 49} while (0)
50 50
51#ifdef CONFIG_SMP
52
53/* minimum unit size, also is the maximum supported allocation size */ 51/* minimum unit size, also is the maximum supported allocation size */
54#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) 52#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
55 53
56/* 54/*
57 * Percpu allocator can serve percpu allocations before slab is 55 * Percpu allocator can serve percpu allocations before slab is
@@ -146,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
146 * dynamically allocated. Non-atomic access to the current CPU's 144 * dynamically allocated. Non-atomic access to the current CPU's
147 * version should probably be combined with get_cpu()/put_cpu(). 145 * version should probably be combined with get_cpu()/put_cpu().
148 */ 146 */
147#ifdef CONFIG_SMP
149#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) 148#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
149#else
150#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
151#endif
150 152
151extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); 153extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
152extern bool is_kernel_percpu_address(unsigned long addr); 154extern bool is_kernel_percpu_address(unsigned long addr);
153 155
154#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 156#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
155extern void __init setup_per_cpu_areas(void); 157extern void __init setup_per_cpu_areas(void);
156#endif 158#endif
157extern void __init percpu_init_late(void); 159extern void __init percpu_init_late(void);
158 160
159#else /* CONFIG_SMP */
160
161#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
162
163/* can't distinguish from other static vars, always false */
164static inline bool is_kernel_percpu_address(unsigned long addr)
165{
166 return false;
167}
168
169static inline void __init setup_per_cpu_areas(void) { }
170
171static inline void __init percpu_init_late(void) { }
172
173static inline void *pcpu_lpage_remapped(void *kaddr)
174{
175 return NULL;
176}
177
178#endif /* CONFIG_SMP */
179
180extern void __percpu *__alloc_percpu(size_t size, size_t align); 161extern void __percpu *__alloc_percpu(size_t size, size_t align);
181extern void free_percpu(void __percpu *__pdata); 162extern void free_percpu(void __percpu *__pdata);
182extern phys_addr_t per_cpu_ptr_to_phys(void *addr); 163extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d7ecad0093bb..2e700ec0601f 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -138,6 +138,9 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr
138 struct resource *res, unsigned int n_res, 138 struct resource *res, unsigned int n_res,
139 const void *data, size_t size); 139 const void *data, size_t size);
140 140
141extern const struct dev_pm_ops * platform_bus_get_pm_ops(void);
142extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm);
143
141/* early platform driver interface */ 144/* early platform driver interface */
142struct early_platform_driver { 145struct early_platform_driver {
143 const char *class_str; 146 const char *class_str;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0383601a927c..56154bbb8da9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
336extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 336extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
337 void __user *buffer, 337 void __user *buffer,
338 size_t *lenp, loff_t *ppos); 338 size_t *lenp, loff_t *ppos);
339#else
340/* Avoid need for ifdefs elsewhere in the code */
341enum { sysctl_hung_task_timeout_secs = 0 };
339#endif 342#endif
340 343
341/* Attach to any functions which should be ignored in wchan output. */ 344/* Attach to any functions which should be ignored in wchan output. */
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 8cdaa1151d2e..85193aa8c1e3 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -39,5 +39,6 @@ extern void putconsxy(struct vc_data *vc, unsigned char *p);
39 39
40extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); 40extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
41extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); 41extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
42extern void vcs_scr_updated(struct vc_data *vc);
42 43
43#endif 44#endif
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 7638deaaba65..97f5b45bbc07 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -35,6 +35,8 @@ struct plat_serial8250_port {
35 void (*set_termios)(struct uart_port *, 35 void (*set_termios)(struct uart_port *,
36 struct ktermios *new, 36 struct ktermios *new,
37 struct ktermios *old); 37 struct ktermios *old);
38 void (*pm)(struct uart_port *, unsigned int state,
39 unsigned old);
38}; 40};
39 41
40/* 42/*
@@ -76,5 +78,11 @@ extern int serial8250_find_port_for_earlycon(void);
76extern int setup_early_serial8250_console(char *cmdline); 78extern int setup_early_serial8250_console(char *cmdline);
77extern void serial8250_do_set_termios(struct uart_port *port, 79extern void serial8250_do_set_termios(struct uart_port *port,
78 struct ktermios *termios, struct ktermios *old); 80 struct ktermios *termios, struct ktermios *old);
81extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
82 unsigned int oldstate);
83
84extern void serial8250_set_isa_configurator(void (*v)
85 (int port, struct uart_port *up,
86 unsigned short *capabilities));
79 87
80#endif 88#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 563e23400913..99e5994e6f84 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -289,6 +289,8 @@ struct uart_port {
289 void (*set_termios)(struct uart_port *, 289 void (*set_termios)(struct uart_port *,
290 struct ktermios *new, 290 struct ktermios *new,
291 struct ktermios *old); 291 struct ktermios *old);
292 void (*pm)(struct uart_port *, unsigned int state,
293 unsigned int old);
292 unsigned int irq; /* irq number */ 294 unsigned int irq; /* irq number */
293 unsigned long irqflags; /* irq flags */ 295 unsigned long irqflags; /* irq flags */
294 unsigned int uartclk; /* base uart clock */ 296 unsigned int uartclk; /* base uart clock */
@@ -411,6 +413,14 @@ unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios
411 unsigned int max); 413 unsigned int max);
412unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); 414unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
413 415
416/* Base timer interval for polling */
417static inline int uart_poll_timeout(struct uart_port *port)
418{
419 int timeout = port->timeout;
420
421 return timeout > 6 ? (timeout / 2 - 2) : 1;
422}
423
414/* 424/*
415 * Console helpers. 425 * Console helpers.
416 */ 426 */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 67d64e6efe7a..86be0cdeb11b 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -256,6 +256,7 @@ struct tty_operations;
256struct tty_struct { 256struct tty_struct {
257 int magic; 257 int magic;
258 struct kref kref; 258 struct kref kref;
259 struct device *dev;
259 struct tty_driver *driver; 260 struct tty_driver *driver;
260 const struct tty_operations *ops; 261 const struct tty_operations *ops;
261 int index; 262 int index;
@@ -465,7 +466,7 @@ extern void proc_clear_tty(struct task_struct *p);
465extern struct tty_struct *get_current_tty(void); 466extern struct tty_struct *get_current_tty(void);
466extern void tty_default_fops(struct file_operations *fops); 467extern void tty_default_fops(struct file_operations *fops);
467extern struct tty_struct *alloc_tty_struct(void); 468extern struct tty_struct *alloc_tty_struct(void);
468extern void tty_add_file(struct tty_struct *tty, struct file *file); 469extern int tty_add_file(struct tty_struct *tty, struct file *file);
469extern void free_tty_struct(struct tty_struct *tty); 470extern void free_tty_struct(struct tty_struct *tty);
470extern void initialize_tty_struct(struct tty_struct *tty, 471extern void initialize_tty_struct(struct tty_struct *tty,
471 struct tty_driver *driver, int idx); 472 struct tty_driver *driver, int idx);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index b08677982525..db2d227694da 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -224,6 +224,12 @@
224 * unless the tty also has a valid tty->termiox pointer. 224 * unless the tty also has a valid tty->termiox pointer.
225 * 225 *
226 * Optional: Called under the termios lock 226 * Optional: Called under the termios lock
227 *
228 * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
229 *
230 * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
231 * structure to complete. This method is optional and will only be called
232 * if provided (otherwise EINVAL will be returned).
227 */ 233 */
228 234
229#include <linux/fs.h> 235#include <linux/fs.h>
@@ -232,6 +238,7 @@
232 238
233struct tty_struct; 239struct tty_struct;
234struct tty_driver; 240struct tty_driver;
241struct serial_icounter_struct;
235 242
236struct tty_operations { 243struct tty_operations {
237 struct tty_struct * (*lookup)(struct tty_driver *driver, 244 struct tty_struct * (*lookup)(struct tty_driver *driver,
@@ -268,6 +275,8 @@ struct tty_operations {
268 unsigned int set, unsigned int clear); 275 unsigned int set, unsigned int clear);
269 int (*resize)(struct tty_struct *tty, struct winsize *ws); 276 int (*resize)(struct tty_struct *tty, struct winsize *ws);
270 int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); 277 int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
278 int (*get_icount)(struct tty_struct *tty,
279 struct serial_icounter_struct *icount);
271#ifdef CONFIG_CONSOLE_POLL 280#ifdef CONFIG_CONSOLE_POLL
272 int (*poll_init)(struct tty_driver *driver, int line, char *options); 281 int (*poll_init)(struct tty_driver *driver, int line, char *options);
273 int (*poll_get_char)(struct tty_driver *driver, int line); 282 int (*poll_get_char)(struct tty_driver *driver, int line);
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 5dcc9ff72f69..d6188e5a52df 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -108,7 +108,7 @@ extern void uio_event_notify(struct uio_info *info);
108 108
109/* defines for uio_info->irq */ 109/* defines for uio_info->irq */
110#define UIO_IRQ_CUSTOM -1 110#define UIO_IRQ_CUSTOM -1
111#define UIO_IRQ_NONE -2 111#define UIO_IRQ_NONE 0
112 112
113/* defines for uio_mem->memtype */ 113/* defines for uio_mem->memtype */
114#define UIO_MEM_NONE 0 114#define UIO_MEM_NONE 0
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index c117a68d04a7..5e86dc771da4 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -32,6 +32,8 @@
32 32
33#define USB_CDC_PROTO_EEM 7 33#define USB_CDC_PROTO_EEM 7
34 34
35#define USB_CDC_NCM_PROTO_NTB 1
36
35/*-------------------------------------------------------------------------*/ 37/*-------------------------------------------------------------------------*/
36 38
37/* 39/*
@@ -274,13 +276,13 @@ struct usb_cdc_notification {
274/* 276/*
275 * Class Specific structures and constants 277 * Class Specific structures and constants
276 * 278 *
277 * CDC NCM parameter structure, CDC NCM subclass 6.2.1 279 * CDC NCM NTB parameters structure, CDC NCM subclass 6.2.1
278 * 280 *
279 */ 281 */
280 282
281struct usb_cdc_ncm_ntb_parameter { 283struct usb_cdc_ncm_ntb_parameters {
282 __le16 wLength; 284 __le16 wLength;
283 __le16 bmNtbFormatSupported; 285 __le16 bmNtbFormatsSupported;
284 __le32 dwNtbInMaxSize; 286 __le32 dwNtbInMaxSize;
285 __le16 wNdpInDivisor; 287 __le16 wNdpInDivisor;
286 __le16 wNdpInPayloadRemainder; 288 __le16 wNdpInPayloadRemainder;
@@ -297,8 +299,8 @@ struct usb_cdc_ncm_ntb_parameter {
297 * CDC NCM transfer headers, CDC NCM subclass 3.2 299 * CDC NCM transfer headers, CDC NCM subclass 3.2
298 */ 300 */
299 301
300#define NCM_NTH16_SIGN 0x484D434E /* NCMH */ 302#define USB_CDC_NCM_NTH16_SIGN 0x484D434E /* NCMH */
301#define NCM_NTH32_SIGN 0x686D636E /* ncmh */ 303#define USB_CDC_NCM_NTH32_SIGN 0x686D636E /* ncmh */
302 304
303struct usb_cdc_ncm_nth16 { 305struct usb_cdc_ncm_nth16 {
304 __le32 dwSignature; 306 __le32 dwSignature;
@@ -320,25 +322,78 @@ struct usb_cdc_ncm_nth32 {
320 * CDC NCM datagram pointers, CDC NCM subclass 3.3 322 * CDC NCM datagram pointers, CDC NCM subclass 3.3
321 */ 323 */
322 324
323#define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ 325#define USB_CDC_NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */
324#define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ 326#define USB_CDC_NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */
325#define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ 327#define USB_CDC_NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */
326#define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ 328#define USB_CDC_NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */
329
330/* 16-bit NCM Datagram Pointer Entry */
331struct usb_cdc_ncm_dpe16 {
332 __le16 wDatagramIndex;
333 __le16 wDatagramLength;
334} __attribute__((__packed__));
327 335
336/* 16-bit NCM Datagram Pointer Table */
328struct usb_cdc_ncm_ndp16 { 337struct usb_cdc_ncm_ndp16 {
329 __le32 dwSignature; 338 __le32 dwSignature;
330 __le16 wLength; 339 __le16 wLength;
331 __le16 wNextFpIndex; 340 __le16 wNextFpIndex;
332 __u8 data[0]; 341 struct usb_cdc_ncm_dpe16 dpe16[0];
333} __attribute__ ((packed)); 342} __attribute__ ((packed));
334 343
344/* 32-bit NCM Datagram Pointer Entry */
345struct usb_cdc_ncm_dpe32 {
346 __le32 dwDatagramIndex;
347 __le32 dwDatagramLength;
348} __attribute__((__packed__));
349
350/* 32-bit NCM Datagram Pointer Table */
335struct usb_cdc_ncm_ndp32 { 351struct usb_cdc_ncm_ndp32 {
336 __le32 dwSignature; 352 __le32 dwSignature;
337 __le16 wLength; 353 __le16 wLength;
338 __le16 wReserved6; 354 __le16 wReserved6;
339 __le32 dwNextFpIndex; 355 __le32 dwNextNdpIndex;
340 __le32 dwReserved12; 356 __le32 dwReserved12;
341 __u8 data[0]; 357 struct usb_cdc_ncm_dpe32 dpe32[0];
342} __attribute__ ((packed)); 358} __attribute__ ((packed));
343 359
360/* CDC NCM subclass 3.2.1 and 3.2.2 */
361#define USB_CDC_NCM_NDP16_INDEX_MIN 0x000C
362#define USB_CDC_NCM_NDP32_INDEX_MIN 0x0010
363
364/* CDC NCM subclass 3.3.3 Datagram Formatting */
365#define USB_CDC_NCM_DATAGRAM_FORMAT_CRC 0x30
366#define USB_CDC_NCM_DATAGRAM_FORMAT_NOCRC 0x31
367
368/* CDC NCM subclass 4.2 NCM Communications Interface Protocol Code */
369#define USB_CDC_NCM_PROTO_CODE_NO_ENCAP_COMMANDS 0x00
370#define USB_CDC_NCM_PROTO_CODE_EXTERN_PROTO 0xFE
371
372/* CDC NCM subclass 5.2.1 NCM Functional Descriptor, bmNetworkCapabilities */
373#define USB_CDC_NCM_NCAP_ETH_FILTER (1 << 0)
374#define USB_CDC_NCM_NCAP_NET_ADDRESS (1 << 1)
375#define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2)
376#define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3)
377#define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4)
378
379/* CDC NCM subclass Table 6-3: NTB Parameter Structure */
380#define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0)
381#define USB_CDC_NCM_NTB32_SUPPORTED (1 << 1)
382
383/* CDC NCM subclass Table 6-3: NTB Parameter Structure */
384#define USB_CDC_NCM_NDP_ALIGN_MIN_SIZE 0x04
385#define USB_CDC_NCM_NTB_MAX_LENGTH 0x1C
386
387/* CDC NCM subclass 6.2.5 SetNtbFormat */
388#define USB_CDC_NCM_NTB16_FORMAT 0x00
389#define USB_CDC_NCM_NTB32_FORMAT 0x01
390
391/* CDC NCM subclass 6.2.7 SetNtbInputSize */
392#define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048
393#define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048
394
395/* CDC NCM subclass 6.2.11 SetCrcMode */
396#define USB_CDC_NCM_CRC_NOT_APPENDED 0x00
397#define USB_CDC_NCM_CRC_APPENDED 0x01
398
344#endif /* __LINUX_USB_CDC_H */ 399#endif /* __LINUX_USB_CDC_H */
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index da2ed77d3e8d..f917bbbc8901 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -123,8 +123,23 @@
123#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ 123#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
124#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ 124#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
125 125
126/*
127 * New Feature Selectors as added by USB 3.0
128 * See USB 3.0 spec Table 9-6
129 */
130#define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */
131#define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */
132#define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */
133#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
134
135#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
136
126#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ 137#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
127 138
139/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
140#define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */
141#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */
142#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */
128 143
129/** 144/**
130 * struct usb_ctrlrequest - SETUP data for a USB device control request 145 * struct usb_ctrlrequest - SETUP data for a USB device control request
@@ -675,6 +690,7 @@ struct usb_bos_descriptor {
675 __u8 bNumDeviceCaps; 690 __u8 bNumDeviceCaps;
676} __attribute__((packed)); 691} __attribute__((packed));
677 692
693#define USB_DT_BOS_SIZE 5
678/*-------------------------------------------------------------------------*/ 694/*-------------------------------------------------------------------------*/
679 695
680/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ 696/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */
@@ -712,16 +728,56 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
712 __u8 bReserved; 728 __u8 bReserved;
713} __attribute__((packed)); 729} __attribute__((packed));
714 730
731/* USB 2.0 Extension descriptor */
715#define USB_CAP_TYPE_EXT 2 732#define USB_CAP_TYPE_EXT 2
716 733
717struct usb_ext_cap_descriptor { /* Link Power Management */ 734struct usb_ext_cap_descriptor { /* Link Power Management */
718 __u8 bLength; 735 __u8 bLength;
719 __u8 bDescriptorType; 736 __u8 bDescriptorType;
720 __u8 bDevCapabilityType; 737 __u8 bDevCapabilityType;
721 __u8 bmAttributes; 738 __le32 bmAttributes;
722#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ 739#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */
723} __attribute__((packed)); 740} __attribute__((packed));
724 741
742#define USB_DT_USB_EXT_CAP_SIZE 7
743
744/*
745 * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB
746 * specific device level capabilities
747 */
748#define USB_SS_CAP_TYPE 3
749struct usb_ss_cap_descriptor { /* Link Power Management */
750 __u8 bLength;
751 __u8 bDescriptorType;
752 __u8 bDevCapabilityType;
753 __u8 bmAttributes;
754#define USB_LTM_SUPPORT (1 << 1) /* supports LTM */
755 __le16 wSpeedSupported;
756#define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */
757#define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */
758#define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */
759#define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */
760 __u8 bFunctionalitySupport;
761 __u8 bU1devExitLat;
762 __le16 bU2DevExitLat;
763} __attribute__((packed));
764
765#define USB_DT_USB_SS_CAP_SIZE 10
766
767/*
768 * Container ID Capability descriptor: Defines the instance unique ID used to
769 * identify the instance across all operating modes
770 */
771#define CONTAINER_ID_TYPE 4
772struct usb_ss_container_id_descriptor {
773 __u8 bLength;
774 __u8 bDescriptorType;
775 __u8 bDevCapabilityType;
776 __u8 bReserved;
777 __u8 ContainerID[16]; /* 128-bit number */
778} __attribute__((packed));
779
780#define USB_DT_USB_SS_CONTN_ID_SIZE 20
725/*-------------------------------------------------------------------------*/ 781/*-------------------------------------------------------------------------*/
726 782
727/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with 783/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
@@ -808,4 +864,14 @@ enum usb_device_state {
808 */ 864 */
809}; 865};
810 866
867/*-------------------------------------------------------------------------*/
868
869/*
870 * As per USB compliance update, a device that is actively drawing
871 * more than 100mA from USB must report itself as bus-powered in
872 * the GetStatus(DEVICE) call.
873 * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
874 */
875#define USB_SELF_POWER_VBUS_MAX_DRAW 100
876
811#endif /* __LINUX_USB_CH9_H */ 877#endif /* __LINUX_USB_CH9_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 617068134ae8..3d29a7dcac2d 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -161,8 +161,6 @@ ep_choose(struct usb_gadget *g, struct usb_endpoint_descriptor *hs,
161 * and by language IDs provided in control requests. 161 * and by language IDs provided in control requests.
162 * @descriptors: Table of descriptors preceding all function descriptors. 162 * @descriptors: Table of descriptors preceding all function descriptors.
163 * Examples include OTG and vendor-specific descriptors. 163 * Examples include OTG and vendor-specific descriptors.
164 * @bind: Called from @usb_add_config() to allocate resources unique to this
165 * configuration and to call @usb_add_function() for each function used.
166 * @unbind: Reverses @bind; called as a side effect of unregistering the 164 * @unbind: Reverses @bind; called as a side effect of unregistering the
167 * driver which added this configuration. 165 * driver which added this configuration.
168 * @setup: Used to delegate control requests that aren't handled by standard 166 * @setup: Used to delegate control requests that aren't handled by standard
@@ -207,8 +205,7 @@ struct usb_configuration {
207 * we can't restructure things to avoid mismatching... 205 * we can't restructure things to avoid mismatching...
208 */ 206 */
209 207
210 /* configuration management: bind/unbind */ 208 /* configuration management: unbind/setup */
211 int (*bind)(struct usb_configuration *);
212 void (*unbind)(struct usb_configuration *); 209 void (*unbind)(struct usb_configuration *);
213 int (*setup)(struct usb_configuration *, 210 int (*setup)(struct usb_configuration *,
214 const struct usb_ctrlrequest *); 211 const struct usb_ctrlrequest *);
@@ -232,20 +229,24 @@ struct usb_configuration {
232}; 229};
233 230
234int usb_add_config(struct usb_composite_dev *, 231int usb_add_config(struct usb_composite_dev *,
235 struct usb_configuration *); 232 struct usb_configuration *,
233 int (*)(struct usb_configuration *));
236 234
237/** 235/**
238 * struct usb_composite_driver - groups configurations into a gadget 236 * struct usb_composite_driver - groups configurations into a gadget
239 * @name: For diagnostics, identifies the driver. 237 * @name: For diagnostics, identifies the driver.
238 * @iProduct: Used as iProduct override if @dev->iProduct is not set.
239 * If NULL value of @name is taken.
240 * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is
241 * not set. If NULL a default "<system> <release> with <udc>" value
242 * will be used.
240 * @dev: Template descriptor for the device, including default device 243 * @dev: Template descriptor for the device, including default device
241 * identifiers. 244 * identifiers.
242 * @strings: tables of strings, keyed by identifiers assigned during bind() 245 * @strings: tables of strings, keyed by identifiers assigned during bind()
243 * and language IDs provided in control requests 246 * and language IDs provided in control requests
244 * @bind: (REQUIRED) Used to allocate resources that are shared across the 247 * @needs_serial: set to 1 if the gadget needs userspace to provide
245 * whole device, such as string IDs, and add its configurations using 248 * a serial number. If one is not provided, warning will be printed.
246 * @usb_add_config(). This may fail by returning a negative errno 249 * @unbind: Reverses bind; called as a side effect of unregistering
247 * value; it should return zero on successful initialization.
248 * @unbind: Reverses @bind(); called as a side effect of unregistering
249 * this driver. 250 * this driver.
250 * @disconnect: optional driver disconnect method 251 * @disconnect: optional driver disconnect method
251 * @suspend: Notifies when the host stops sending USB traffic, 252 * @suspend: Notifies when the host stops sending USB traffic,
@@ -256,7 +257,7 @@ int usb_add_config(struct usb_composite_dev *,
256 * Devices default to reporting self powered operation. Devices which rely 257 * Devices default to reporting self powered operation. Devices which rely
257 * on bus powered operation should report this in their @bind() method. 258 * on bus powered operation should report this in their @bind() method.
258 * 259 *
259 * Before returning from @bind, various fields in the template descriptor 260 * Before returning from bind, various fields in the template descriptor
260 * may be overridden. These include the idVendor/idProduct/bcdDevice values 261 * may be overridden. These include the idVendor/idProduct/bcdDevice values
261 * normally to bind the appropriate host side driver, and the three strings 262 * normally to bind the appropriate host side driver, and the three strings
262 * (iManufacturer, iProduct, iSerialNumber) normally used to provide user 263 * (iManufacturer, iProduct, iSerialNumber) normally used to provide user
@@ -266,15 +267,12 @@ int usb_add_config(struct usb_composite_dev *,
266 */ 267 */
267struct usb_composite_driver { 268struct usb_composite_driver {
268 const char *name; 269 const char *name;
270 const char *iProduct;
271 const char *iManufacturer;
269 const struct usb_device_descriptor *dev; 272 const struct usb_device_descriptor *dev;
270 struct usb_gadget_strings **strings; 273 struct usb_gadget_strings **strings;
274 unsigned needs_serial:1;
271 275
272 /* REVISIT: bind() functions can be marked __init, which
273 * makes trouble for section mismatch analysis. See if
274 * we can't restructure things to avoid mismatching...
275 */
276
277 int (*bind)(struct usb_composite_dev *);
278 int (*unbind)(struct usb_composite_dev *); 276 int (*unbind)(struct usb_composite_dev *);
279 277
280 void (*disconnect)(struct usb_composite_dev *); 278 void (*disconnect)(struct usb_composite_dev *);
@@ -284,8 +282,9 @@ struct usb_composite_driver {
284 void (*resume)(struct usb_composite_dev *); 282 void (*resume)(struct usb_composite_dev *);
285}; 283};
286 284
287extern int usb_composite_register(struct usb_composite_driver *); 285extern int usb_composite_probe(struct usb_composite_driver *driver,
288extern void usb_composite_unregister(struct usb_composite_driver *); 286 int (*bind)(struct usb_composite_dev *cdev));
287extern void usb_composite_unregister(struct usb_composite_driver *driver);
289 288
290 289
291/** 290/**
@@ -334,6 +333,9 @@ struct usb_composite_dev {
334 struct list_head configs; 333 struct list_head configs;
335 struct usb_composite_driver *driver; 334 struct usb_composite_driver *driver;
336 u8 next_string_id; 335 u8 next_string_id;
336 u8 manufacturer_override;
337 u8 product_override;
338 u8 serial_override;
337 339
338 /* the gadget driver won't enable the data pullup 340 /* the gadget driver won't enable the data pullup
339 * while the deactivation count is nonzero. 341 * while the deactivation count is nonzero.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index d3ef42d7d2f0..006412ce2303 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -705,11 +705,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
705 * struct usb_gadget_driver - driver for usb 'slave' devices 705 * struct usb_gadget_driver - driver for usb 'slave' devices
706 * @function: String describing the gadget's function 706 * @function: String describing the gadget's function
707 * @speed: Highest speed the driver handles. 707 * @speed: Highest speed the driver handles.
708 * @bind: Invoked when the driver is bound to a gadget, usually
709 * after registering the driver.
710 * At that point, ep0 is fully initialized, and ep_list holds
711 * the currently-available endpoints.
712 * Called in a context that permits sleeping.
713 * @setup: Invoked for ep0 control requests that aren't handled by 708 * @setup: Invoked for ep0 control requests that aren't handled by
714 * the hardware level driver. Most calls must be handled by 709 * the hardware level driver. Most calls must be handled by
715 * the gadget driver, including descriptor and configuration 710 * the gadget driver, including descriptor and configuration
@@ -774,7 +769,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
774struct usb_gadget_driver { 769struct usb_gadget_driver {
775 char *function; 770 char *function;
776 enum usb_device_speed speed; 771 enum usb_device_speed speed;
777 int (*bind)(struct usb_gadget *);
778 void (*unbind)(struct usb_gadget *); 772 void (*unbind)(struct usb_gadget *);
779 int (*setup)(struct usb_gadget *, 773 int (*setup)(struct usb_gadget *,
780 const struct usb_ctrlrequest *); 774 const struct usb_ctrlrequest *);
@@ -798,17 +792,19 @@ struct usb_gadget_driver {
798 */ 792 */
799 793
800/** 794/**
801 * usb_gadget_register_driver - register a gadget driver 795 * usb_gadget_probe_driver - probe a gadget driver
802 * @driver:the driver being registered 796 * @driver: the driver being registered
797 * @bind: the driver's bind callback
803 * Context: can sleep 798 * Context: can sleep
804 * 799 *
805 * Call this in your gadget driver's module initialization function, 800 * Call this in your gadget driver's module initialization function,
806 * to tell the underlying usb controller driver about your driver. 801 * to tell the underlying usb controller driver about your driver.
807 * The driver's bind() function will be called to bind it to a 802 * The @bind() function will be called to bind it to a gadget before this
808 * gadget before this registration call returns. It's expected that 803 * registration call returns. It's expected that the @bind() function will
809 * the bind() functions will be in init sections. 804 * be in init sections.
810 */ 805 */
811int usb_gadget_register_driver(struct usb_gadget_driver *driver); 806int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
807 int (*bind)(struct usb_gadget *));
812 808
813/** 809/**
814 * usb_gadget_unregister_driver - unregister a gadget driver 810 * usb_gadget_unregister_driver - unregister a gadget driver
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 3b571f1ffbb3..0b6e751ea0b1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -329,6 +329,8 @@ extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags);
329extern int usb_hcd_unlink_urb(struct urb *urb, int status); 329extern int usb_hcd_unlink_urb(struct urb *urb, int status);
330extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, 330extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
331 int status); 331 int status);
332extern void unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *);
333extern void unmap_urb_for_dma(struct usb_hcd *, struct urb *);
332extern void usb_hcd_flush_endpoint(struct usb_device *udev, 334extern void usb_hcd_flush_endpoint(struct usb_device *udev,
333 struct usb_host_endpoint *ep); 335 struct usb_host_endpoint *ep);
334extern void usb_hcd_disable_endpoint(struct usb_device *udev, 336extern void usb_hcd_disable_endpoint(struct usb_device *udev,
diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h
new file mode 100644
index 000000000000..a0ccf795f362
--- /dev/null
+++ b/include/linux/usb/intel_mid_otg.h
@@ -0,0 +1,180 @@
1/*
2 * Intel MID (Langwell/Penwell) USB OTG Transceiver driver
3 * Copyright (C) 2008 - 2010, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef __INTEL_MID_OTG_H
21#define __INTEL_MID_OTG_H
22
23#include <linux/pm.h>
24#include <linux/usb/otg.h>
25#include <linux/notifier.h>
26
27struct intel_mid_otg_xceiv;
28
29/* This is a common data structure for Intel MID platform to
30 * save values of the OTG state machine */
31struct otg_hsm {
32 /* Input */
33 int a_bus_resume;
34 int a_bus_suspend;
35 int a_conn;
36 int a_sess_vld;
37 int a_srp_det;
38 int a_vbus_vld;
39 int b_bus_resume;
40 int b_bus_suspend;
41 int b_conn;
42 int b_se0_srp;
43 int b_ssend_srp;
44 int b_sess_end;
45 int b_sess_vld;
46 int id;
47/* id values */
48#define ID_B 0x05
49#define ID_A 0x04
50#define ID_ACA_C 0x03
51#define ID_ACA_B 0x02
52#define ID_ACA_A 0x01
53 int power_up;
54 int adp_change;
55 int test_device;
56
57 /* Internal variables */
58 int a_set_b_hnp_en;
59 int b_srp_done;
60 int b_hnp_enable;
61 int hnp_poll_enable;
62
63 /* Timeout indicator for timers */
64 int a_wait_vrise_tmout;
65 int a_wait_bcon_tmout;
66 int a_aidl_bdis_tmout;
67 int a_bidl_adis_tmout;
68 int a_bidl_adis_tmr;
69 int a_wait_vfall_tmout;
70 int b_ase0_brst_tmout;
71 int b_bus_suspend_tmout;
72 int b_srp_init_tmout;
73 int b_srp_fail_tmout;
74 int b_srp_fail_tmr;
75 int b_adp_sense_tmout;
76
77 /* Informative variables */
78 int a_bus_drop;
79 int a_bus_req;
80 int a_clr_err;
81 int b_bus_req;
82 int a_suspend_req;
83 int b_bus_suspend_vld;
84
85 /* Output */
86 int drv_vbus;
87 int loc_conn;
88 int loc_sof;
89
90 /* Others */
91 int vbus_srp_up;
92};
93
94/* must provide ULPI access function to read/write registers implemented in
95 * ULPI address space */
96struct iotg_ulpi_access_ops {
97 int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val);
98 int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val);
99};
100
101#define OTG_A_DEVICE 0x0
102#define OTG_B_DEVICE 0x1
103
104/*
105 * the Intel MID (Langwell/Penwell) otg transceiver driver needs to interact
106 * with device and host drivers to implement the USB OTG related feature. More
107 * function members are added based on otg_transceiver data structure for this
108 * purpose.
109 */
110struct intel_mid_otg_xceiv {
111 struct otg_transceiver otg;
112 struct otg_hsm hsm;
113
114 /* base address */
115 void __iomem *base;
116
117 /* ops to access ulpi */
118 struct iotg_ulpi_access_ops ulpi_ops;
119
120 /* atomic notifier for interrupt context */
121 struct atomic_notifier_head iotg_notifier;
122
123 /* start/stop USB Host function */
124 int (*start_host)(struct intel_mid_otg_xceiv *iotg);
125 int (*stop_host)(struct intel_mid_otg_xceiv *iotg);
126
127 /* start/stop USB Peripheral function */
128 int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg);
129 int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg);
130
131 /* start/stop ADP sense/probe function */
132 int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg,
133 bool enabled, int dev);
134 int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg,
135 bool enabled);
136
137#ifdef CONFIG_PM
138 /* suspend/resume USB host function */
139 int (*suspend_host)(struct intel_mid_otg_xceiv *iotg,
140 pm_message_t message);
141 int (*resume_host)(struct intel_mid_otg_xceiv *iotg);
142
143 int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg,
144 pm_message_t message);
145 int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg);
146#endif
147
148};
149static inline
150struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct otg_transceiver *otg)
151{
152 return container_of(otg, struct intel_mid_otg_xceiv, otg);
153}
154
155#define MID_OTG_NOTIFY_CONNECT 0x0001
156#define MID_OTG_NOTIFY_DISCONN 0x0002
157#define MID_OTG_NOTIFY_HSUSPEND 0x0003
158#define MID_OTG_NOTIFY_HRESUME 0x0004
159#define MID_OTG_NOTIFY_CSUSPEND 0x0005
160#define MID_OTG_NOTIFY_CRESUME 0x0006
161#define MID_OTG_NOTIFY_HOSTADD 0x0007
162#define MID_OTG_NOTIFY_HOSTREMOVE 0x0008
163#define MID_OTG_NOTIFY_CLIENTADD 0x0009
164#define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a
165
166static inline int
167intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg,
168 struct notifier_block *nb)
169{
170 return atomic_notifier_chain_register(&iotg->iotg_notifier, nb);
171}
172
173static inline void
174intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg,
175 struct notifier_block *nb)
176{
177 atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb);
178}
179
180#endif /* __INTEL_MID_OTG_H */
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
new file mode 100644
index 000000000000..51f17b16d312
--- /dev/null
+++ b/include/linux/usb/langwell_otg.h
@@ -0,0 +1,139 @@
1/*
2 * Intel Langwell USB OTG transceiver driver
3 * Copyright (C) 2008 - 2010, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef __LANGWELL_OTG_H
21#define __LANGWELL_OTG_H
22
23#include <linux/usb/intel_mid_otg.h>
24
25#define CI_USBCMD 0x30
26# define USBCMD_RST BIT(1)
27# define USBCMD_RS BIT(0)
28#define CI_USBSTS 0x34
29# define USBSTS_SLI BIT(8)
30# define USBSTS_URI BIT(6)
31# define USBSTS_PCI BIT(2)
32#define CI_PORTSC1 0x74
33# define PORTSC_PP BIT(12)
34# define PORTSC_LS (BIT(11) | BIT(10))
35# define PORTSC_SUSP BIT(7)
36# define PORTSC_CCS BIT(0)
37#define CI_HOSTPC1 0xb4
38# define HOSTPC1_PHCD BIT(22)
39#define CI_OTGSC 0xf4
40# define OTGSC_DPIE BIT(30)
41# define OTGSC_1MSE BIT(29)
42# define OTGSC_BSEIE BIT(28)
43# define OTGSC_BSVIE BIT(27)
44# define OTGSC_ASVIE BIT(26)
45# define OTGSC_AVVIE BIT(25)
46# define OTGSC_IDIE BIT(24)
47# define OTGSC_DPIS BIT(22)
48# define OTGSC_1MSS BIT(21)
49# define OTGSC_BSEIS BIT(20)
50# define OTGSC_BSVIS BIT(19)
51# define OTGSC_ASVIS BIT(18)
52# define OTGSC_AVVIS BIT(17)
53# define OTGSC_IDIS BIT(16)
54# define OTGSC_DPS BIT(14)
55# define OTGSC_1MST BIT(13)
56# define OTGSC_BSE BIT(12)
57# define OTGSC_BSV BIT(11)
58# define OTGSC_ASV BIT(10)
59# define OTGSC_AVV BIT(9)
60# define OTGSC_ID BIT(8)
61# define OTGSC_HABA BIT(7)
62# define OTGSC_HADP BIT(6)
63# define OTGSC_IDPU BIT(5)
64# define OTGSC_DP BIT(4)
65# define OTGSC_OT BIT(3)
66# define OTGSC_HAAR BIT(2)
67# define OTGSC_VC BIT(1)
68# define OTGSC_VD BIT(0)
69# define OTGSC_INTEN_MASK (0x7f << 24)
70# define OTGSC_INT_MASK (0x5f << 24)
71# define OTGSC_INTSTS_MASK (0x7f << 16)
72#define CI_USBMODE 0xf8
73# define USBMODE_CM (BIT(1) | BIT(0))
74# define USBMODE_IDLE 0
75# define USBMODE_DEVICE 0x2
76# define USBMODE_HOST 0x3
77#define USBCFG_ADDR 0xff10801c
78#define USBCFG_LEN 4
79# define USBCFG_VBUSVAL BIT(14)
80# define USBCFG_AVALID BIT(13)
81# define USBCFG_BVALID BIT(12)
82# define USBCFG_SESEND BIT(11)
83
84#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
85
86enum langwell_otg_timer_type {
87 TA_WAIT_VRISE_TMR,
88 TA_WAIT_BCON_TMR,
89 TA_AIDL_BDIS_TMR,
90 TB_ASE0_BRST_TMR,
91 TB_SE0_SRP_TMR,
92 TB_SRP_INIT_TMR,
93 TB_SRP_FAIL_TMR,
94 TB_BUS_SUSPEND_TMR
95};
96
97#define TA_WAIT_VRISE 100
98#define TA_WAIT_BCON 30000
99#define TA_AIDL_BDIS 15000
100#define TB_ASE0_BRST 5000
101#define TB_SE0_SRP 2
102#define TB_SRP_INIT 100
103#define TB_SRP_FAIL 5500
104#define TB_BUS_SUSPEND 500
105
106struct langwell_otg_timer {
107 unsigned long expires; /* Number of count increase to timeout */
108 unsigned long count; /* Tick counter */
109 void (*function)(unsigned long); /* Timeout function */
110 unsigned long data; /* Data passed to function */
111 struct list_head list;
112};
113
114struct langwell_otg {
115 struct intel_mid_otg_xceiv iotg;
116 struct device *dev;
117
118 void __iomem *usbcfg; /* SCCBUSB config Reg */
119
120 unsigned region;
121 unsigned cfg_region;
122
123 struct work_struct work;
124 struct workqueue_struct *qwork;
125 struct timer_list hsm_timer;
126
127 spinlock_t lock;
128 spinlock_t wq_lock;
129
130 struct notifier_block iotg_notifier;
131};
132
133static inline
134struct langwell_otg *mid_xceiv_to_lnw(struct intel_mid_otg_xceiv *iotg)
135{
136 return container_of(iotg, struct langwell_otg, iotg);
137}
138
139#endif /* __LANGWELL_OTG_H__ */
diff --git a/include/linux/usb/ncm.h b/include/linux/usb/ncm.h
deleted file mode 100644
index 006d1064c8b2..000000000000
--- a/include/linux/usb/ncm.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * USB CDC NCM auxiliary definitions
3 */
4
5#ifndef __LINUX_USB_NCM_H
6#define __LINUX_USB_NCM_H
7
8#include <linux/types.h>
9#include <linux/usb/cdc.h>
10#include <asm/unaligned.h>
11
12#define NCM_NTB_MIN_IN_SIZE 2048
13#define NCM_NTB_MIN_OUT_SIZE 2048
14
15#define NCM_CONTROL_TIMEOUT (5 * 1000)
16
17/* bmNetworkCapabilities */
18
19#define NCM_NCAP_ETH_FILTER (1 << 0)
20#define NCM_NCAP_NET_ADDRESS (1 << 1)
21#define NCM_NCAP_ENCAP_COMM (1 << 2)
22#define NCM_NCAP_MAX_DGRAM (1 << 3)
23#define NCM_NCAP_CRC_MODE (1 << 4)
24
25/*
26 * Here are options for NCM Datagram Pointer table (NDP) parser.
27 * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
28 * in NDP16 offsets and sizes fields are 1 16bit word wide,
29 * in NDP32 -- 2 16bit words wide. Also signatures are different.
30 * To make the parser code the same, put the differences in the structure,
31 * and switch pointers to the structures when the format is changed.
32 */
33
34struct ndp_parser_opts {
35 u32 nth_sign;
36 u32 ndp_sign;
37 unsigned nth_size;
38 unsigned ndp_size;
39 unsigned ndplen_align;
40 /* sizes in u16 units */
41 unsigned dgram_item_len; /* index or length */
42 unsigned block_length;
43 unsigned fp_index;
44 unsigned reserved1;
45 unsigned reserved2;
46 unsigned next_fp_index;
47};
48
49#define INIT_NDP16_OPTS { \
50 .nth_sign = NCM_NTH16_SIGN, \
51 .ndp_sign = NCM_NDP16_NOCRC_SIGN, \
52 .nth_size = sizeof(struct usb_cdc_ncm_nth16), \
53 .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \
54 .ndplen_align = 4, \
55 .dgram_item_len = 1, \
56 .block_length = 1, \
57 .fp_index = 1, \
58 .reserved1 = 0, \
59 .reserved2 = 0, \
60 .next_fp_index = 1, \
61 }
62
63
64#define INIT_NDP32_OPTS { \
65 .nth_sign = NCM_NTH32_SIGN, \
66 .ndp_sign = NCM_NDP32_NOCRC_SIGN, \
67 .nth_size = sizeof(struct usb_cdc_ncm_nth32), \
68 .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \
69 .ndplen_align = 8, \
70 .dgram_item_len = 2, \
71 .block_length = 2, \
72 .fp_index = 2, \
73 .reserved1 = 1, \
74 .reserved2 = 2, \
75 .next_fp_index = 2, \
76 }
77
78static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
79{
80 switch (size) {
81 case 1:
82 put_unaligned_le16((u16)val, *p);
83 break;
84 case 2:
85 put_unaligned_le32((u32)val, *p);
86
87 break;
88 default:
89 BUG();
90 }
91
92 *p += size;
93}
94
95static inline unsigned get_ncm(__le16 **p, unsigned size)
96{
97 unsigned tmp;
98
99 switch (size) {
100 case 1:
101 tmp = get_unaligned_le16(*p);
102 break;
103 case 2:
104 tmp = get_unaligned_le32(*p);
105 break;
106 default:
107 BUG();
108 }
109
110 *p += size;
111 return tmp;
112}
113
114#endif /* __LINUX_USB_NCM_H */
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 545cba73ccaf..0a5b3711e502 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -164,8 +164,19 @@ otg_shutdown(struct otg_transceiver *otg)
164} 164}
165 165
166/* for usb host and peripheral controller drivers */ 166/* for usb host and peripheral controller drivers */
167#ifdef CONFIG_USB_OTG_UTILS
167extern struct otg_transceiver *otg_get_transceiver(void); 168extern struct otg_transceiver *otg_get_transceiver(void);
168extern void otg_put_transceiver(struct otg_transceiver *); 169extern void otg_put_transceiver(struct otg_transceiver *);
170#else
171static inline struct otg_transceiver *otg_get_transceiver(void)
172{
173 return NULL;
174}
175
176static inline void otg_put_transceiver(struct otg_transceiver *x)
177{
178}
179#endif
169 180
170/* Context: can sleep */ 181/* Context: can sleep */
171static inline int 182static inline int
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 55675b1efb28..16d682f4f7c3 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -271,6 +271,8 @@ struct usb_serial_driver {
271 int (*tiocmget)(struct tty_struct *tty, struct file *file); 271 int (*tiocmget)(struct tty_struct *tty, struct file *file);
272 int (*tiocmset)(struct tty_struct *tty, struct file *file, 272 int (*tiocmset)(struct tty_struct *tty, struct file *file,
273 unsigned int set, unsigned int clear); 273 unsigned int set, unsigned int clear);
274 int (*get_icount)(struct tty_struct *tty,
275 struct serial_icounter_struct *icount);
274 /* Called by the tty layer for port level work. There may or may not 276 /* Called by the tty layer for port level work. There may or may not
275 be an attached tty at this point */ 277 be an attached tty at this point */
276 void (*dtr_rts)(struct usb_serial_port *port, int on); 278 void (*dtr_rts)(struct usb_serial_port *port, int on);
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
new file mode 100644
index 000000000000..d7fc910f1dc4
--- /dev/null
+++ b/include/linux/usb/storage.h
@@ -0,0 +1,48 @@
1#ifndef __LINUX_USB_STORAGE_H
2#define __LINUX_USB_STORAGE_H
3
4/*
5 * linux/usb/storage.h
6 *
7 * Copyright Matthew Wilcox for Intel Corp, 2010
8 *
9 * This file contains definitions taken from the
10 * USB Mass Storage Class Specification Overview
11 *
12 * Distributed under the terms of the GNU GPL, version two.
13 */
14
15/* Storage subclass codes */
16
17#define USB_SC_RBC 0x01 /* Typically, flash devices */
18#define USB_SC_8020 0x02 /* CD-ROM */
19#define USB_SC_QIC 0x03 /* QIC-157 Tapes */
20#define USB_SC_UFI 0x04 /* Floppy */
21#define USB_SC_8070 0x05 /* Removable media */
22#define USB_SC_SCSI 0x06 /* Transparent */
23#define USB_SC_LOCKABLE 0x07 /* Password-protected */
24
25#define USB_SC_ISD200 0xf0 /* ISD200 ATA */
26#define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */
27#define USB_SC_DEVICE 0xff /* Use device's value */
28
29/* Storage protocol codes */
30
31#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */
32#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */
33#define USB_PR_BULK 0x50 /* bulk only */
34#define USB_PR_UAS 0x62 /* USB Attached SCSI */
35
36#define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */
37#define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */
38#define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */
39#define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */
40#define USB_PR_FREECOM 0xf1 /* Freecom */
41#define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */
42#define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */
43#define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */
44#define USB_PR_KARMA 0xf5 /* Rio Karma */
45
46#define USB_PR_DEVICE 0xff /* Use device's value */
47
48#endif
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index a4b947e470a5..71693d4a4fe1 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -58,7 +58,11 @@
58 US_FLAG(CAPACITY_OK, 0x00010000) \ 58 US_FLAG(CAPACITY_OK, 0x00010000) \
59 /* READ CAPACITY response is correct */ \ 59 /* READ CAPACITY response is correct */ \
60 US_FLAG(BAD_SENSE, 0x00020000) \ 60 US_FLAG(BAD_SENSE, 0x00020000) \
61 /* Bad Sense (never more than 18 bytes) */ 61 /* Bad Sense (never more than 18 bytes) */ \
62 US_FLAG(NO_READ_DISC_INFO, 0x00040000) \
63 /* cannot handle READ_DISC_INFO */ \
64 US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \
65 /* cannot handle READ_CAPACITY_16 */
62 66
63#define US_FLAG(name, value) US_FL_##name = value , 67#define US_FLAG(name, value) US_FL_##name = value ,
64enum { US_DO_ALL_FLAGS }; 68enum { US_DO_ALL_FLAGS };
@@ -74,42 +78,7 @@ enum { US_DO_ALL_FLAGS };
74#define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) 78#define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF)
75#define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) 79#define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF)
76 80
77/* 81#include <linux/usb/storage.h>
78 * This is probably not the best place to keep these constants, conceptually.
79 * But it's the only header included into all places which need them.
80 */
81
82/* Sub Classes */
83
84#define US_SC_RBC 0x01 /* Typically, flash devices */
85#define US_SC_8020 0x02 /* CD-ROM */
86#define US_SC_QIC 0x03 /* QIC-157 Tapes */
87#define US_SC_UFI 0x04 /* Floppy */
88#define US_SC_8070 0x05 /* Removable media */
89#define US_SC_SCSI 0x06 /* Transparent */
90#define US_SC_LOCKABLE 0x07 /* Password-protected */
91
92#define US_SC_ISD200 0xf0 /* ISD200 ATA */
93#define US_SC_CYP_ATACB 0xf1 /* Cypress ATACB */
94#define US_SC_DEVICE 0xff /* Use device's value */
95
96/* Protocols */
97
98#define US_PR_CBI 0x00 /* Control/Bulk/Interrupt */
99#define US_PR_CB 0x01 /* Control/Bulk w/o interrupt */
100#define US_PR_BULK 0x50 /* bulk only */
101
102#define US_PR_USBAT 0x80 /* SCM-ATAPI bridge */
103#define US_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */
104#define US_PR_SDDR55 0x82 /* SDDR-55 (made up) */
105#define US_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */
106#define US_PR_FREECOM 0xf1 /* Freecom */
107#define US_PR_DATAFAB 0xf2 /* Datafab chipsets */
108#define US_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */
109#define US_PR_ALAUDA 0xf4 /* Alauda chipsets */
110#define US_PR_KARMA 0xf5 /* Rio Karma */
111
112#define US_PR_DEVICE 0xff /* Use device's value */
113 82
114/* 83/*
115 */ 84 */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 01c2145118dc..63a4fe6d51bd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -117,10 +117,12 @@ extern rwlock_t vmlist_lock;
117extern struct vm_struct *vmlist; 117extern struct vm_struct *vmlist;
118extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); 118extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
119 119
120#ifdef CONFIG_SMP
120struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 121struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
121 const size_t *sizes, int nr_vms, 122 const size_t *sizes, int nr_vms,
122 size_t align, gfp_t gfp_mask); 123 size_t align, gfp_t gfp_mask);
123 124
124void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); 125void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
126#endif
125 127
126#endif /* _LINUX_VMALLOC_H */ 128#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 25e02c941bac..070bb7a88936 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -243,11 +243,12 @@ enum {
243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
245 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ 245 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
246 WQ_RESCUER = 1 << 3, /* has an rescue worker */ 246 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
247 WQ_HIGHPRI = 1 << 4, /* high priority */ 247 WQ_HIGHPRI = 1 << 4, /* high priority */
 248 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ 248 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
249 249
250 WQ_DYING = 1 << 6, /* internal: workqueue is dying */ 250 WQ_DYING = 1 << 6, /* internal: workqueue is dying */
251 WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
251 252
252 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 253 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
253 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 254 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -306,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
306 __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) 307 __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
307#endif 308#endif
308 309
310/**
311 * alloc_ordered_workqueue - allocate an ordered workqueue
312 * @name: name of the workqueue
313 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
314 *
315 * Allocate an ordered workqueue. An ordered workqueue executes at
316 * most one work item at any given time in the queued order. They are
317 * implemented as unbound workqueues with @max_active of one.
318 *
319 * RETURNS:
320 * Pointer to the allocated workqueue on success, %NULL on failure.
321 */
322static inline struct workqueue_struct *
323alloc_ordered_workqueue(const char *name, unsigned int flags)
324{
325 return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
326}
327
309#define create_workqueue(name) \ 328#define create_workqueue(name) \
310 alloc_workqueue((name), WQ_RESCUER, 1) 329 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
311#define create_freezeable_workqueue(name) \ 330#define create_freezeable_workqueue(name) \
312 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) 331 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
313#define create_singlethread_workqueue(name) \ 332#define create_singlethread_workqueue(name) \
314 alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) 333 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
315 334
316extern void destroy_workqueue(struct workqueue_struct *wq); 335extern void destroy_workqueue(struct workqueue_struct *wq);
317 336
@@ -325,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
325 344
326extern void flush_workqueue(struct workqueue_struct *wq); 345extern void flush_workqueue(struct workqueue_struct *wq);
327extern void flush_scheduled_work(void); 346extern void flush_scheduled_work(void);
328extern void flush_delayed_work(struct delayed_work *work);
329 347
330extern int schedule_work(struct work_struct *work); 348extern int schedule_work(struct work_struct *work);
331extern int schedule_work_on(int cpu, struct work_struct *work); 349extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -337,8 +355,13 @@ extern int keventd_up(void);
337 355
338int execute_in_process_context(work_func_t fn, struct execute_work *); 356int execute_in_process_context(work_func_t fn, struct execute_work *);
339 357
340extern int flush_work(struct work_struct *work); 358extern bool flush_work(struct work_struct *work);
341extern int cancel_work_sync(struct work_struct *work); 359extern bool flush_work_sync(struct work_struct *work);
360extern bool cancel_work_sync(struct work_struct *work);
361
362extern bool flush_delayed_work(struct delayed_work *dwork);
363extern bool flush_delayed_work_sync(struct delayed_work *work);
364extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
342 365
343extern void workqueue_set_max_active(struct workqueue_struct *wq, 366extern void workqueue_set_max_active(struct workqueue_struct *wq,
344 int max_active); 367 int max_active);
@@ -352,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work);
352 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or 375 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
353 * cancel_work_sync() to wait on it. 376 * cancel_work_sync() to wait on it.
354 */ 377 */
355static inline int cancel_delayed_work(struct delayed_work *work) 378static inline bool cancel_delayed_work(struct delayed_work *work)
356{ 379{
357 int ret; 380 bool ret;
358 381
359 ret = del_timer_sync(&work->timer); 382 ret = del_timer_sync(&work->timer);
360 if (ret) 383 if (ret)
@@ -367,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
367 * if it returns 0 the timer function may be running and the queueing is in 390 * if it returns 0 the timer function may be running and the queueing is in
368 * progress. 391 * progress.
369 */ 392 */
370static inline int __cancel_delayed_work(struct delayed_work *work) 393static inline bool __cancel_delayed_work(struct delayed_work *work)
371{ 394{
372 int ret; 395 bool ret;
373 396
374 ret = del_timer(&work->timer); 397 ret = del_timer(&work->timer);
375 if (ret) 398 if (ret)
@@ -377,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
377 return ret; 400 return ret;
378} 401}
379 402
380extern int cancel_delayed_work_sync(struct delayed_work *work);
381
382/* Obsolete. use cancel_delayed_work_sync() */ 403/* Obsolete. use cancel_delayed_work_sync() */
383static inline 404static inline
384void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, 405void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
@@ -409,8 +430,4 @@ extern bool freeze_workqueues_busy(void);
409extern void thaw_workqueues(void); 430extern void thaw_workqueues(void);
410#endif /* CONFIG_FREEZER */ 431#endif /* CONFIG_FREEZER */
411 432
412#ifdef CONFIG_LOCKDEP
413int in_workqueue_context(struct workqueue_struct *wq);
414#endif
415
416#endif 433#endif
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index d06e13be717b..3dec1949f69c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -205,6 +205,7 @@ struct domain_device {
205 }; 205 };
206 206
207 void *lldd_dev; 207 void *lldd_dev;
208 int gone;
208}; 209};
209 210
210struct sas_discovery_event { 211struct sas_discovery_event {
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8fcb6e0e9e72..216af8538cc9 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -32,6 +32,12 @@ struct scsi_cmnd;
32#endif 32#endif
33 33
34/* 34/*
35 * DIX-capable adapters effectively support infinite chaining for the
36 * protection information scatterlist
37 */
38#define SCSI_MAX_PROT_SG_SEGMENTS 0xFFFF
39
40/*
35 * Special value for scanning to specify scanning or rescanning of all 41 * Special value for scanning to specify scanning or rescanning of all
36 * possible channels, (target) ids, or luns on a given shost. 42 * possible channels, (target) ids, or luns on a given shost.
37 */ 43 */
@@ -67,6 +73,7 @@ struct scsi_cmnd;
67#define SEND_DIAGNOSTIC 0x1d 73#define SEND_DIAGNOSTIC 0x1d
68#define ALLOW_MEDIUM_REMOVAL 0x1e 74#define ALLOW_MEDIUM_REMOVAL 0x1e
69 75
76#define READ_FORMAT_CAPACITIES 0x23
70#define SET_WINDOW 0x24 77#define SET_WINDOW 0x24
71#define READ_CAPACITY 0x25 78#define READ_CAPACITY 0x25
72#define READ_10 0x28 79#define READ_10 0x28
@@ -96,6 +103,7 @@ struct scsi_cmnd;
96#define WRITE_SAME 0x41 103#define WRITE_SAME 0x41
97#define UNMAP 0x42 104#define UNMAP 0x42
98#define READ_TOC 0x43 105#define READ_TOC 0x43
106#define READ_HEADER 0x44
99#define LOG_SELECT 0x4c 107#define LOG_SELECT 0x4c
100#define LOG_SENSE 0x4d 108#define LOG_SENSE 0x4d
101#define XDWRITEREAD_10 0x53 109#define XDWRITEREAD_10 0x53
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 50cb34ffef11..85867dcde335 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -148,6 +148,8 @@ struct scsi_device {
148 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ 148 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
149 unsigned last_sector_bug:1; /* do not use multisector accesses on 149 unsigned last_sector_bug:1; /* do not use multisector accesses on
150 SD_LAST_BUGGY_SECTORS */ 150 SD_LAST_BUGGY_SECTORS */
151 unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */
152 unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
151 unsigned is_visible:1; /* is the device visible in sysfs */ 153 unsigned is_visible:1; /* is the device visible in sysfs */
152 154
153 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ 155 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index b7bdecb7b76e..d0a6a845f204 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -388,6 +388,7 @@ struct scsi_host_template {
388 * of scatter-gather. 388 * of scatter-gather.
389 */ 389 */
390 unsigned short sg_tablesize; 390 unsigned short sg_tablesize;
391 unsigned short sg_prot_tablesize;
391 392
392 /* 393 /*
393 * Set this if the host adapter has limitations beside segment count. 394 * Set this if the host adapter has limitations beside segment count.
@@ -599,6 +600,7 @@ struct Scsi_Host {
599 int can_queue; 600 int can_queue;
600 short cmd_per_lun; 601 short cmd_per_lun;
601 short unsigned int sg_tablesize; 602 short unsigned int sg_tablesize;
603 short unsigned int sg_prot_tablesize;
602 short unsigned int max_sectors; 604 short unsigned int max_sectors;
603 unsigned long dma_boundary; 605 unsigned long dma_boundary;
604 /* 606 /*
@@ -823,6 +825,11 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
823 return shost->prot_capabilities; 825 return shost->prot_capabilities;
824} 826}
825 827
828static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
829{
830 return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
831}
832
826static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) 833static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
827{ 834{
828 static unsigned char cap[] = { 0, 835 static unsigned char cap[] = { 0,
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 17231385cb37..d6e7994aa634 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -97,13 +97,9 @@ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
97static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) 97static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
98{ 98{
99 struct request *req = cmd->request; 99 struct request *req = cmd->request;
100 struct scsi_device *sdev = cmd->device;
101 100
102 if (blk_rq_tagged(req)) { 101 if (blk_rq_tagged(req)) {
103 if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER) 102 *msg++ = MSG_SIMPLE_TAG;
104 *msg++ = MSG_ORDERED_TAG;
105 else
106 *msg++ = MSG_SIMPLE_TAG;
107 *msg++ = req->tag; 103 *msg++ = req->tag;
108 return 2; 104 return 2;
109 } 105 }
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 87d81b3ce564..59816fe31e68 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -496,6 +496,7 @@ struct fc_host_attrs {
496 u64 fabric_name; 496 u64 fabric_name;
497 char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; 497 char symbolic_name[FC_SYMBOLIC_NAME_SIZE];
498 char system_hostname[FC_SYMBOLIC_NAME_SIZE]; 498 char system_hostname[FC_SYMBOLIC_NAME_SIZE];
499 u32 dev_loss_tmo;
499 500
500 /* Private (Transport-managed) Attributes */ 501 /* Private (Transport-managed) Attributes */
501 enum fc_tgtid_binding_type tgtid_bind_type; 502 enum fc_tgtid_binding_type tgtid_bind_type;
@@ -580,6 +581,8 @@ struct fc_host_attrs {
580 (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name) 581 (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
581#define fc_host_devloss_work_q(x) \ 582#define fc_host_devloss_work_q(x) \
582 (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) 583 (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
584#define fc_host_dev_loss_tmo(x) \
585 (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
583 586
584 587
585struct fc_bsg_buffer { 588struct fc_bsg_buffer {
diff --git a/include/sound/core.h b/include/sound/core.h
index 89e0ac17f44a..df26ebbfa9c6 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -133,9 +133,7 @@ struct snd_card {
133 int free_on_last_close; /* free in context of file_release */ 133 int free_on_last_close; /* free in context of file_release */
134 wait_queue_head_t shutdown_sleep; 134 wait_queue_head_t shutdown_sleep;
135 struct device *dev; /* device assigned to this card */ 135 struct device *dev; /* device assigned to this card */
136#ifndef CONFIG_SYSFS_DEPRECATED
137 struct device *card_dev; /* cardX object for sysfs */ 136 struct device *card_dev; /* cardX object for sysfs */
138#endif
139 137
140#ifdef CONFIG_PM 138#ifdef CONFIG_PM
141 unsigned int power_state; /* power state */ 139 unsigned int power_state; /* power state */
@@ -196,11 +194,7 @@ struct snd_minor {
196/* return a device pointer linked to each sound device as a parent */ 194/* return a device pointer linked to each sound device as a parent */
197static inline struct device *snd_card_get_device_link(struct snd_card *card) 195static inline struct device *snd_card_get_device_link(struct snd_card *card)
198{ 196{
199#ifdef CONFIG_SYSFS_DEPRECATED
200 return card ? card->dev : NULL;
201#else
202 return card ? card->card_dev : NULL; 197 return card ? card->card_dev : NULL;
203#endif
204} 198}
205 199
206/* sound.c */ 200/* sound.c */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 49682d7e9d60..7d497291c85d 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -7,38 +7,83 @@
7#include <linux/tracepoint.h> 7#include <linux/tracepoint.h>
8#include <linux/workqueue.h> 8#include <linux/workqueue.h>
9 9
10DECLARE_EVENT_CLASS(workqueue_work,
11
12 TP_PROTO(struct work_struct *work),
13
14 TP_ARGS(work),
15
16 TP_STRUCT__entry(
17 __field( void *, work )
18 ),
19
20 TP_fast_assign(
21 __entry->work = work;
22 ),
23
24 TP_printk("work struct %p", __entry->work)
25);
26
10/** 27/**
11 * workqueue_execute_start - called immediately before the workqueue callback 28 * workqueue_queue_work - called when a work gets queued
29 * @req_cpu: the requested cpu
30 * @cwq: pointer to struct cpu_workqueue_struct
12 * @work: pointer to struct work_struct 31 * @work: pointer to struct work_struct
13 * 32 *
14 * Allows to track workqueue execution. 33 * This event occurs when a work is queued immediately or once a
34 * delayed work is actually queued on a workqueue (ie: once the delay
35 * has been reached).
15 */ 36 */
16TRACE_EVENT(workqueue_execute_start, 37TRACE_EVENT(workqueue_queue_work,
17 38
18 TP_PROTO(struct work_struct *work), 39 TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
40 struct work_struct *work),
19 41
20 TP_ARGS(work), 42 TP_ARGS(req_cpu, cwq, work),
21 43
22 TP_STRUCT__entry( 44 TP_STRUCT__entry(
23 __field( void *, work ) 45 __field( void *, work )
24 __field( void *, function) 46 __field( void *, function)
47 __field( void *, workqueue)
48 __field( unsigned int, req_cpu )
49 __field( unsigned int, cpu )
25 ), 50 ),
26 51
27 TP_fast_assign( 52 TP_fast_assign(
28 __entry->work = work; 53 __entry->work = work;
29 __entry->function = work->func; 54 __entry->function = work->func;
55 __entry->workqueue = cwq->wq;
56 __entry->req_cpu = req_cpu;
57 __entry->cpu = cwq->gcwq->cpu;
30 ), 58 ),
31 59
32 TP_printk("work struct %p: function %pf", __entry->work, __entry->function) 60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
61 __entry->work, __entry->function, __entry->workqueue,
62 __entry->req_cpu, __entry->cpu)
33); 63);
34 64
35/** 65/**
36 * workqueue_execute_end - called immediately before the workqueue callback 66 * workqueue_activate_work - called when a work gets activated
67 * @work: pointer to struct work_struct
68 *
69 * This event occurs when a queued work is put on the active queue,
70 * which happens immediately after queueing unless @max_active limit
71 * is reached.
72 */
73DEFINE_EVENT(workqueue_work, workqueue_activate_work,
74
75 TP_PROTO(struct work_struct *work),
76
77 TP_ARGS(work)
78);
79
80/**
81 * workqueue_execute_start - called immediately before the workqueue callback
37 * @work: pointer to struct work_struct 82 * @work: pointer to struct work_struct
38 * 83 *
39 * Allows to track workqueue execution. 84 * Allows to track workqueue execution.
40 */ 85 */
41TRACE_EVENT(workqueue_execute_end, 86TRACE_EVENT(workqueue_execute_start,
42 87
43 TP_PROTO(struct work_struct *work), 88 TP_PROTO(struct work_struct *work),
44 89
@@ -46,15 +91,29 @@ TRACE_EVENT(workqueue_execute_end,
46 91
47 TP_STRUCT__entry( 92 TP_STRUCT__entry(
48 __field( void *, work ) 93 __field( void *, work )
94 __field( void *, function)
49 ), 95 ),
50 96
51 TP_fast_assign( 97 TP_fast_assign(
52 __entry->work = work; 98 __entry->work = work;
99 __entry->function = work->func;
53 ), 100 ),
54 101
55 TP_printk("work struct %p", __entry->work) 102 TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
56); 103);
57 104
105/**
 106 * workqueue_execute_end - called immediately after the workqueue callback
107 * @work: pointer to struct work_struct
108 *
109 * Allows to track workqueue execution.
110 */
111DEFINE_EVENT(workqueue_work, workqueue_execute_end,
112
113 TP_PROTO(struct work_struct *work),
114
115 TP_ARGS(work)
116);
58 117
59#endif /* _TRACE_WORKQUEUE_H */ 118#endif /* _TRACE_WORKQUEUE_H */
60 119
diff --git a/init/Kconfig b/init/Kconfig
index be85a0ab1b82..fdfd97efe0e0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -661,11 +661,14 @@ config BLK_CGROUP
661 661
662 Currently, CFQ IO scheduler uses it to recognize task groups and 662 Currently, CFQ IO scheduler uses it to recognize task groups and
663 control disk bandwidth allocation (proportional time slice allocation) 663 control disk bandwidth allocation (proportional time slice allocation)
664 to such task groups. 664 to such task groups. It is also used by bio throttling logic in
665 block layer to implement upper limit in IO rates on a device.
665 666
666 This option only enables generic Block IO controller infrastructure. 667 This option only enables generic Block IO controller infrastructure.
667 One needs to also enable actual IO controlling logic in CFQ for it 668 One needs to also enable actual IO controlling logic/policy. For
 668 to take effect. (CONFIG_CFQ_GROUP_IOSCHED=y). 669 enabling proportional weight division of disk bandwidth in CFQ set
670 CONFIG_CFQ_GROUP_IOSCHED=y and for enabling throttling policy set
671 CONFIG_BLK_THROTTLE=y.
669 672
670 See Documentation/cgroups/blkio-controller.txt for more information. 673 See Documentation/cgroups/blkio-controller.txt for more information.
671 674
@@ -683,40 +686,42 @@ config MM_OWNER
683 bool 686 bool
684 687
685config SYSFS_DEPRECATED 688config SYSFS_DEPRECATED
686 bool
687
688config SYSFS_DEPRECATED_V2
689 bool "enable deprecated sysfs features to support old userspace tools" 689 bool "enable deprecated sysfs features to support old userspace tools"
690 depends on SYSFS 690 depends on SYSFS
691 default n 691 default n
692 select SYSFS_DEPRECATED 692 help
693 help 693 This option adds code that switches the layout of the "block" class
694 This option switches the layout of sysfs to the deprecated 694 devices, to not show up in /sys/class/block/, but only in
695 version. Do not use it on recent distributions. 695 /sys/block/.
696 696
697 The current sysfs layout features a unified device tree at 697 This switch is only active when the sysfs.deprecated=1 boot option is
698 /sys/devices/, which is able to express a hierarchy between 698 passed or the SYSFS_DEPRECATED_V2 option is set.
699 class devices. If the deprecated option is set to Y, the 699
700 unified device tree is split into a bus device tree at 700 This option allows new kernels to run on old distributions and tools,
701 /sys/devices/ and several individual class device trees at 701 which might get confused by /sys/class/block/. Since 2007/2008 all
702 /sys/class/. The class and bus devices will be connected by 702 major distributions and tools handle this just fine.
703 "<subsystem>:<name>" and the "device" links. The "block" 703
704 class devices, will not show up in /sys/class/block/. Some 704 Recent distributions and userspace tools after 2009/2010 depend on
705 subsystems will suppress the creation of some devices which 705 the existence of /sys/class/block/, and will not work with this
706 depend on the unified device tree. 706 option enabled.
707 707
708 This option is not a pure compatibility option that can 708 Only if you are using a new kernel on an old distribution, you might
709 be safely enabled on newer distributions. It will change the 709 need to say Y here.
710 layout of sysfs to the non-extensible deprecated version, 710
711 and disable some features, which can not be exported without 711config SYSFS_DEPRECATED_V2
712 confusing older userspace tools. Since 2007/2008 all major 712 bool "enabled deprecated sysfs features by default"
713 distributions do not enable this option, and ship no tools which 713 default n
714 depend on the deprecated layout or this option. 714 depends on SYSFS
715 715 depends on SYSFS_DEPRECATED
716 If you are using a new kernel on an older distribution, or use 716 help
717 older userspace tools, you might need to say Y here. Do not say Y, 717 Enable deprecated sysfs by default.
718 if the original kernel, that came with your distribution, has 718
719 this option set to N. 719 See the CONFIG_SYSFS_DEPRECATED option for more details about this
720 option.
721
722 Only if you are using a new kernel on an old distribution, you might
723 need to say Y here. Even then, odds are you would not need it
724 enabled, you can always pass the boot option if absolutely necessary.
720 725
721config RELAY 726config RELAY
722 bool "Kernel->user space relay support (formerly relayfs)" 727 bool "Kernel->user space relay support (formerly relayfs)"
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 02e3ca4fc527..42db0551c3aa 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -58,6 +58,62 @@ static int __init readwrite(char *str)
58__setup("ro", readonly); 58__setup("ro", readonly);
59__setup("rw", readwrite); 59__setup("rw", readwrite);
60 60
61#ifdef CONFIG_BLOCK
62/**
63 * match_dev_by_uuid - callback for finding a partition using its uuid
64 * @dev: device passed in by the caller
65 * @data: opaque pointer to a 16 byte packed binary UUID (as produced by part_pack_uuid())
66 *
67 * Returns 1 if the device matches, and 0 otherwise.
68 */
69static int match_dev_by_uuid(struct device *dev, void *data)
70{
71 u8 *uuid = data;
72 struct hd_struct *part = dev_to_part(dev);
73
74 if (!part->info)
75 goto no_match;
76
77 if (memcmp(uuid, part->info->uuid, sizeof(part->info->uuid)))
78 goto no_match;
79
80 return 1;
81no_match:
82 return 0;
83}
84
85
86/**
87 * devt_from_partuuid - looks up the dev_t of a partition by its UUID
88 * @uuid_str: 36 byte char array containing a hex ascii UUID
89 *
90 * The function will return the first partition which contains a matching
91 * UUID value in its partition_meta_info struct. This does not search
92 * by filesystem UUIDs.
93 *
94 * Returns the matching dev_t on success or 0 on failure.
95 */
96static dev_t __init devt_from_partuuid(char *uuid_str)
97{
98 dev_t res = 0;
99 struct device *dev = NULL;
100 u8 uuid[16];
101
102 /* Pack the requested UUID in the expected format. */
103 part_pack_uuid(uuid_str, uuid);
104
105 dev = class_find_device(&block_class, NULL, uuid, &match_dev_by_uuid);
106 if (!dev)
107 goto done;
108
109 res = dev->devt;
110 put_device(dev);
111
112done:
113 return res;
114}
115#endif
116
61/* 117/*
62 * Convert a name into device number. We accept the following variants: 118 * Convert a name into device number. We accept the following variants:
63 * 119 *
@@ -68,6 +124,8 @@ __setup("rw", readwrite);
68 * of partition - device number of disk plus the partition number 124 * of partition - device number of disk plus the partition number
69 * 5) /dev/<disk_name>p<decimal> - same as the above, that form is 125 * 5) /dev/<disk_name>p<decimal> - same as the above, that form is
70 * used when disk name of partitioned disk ends on a digit. 126 * used when disk name of partitioned disk ends on a digit.
127 * 6) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the
128 * unique id of a partition if the partition table provides it.
71 * 129 *
72 * If name doesn't have fall into the categories above, we return (0,0). 130 * If name doesn't have fall into the categories above, we return (0,0).
73 * block_class is used to check if something is a disk name. If the disk 131 * block_class is used to check if something is a disk name. If the disk
@@ -82,6 +140,18 @@ dev_t name_to_dev_t(char *name)
82 dev_t res = 0; 140 dev_t res = 0;
83 int part; 141 int part;
84 142
143#ifdef CONFIG_BLOCK
144 if (strncmp(name, "PARTUUID=", 9) == 0) {
145 name += 9;
146 if (strlen(name) != 36)
147 goto fail;
148 res = devt_from_partuuid(name);
149 if (!res)
150 goto fail;
151 goto done;
152 }
153#endif
154
85 if (strncmp(name, "/dev/", 5) != 0) { 155 if (strncmp(name, "/dev/", 5) != 0) {
86 unsigned maj, min; 156 unsigned maj, min;
87 157
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f77afd939229..30acdb74cc23 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -42,9 +42,6 @@
42#include <linux/lockdep.h> 42#include <linux/lockdep.h>
43#include <linux/idr.h> 43#include <linux/idr.h>
44 44
45#define CREATE_TRACE_POINTS
46#include <trace/events/workqueue.h>
47
48#include "workqueue_sched.h" 45#include "workqueue_sched.h"
49 46
50enum { 47enum {
@@ -257,6 +254,9 @@ EXPORT_SYMBOL_GPL(system_long_wq);
257EXPORT_SYMBOL_GPL(system_nrt_wq); 254EXPORT_SYMBOL_GPL(system_nrt_wq);
258EXPORT_SYMBOL_GPL(system_unbound_wq); 255EXPORT_SYMBOL_GPL(system_unbound_wq);
259 256
257#define CREATE_TRACE_POINTS
258#include <trace/events/workqueue.h>
259
260#define for_each_busy_worker(worker, i, pos, gcwq) \ 260#define for_each_busy_worker(worker, i, pos, gcwq) \
261 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ 261 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
262 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) 262 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -310,21 +310,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
310 (cpu) < WORK_CPU_NONE; \ 310 (cpu) < WORK_CPU_NONE; \
311 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) 311 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
312 312
313#ifdef CONFIG_LOCKDEP
314/**
315 * in_workqueue_context() - in context of specified workqueue?
316 * @wq: the workqueue of interest
317 *
318 * Checks lockdep state to see if the current task is executing from
319 * within a workqueue item. This function exists only if lockdep is
320 * enabled.
321 */
322int in_workqueue_context(struct workqueue_struct *wq)
323{
324 return lock_is_held(&wq->lockdep_map);
325}
326#endif
327
328#ifdef CONFIG_DEBUG_OBJECTS_WORK 313#ifdef CONFIG_DEBUG_OBJECTS_WORK
329 314
330static struct debug_obj_descr work_debug_descr; 315static struct debug_obj_descr work_debug_descr;
@@ -604,7 +589,9 @@ static bool keep_working(struct global_cwq *gcwq)
604{ 589{
605 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); 590 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
606 591
607 return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1; 592 return !list_empty(&gcwq->worklist) &&
593 (atomic_read(nr_running) <= 1 ||
594 gcwq->flags & GCWQ_HIGHPRI_PENDING);
608} 595}
609 596
610/* Do we need a new worker? Called from manager. */ 597/* Do we need a new worker? Called from manager. */
@@ -997,6 +984,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
997 984
998 /* gcwq determined, get cwq and queue */ 985 /* gcwq determined, get cwq and queue */
999 cwq = get_cwq(gcwq->cpu, wq); 986 cwq = get_cwq(gcwq->cpu, wq);
987 trace_workqueue_queue_work(cpu, cwq, work);
1000 988
1001 BUG_ON(!list_empty(&work->entry)); 989 BUG_ON(!list_empty(&work->entry));
1002 990
@@ -1004,6 +992,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1004 work_flags = work_color_to_flags(cwq->work_color); 992 work_flags = work_color_to_flags(cwq->work_color);
1005 993
1006 if (likely(cwq->nr_active < cwq->max_active)) { 994 if (likely(cwq->nr_active < cwq->max_active)) {
995 trace_workqueue_activate_work(work);
1007 cwq->nr_active++; 996 cwq->nr_active++;
1008 worklist = gcwq_determine_ins_pos(gcwq, cwq); 997 worklist = gcwq_determine_ins_pos(gcwq, cwq);
1009 } else { 998 } else {
@@ -1679,6 +1668,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1679 struct work_struct, entry); 1668 struct work_struct, entry);
1680 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); 1669 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1681 1670
1671 trace_workqueue_activate_work(work);
1682 move_linked_works(work, pos, NULL); 1672 move_linked_works(work, pos, NULL);
1683 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 1673 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1684 cwq->nr_active++; 1674 cwq->nr_active++;
@@ -2326,27 +2316,17 @@ out_unlock:
2326} 2316}
2327EXPORT_SYMBOL_GPL(flush_workqueue); 2317EXPORT_SYMBOL_GPL(flush_workqueue);
2328 2318
2329/** 2319static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2330 * flush_work - block until a work_struct's callback has terminated 2320 bool wait_executing)
2331 * @work: the work which is to be flushed
2332 *
2333 * Returns false if @work has already terminated.
2334 *
2335 * It is expected that, prior to calling flush_work(), the caller has
2336 * arranged for the work to not be requeued, otherwise it doesn't make
2337 * sense to use this function.
2338 */
2339int flush_work(struct work_struct *work)
2340{ 2321{
2341 struct worker *worker = NULL; 2322 struct worker *worker = NULL;
2342 struct global_cwq *gcwq; 2323 struct global_cwq *gcwq;
2343 struct cpu_workqueue_struct *cwq; 2324 struct cpu_workqueue_struct *cwq;
2344 struct wq_barrier barr;
2345 2325
2346 might_sleep(); 2326 might_sleep();
2347 gcwq = get_work_gcwq(work); 2327 gcwq = get_work_gcwq(work);
2348 if (!gcwq) 2328 if (!gcwq)
2349 return 0; 2329 return false;
2350 2330
2351 spin_lock_irq(&gcwq->lock); 2331 spin_lock_irq(&gcwq->lock);
2352 if (!list_empty(&work->entry)) { 2332 if (!list_empty(&work->entry)) {
@@ -2359,28 +2339,127 @@ int flush_work(struct work_struct *work)
2359 cwq = get_work_cwq(work); 2339 cwq = get_work_cwq(work);
2360 if (unlikely(!cwq || gcwq != cwq->gcwq)) 2340 if (unlikely(!cwq || gcwq != cwq->gcwq))
2361 goto already_gone; 2341 goto already_gone;
2362 } else { 2342 } else if (wait_executing) {
2363 worker = find_worker_executing_work(gcwq, work); 2343 worker = find_worker_executing_work(gcwq, work);
2364 if (!worker) 2344 if (!worker)
2365 goto already_gone; 2345 goto already_gone;
2366 cwq = worker->current_cwq; 2346 cwq = worker->current_cwq;
2367 } 2347 } else
2348 goto already_gone;
2368 2349
2369 insert_wq_barrier(cwq, &barr, work, worker); 2350 insert_wq_barrier(cwq, barr, work, worker);
2370 spin_unlock_irq(&gcwq->lock); 2351 spin_unlock_irq(&gcwq->lock);
2371 2352
2372 lock_map_acquire(&cwq->wq->lockdep_map); 2353 lock_map_acquire(&cwq->wq->lockdep_map);
2373 lock_map_release(&cwq->wq->lockdep_map); 2354 lock_map_release(&cwq->wq->lockdep_map);
2374 2355 return true;
2375 wait_for_completion(&barr.done);
2376 destroy_work_on_stack(&barr.work);
2377 return 1;
2378already_gone: 2356already_gone:
2379 spin_unlock_irq(&gcwq->lock); 2357 spin_unlock_irq(&gcwq->lock);
2380 return 0; 2358 return false;
2359}
2360
2361/**
2362 * flush_work - wait for a work to finish executing the last queueing instance
2363 * @work: the work to flush
2364 *
2365 * Wait until @work has finished execution. This function considers
2366 * only the last queueing instance of @work. If @work has been
2367 * enqueued across different CPUs on a non-reentrant workqueue or on
2368 * multiple workqueues, @work might still be executing on return on
2369 * some of the CPUs from earlier queueing.
2370 *
2371 * If @work was queued only on a non-reentrant, ordered or unbound
2372 * workqueue, @work is guaranteed to be idle on return if it hasn't
2373 * been requeued since flush started.
2374 *
2375 * RETURNS:
2376 * %true if flush_work() waited for the work to finish execution,
2377 * %false if it was already idle.
2378 */
2379bool flush_work(struct work_struct *work)
2380{
2381 struct wq_barrier barr;
2382
2383 if (start_flush_work(work, &barr, true)) {
2384 wait_for_completion(&barr.done);
2385 destroy_work_on_stack(&barr.work);
2386 return true;
2387 } else
2388 return false;
2381} 2389}
2382EXPORT_SYMBOL_GPL(flush_work); 2390EXPORT_SYMBOL_GPL(flush_work);
2383 2391
2392static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2393{
2394 struct wq_barrier barr;
2395 struct worker *worker;
2396
2397 spin_lock_irq(&gcwq->lock);
2398
2399 worker = find_worker_executing_work(gcwq, work);
2400 if (unlikely(worker))
2401 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2402
2403 spin_unlock_irq(&gcwq->lock);
2404
2405 if (unlikely(worker)) {
2406 wait_for_completion(&barr.done);
2407 destroy_work_on_stack(&barr.work);
2408 return true;
2409 } else
2410 return false;
2411}
2412
2413static bool wait_on_work(struct work_struct *work)
2414{
2415 bool ret = false;
2416 int cpu;
2417
2418 might_sleep();
2419
2420 lock_map_acquire(&work->lockdep_map);
2421 lock_map_release(&work->lockdep_map);
2422
2423 for_each_gcwq_cpu(cpu)
2424 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2425 return ret;
2426}
2427
2428/**
2429 * flush_work_sync - wait until a work has finished execution
2430 * @work: the work to flush
2431 *
2432 * Wait until @work has finished execution. On return, it's
2433 * guaranteed that all queueing instances of @work which happened
2434 * before this function is called are finished. In other words, if
2435 * @work hasn't been requeued since this function was called, @work is
2436 * guaranteed to be idle on return.
2437 *
2438 * RETURNS:
2439 * %true if flush_work_sync() waited for the work to finish execution,
2440 * %false if it was already idle.
2441 */
2442bool flush_work_sync(struct work_struct *work)
2443{
2444 struct wq_barrier barr;
2445 bool pending, waited;
2446
2447 /* we'll wait for executions separately, queue barr only if pending */
2448 pending = start_flush_work(work, &barr, false);
2449
2450 /* wait for executions to finish */
2451 waited = wait_on_work(work);
2452
2453 /* wait for the pending one */
2454 if (pending) {
2455 wait_for_completion(&barr.done);
2456 destroy_work_on_stack(&barr.work);
2457 }
2458
2459 return pending || waited;
2460}
2461EXPORT_SYMBOL_GPL(flush_work_sync);
2462
2384/* 2463/*
2385 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 2464 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2386 * so this work can't be re-armed in any way. 2465 * so this work can't be re-armed in any way.
@@ -2423,39 +2502,7 @@ static int try_to_grab_pending(struct work_struct *work)
2423 return ret; 2502 return ret;
2424} 2503}
2425 2504
2426static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) 2505static bool __cancel_work_timer(struct work_struct *work,
2427{
2428 struct wq_barrier barr;
2429 struct worker *worker;
2430
2431 spin_lock_irq(&gcwq->lock);
2432
2433 worker = find_worker_executing_work(gcwq, work);
2434 if (unlikely(worker))
2435 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2436
2437 spin_unlock_irq(&gcwq->lock);
2438
2439 if (unlikely(worker)) {
2440 wait_for_completion(&barr.done);
2441 destroy_work_on_stack(&barr.work);
2442 }
2443}
2444
2445static void wait_on_work(struct work_struct *work)
2446{
2447 int cpu;
2448
2449 might_sleep();
2450
2451 lock_map_acquire(&work->lockdep_map);
2452 lock_map_release(&work->lockdep_map);
2453
2454 for_each_gcwq_cpu(cpu)
2455 wait_on_cpu_work(get_gcwq(cpu), work);
2456}
2457
2458static int __cancel_work_timer(struct work_struct *work,
2459 struct timer_list* timer) 2506 struct timer_list* timer)
2460{ 2507{
2461 int ret; 2508 int ret;
@@ -2472,42 +2519,81 @@ static int __cancel_work_timer(struct work_struct *work,
2472} 2519}
2473 2520
2474/** 2521/**
2475 * cancel_work_sync - block until a work_struct's callback has terminated 2522 * cancel_work_sync - cancel a work and wait for it to finish
2476 * @work: the work which is to be flushed 2523 * @work: the work to cancel
2477 *
2478 * Returns true if @work was pending.
2479 * 2524 *
2480 * cancel_work_sync() will cancel the work if it is queued. If the work's 2525 * Cancel @work and wait for its execution to finish. This function
2481 * callback appears to be running, cancel_work_sync() will block until it 2526 * can be used even if the work re-queues itself or migrates to
2482 * has completed. 2527 * another workqueue. On return from this function, @work is
2483 * 2528 * guaranteed to be not pending or executing on any CPU.
2484 * It is possible to use this function if the work re-queues itself. It can
2485 * cancel the work even if it migrates to another workqueue, however in that
2486 * case it only guarantees that work->func() has completed on the last queued
2487 * workqueue.
2488 * 2529 *
2489 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not 2530 * cancel_work_sync(&delayed_work->work) must not be used for
2490 * pending, otherwise it goes into a busy-wait loop until the timer expires. 2531 * delayed_work's. Use cancel_delayed_work_sync() instead.
2491 * 2532 *
2492 * The caller must ensure that workqueue_struct on which this work was last 2533 * The caller must ensure that the workqueue on which @work was last
2493 * queued can't be destroyed before this function returns. 2534 * queued can't be destroyed before this function returns.
2535 *
2536 * RETURNS:
2537 * %true if @work was pending, %false otherwise.
2494 */ 2538 */
2495int cancel_work_sync(struct work_struct *work) 2539bool cancel_work_sync(struct work_struct *work)
2496{ 2540{
2497 return __cancel_work_timer(work, NULL); 2541 return __cancel_work_timer(work, NULL);
2498} 2542}
2499EXPORT_SYMBOL_GPL(cancel_work_sync); 2543EXPORT_SYMBOL_GPL(cancel_work_sync);
2500 2544
2501/** 2545/**
2502 * cancel_delayed_work_sync - reliably kill off a delayed work. 2546 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2503 * @dwork: the delayed work struct 2547 * @dwork: the delayed work to flush
2548 *
2549 * Delayed timer is cancelled and the pending work is queued for
2550 * immediate execution. Like flush_work(), this function only
2551 * considers the last queueing instance of @dwork.
2552 *
2553 * RETURNS:
2554 * %true if flush_work() waited for the work to finish execution,
2555 * %false if it was already idle.
2556 */
2557bool flush_delayed_work(struct delayed_work *dwork)
2558{
2559 if (del_timer_sync(&dwork->timer))
2560 __queue_work(raw_smp_processor_id(),
2561 get_work_cwq(&dwork->work)->wq, &dwork->work);
2562 return flush_work(&dwork->work);
2563}
2564EXPORT_SYMBOL(flush_delayed_work);
2565
2566/**
2567 * flush_delayed_work_sync - wait for a dwork to finish
2568 * @dwork: the delayed work to flush
2504 * 2569 *
2505 * Returns true if @dwork was pending. 2570 * Delayed timer is cancelled and the pending work is queued for
2571 * execution immediately. Other than timer handling, its behavior
2572 * is identical to flush_work_sync().
2506 * 2573 *
2507 * It is possible to use this function if @dwork rearms itself via queue_work() 2574 * RETURNS:
2508 * or queue_delayed_work(). See also the comment for cancel_work_sync(). 2575 * %true if flush_work_sync() waited for the work to finish execution,
2576 * %false if it was already idle.
2509 */ 2577 */
2510int cancel_delayed_work_sync(struct delayed_work *dwork) 2578bool flush_delayed_work_sync(struct delayed_work *dwork)
2579{
2580 if (del_timer_sync(&dwork->timer))
2581 __queue_work(raw_smp_processor_id(),
2582 get_work_cwq(&dwork->work)->wq, &dwork->work);
2583 return flush_work_sync(&dwork->work);
2584}
2585EXPORT_SYMBOL(flush_delayed_work_sync);
2586
2587/**
2588 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2589 * @dwork: the delayed work to cancel
2590 *
2591 * This is cancel_work_sync() for delayed works.
2592 *
2593 * RETURNS:
2594 * %true if @dwork was pending, %false otherwise.
2595 */
2596bool cancel_delayed_work_sync(struct delayed_work *dwork)
2511{ 2597{
2512 return __cancel_work_timer(&dwork->work, &dwork->timer); 2598 return __cancel_work_timer(&dwork->work, &dwork->timer);
2513} 2599}
@@ -2559,23 +2645,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
2559EXPORT_SYMBOL(schedule_delayed_work); 2645EXPORT_SYMBOL(schedule_delayed_work);
2560 2646
2561/** 2647/**
2562 * flush_delayed_work - block until a dwork_struct's callback has terminated
2563 * @dwork: the delayed work which is to be flushed
2564 *
2565 * Any timeout is cancelled, and any pending work is run immediately.
2566 */
2567void flush_delayed_work(struct delayed_work *dwork)
2568{
2569 if (del_timer_sync(&dwork->timer)) {
2570 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
2571 &dwork->work);
2572 put_cpu();
2573 }
2574 flush_work(&dwork->work);
2575}
2576EXPORT_SYMBOL(flush_delayed_work);
2577
2578/**
2579 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 2648 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2580 * @cpu: cpu to use 2649 * @cpu: cpu to use
2581 * @dwork: job to be done 2650 * @dwork: job to be done
@@ -2592,13 +2661,15 @@ int schedule_delayed_work_on(int cpu,
2592EXPORT_SYMBOL(schedule_delayed_work_on); 2661EXPORT_SYMBOL(schedule_delayed_work_on);
2593 2662
2594/** 2663/**
2595 * schedule_on_each_cpu - call a function on each online CPU from keventd 2664 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2596 * @func: the function to call 2665 * @func: the function to call
2597 * 2666 *
2598 * Returns zero on success. 2667 * schedule_on_each_cpu() executes @func on each online CPU using the
2599 * Returns -ve errno on failure. 2668 * system workqueue and blocks until all CPUs have completed.
2600 *
2601 * schedule_on_each_cpu() is very slow. 2669 * schedule_on_each_cpu() is very slow.
2670 *
2671 * RETURNS:
2672 * 0 on success, -errno on failure.
2602 */ 2673 */
2603int schedule_on_each_cpu(work_func_t func) 2674int schedule_on_each_cpu(work_func_t func)
2604{ 2675{
@@ -2764,6 +2835,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2764 unsigned int cpu; 2835 unsigned int cpu;
2765 2836
2766 /* 2837 /*
2838 * Workqueues which may be used during memory reclaim should
2839 * have a rescuer to guarantee forward progress.
2840 */
2841 if (flags & WQ_MEM_RECLAIM)
2842 flags |= WQ_RESCUER;
2843
2844 /*
2767 * Unbound workqueues aren't concurrency managed and should be 2845 * Unbound workqueues aren't concurrency managed and should be
2768 * dispatched to workers immediately. 2846 * dispatched to workers immediately.
2769 */ 2847 */
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7bd6df781ce5..3094318bfea7 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -393,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
393 return 0; 393 return 0;
394} 394}
395 395
396static int ddebug_exec_query(char *query_string)
397{
398 unsigned int flags = 0, mask = 0;
399 struct ddebug_query query;
400#define MAXWORDS 9
401 int nwords;
402 char *words[MAXWORDS];
403
404 nwords = ddebug_tokenize(query_string, words, MAXWORDS);
405 if (nwords <= 0)
406 return -EINVAL;
407 if (ddebug_parse_query(words, nwords-1, &query))
408 return -EINVAL;
409 if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
410 return -EINVAL;
411
412 /* actually go and implement the change */
413 ddebug_change(&query, flags, mask);
414 return 0;
415}
416
417static __initdata char ddebug_setup_string[1024];
418static __init int ddebug_setup_query(char *str)
419{
420 if (strlen(str) >= 1024) {
421 pr_warning("ddebug boot param string too large\n");
422 return 0;
423 }
424 strcpy(ddebug_setup_string, str);
425 return 1;
426}
427
428__setup("ddebug_query=", ddebug_setup_query);
429
396/* 430/*
397 * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the 431 * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the
398 * command text from userspace, parses and executes it. 432 * command text from userspace, parses and executes it.
@@ -400,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
400static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, 434static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
401 size_t len, loff_t *offp) 435 size_t len, loff_t *offp)
402{ 436{
403 unsigned int flags = 0, mask = 0;
404 struct ddebug_query query;
405#define MAXWORDS 9
406 int nwords;
407 char *words[MAXWORDS];
408 char tmpbuf[256]; 437 char tmpbuf[256];
438 int ret;
409 439
410 if (len == 0) 440 if (len == 0)
411 return 0; 441 return 0;
@@ -419,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
419 printk(KERN_INFO "%s: read %d bytes from userspace\n", 449 printk(KERN_INFO "%s: read %d bytes from userspace\n",
420 __func__, (int)len); 450 __func__, (int)len);
421 451
422 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS); 452 ret = ddebug_exec_query(tmpbuf);
423 if (nwords <= 0) 453 if (ret)
424 return -EINVAL; 454 return ret;
425 if (ddebug_parse_query(words, nwords-1, &query))
426 return -EINVAL;
427 if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
428 return -EINVAL;
429
430 /* actually go and implement the change */
431 ddebug_change(&query, flags, mask);
432 455
433 *offp += len; 456 *offp += len;
434 return len; 457 return len;
@@ -689,13 +712,14 @@ static void ddebug_remove_all_tables(void)
689 mutex_unlock(&ddebug_lock); 712 mutex_unlock(&ddebug_lock);
690} 713}
691 714
692static int __init dynamic_debug_init(void) 715static __initdata int ddebug_init_success;
716
717static int __init dynamic_debug_init_debugfs(void)
693{ 718{
694 struct dentry *dir, *file; 719 struct dentry *dir, *file;
695 struct _ddebug *iter, *iter_start; 720
696 const char *modname = NULL; 721 if (!ddebug_init_success)
697 int ret = 0; 722 return -ENODEV;
698 int n = 0;
699 723
700 dir = debugfs_create_dir("dynamic_debug", NULL); 724 dir = debugfs_create_dir("dynamic_debug", NULL);
701 if (!dir) 725 if (!dir)
@@ -706,6 +730,16 @@ static int __init dynamic_debug_init(void)
706 debugfs_remove(dir); 730 debugfs_remove(dir);
707 return -ENOMEM; 731 return -ENOMEM;
708 } 732 }
733 return 0;
734}
735
736static int __init dynamic_debug_init(void)
737{
738 struct _ddebug *iter, *iter_start;
739 const char *modname = NULL;
740 int ret = 0;
741 int n = 0;
742
709 if (__start___verbose != __stop___verbose) { 743 if (__start___verbose != __stop___verbose) {
710 iter = __start___verbose; 744 iter = __start___verbose;
711 modname = iter->modname; 745 modname = iter->modname;
@@ -723,12 +757,26 @@ static int __init dynamic_debug_init(void)
723 } 757 }
724 ret = ddebug_add_module(iter_start, n, modname); 758 ret = ddebug_add_module(iter_start, n, modname);
725 } 759 }
760
761 /* ddebug_query boot param got passed -> set it up */
762 if (ddebug_setup_string[0] != '\0') {
763 ret = ddebug_exec_query(ddebug_setup_string);
764 if (ret)
765 pr_warning("Invalid ddebug boot param %s",
766 ddebug_setup_string);
767 else
768 pr_info("ddebug initialized with string %s",
769 ddebug_setup_string);
770 }
771
726out_free: 772out_free:
727 if (ret) { 773 if (ret)
728 ddebug_remove_all_tables(); 774 ddebug_remove_all_tables();
729 debugfs_remove(dir); 775 else
730 debugfs_remove(file); 776 ddebug_init_success = 1;
731 }
732 return 0; 777 return 0;
733} 778}
734module_init(dynamic_debug_init); 779/* Allow early initialization for boot messages via boot param */
780arch_initcall(dynamic_debug_init);
781/* Debugfs setup must be done later */
782module_init(dynamic_debug_init_debugfs);
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c57252e82..82dc34c095c2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
746 */ 746 */
747struct kobject *kset_find_obj(struct kset *kset, const char *name) 747struct kobject *kset_find_obj(struct kset *kset, const char *name)
748{ 748{
749 return kset_find_obj_hinted(kset, name, NULL);
750}
751
752/**
753 * kset_find_obj_hinted - search for object in kset given a predecessor hint.
754 * @kset: kset we're looking in.
755 * @name: object's name.
756 * @hint: hint to possible object's predecessor.
757 *
758 * Check the hint's next object and if it is a match return it directly,
759 * otherwise, fall back to the behavior of kset_find_obj(). Either way
760 * a reference for the returned object is held and the reference on the
761 * hinted object is released.
762 */
763struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
764 struct kobject *hint)
765{
749 struct kobject *k; 766 struct kobject *k;
750 struct kobject *ret = NULL; 767 struct kobject *ret = NULL;
751 768
752 spin_lock(&kset->list_lock); 769 spin_lock(&kset->list_lock);
770
771 if (!hint)
772 goto slow_search;
773
774 /* end of list detection */
775 if (hint->entry.next == kset->list.next)
776 goto slow_search;
777
778 k = container_of(hint->entry.next, struct kobject, entry);
779 if (!kobject_name(k) || strcmp(kobject_name(k), name))
780 goto slow_search;
781
782 ret = kobject_get(k);
783 goto unlock_exit;
784
785slow_search:
753 list_for_each_entry(k, &kset->list, entry) { 786 list_for_each_entry(k, &kset->list, entry) {
754 if (kobject_name(k) && !strcmp(kobject_name(k), name)) { 787 if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
755 ret = kobject_get(k); 788 ret = kobject_get(k);
756 break; 789 break;
757 } 790 }
758 } 791 }
792
793unlock_exit:
759 spin_unlock(&kset->list_lock); 794 spin_unlock(&kset->list_lock);
795
796 if (hint)
797 kobject_put(hint);
798
760 return ret; 799 return ret;
761} 800}
762 801
diff --git a/mm/Kconfig b/mm/Kconfig
index f0fb9124e410..c2c8a4a11898 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -301,3 +301,11 @@ config NOMMU_INITIAL_TRIM_EXCESS
301 of 1 says that all excess pages should be trimmed. 301 of 1 says that all excess pages should be trimmed.
302 302
303 See Documentation/nommu-mmap.txt for more information. 303 See Documentation/nommu-mmap.txt for more information.
304
305#
306# UP and nommu archs use km based percpu allocator
307#
308config NEED_PER_CPU_KM
309 depends on !SMP
310 bool
311 default y
diff --git a/mm/Makefile b/mm/Makefile
index 34b2546a9e37..f73f75a29f82 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 maccess.o page_alloc.o page-writeback.o \ 11 maccess.o page_alloc.o page-writeback.o \
12 readahead.o swap.o truncate.o vmscan.o shmem.o \ 12 readahead.o swap.o truncate.o vmscan.o shmem.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
14 page_isolation.o mm_init.o mmu_context.o \ 14 page_isolation.o mm_init.o mmu_context.o percpu.o \
15 $(mmu-y) 15 $(mmu-y)
16obj-y += init-mm.o 16obj-y += init-mm.o
17 17
@@ -36,11 +36,6 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
36obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o 36obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
37obj-$(CONFIG_FS_XIP) += filemap_xip.o 37obj-$(CONFIG_FS_XIP) += filemap_xip.o
38obj-$(CONFIG_MIGRATION) += migrate.o 38obj-$(CONFIG_MIGRATION) += migrate.o
39ifdef CONFIG_SMP
40obj-y += percpu.o
41else
42obj-y += percpu_up.o
43endif
44obj-$(CONFIG_QUICKLIST) += quicklist.o 39obj-$(CONFIG_QUICKLIST) += quicklist.o
45obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o 40obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
46obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o 41obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index dd186c1a5d53..d4e940a26945 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -840,7 +840,6 @@ repeat:
840 ret = 0; 840 ret = 0;
841 if (drain) { 841 if (drain) {
842 lru_add_drain_all(); 842 lru_add_drain_all();
843 flush_scheduled_work();
844 cond_resched(); 843 cond_resched();
845 drain_all_pages(); 844 drain_all_pages();
846 } 845 }
@@ -862,7 +861,6 @@ repeat:
862 } 861 }
863 /* drain all zone's lru pagevec, this is asyncronous... */ 862 /* drain all zone's lru pagevec, this is asyncronous... */
864 lru_add_drain_all(); 863 lru_add_drain_all();
865 flush_scheduled_work();
866 yield(); 864 yield();
867 /* drain pcp pages , this is synchrouns. */ 865 /* drain pcp pages , this is synchrouns. */
868 drain_all_pages(); 866 drain_all_pages();
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index df680855540a..89633fefc6a2 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -27,7 +27,7 @@
27 * chunk size is not aligned. percpu-km code will whine about it. 27 * chunk size is not aligned. percpu-km code will whine about it.
28 */ 28 */
29 29
30#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 30#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
31#error "contiguous percpu allocation is incompatible with paged first chunk" 31#error "contiguous percpu allocation is incompatible with paged first chunk"
32#endif 32#endif
33 33
@@ -35,7 +35,11 @@
35 35
36static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 36static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
37{ 37{
38 /* noop */ 38 unsigned int cpu;
39
40 for_each_possible_cpu(cpu)
41 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
42
39 return 0; 43 return 0;
40} 44}
41 45
diff --git a/mm/percpu.c b/mm/percpu.c
index c76ef3891e0d..6fc9015534f8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -76,6 +76,7 @@
76#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 76#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
77#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 77#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
78 78
79#ifdef CONFIG_SMP
79/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80#ifndef __addr_to_pcpu_ptr 81#ifndef __addr_to_pcpu_ptr
81#define __addr_to_pcpu_ptr(addr) \ 82#define __addr_to_pcpu_ptr(addr) \
@@ -89,6 +90,11 @@
89 (unsigned long)pcpu_base_addr - \ 90 (unsigned long)pcpu_base_addr - \
90 (unsigned long)__per_cpu_start) 91 (unsigned long)__per_cpu_start)
91#endif 92#endif
93#else /* CONFIG_SMP */
94/* on UP, it's always identity mapped */
95#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
96#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
97#endif /* CONFIG_SMP */
92 98
93struct pcpu_chunk { 99struct pcpu_chunk {
94 struct list_head list; /* linked to pcpu_slot lists */ 100 struct list_head list; /* linked to pcpu_slot lists */
@@ -820,8 +826,8 @@ fail_unlock_mutex:
820 * @size: size of area to allocate in bytes 826 * @size: size of area to allocate in bytes
821 * @align: alignment of area (max PAGE_SIZE) 827 * @align: alignment of area (max PAGE_SIZE)
822 * 828 *
823 * Allocate percpu area of @size bytes aligned at @align. Might 829 * Allocate zero-filled percpu area of @size bytes aligned at @align.
824 * sleep. Might trigger writeouts. 830 * Might sleep. Might trigger writeouts.
825 * 831 *
826 * CONTEXT: 832 * CONTEXT:
827 * Does GFP_KERNEL allocation. 833 * Does GFP_KERNEL allocation.
@@ -840,9 +846,10 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
840 * @size: size of area to allocate in bytes 846 * @size: size of area to allocate in bytes
841 * @align: alignment of area (max PAGE_SIZE) 847 * @align: alignment of area (max PAGE_SIZE)
842 * 848 *
843 * Allocate percpu area of @size bytes aligned at @align from reserved 849 * Allocate zero-filled percpu area of @size bytes aligned at @align
844 * percpu area if arch has set it up; otherwise, allocation is served 850 * from reserved percpu area if arch has set it up; otherwise,
845 * from the same dynamic area. Might sleep. Might trigger writeouts. 851 * allocation is served from the same dynamic area. Might sleep.
852 * Might trigger writeouts.
846 * 853 *
847 * CONTEXT: 854 * CONTEXT:
848 * Does GFP_KERNEL allocation. 855 * Does GFP_KERNEL allocation.
@@ -949,6 +956,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
949 */ 956 */
950bool is_kernel_percpu_address(unsigned long addr) 957bool is_kernel_percpu_address(unsigned long addr)
951{ 958{
959#ifdef CONFIG_SMP
952 const size_t static_size = __per_cpu_end - __per_cpu_start; 960 const size_t static_size = __per_cpu_end - __per_cpu_start;
953 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 961 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
954 unsigned int cpu; 962 unsigned int cpu;
@@ -959,6 +967,8 @@ bool is_kernel_percpu_address(unsigned long addr)
959 if ((void *)addr >= start && (void *)addr < start + static_size) 967 if ((void *)addr >= start && (void *)addr < start + static_size)
960 return true; 968 return true;
961 } 969 }
970#endif
971 /* on UP, can't distinguish from other static vars, always false */
962 return false; 972 return false;
963} 973}
964 974
@@ -1067,161 +1077,6 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1067} 1077}
1068 1078
1069/** 1079/**
1070 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1071 * @reserved_size: the size of reserved percpu area in bytes
1072 * @dyn_size: minimum free size for dynamic allocation in bytes
1073 * @atom_size: allocation atom size
1074 * @cpu_distance_fn: callback to determine distance between cpus, optional
1075 *
1076 * This function determines grouping of units, their mappings to cpus
1077 * and other parameters considering needed percpu size, allocation
1078 * atom size and distances between CPUs.
1079 *
1080 * Groups are always mutliples of atom size and CPUs which are of
1081 * LOCAL_DISTANCE both ways are grouped together and share space for
1082 * units in the same group. The returned configuration is guaranteed
1083 * to have CPUs on different nodes on different groups and >=75% usage
1084 * of allocated virtual address space.
1085 *
1086 * RETURNS:
1087 * On success, pointer to the new allocation_info is returned. On
1088 * failure, ERR_PTR value is returned.
1089 */
1090static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1091 size_t reserved_size, size_t dyn_size,
1092 size_t atom_size,
1093 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1094{
1095 static int group_map[NR_CPUS] __initdata;
1096 static int group_cnt[NR_CPUS] __initdata;
1097 const size_t static_size = __per_cpu_end - __per_cpu_start;
1098 int nr_groups = 1, nr_units = 0;
1099 size_t size_sum, min_unit_size, alloc_size;
1100 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1101 int last_allocs, group, unit;
1102 unsigned int cpu, tcpu;
1103 struct pcpu_alloc_info *ai;
1104 unsigned int *cpu_map;
1105
1106 /* this function may be called multiple times */
1107 memset(group_map, 0, sizeof(group_map));
1108 memset(group_cnt, 0, sizeof(group_cnt));
1109
1110 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1111 size_sum = PFN_ALIGN(static_size + reserved_size +
1112 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1113 dyn_size = size_sum - static_size - reserved_size;
1114
1115 /*
1116 * Determine min_unit_size, alloc_size and max_upa such that
1117 * alloc_size is multiple of atom_size and is the smallest
1118 * which can accomodate 4k aligned segments which are equal to
1119 * or larger than min_unit_size.
1120 */
1121 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1122
1123 alloc_size = roundup(min_unit_size, atom_size);
1124 upa = alloc_size / min_unit_size;
1125 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1126 upa--;
1127 max_upa = upa;
1128
1129 /* group cpus according to their proximity */
1130 for_each_possible_cpu(cpu) {
1131 group = 0;
1132 next_group:
1133 for_each_possible_cpu(tcpu) {
1134 if (cpu == tcpu)
1135 break;
1136 if (group_map[tcpu] == group && cpu_distance_fn &&
1137 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1138 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1139 group++;
1140 nr_groups = max(nr_groups, group + 1);
1141 goto next_group;
1142 }
1143 }
1144 group_map[cpu] = group;
1145 group_cnt[group]++;
1146 }
1147
1148 /*
1149 * Expand unit size until address space usage goes over 75%
1150 * and then as much as possible without using more address
1151 * space.
1152 */
1153 last_allocs = INT_MAX;
1154 for (upa = max_upa; upa; upa--) {
1155 int allocs = 0, wasted = 0;
1156
1157 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1158 continue;
1159
1160 for (group = 0; group < nr_groups; group++) {
1161 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1162 allocs += this_allocs;
1163 wasted += this_allocs * upa - group_cnt[group];
1164 }
1165
1166 /*
1167 * Don't accept if wastage is over 1/3. The
1168 * greater-than comparison ensures upa==1 always
1169 * passes the following check.
1170 */
1171 if (wasted > num_possible_cpus() / 3)
1172 continue;
1173
1174 /* and then don't consume more memory */
1175 if (allocs > last_allocs)
1176 break;
1177 last_allocs = allocs;
1178 best_upa = upa;
1179 }
1180 upa = best_upa;
1181
1182 /* allocate and fill alloc_info */
1183 for (group = 0; group < nr_groups; group++)
1184 nr_units += roundup(group_cnt[group], upa);
1185
1186 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1187 if (!ai)
1188 return ERR_PTR(-ENOMEM);
1189 cpu_map = ai->groups[0].cpu_map;
1190
1191 for (group = 0; group < nr_groups; group++) {
1192 ai->groups[group].cpu_map = cpu_map;
1193 cpu_map += roundup(group_cnt[group], upa);
1194 }
1195
1196 ai->static_size = static_size;
1197 ai->reserved_size = reserved_size;
1198 ai->dyn_size = dyn_size;
1199 ai->unit_size = alloc_size / upa;
1200 ai->atom_size = atom_size;
1201 ai->alloc_size = alloc_size;
1202
1203 for (group = 0, unit = 0; group_cnt[group]; group++) {
1204 struct pcpu_group_info *gi = &ai->groups[group];
1205
1206 /*
1207 * Initialize base_offset as if all groups are located
1208 * back-to-back. The caller should update this to
1209 * reflect actual allocation.
1210 */
1211 gi->base_offset = unit * ai->unit_size;
1212
1213 for_each_possible_cpu(cpu)
1214 if (group_map[cpu] == group)
1215 gi->cpu_map[gi->nr_units++] = cpu;
1216 gi->nr_units = roundup(gi->nr_units, upa);
1217 unit += gi->nr_units;
1218 }
1219 BUG_ON(unit != nr_units);
1220
1221 return ai;
1222}
1223
1224/**
1225 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1080 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1226 * @lvl: loglevel 1081 * @lvl: loglevel
1227 * @ai: allocation info to dump 1082 * @ai: allocation info to dump
@@ -1363,7 +1218,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1363 1218
1364 /* sanity checks */ 1219 /* sanity checks */
1365 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1220 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1221#ifdef CONFIG_SMP
1366 PCPU_SETUP_BUG_ON(!ai->static_size); 1222 PCPU_SETUP_BUG_ON(!ai->static_size);
1223#endif
1367 PCPU_SETUP_BUG_ON(!base_addr); 1224 PCPU_SETUP_BUG_ON(!base_addr);
1368 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1225 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1369 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1226 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
@@ -1488,6 +1345,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1488 return 0; 1345 return 0;
1489} 1346}
1490 1347
1348#ifdef CONFIG_SMP
1349
1491const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { 1350const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1492 [PCPU_FC_AUTO] = "auto", 1351 [PCPU_FC_AUTO] = "auto",
1493 [PCPU_FC_EMBED] = "embed", 1352 [PCPU_FC_EMBED] = "embed",
@@ -1515,8 +1374,180 @@ static int __init percpu_alloc_setup(char *str)
1515} 1374}
1516early_param("percpu_alloc", percpu_alloc_setup); 1375early_param("percpu_alloc", percpu_alloc_setup);
1517 1376
1377/*
1378 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1379 * Build it if needed by the arch config or the generic setup is going
1380 * to be used.
1381 */
1518#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 1382#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1519 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 1383 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1384#define BUILD_EMBED_FIRST_CHUNK
1385#endif
1386
1387/* build pcpu_page_first_chunk() iff needed by the arch config */
1388#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1389#define BUILD_PAGE_FIRST_CHUNK
1390#endif
1391
1392/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1393#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1394/**
1395 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1396 * @reserved_size: the size of reserved percpu area in bytes
1397 * @dyn_size: minimum free size for dynamic allocation in bytes
1398 * @atom_size: allocation atom size
1399 * @cpu_distance_fn: callback to determine distance between cpus, optional
1400 *
1401 * This function determines grouping of units, their mappings to cpus
1402 * and other parameters considering needed percpu size, allocation
1403 * atom size and distances between CPUs.
1404 *
1405 * Groups are always mutliples of atom size and CPUs which are of
1406 * LOCAL_DISTANCE both ways are grouped together and share space for
1407 * units in the same group. The returned configuration is guaranteed
1408 * to have CPUs on different nodes on different groups and >=75% usage
1409 * of allocated virtual address space.
1410 *
1411 * RETURNS:
1412 * On success, pointer to the new allocation_info is returned. On
1413 * failure, ERR_PTR value is returned.
1414 */
1415static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1416 size_t reserved_size, size_t dyn_size,
1417 size_t atom_size,
1418 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1419{
1420 static int group_map[NR_CPUS] __initdata;
1421 static int group_cnt[NR_CPUS] __initdata;
1422 const size_t static_size = __per_cpu_end - __per_cpu_start;
1423 int nr_groups = 1, nr_units = 0;
1424 size_t size_sum, min_unit_size, alloc_size;
1425 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1426 int last_allocs, group, unit;
1427 unsigned int cpu, tcpu;
1428 struct pcpu_alloc_info *ai;
1429 unsigned int *cpu_map;
1430
1431 /* this function may be called multiple times */
1432 memset(group_map, 0, sizeof(group_map));
1433 memset(group_cnt, 0, sizeof(group_cnt));
1434
1435 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1436 size_sum = PFN_ALIGN(static_size + reserved_size +
1437 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1438 dyn_size = size_sum - static_size - reserved_size;
1439
1440 /*
1441 * Determine min_unit_size, alloc_size and max_upa such that
1442 * alloc_size is multiple of atom_size and is the smallest
1443 * which can accomodate 4k aligned segments which are equal to
1444 * or larger than min_unit_size.
1445 */
1446 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1447
1448 alloc_size = roundup(min_unit_size, atom_size);
1449 upa = alloc_size / min_unit_size;
1450 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1451 upa--;
1452 max_upa = upa;
1453
1454 /* group cpus according to their proximity */
1455 for_each_possible_cpu(cpu) {
1456 group = 0;
1457 next_group:
1458 for_each_possible_cpu(tcpu) {
1459 if (cpu == tcpu)
1460 break;
1461 if (group_map[tcpu] == group && cpu_distance_fn &&
1462 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1463 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1464 group++;
1465 nr_groups = max(nr_groups, group + 1);
1466 goto next_group;
1467 }
1468 }
1469 group_map[cpu] = group;
1470 group_cnt[group]++;
1471 }
1472
1473 /*
1474 * Expand unit size until address space usage goes over 75%
1475 * and then as much as possible without using more address
1476 * space.
1477 */
1478 last_allocs = INT_MAX;
1479 for (upa = max_upa; upa; upa--) {
1480 int allocs = 0, wasted = 0;
1481
1482 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1483 continue;
1484
1485 for (group = 0; group < nr_groups; group++) {
1486 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1487 allocs += this_allocs;
1488 wasted += this_allocs * upa - group_cnt[group];
1489 }
1490
1491 /*
1492 * Don't accept if wastage is over 1/3. The
1493 * greater-than comparison ensures upa==1 always
1494 * passes the following check.
1495 */
1496 if (wasted > num_possible_cpus() / 3)
1497 continue;
1498
1499 /* and then don't consume more memory */
1500 if (allocs > last_allocs)
1501 break;
1502 last_allocs = allocs;
1503 best_upa = upa;
1504 }
1505 upa = best_upa;
1506
1507 /* allocate and fill alloc_info */
1508 for (group = 0; group < nr_groups; group++)
1509 nr_units += roundup(group_cnt[group], upa);
1510
1511 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1512 if (!ai)
1513 return ERR_PTR(-ENOMEM);
1514 cpu_map = ai->groups[0].cpu_map;
1515
1516 for (group = 0; group < nr_groups; group++) {
1517 ai->groups[group].cpu_map = cpu_map;
1518 cpu_map += roundup(group_cnt[group], upa);
1519 }
1520
1521 ai->static_size = static_size;
1522 ai->reserved_size = reserved_size;
1523 ai->dyn_size = dyn_size;
1524 ai->unit_size = alloc_size / upa;
1525 ai->atom_size = atom_size;
1526 ai->alloc_size = alloc_size;
1527
1528 for (group = 0, unit = 0; group_cnt[group]; group++) {
1529 struct pcpu_group_info *gi = &ai->groups[group];
1530
1531 /*
1532 * Initialize base_offset as if all groups are located
1533 * back-to-back. The caller should update this to
1534 * reflect actual allocation.
1535 */
1536 gi->base_offset = unit * ai->unit_size;
1537
1538 for_each_possible_cpu(cpu)
1539 if (group_map[cpu] == group)
1540 gi->cpu_map[gi->nr_units++] = cpu;
1541 gi->nr_units = roundup(gi->nr_units, upa);
1542 unit += gi->nr_units;
1543 }
1544 BUG_ON(unit != nr_units);
1545
1546 return ai;
1547}
1548#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1549
1550#if defined(BUILD_EMBED_FIRST_CHUNK)
1520/** 1551/**
1521 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 1552 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1522 * @reserved_size: the size of reserved percpu area in bytes 1553 * @reserved_size: the size of reserved percpu area in bytes
@@ -1645,10 +1676,9 @@ out_free:
1645 free_bootmem(__pa(areas), areas_size); 1676 free_bootmem(__pa(areas), areas_size);
1646 return rc; 1677 return rc;
1647} 1678}
1648#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || 1679#endif /* BUILD_EMBED_FIRST_CHUNK */
1649 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1650 1680
1651#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1681#ifdef BUILD_PAGE_FIRST_CHUNK
1652/** 1682/**
1653 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1683 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1654 * @reserved_size: the size of reserved percpu area in bytes 1684 * @reserved_size: the size of reserved percpu area in bytes
@@ -1756,10 +1786,11 @@ out_free_ar:
1756 pcpu_free_alloc_info(ai); 1786 pcpu_free_alloc_info(ai);
1757 return rc; 1787 return rc;
1758} 1788}
1759#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ 1789#endif /* BUILD_PAGE_FIRST_CHUNK */
1760 1790
1791#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1761/* 1792/*
1762 * Generic percpu area setup. 1793 * Generic SMP percpu area setup.
1763 * 1794 *
1764 * The embedding helper is used because its behavior closely resembles 1795 * The embedding helper is used because its behavior closely resembles
1765 * the original non-dynamic generic percpu area setup. This is 1796 * the original non-dynamic generic percpu area setup. This is
@@ -1770,7 +1801,6 @@ out_free_ar:
1770 * on the physical linear memory mapping which uses large page 1801 * on the physical linear memory mapping which uses large page
1771 * mappings on applicable archs. 1802 * mappings on applicable archs.
1772 */ 1803 */
1773#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1774unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 1804unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1775EXPORT_SYMBOL(__per_cpu_offset); 1805EXPORT_SYMBOL(__per_cpu_offset);
1776 1806
@@ -1799,13 +1829,48 @@ void __init setup_per_cpu_areas(void)
1799 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 1829 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1800 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 1830 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1801 if (rc < 0) 1831 if (rc < 0)
1802 panic("Failed to initialized percpu areas."); 1832 panic("Failed to initialize percpu areas.");
1803 1833
1804 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1834 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1805 for_each_possible_cpu(cpu) 1835 for_each_possible_cpu(cpu)
1806 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 1836 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1807} 1837}
1808#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1838#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1839
1840#else /* CONFIG_SMP */
1841
1842/*
1843 * UP percpu area setup.
1844 *
1845 * UP always uses km-based percpu allocator with identity mapping.
1846 * Static percpu variables are indistinguishable from the usual static
1847 * variables and don't require any special preparation.
1848 */
1849void __init setup_per_cpu_areas(void)
1850{
1851 const size_t unit_size =
1852 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1853 PERCPU_DYNAMIC_RESERVE));
1854 struct pcpu_alloc_info *ai;
1855 void *fc;
1856
1857 ai = pcpu_alloc_alloc_info(1, 1);
1858 fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1859 if (!ai || !fc)
1860 panic("Failed to allocate memory for percpu areas.");
1861
1862 ai->dyn_size = unit_size;
1863 ai->unit_size = unit_size;
1864 ai->atom_size = unit_size;
1865 ai->alloc_size = unit_size;
1866 ai->groups[0].nr_units = 1;
1867 ai->groups[0].cpu_map[0] = 0;
1868
1869 if (pcpu_setup_first_chunk(ai, fc) < 0)
1870 panic("Failed to initialize percpu areas.");
1871}
1872
1873#endif /* CONFIG_SMP */
1809 1874
1810/* 1875/*
1811 * First and reserved chunks are initialized with temporary allocation 1876 * First and reserved chunks are initialized with temporary allocation
diff --git a/mm/percpu_up.c b/mm/percpu_up.c
deleted file mode 100644
index db884fae5721..000000000000
--- a/mm/percpu_up.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * mm/percpu_up.c - dummy percpu memory allocator implementation for UP
3 */
4
5#include <linux/module.h>
6#include <linux/percpu.h>
7#include <linux/slab.h>
8
9void __percpu *__alloc_percpu(size_t size, size_t align)
10{
11 /*
12 * Can't easily make larger alignment work with kmalloc. WARN
13 * on it. Larger alignment should only be used for module
14 * percpu sections on SMP for which this path isn't used.
15 */
16 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
17 return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
18}
19EXPORT_SYMBOL_GPL(__alloc_percpu);
20
21void free_percpu(void __percpu *p)
22{
23 kfree(this_cpu_ptr(p));
24}
25EXPORT_SYMBOL_GPL(free_percpu);
26
27phys_addr_t per_cpu_ptr_to_phys(void *addr)
28{
29 return __pa(addr);
30}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7c703ff2f36f..9fc7bac7db0c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -139,7 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
139 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 139 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
140 if (nr_blocks) { 140 if (nr_blocks) {
141 err = blkdev_issue_discard(si->bdev, start_block, 141 err = blkdev_issue_discard(si->bdev, start_block,
142 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); 142 nr_blocks, GFP_KERNEL, 0);
143 if (err) 143 if (err)
144 return err; 144 return err;
145 cond_resched(); 145 cond_resched();
@@ -150,7 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
150 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 150 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
151 151
152 err = blkdev_issue_discard(si->bdev, start_block, 152 err = blkdev_issue_discard(si->bdev, start_block,
153 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); 153 nr_blocks, GFP_KERNEL, 0);
154 if (err) 154 if (err)
155 break; 155 break;
156 156
@@ -189,7 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
189 start_block <<= PAGE_SHIFT - 9; 189 start_block <<= PAGE_SHIFT - 9;
190 nr_blocks <<= PAGE_SHIFT - 9; 190 nr_blocks <<= PAGE_SHIFT - 9;
191 if (blkdev_issue_discard(si->bdev, start_block, 191 if (blkdev_issue_discard(si->bdev, start_block,
192 nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) 192 nr_blocks, GFP_NOIO, 0))
193 break; 193 break;
194 } 194 }
195 195
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d8087f0db507..9f909622a25e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2065,6 +2065,7 @@ void free_vm_area(struct vm_struct *area)
2065} 2065}
2066EXPORT_SYMBOL_GPL(free_vm_area); 2066EXPORT_SYMBOL_GPL(free_vm_area);
2067 2067
2068#ifdef CONFIG_SMP
2068static struct vmap_area *node_to_va(struct rb_node *n) 2069static struct vmap_area *node_to_va(struct rb_node *n)
2069{ 2070{
2070 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; 2071 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
@@ -2345,6 +2346,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2345 free_vm_area(vms[i]); 2346 free_vm_area(vms[i]);
2346 kfree(vms); 2347 kfree(vms);
2347} 2348}
2349#endif /* CONFIG_SMP */
2348 2350
2349#ifdef CONFIG_PROC_FS 2351#ifdef CONFIG_PROC_FS
2350static void *s_start(struct seq_file *m, loff_t *pos) 2352static void *s_start(struct seq_file *m, loff_t *pos)
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index befc3a52aa04..84c2a4d013c6 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -844,10 +844,6 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned
844 BT_DBG("TIOCMIWAIT"); 844 BT_DBG("TIOCMIWAIT");
845 break; 845 break;
846 846
847 case TIOCGICOUNT:
848 BT_DBG("TIOCGICOUNT");
849 break;
850
851 case TIOCGSERIAL: 847 case TIOCGSERIAL:
852 BT_ERR("TIOCGSERIAL is not supported"); 848 BT_ERR("TIOCGSERIAL is not supported");
853 return -ENOIOCTLCMD; 849 return -ENOIOCTLCMD;
diff --git a/sound/core/init.c b/sound/core/init.c
index ec4a50ce5656..f7c3df8b521b 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -395,12 +395,10 @@ int snd_card_disconnect(struct snd_card *card)
395 snd_printk(KERN_ERR "not all devices for card %i can be disconnected\n", card->number); 395 snd_printk(KERN_ERR "not all devices for card %i can be disconnected\n", card->number);
396 396
397 snd_info_card_disconnect(card); 397 snd_info_card_disconnect(card);
398#ifndef CONFIG_SYSFS_DEPRECATED
399 if (card->card_dev) { 398 if (card->card_dev) {
400 device_unregister(card->card_dev); 399 device_unregister(card->card_dev);
401 card->card_dev = NULL; 400 card->card_dev = NULL;
402 } 401 }
403#endif
404#ifdef CONFIG_PM 402#ifdef CONFIG_PM
405 wake_up(&card->power_sleep); 403 wake_up(&card->power_sleep);
406#endif 404#endif
@@ -573,7 +571,6 @@ void snd_card_set_id(struct snd_card *card, const char *nid)
573} 571}
574EXPORT_SYMBOL(snd_card_set_id); 572EXPORT_SYMBOL(snd_card_set_id);
575 573
576#ifndef CONFIG_SYSFS_DEPRECATED
577static ssize_t 574static ssize_t
578card_id_show_attr(struct device *dev, 575card_id_show_attr(struct device *dev,
579 struct device_attribute *attr, char *buf) 576 struct device_attribute *attr, char *buf)
@@ -630,7 +627,6 @@ card_number_show_attr(struct device *dev,
630 627
631static struct device_attribute card_number_attrs = 628static struct device_attribute card_number_attrs =
632 __ATTR(number, S_IRUGO, card_number_show_attr, NULL); 629 __ATTR(number, S_IRUGO, card_number_show_attr, NULL);
633#endif /* CONFIG_SYSFS_DEPRECATED */
634 630
635/** 631/**
636 * snd_card_register - register the soundcard 632 * snd_card_register - register the soundcard
@@ -649,7 +645,7 @@ int snd_card_register(struct snd_card *card)
649 645
650 if (snd_BUG_ON(!card)) 646 if (snd_BUG_ON(!card))
651 return -EINVAL; 647 return -EINVAL;
652#ifndef CONFIG_SYSFS_DEPRECATED 648
653 if (!card->card_dev) { 649 if (!card->card_dev) {
654 card->card_dev = device_create(sound_class, card->dev, 650 card->card_dev = device_create(sound_class, card->dev,
655 MKDEV(0, 0), card, 651 MKDEV(0, 0), card,
@@ -657,7 +653,7 @@ int snd_card_register(struct snd_card *card)
657 if (IS_ERR(card->card_dev)) 653 if (IS_ERR(card->card_dev))
658 card->card_dev = NULL; 654 card->card_dev = NULL;
659 } 655 }
660#endif 656
661 if ((err = snd_device_register_all(card)) < 0) 657 if ((err = snd_device_register_all(card)) < 0)
662 return err; 658 return err;
663 mutex_lock(&snd_card_mutex); 659 mutex_lock(&snd_card_mutex);
@@ -674,7 +670,6 @@ int snd_card_register(struct snd_card *card)
674 if (snd_mixer_oss_notify_callback) 670 if (snd_mixer_oss_notify_callback)
675 snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_REGISTER); 671 snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_REGISTER);
676#endif 672#endif
677#ifndef CONFIG_SYSFS_DEPRECATED
678 if (card->card_dev) { 673 if (card->card_dev) {
679 err = device_create_file(card->card_dev, &card_id_attrs); 674 err = device_create_file(card->card_dev, &card_id_attrs);
680 if (err < 0) 675 if (err < 0)
@@ -683,7 +678,7 @@ int snd_card_register(struct snd_card *card)
683 if (err < 0) 678 if (err < 0)
684 return err; 679 return err;
685 } 680 }
686#endif 681
687 return 0; 682 return 0;
688} 683}
689 684