author	Ingo Molnar <mingo@elte.hu>	2008-08-15 07:57:32 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-15 07:57:32 -0400
commit	975439fe73d1f0f7ce8c235c66783bd34dc459c3
tree	84e29852d96283b13c6e603f86bd506a631343c5
parent	ef31023743e66de7184e9aad432291c842a6384b
parent	129d6aba444d1e99d4cbfb9866a4652912426b65
Merge branch 'x86/amd-iommu' into x86/urgent
-rw-r--r--  Documentation/devices.txt | 3
-rw-r--r--  Documentation/ioctl-number.txt | 1
-rw-r--r--  Documentation/usb/auerswald.txt | 30
-rw-r--r--  Documentation/usb/power-management.txt | 7
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  arch/arm/mach-omap2/usb-tusb6010.c | 1
-rw-r--r--  arch/h8300/include/asm/Kbuild (renamed from include/asm-h8300/Kbuild) | 0
-rw-r--r--  arch/h8300/include/asm/a.out.h (renamed from include/asm-h8300/a.out.h) | 0
-rw-r--r--  arch/h8300/include/asm/atomic.h (renamed from include/asm-h8300/atomic.h) | 0
-rw-r--r--  arch/h8300/include/asm/auxvec.h (renamed from include/asm-h8300/auxvec.h) | 0
-rw-r--r--  arch/h8300/include/asm/bitops.h (renamed from include/asm-h8300/bitops.h) | 0
-rw-r--r--  arch/h8300/include/asm/bootinfo.h (renamed from include/asm-h8300/bootinfo.h) | 0
-rw-r--r--  arch/h8300/include/asm/bug.h (renamed from include/asm-h8300/bug.h) | 0
-rw-r--r--  arch/h8300/include/asm/bugs.h (renamed from include/asm-h8300/bugs.h) | 0
-rw-r--r--  arch/h8300/include/asm/byteorder.h (renamed from include/asm-h8300/byteorder.h) | 0
-rw-r--r--  arch/h8300/include/asm/cache.h (renamed from include/asm-h8300/cache.h) | 0
-rw-r--r--  arch/h8300/include/asm/cachectl.h (renamed from include/asm-h8300/cachectl.h) | 0
-rw-r--r--  arch/h8300/include/asm/cacheflush.h (renamed from include/asm-h8300/cacheflush.h) | 0
-rw-r--r--  arch/h8300/include/asm/checksum.h (renamed from include/asm-h8300/checksum.h) | 0
-rw-r--r--  arch/h8300/include/asm/cputime.h (renamed from include/asm-h8300/cputime.h) | 0
-rw-r--r--  arch/h8300/include/asm/current.h (renamed from include/asm-h8300/current.h) | 0
-rw-r--r--  arch/h8300/include/asm/dbg.h (renamed from include/asm-h8300/dbg.h) | 0
-rw-r--r--  arch/h8300/include/asm/delay.h (renamed from include/asm-h8300/delay.h) | 0
-rw-r--r--  arch/h8300/include/asm/device.h (renamed from include/asm-h8300/device.h) | 0
-rw-r--r--  arch/h8300/include/asm/div64.h (renamed from include/asm-h8300/div64.h) | 0
-rw-r--r--  arch/h8300/include/asm/dma.h (renamed from include/asm-h8300/dma.h) | 0
-rw-r--r--  arch/h8300/include/asm/elf.h (renamed from include/asm-h8300/elf.h) | 0
-rw-r--r--  arch/h8300/include/asm/emergency-restart.h (renamed from include/asm-h8300/emergency-restart.h) | 0
-rw-r--r--  arch/h8300/include/asm/errno.h (renamed from include/asm-h8300/errno.h) | 0
-rw-r--r--  arch/h8300/include/asm/fb.h (renamed from include/asm-h8300/fb.h) | 0
-rw-r--r--  arch/h8300/include/asm/fcntl.h (renamed from include/asm-h8300/fcntl.h) | 0
-rw-r--r--  arch/h8300/include/asm/flat.h (renamed from include/asm-h8300/flat.h) | 0
-rw-r--r--  arch/h8300/include/asm/fpu.h (renamed from include/asm-h8300/fpu.h) | 0
-rw-r--r--  arch/h8300/include/asm/futex.h (renamed from include/asm-h8300/futex.h) | 0
-rw-r--r--  arch/h8300/include/asm/gpio.h (renamed from include/asm-h8300/gpio.h) | 0
-rw-r--r--  arch/h8300/include/asm/hardirq.h (renamed from include/asm-h8300/hardirq.h) | 0
-rw-r--r--  arch/h8300/include/asm/hw_irq.h (renamed from include/asm-h8300/hw_irq.h) | 0
-rw-r--r--  arch/h8300/include/asm/io.h (renamed from include/asm-h8300/io.h) | 0
-rw-r--r--  arch/h8300/include/asm/ioctl.h (renamed from include/asm-h8300/ioctl.h) | 0
-rw-r--r--  arch/h8300/include/asm/ioctls.h (renamed from include/asm-h8300/ioctls.h) | 0
-rw-r--r--  arch/h8300/include/asm/ipcbuf.h (renamed from include/asm-h8300/ipcbuf.h) | 0
-rw-r--r--  arch/h8300/include/asm/irq.h (renamed from include/asm-h8300/irq.h) | 0
-rw-r--r--  arch/h8300/include/asm/irq_regs.h (renamed from include/asm-h8300/irq_regs.h) | 0
-rw-r--r--  arch/h8300/include/asm/kdebug.h (renamed from include/asm-h8300/kdebug.h) | 0
-rw-r--r--  arch/h8300/include/asm/kmap_types.h (renamed from include/asm-h8300/kmap_types.h) | 0
-rw-r--r--  arch/h8300/include/asm/linkage.h (renamed from include/asm-h8300/linkage.h) | 0
-rw-r--r--  arch/h8300/include/asm/local.h (renamed from include/asm-h8300/local.h) | 0
-rw-r--r--  arch/h8300/include/asm/mc146818rtc.h (renamed from include/asm-h8300/mc146818rtc.h) | 0
-rw-r--r--  arch/h8300/include/asm/md.h (renamed from include/asm-h8300/md.h) | 0
-rw-r--r--  arch/h8300/include/asm/mman.h (renamed from include/asm-h8300/mman.h) | 0
-rw-r--r--  arch/h8300/include/asm/mmu.h (renamed from include/asm-h8300/mmu.h) | 0
-rw-r--r--  arch/h8300/include/asm/mmu_context.h (renamed from include/asm-h8300/mmu_context.h) | 0
-rw-r--r--  arch/h8300/include/asm/module.h (renamed from include/asm-h8300/module.h) | 0
-rw-r--r--  arch/h8300/include/asm/msgbuf.h (renamed from include/asm-h8300/msgbuf.h) | 0
-rw-r--r--  arch/h8300/include/asm/mutex.h (renamed from include/asm-h8300/mutex.h) | 0
-rw-r--r--  arch/h8300/include/asm/page.h (renamed from include/asm-h8300/page.h) | 0
-rw-r--r--  arch/h8300/include/asm/page_offset.h (renamed from include/asm-h8300/page_offset.h) | 0
-rw-r--r--  arch/h8300/include/asm/param.h (renamed from include/asm-h8300/param.h) | 0
-rw-r--r--  arch/h8300/include/asm/pci.h (renamed from include/asm-h8300/pci.h) | 0
-rw-r--r--  arch/h8300/include/asm/percpu.h (renamed from include/asm-h8300/percpu.h) | 0
-rw-r--r--  arch/h8300/include/asm/pgalloc.h (renamed from include/asm-h8300/pgalloc.h) | 0
-rw-r--r--  arch/h8300/include/asm/pgtable.h (renamed from include/asm-h8300/pgtable.h) | 0
-rw-r--r--  arch/h8300/include/asm/poll.h (renamed from include/asm-h8300/poll.h) | 0
-rw-r--r--  arch/h8300/include/asm/posix_types.h (renamed from include/asm-h8300/posix_types.h) | 0
-rw-r--r--  arch/h8300/include/asm/processor.h (renamed from include/asm-h8300/processor.h) | 0
-rw-r--r--  arch/h8300/include/asm/ptrace.h (renamed from include/asm-h8300/ptrace.h) | 0
-rw-r--r--  arch/h8300/include/asm/regs267x.h (renamed from include/asm-h8300/regs267x.h) | 0
-rw-r--r--  arch/h8300/include/asm/regs306x.h (renamed from include/asm-h8300/regs306x.h) | 0
-rw-r--r--  arch/h8300/include/asm/resource.h (renamed from include/asm-h8300/resource.h) | 0
-rw-r--r--  arch/h8300/include/asm/scatterlist.h (renamed from include/asm-h8300/scatterlist.h) | 0
-rw-r--r--  arch/h8300/include/asm/sections.h (renamed from include/asm-h8300/sections.h) | 0
-rw-r--r--  arch/h8300/include/asm/segment.h (renamed from include/asm-h8300/segment.h) | 0
-rw-r--r--  arch/h8300/include/asm/sembuf.h (renamed from include/asm-h8300/sembuf.h) | 0
-rw-r--r--  arch/h8300/include/asm/setup.h (renamed from include/asm-h8300/setup.h) | 0
-rw-r--r--  arch/h8300/include/asm/sh_bios.h (renamed from include/asm-h8300/sh_bios.h) | 0
-rw-r--r--  arch/h8300/include/asm/shm.h (renamed from include/asm-h8300/shm.h) | 0
-rw-r--r--  arch/h8300/include/asm/shmbuf.h (renamed from include/asm-h8300/shmbuf.h) | 0
-rw-r--r--  arch/h8300/include/asm/shmparam.h (renamed from include/asm-h8300/shmparam.h) | 0
-rw-r--r--  arch/h8300/include/asm/sigcontext.h (renamed from include/asm-h8300/sigcontext.h) | 0
-rw-r--r--  arch/h8300/include/asm/siginfo.h (renamed from include/asm-h8300/siginfo.h) | 0
-rw-r--r--  arch/h8300/include/asm/signal.h (renamed from include/asm-h8300/signal.h) | 0
-rw-r--r--  arch/h8300/include/asm/smp.h (renamed from include/asm-h8300/smp.h) | 0
-rw-r--r--  arch/h8300/include/asm/socket.h (renamed from include/asm-h8300/socket.h) | 0
-rw-r--r--  arch/h8300/include/asm/sockios.h (renamed from include/asm-h8300/sockios.h) | 0
-rw-r--r--  arch/h8300/include/asm/spinlock.h (renamed from include/asm-h8300/spinlock.h) | 0
-rw-r--r--  arch/h8300/include/asm/stat.h (renamed from include/asm-h8300/stat.h) | 0
-rw-r--r--  arch/h8300/include/asm/statfs.h (renamed from include/asm-h8300/statfs.h) | 0
-rw-r--r--  arch/h8300/include/asm/string.h (renamed from include/asm-h8300/string.h) | 0
-rw-r--r--  arch/h8300/include/asm/system.h (renamed from include/asm-h8300/system.h) | 0
-rw-r--r--  arch/h8300/include/asm/target_time.h (renamed from include/asm-h8300/target_time.h) | 0
-rw-r--r--  arch/h8300/include/asm/termbits.h (renamed from include/asm-h8300/termbits.h) | 0
-rw-r--r--  arch/h8300/include/asm/termios.h (renamed from include/asm-h8300/termios.h) | 0
-rw-r--r--  arch/h8300/include/asm/thread_info.h (renamed from include/asm-h8300/thread_info.h) | 0
-rw-r--r--  arch/h8300/include/asm/timex.h (renamed from include/asm-h8300/timex.h) | 0
-rw-r--r--  arch/h8300/include/asm/tlb.h (renamed from include/asm-h8300/tlb.h) | 0
-rw-r--r--  arch/h8300/include/asm/tlbflush.h (renamed from include/asm-h8300/tlbflush.h) | 0
-rw-r--r--  arch/h8300/include/asm/topology.h (renamed from include/asm-h8300/topology.h) | 0
-rw-r--r--  arch/h8300/include/asm/traps.h (renamed from include/asm-h8300/traps.h) | 0
-rw-r--r--  arch/h8300/include/asm/types.h (renamed from include/asm-h8300/types.h) | 0
-rw-r--r--  arch/h8300/include/asm/uaccess.h (renamed from include/asm-h8300/uaccess.h) | 0
-rw-r--r--  arch/h8300/include/asm/ucontext.h (renamed from include/asm-h8300/ucontext.h) | 0
-rw-r--r--  arch/h8300/include/asm/unaligned.h (renamed from include/asm-h8300/unaligned.h) | 0
-rw-r--r--  arch/h8300/include/asm/unistd.h (renamed from include/asm-h8300/unistd.h) | 0
-rw-r--r--  arch/h8300/include/asm/user.h (renamed from include/asm-h8300/user.h) | 0
-rw-r--r--  arch/h8300/include/asm/virtconvert.h (renamed from include/asm-h8300/virtconvert.h) | 0
-rw-r--r--  arch/sparc/include/asm/irq_64.h | 4
-rw-r--r--  arch/sparc/include/asm/of_device.h | 3
-rw-r--r--  arch/sparc64/kernel/irq.c | 52
-rw-r--r--  arch/sparc64/kernel/kstack.h | 60
-rw-r--r--  arch/sparc64/kernel/process.c | 27
-rw-r--r--  arch/sparc64/kernel/smp.c | 4
-rw-r--r--  arch/sparc64/kernel/stacktrace.c | 13
-rw-r--r--  arch/sparc64/kernel/traps.c | 7
-rw-r--r--  arch/sparc64/lib/mcount.S | 39
-rw-r--r--  arch/sparc64/mm/init.c | 11
-rw-r--r--  arch/sparc64/mm/ultra.S | 2
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 19
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 24
-rw-r--r--  crypto/digest.c | 2
-rw-r--r--  crypto/tcrypt.c | 28
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/char/hw_random/via-rng.c | 8
-rw-r--r--  drivers/crypto/padlock-aes.c | 28
-rw-r--r--  drivers/crypto/padlock-sha.c | 9
-rw-r--r--  drivers/crypto/talitos.c | 54
-rw-r--r--  drivers/i2c/chips/isp1301_omap.c | 2
-rw-r--r--  drivers/input/serio/i8042-sparcio.h | 3
-rw-r--r--  drivers/net/bnx2x.h | 87
-rw-r--r--  drivers/net/bnx2x_fw_defs.h | 160
-rw-r--r--  drivers/net/bnx2x_hsi.h | 16
-rw-r--r--  drivers/net/bnx2x_init.h | 26
-rw-r--r--  drivers/net/bnx2x_init_values.h | 533
-rw-r--r--  drivers/net/bnx2x_link.c | 1258
-rw-r--r--  drivers/net/bnx2x_link.h | 11
-rw-r--r--  drivers/net/bnx2x_main.c | 1212
-rw-r--r--  drivers/net/bnx2x_reg.h | 210
-rw-r--r--  drivers/sbus/sbus.c | 2
-rw-r--r--  drivers/serial/sunhv.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 2
-rw-r--r--  drivers/serial/sunsu.c | 2
-rw-r--r--  drivers/serial/sunzilog.c | 2
-rw-r--r--  drivers/usb/Kconfig | 6
-rw-r--r--  drivers/usb/atm/cxacru.c | 2
-rw-r--r--  drivers/usb/class/cdc-acm.c | 86
-rw-r--r--  drivers/usb/class/cdc-acm.h | 3
-rw-r--r--  drivers/usb/core/driver.c | 5
-rw-r--r--  drivers/usb/core/message.c | 2
-rw-r--r--  drivers/usb/gadget/Kconfig | 10
-rw-r--r--  drivers/usb/gadget/dummy_hcd.c | 5
-rw-r--r--  drivers/usb/gadget/f_acm.c | 196
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 2
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 2
-rw-r--r--  drivers/usb/gadget/f_serial.c | 2
-rw-r--r--  drivers/usb/gadget/f_subset.c | 2
-rw-r--r--  drivers/usb/gadget/gadget_chips.h | 6
-rw-r--r--  drivers/usb/gadget/omap_udc.c | 5
-rw-r--r--  drivers/usb/gadget/u_serial.c | 290
-rw-r--r--  drivers/usb/gadget/u_serial.h | 12
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 53
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 5
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 23
-rw-r--r--  drivers/usb/host/ohci-hub.c | 11
-rw-r--r--  drivers/usb/host/ohci-omap.c | 3
-rw-r--r--  drivers/usb/host/ohci-pci.c | 132
-rw-r--r--  drivers/usb/host/ohci-q.c | 6
-rw-r--r--  drivers/usb/host/ohci.h | 11
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 49
-rw-r--r--  drivers/usb/misc/Kconfig | 10
-rw-r--r--  drivers/usb/misc/Makefile | 1
-rw-r--r--  drivers/usb/misc/auerswald.c | 2152
-rw-r--r--  drivers/usb/musb/Kconfig | 176
-rw-r--r--  drivers/usb/musb/Makefile | 86
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 1540
-rw-r--r--  drivers/usb/musb/cppi_dma.h | 133
-rw-r--r--  drivers/usb/musb/davinci.c | 462
-rw-r--r--  drivers/usb/musb/davinci.h | 100
-rw-r--r--  drivers/usb/musb/musb_core.c | 2261
-rw-r--r--  drivers/usb/musb/musb_core.h | 507
-rw-r--r--  drivers/usb/musb/musb_debug.h | 66
-rw-r--r--  drivers/usb/musb/musb_dma.h | 172
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 2031
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 108
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 981
-rw-r--r--  drivers/usb/musb/musb_host.c | 2170
-rw-r--r--  drivers/usb/musb/musb_host.h | 110
-rw-r--r--  drivers/usb/musb/musb_io.h | 115
-rw-r--r--  drivers/usb/musb/musb_procfs.c | 830
-rw-r--r--  drivers/usb/musb/musb_regs.h | 300
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 425
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 433
-rw-r--r--  drivers/usb/musb/omap2430.c | 324
-rw-r--r--  drivers/usb/musb/omap2430.h | 56
-rw-r--r--  drivers/usb/musb/tusb6010.c | 1151
-rw-r--r--  drivers/usb/musb/tusb6010.h | 233
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 719
-rw-r--r--  drivers/usb/serial/Kconfig | 7
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 7
-rw-r--r--  drivers/usb/serial/option.c | 44
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/serial/sierra.c | 170
-rw-r--r--  drivers/usb/serial/usb-serial.c | 7
-rw-r--r--  drivers/usb/storage/Kconfig | 12
-rw-r--r--  drivers/usb/storage/Makefile | 1
-rw-r--r--  drivers/usb/storage/sierra_ms.c | 207
-rw-r--r--  drivers/usb/storage/sierra_ms.h | 4
-rw-r--r--  drivers/usb/storage/transport.c | 17
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 40
-rw-r--r--  drivers/usb/storage/usb.c | 3
-rw-r--r--  fs/dlm/config.c | 203
-rw-r--r--  fs/dlm/user.c | 10
-rw-r--r--  fs/xfs/linux-2.6/sema.h | 52
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 16
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 192
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.h | 15
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 189
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c | 22
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 65
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 38
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 29
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 8
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 14
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 7
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 4
-rw-r--r--  fs/xfs/xfs_acl.c | 52
-rw-r--r--  fs/xfs/xfs_acl.h | 14
-rw-r--r--  fs/xfs/xfs_arch.h | 68
-rw-r--r--  fs/xfs/xfs_attr.c | 110
-rw-r--r--  fs/xfs/xfs_attr.h | 1
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 75
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 2
-rw-r--r--  fs/xfs/xfs_bit.c | 103
-rw-r--r--  fs/xfs/xfs_bit.h | 34
-rw-r--r--  fs/xfs/xfs_bmap.c | 34
-rw-r--r--  fs/xfs/xfs_btree.c | 105
-rw-r--r--  fs/xfs/xfs_btree.h | 8
-rw-r--r--  fs/xfs/xfs_buf_item.c | 4
-rw-r--r--  fs/xfs/xfs_dfrag.c | 33
-rw-r--r--  fs/xfs/xfs_error.c | 5
-rw-r--r--  fs/xfs/xfs_error.h | 12
-rw-r--r--  fs/xfs/xfs_filestream.c | 2
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 30
-rw-r--r--  fs/xfs/xfs_iget.c | 48
-rw-r--r--  fs/xfs/xfs_inode.c | 70
-rw-r--r--  fs/xfs/xfs_inode.h | 46
-rw-r--r--  fs/xfs/xfs_inode_item.c | 11
-rw-r--r--  fs/xfs/xfs_itable.c | 4
-rw-r--r--  fs/xfs/xfs_log.c | 86
-rw-r--r--  fs/xfs/xfs_log.h | 2
-rw-r--r--  fs/xfs/xfs_log_priv.h | 14
-rw-r--r--  fs/xfs/xfs_log_recover.c | 7
-rw-r--r--  fs/xfs/xfs_mount.c | 82
-rw-r--r--  fs/xfs/xfs_mount.h | 17
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 19
-rw-r--r--  fs/xfs/xfs_rw.c | 2
-rw-r--r--  fs/xfs/xfs_trans.c | 75
-rw-r--r--  fs/xfs/xfs_trans.h | 12
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 12
-rw-r--r--  fs/xfs/xfs_trans_item.c | 66
-rw-r--r--  fs/xfs/xfs_utils.c | 4
-rw-r--r--  fs/xfs/xfs_utils.h | 3
-rw-r--r--  fs/xfs/xfs_vfsops.c | 13
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 198
-rw-r--r--  include/asm-x86/amd_iommu_types.h | 8
-rw-r--r--  include/asm-x86/i387.h | 32
-rw-r--r--  include/crypto/hash.h | 18
-rw-r--r--  include/linux/completion.h | 45
-rw-r--r--  include/linux/cred.h | 50
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/skbuff.h | 6
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/linux/usb/musb.h | 98
-rw-r--r--  include/linux/usb/serial.h | 3
-rw-r--r--  include/net/ip6_route.h | 6
-rw-r--r--  include/net/ip_vs.h | 32
-rw-r--r--  include/net/pkt_sched.h | 5
-rw-r--r--  net/core/gen_estimator.c | 9
-rw-r--r--  net/core/pktgen.c | 29
-rw-r--r--  net/dccp/proto.c | 5
-rw-r--r--  net/ipv4/igmp.c | 71
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 27
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 116
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wrr.c | 2
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/route.c | 3
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/rxrpc/ar-accept.c | 2
-rw-r--r--  net/sched/act_api.c | 13
-rw-r--r--  net/sched/sch_api.c | 50
-rw-r--r--  net/sched/sch_generic.c | 11
-rw-r--r--  net/sched/sch_htb.c | 3
-rw-r--r--  net/tipc/subscr.c | 2
-rw-r--r--  net/wireless/wext.c | 1
-rw-r--r--  net/xfrm/xfrm_output.c | 5
318 files changed, 20418 insertions, 5763 deletions
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index e6244cde26e9..05c80645e4ee 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2560,9 +2560,6 @@ Your cooperation is appreciated.
 		 96 = /dev/usb/hiddev0	1st USB HID device
 		    ...
 		111 = /dev/usb/hiddev15	16th USB HID device
-		112 = /dev/usb/auer0	1st auerswald ISDN device
-		    ...
-		127 = /dev/usb/auer15	16th auerswald ISDN device
 		128 = /dev/usb/brlvgr0	First Braille Voyager device
 		    ...
 		131 = /dev/usb/brlvgr3	Fourth Braille Voyager device
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 3bb5f466a90d..1c6b545635a2 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -105,7 +105,6 @@ Code Seq# Include File Comments
 'T'	all	linux/soundcard.h	conflict!
 'T'	all	asm-i386/ioctls.h	conflict!
 'U'	00-EF	linux/drivers/usb/usb.h
-'U'	F0-FF	drivers/usb/auerswald.c
 'V'	all	linux/vt.h
 'W'	00-1F	linux/watchdog.h	conflict!
 'W'	00-1F	linux/wanrouter.h	conflict!
diff --git a/Documentation/usb/auerswald.txt b/Documentation/usb/auerswald.txt
deleted file mode 100644
index 7ee4d8f69116..000000000000
--- a/Documentation/usb/auerswald.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-		Auerswald USB kernel driver
-		===========================
-
-What is it? What can I do with it?
-==================================
-The auerswald USB kernel driver connects your linux 2.4.x
-system to the auerswald usb-enabled devices.
-
-There are two types of auerswald usb devices:
-a) small PBX systems (ISDN)
-b) COMfort system telephones (ISDN)
-
-The driver installation creates the devices
-/dev/usb/auer0..15. These devices carry a vendor-
-specific protocol. You may run all auerswald java
-software on it. The java software needs a native
-library "libAuerUsbJNINative.so" installed on
-your system. This library is available from
-auerswald and shipped as part of the java software.
-
-You may create the devices with:
-    mknod -m 666 /dev/usb/auer0 c 180 112
-    ...
-    mknod -m 666 /dev/usb/auer15 c 180 127
-
-Future plans
-============
-- Connection to ISDN4LINUX (the hisax interface)
-
-The maintainer of this driver is wolfgang@iksw-muees.de
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b2fc4d4a9917..9d31140e3f5b 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -436,7 +436,12 @@ post_reset; the USB core guarantees that this is true of internal
 suspend/resume events as well.
 
 If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex.
+critical section, it can simply acquire udev->pm_mutex. Note that
+calls to resume may be triggered indirectly. Block IO due to memory
+allocations can make the vm subsystem resume a device. Thus while
+holding this lock you must not allocate memory with GFP_KERNEL or
+GFP_NOFS.
+
 Alternatively, if the critical section might call some of the
 usb_autopm_* routines, the driver can avoid deadlock by doing:
 
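[Editor's note: a minimal sketch of the rule documented above, not part of this patch. The function name and buffer use are hypothetical; the point is that allocations made while holding udev->pm_mutex should use GFP_NOIO, which cannot start the block I/O that might otherwise resume a device and deadlock on the same mutex.]

	#include <linux/usb.h>
	#include <linux/slab.h>

	/* Hypothetical illustration: a critical section under pm_mutex. */
	static int my_driver_critical_section(struct usb_device *udev)
	{
		char *buf;

		mutex_lock(&udev->pm_mutex);	/* blocks all suspend/resume calls */
		buf = kmalloc(64, GFP_NOIO);	/* not GFP_KERNEL, not GFP_NOFS */
		if (!buf) {
			mutex_unlock(&udev->pm_mutex);
			return -ENOMEM;
		}
		/* ... work that must not race with suspend/resume ... */
		kfree(buf);
		mutex_unlock(&udev->pm_mutex);
		return 0;
	}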
diff --git a/MAINTAINERS b/MAINTAINERS
index af6aa4e4b392..4c5e9fe0f7db 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2928,6 +2928,12 @@ M: jirislaby@gmail.com
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
+MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
+P:	Felipe Balbi
+M:	felipe.balbi@nokia.com
+L:	linux-usb@vger.kernel.org
+S:	Maintained
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 P:	Andrew Gallatin
 M:	gallatin@myri.com
@@ -3076,6 +3082,7 @@ M: horms@verge.net.au
 P:	Julian Anastasov
 M:	ja@ssi.bg
 L:	netdev@vger.kernel.org
+L:	lvs-devel@vger.kernel.org
 S:	Maintained
 
 NFS, SUNRPC, AND LOCKD CLIENTS
@@ -4195,12 +4202,6 @@ M: oliver@neukum.name
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 
-USB AUERSWALD DRIVER
-P:	Wolfgang Muees
-M:	wolfgang@iksw-muees.de
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-
 USB BLOCK DRIVER (UB ub)
 P:	Pete Zaitcev
 M:	zaitcev@redhat.com
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 1607c941d95f..10ef464d6be7 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -317,7 +317,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
 		printk(error, 6, status);
 		return -ENODEV;
 	}
-	data->multipoint = 1;
 	tusb_device.dev.platform_data = data;
 
 	/* REVISIT let the driver know what DMA channels work */
diff --git a/include/asm-h8300/Kbuild b/arch/h8300/include/asm/Kbuild
index c68e1680da01..c68e1680da01 100644
--- a/include/asm-h8300/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
diff --git a/include/asm-h8300/a.out.h b/arch/h8300/include/asm/a.out.h
index ded780f0a492..ded780f0a492 100644
--- a/include/asm-h8300/a.out.h
+++ b/arch/h8300/include/asm/a.out.h
diff --git a/include/asm-h8300/atomic.h b/arch/h8300/include/asm/atomic.h
index b4cf0ea97ede..b4cf0ea97ede 100644
--- a/include/asm-h8300/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
diff --git a/include/asm-h8300/auxvec.h b/arch/h8300/include/asm/auxvec.h
index 1d36fe38b088..1d36fe38b088 100644
--- a/include/asm-h8300/auxvec.h
+++ b/arch/h8300/include/asm/auxvec.h
diff --git a/include/asm-h8300/bitops.h b/arch/h8300/include/asm/bitops.h
index cb18e3b0aa94..cb18e3b0aa94 100644
--- a/include/asm-h8300/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
diff --git a/include/asm-h8300/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
index 5bed7e7aac0a..5bed7e7aac0a 100644
--- a/include/asm-h8300/bootinfo.h
+++ b/arch/h8300/include/asm/bootinfo.h
diff --git a/include/asm-h8300/bug.h b/arch/h8300/include/asm/bug.h
index edddf5b086e5..edddf5b086e5 100644
--- a/include/asm-h8300/bug.h
+++ b/arch/h8300/include/asm/bug.h
diff --git a/include/asm-h8300/bugs.h b/arch/h8300/include/asm/bugs.h
index 1cb4afba6eb1..1cb4afba6eb1 100644
--- a/include/asm-h8300/bugs.h
+++ b/arch/h8300/include/asm/bugs.h
diff --git a/include/asm-h8300/byteorder.h b/arch/h8300/include/asm/byteorder.h
index 36e597d61619..36e597d61619 100644
--- a/include/asm-h8300/byteorder.h
+++ b/arch/h8300/include/asm/byteorder.h
diff --git a/include/asm-h8300/cache.h b/arch/h8300/include/asm/cache.h
index c6350283649d..c6350283649d 100644
--- a/include/asm-h8300/cache.h
+++ b/arch/h8300/include/asm/cache.h
diff --git a/include/asm-h8300/cachectl.h b/arch/h8300/include/asm/cachectl.h
index c464022d8e26..c464022d8e26 100644
--- a/include/asm-h8300/cachectl.h
+++ b/arch/h8300/include/asm/cachectl.h
diff --git a/include/asm-h8300/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
index 5ffdca217b95..5ffdca217b95 100644
--- a/include/asm-h8300/cacheflush.h
+++ b/arch/h8300/include/asm/cacheflush.h
diff --git a/include/asm-h8300/checksum.h b/arch/h8300/include/asm/checksum.h
index 98724e12508c..98724e12508c 100644
--- a/include/asm-h8300/checksum.h
+++ b/arch/h8300/include/asm/checksum.h
diff --git a/include/asm-h8300/cputime.h b/arch/h8300/include/asm/cputime.h
index 092e187c7b08..092e187c7b08 100644
--- a/include/asm-h8300/cputime.h
+++ b/arch/h8300/include/asm/cputime.h
diff --git a/include/asm-h8300/current.h b/arch/h8300/include/asm/current.h
index 57d74ee55a14..57d74ee55a14 100644
--- a/include/asm-h8300/current.h
+++ b/arch/h8300/include/asm/current.h
diff --git a/include/asm-h8300/dbg.h b/arch/h8300/include/asm/dbg.h
index 2c6d1cbcf736..2c6d1cbcf736 100644
--- a/include/asm-h8300/dbg.h
+++ b/arch/h8300/include/asm/dbg.h
diff --git a/include/asm-h8300/delay.h b/arch/h8300/include/asm/delay.h
index 743beba70f82..743beba70f82 100644
--- a/include/asm-h8300/delay.h
+++ b/arch/h8300/include/asm/delay.h
diff --git a/include/asm-h8300/device.h b/arch/h8300/include/asm/device.h
index d8f9872b0e2d..d8f9872b0e2d 100644
--- a/include/asm-h8300/device.h
+++ b/arch/h8300/include/asm/device.h
diff --git a/include/asm-h8300/div64.h b/arch/h8300/include/asm/div64.h
index 6cd978cefb28..6cd978cefb28 100644
--- a/include/asm-h8300/div64.h
+++ b/arch/h8300/include/asm/div64.h
diff --git a/include/asm-h8300/dma.h b/arch/h8300/include/asm/dma.h
index 3edbaaaedf5b..3edbaaaedf5b 100644
--- a/include/asm-h8300/dma.h
+++ b/arch/h8300/include/asm/dma.h
diff --git a/include/asm-h8300/elf.h b/arch/h8300/include/asm/elf.h
index a8b57d1f4128..a8b57d1f4128 100644
--- a/include/asm-h8300/elf.h
+++ b/arch/h8300/include/asm/elf.h
diff --git a/include/asm-h8300/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
index 108d8c48e42e..108d8c48e42e 100644
--- a/include/asm-h8300/emergency-restart.h
+++ b/arch/h8300/include/asm/emergency-restart.h
diff --git a/include/asm-h8300/errno.h b/arch/h8300/include/asm/errno.h
index 0c2f5641fdcc..0c2f5641fdcc 100644
--- a/include/asm-h8300/errno.h
+++ b/arch/h8300/include/asm/errno.h
diff --git a/include/asm-h8300/fb.h b/arch/h8300/include/asm/fb.h
index c7df38030992..c7df38030992 100644
--- a/include/asm-h8300/fb.h
+++ b/arch/h8300/include/asm/fb.h
diff --git a/include/asm-h8300/fcntl.h b/arch/h8300/include/asm/fcntl.h
index 1952cb2e3b06..1952cb2e3b06 100644
--- a/include/asm-h8300/fcntl.h
+++ b/arch/h8300/include/asm/fcntl.h
diff --git a/include/asm-h8300/flat.h b/arch/h8300/include/asm/flat.h
index 2a873508a9a1..2a873508a9a1 100644
--- a/include/asm-h8300/flat.h
+++ b/arch/h8300/include/asm/flat.h
diff --git a/include/asm-h8300/fpu.h b/arch/h8300/include/asm/fpu.h
index 4fc416e80bef..4fc416e80bef 100644
--- a/include/asm-h8300/fpu.h
+++ b/arch/h8300/include/asm/fpu.h
diff --git a/include/asm-h8300/futex.h b/arch/h8300/include/asm/futex.h
index 6a332a9f099c..6a332a9f099c 100644
--- a/include/asm-h8300/futex.h
+++ b/arch/h8300/include/asm/futex.h
diff --git a/include/asm-h8300/gpio.h b/arch/h8300/include/asm/gpio.h
index a714f0c0efbc..a714f0c0efbc 100644
--- a/include/asm-h8300/gpio.h
+++ b/arch/h8300/include/asm/gpio.h
diff --git a/include/asm-h8300/hardirq.h b/arch/h8300/include/asm/hardirq.h
index 9d7f7a7462b2..9d7f7a7462b2 100644
--- a/include/asm-h8300/hardirq.h
+++ b/arch/h8300/include/asm/hardirq.h
diff --git a/include/asm-h8300/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
index d75a5a1119e8..d75a5a1119e8 100644
--- a/include/asm-h8300/hw_irq.h
+++ b/arch/h8300/include/asm/hw_irq.h
diff --git a/include/asm-h8300/io.h b/arch/h8300/include/asm/io.h
index 26dc6ccd9441..26dc6ccd9441 100644
--- a/include/asm-h8300/io.h
+++ b/arch/h8300/include/asm/io.h
diff --git a/include/asm-h8300/ioctl.h b/arch/h8300/include/asm/ioctl.h
index b279fe06dfe5..b279fe06dfe5 100644
--- a/include/asm-h8300/ioctl.h
+++ b/arch/h8300/include/asm/ioctl.h
diff --git a/include/asm-h8300/ioctls.h b/arch/h8300/include/asm/ioctls.h
index 98a53d067269..98a53d067269 100644
--- a/include/asm-h8300/ioctls.h
+++ b/arch/h8300/include/asm/ioctls.h
diff --git a/include/asm-h8300/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h
index 2cd1ebcc109d..2cd1ebcc109d 100644
--- a/include/asm-h8300/ipcbuf.h
+++ b/arch/h8300/include/asm/ipcbuf.h
diff --git a/include/asm-h8300/irq.h b/arch/h8300/include/asm/irq.h
index 13d7c601cd0a..13d7c601cd0a 100644
--- a/include/asm-h8300/irq.h
+++ b/arch/h8300/include/asm/irq.h
diff --git a/include/asm-h8300/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
index 3dd9c0b70270..3dd9c0b70270 100644
--- a/include/asm-h8300/irq_regs.h
+++ b/arch/h8300/include/asm/irq_regs.h
diff --git a/include/asm-h8300/kdebug.h b/arch/h8300/include/asm/kdebug.h
index 6ece1b037665..6ece1b037665 100644
--- a/include/asm-h8300/kdebug.h
+++ b/arch/h8300/include/asm/kdebug.h
diff --git a/include/asm-h8300/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
index 1ec8a3427120..1ec8a3427120 100644
--- a/include/asm-h8300/kmap_types.h
+++ b/arch/h8300/include/asm/kmap_types.h
diff --git a/include/asm-h8300/linkage.h b/arch/h8300/include/asm/linkage.h
index 6f4df7d46180..6f4df7d46180 100644
--- a/include/asm-h8300/linkage.h
+++ b/arch/h8300/include/asm/linkage.h
diff --git a/include/asm-h8300/local.h b/arch/h8300/include/asm/local.h
index fdd4efe437cd..fdd4efe437cd 100644
--- a/include/asm-h8300/local.h
+++ b/arch/h8300/include/asm/local.h
diff --git a/include/asm-h8300/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
index ab9d9646d241..ab9d9646d241 100644
--- a/include/asm-h8300/mc146818rtc.h
+++ b/arch/h8300/include/asm/mc146818rtc.h
diff --git a/include/asm-h8300/md.h b/arch/h8300/include/asm/md.h
index 1a47dc6691fb..1a47dc6691fb 100644
--- a/include/asm-h8300/md.h
+++ b/arch/h8300/include/asm/md.h
diff --git a/include/asm-h8300/mman.h b/arch/h8300/include/asm/mman.h
index b9f104f22a36..b9f104f22a36 100644
--- a/include/asm-h8300/mman.h
+++ b/arch/h8300/include/asm/mman.h
diff --git a/include/asm-h8300/mmu.h b/arch/h8300/include/asm/mmu.h
index 2ce06ea46104..2ce06ea46104 100644
--- a/include/asm-h8300/mmu.h
+++ b/arch/h8300/include/asm/mmu.h
diff --git a/include/asm-h8300/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
index f44b730da54d..f44b730da54d 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/arch/h8300/include/asm/mmu_context.h
diff --git a/include/asm-h8300/module.h b/arch/h8300/include/asm/module.h
index de23231f3196..de23231f3196 100644
--- a/include/asm-h8300/module.h
+++ b/arch/h8300/include/asm/module.h
diff --git a/include/asm-h8300/msgbuf.h b/arch/h8300/include/asm/msgbuf.h
index 6b148cd09aa5..6b148cd09aa5 100644
--- a/include/asm-h8300/msgbuf.h
+++ b/arch/h8300/include/asm/msgbuf.h
diff --git a/include/asm-h8300/mutex.h b/arch/h8300/include/asm/mutex.h
index 458c1f7fbc18..458c1f7fbc18 100644
--- a/include/asm-h8300/mutex.h
+++ b/arch/h8300/include/asm/mutex.h
diff --git a/include/asm-h8300/page.h b/arch/h8300/include/asm/page.h
index 0b6acf0b03aa..0b6acf0b03aa 100644
--- a/include/asm-h8300/page.h
+++ b/arch/h8300/include/asm/page.h
diff --git a/include/asm-h8300/page_offset.h b/arch/h8300/include/asm/page_offset.h
index f8706463008c..f8706463008c 100644
--- a/include/asm-h8300/page_offset.h
+++ b/arch/h8300/include/asm/page_offset.h
diff --git a/include/asm-h8300/param.h b/arch/h8300/include/asm/param.h
index 1c72fb8080ff..1c72fb8080ff 100644
--- a/include/asm-h8300/param.h
+++ b/arch/h8300/include/asm/param.h
diff --git a/include/asm-h8300/pci.h b/arch/h8300/include/asm/pci.h
index 97389b35aa35..97389b35aa35 100644
--- a/include/asm-h8300/pci.h
+++ b/arch/h8300/include/asm/pci.h
diff --git a/include/asm-h8300/percpu.h b/arch/h8300/include/asm/percpu.h
index 72c03e3666d8..72c03e3666d8 100644
--- a/include/asm-h8300/percpu.h
+++ b/arch/h8300/include/asm/percpu.h
diff --git a/include/asm-h8300/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
index c2e89a286d23..c2e89a286d23 100644
--- a/include/asm-h8300/pgalloc.h
+++ b/arch/h8300/include/asm/pgalloc.h
diff --git a/include/asm-h8300/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a09230a08e02..a09230a08e02 100644
--- a/include/asm-h8300/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
diff --git a/include/asm-h8300/poll.h b/arch/h8300/include/asm/poll.h
index f61540c22d94..f61540c22d94 100644
--- a/include/asm-h8300/poll.h
+++ b/arch/h8300/include/asm/poll.h
diff --git a/include/asm-h8300/posix_types.h b/arch/h8300/include/asm/posix_types.h
index 5c553927fc53..5c553927fc53 100644
--- a/include/asm-h8300/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
diff --git a/include/asm-h8300/processor.h b/arch/h8300/include/asm/processor.h
index 69e8a34eb6d5..69e8a34eb6d5 100644
--- a/include/asm-h8300/processor.h
+++ b/arch/h8300/include/asm/processor.h
diff --git a/include/asm-h8300/ptrace.h b/arch/h8300/include/asm/ptrace.h
index c2e05e4b512e..c2e05e4b512e 100644
--- a/include/asm-h8300/ptrace.h
+++ b/arch/h8300/include/asm/ptrace.h
diff --git a/include/asm-h8300/regs267x.h b/arch/h8300/include/asm/regs267x.h
index 1bff731a9f77..1bff731a9f77 100644
--- a/include/asm-h8300/regs267x.h
+++ b/arch/h8300/include/asm/regs267x.h
diff --git a/include/asm-h8300/regs306x.h b/arch/h8300/include/asm/regs306x.h
index 027dd633fa25..027dd633fa25 100644
--- a/include/asm-h8300/regs306x.h
+++ b/arch/h8300/include/asm/regs306x.h
diff --git a/include/asm-h8300/resource.h b/arch/h8300/include/asm/resource.h
index 46c5f4391607..46c5f4391607 100644
--- a/include/asm-h8300/resource.h
+++ b/arch/h8300/include/asm/resource.h
diff --git a/include/asm-h8300/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd87ac90..d3ecdd87ac90 100644
--- a/include/asm-h8300/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
diff --git a/include/asm-h8300/sections.h b/arch/h8300/include/asm/sections.h
index a81743e8b743..a81743e8b743 100644
--- a/include/asm-h8300/sections.h
+++ b/arch/h8300/include/asm/sections.h
diff --git a/include/asm-h8300/segment.h b/arch/h8300/include/asm/segment.h
index b79a82d0f99d..b79a82d0f99d 100644
--- a/include/asm-h8300/segment.h
+++ b/arch/h8300/include/asm/segment.h
diff --git a/include/asm-h8300/sembuf.h b/arch/h8300/include/asm/sembuf.h
index e04a3ec0cb92..e04a3ec0cb92 100644
--- a/include/asm-h8300/sembuf.h
+++ b/arch/h8300/include/asm/sembuf.h
diff --git a/include/asm-h8300/setup.h b/arch/h8300/include/asm/setup.h
index e2c600e96733..e2c600e96733 100644
--- a/include/asm-h8300/setup.h
+++ b/arch/h8300/include/asm/setup.h
diff --git a/include/asm-h8300/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
index b6bb6e58295c..b6bb6e58295c 100644
--- a/include/asm-h8300/sh_bios.h
+++ b/arch/h8300/include/asm/sh_bios.h
diff --git a/include/asm-h8300/shm.h b/arch/h8300/include/asm/shm.h
index ed6623c0545d..ed6623c0545d 100644
--- a/include/asm-h8300/shm.h
+++ b/arch/h8300/include/asm/shm.h
diff --git a/include/asm-h8300/shmbuf.h b/arch/h8300/include/asm/shmbuf.h
index 64e77993a7a9..64e77993a7a9 100644
--- a/include/asm-h8300/shmbuf.h
+++ b/arch/h8300/include/asm/shmbuf.h
diff --git a/include/asm-h8300/shmparam.h b/arch/h8300/include/asm/shmparam.h
index d1863953ec64..d1863953ec64 100644
--- a/include/asm-h8300/shmparam.h
+++ b/arch/h8300/include/asm/shmparam.h
diff --git a/include/asm-h8300/sigcontext.h b/arch/h8300/include/asm/sigcontext.h
index e4b81505f8f8..e4b81505f8f8 100644
--- a/include/asm-h8300/sigcontext.h
+++ b/arch/h8300/include/asm/sigcontext.h
diff --git a/include/asm-h8300/siginfo.h b/arch/h8300/include/asm/siginfo.h
index bc8fbea931a5..bc8fbea931a5 100644
--- a/include/asm-h8300/siginfo.h
+++ b/arch/h8300/include/asm/siginfo.h
diff --git a/include/asm-h8300/signal.h b/arch/h8300/include/asm/signal.h
index 7bc15048a64f..7bc15048a64f 100644
--- a/include/asm-h8300/signal.h
+++ b/arch/h8300/include/asm/signal.h
diff --git a/include/asm-h8300/smp.h b/arch/h8300/include/asm/smp.h
index 9e9bd7e58922..9e9bd7e58922 100644
--- a/include/asm-h8300/smp.h
+++ b/arch/h8300/include/asm/smp.h
diff --git a/include/asm-h8300/socket.h b/arch/h8300/include/asm/socket.h
index da2520dbf254..da2520dbf254 100644
--- a/include/asm-h8300/socket.h
+++ b/arch/h8300/include/asm/socket.h
diff --git a/include/asm-h8300/sockios.h b/arch/h8300/include/asm/sockios.h
index e9c7ec810c23..e9c7ec810c23 100644
--- a/include/asm-h8300/sockios.h
+++ b/arch/h8300/include/asm/sockios.h
diff --git a/include/asm-h8300/spinlock.h b/arch/h8300/include/asm/spinlock.h
index d5407fa173e4..d5407fa173e4 100644
--- a/include/asm-h8300/spinlock.h
+++ b/arch/h8300/include/asm/spinlock.h
diff --git a/include/asm-h8300/stat.h b/arch/h8300/include/asm/stat.h
index 62c3cc24dfe6..62c3cc24dfe6 100644
--- a/include/asm-h8300/stat.h
+++ b/arch/h8300/include/asm/stat.h
diff --git a/include/asm-h8300/statfs.h b/arch/h8300/include/asm/statfs.h
index b96efa712aac..b96efa712aac 100644
--- a/include/asm-h8300/statfs.h
+++ b/arch/h8300/include/asm/statfs.h
diff --git a/include/asm-h8300/string.h b/arch/h8300/include/asm/string.h
index ca5034897d87..ca5034897d87 100644
--- a/include/asm-h8300/string.h
+++ b/arch/h8300/include/asm/string.h
diff --git a/include/asm-h8300/system.h b/arch/h8300/include/asm/system.h
index 4b8e475908ae..4b8e475908ae 100644
--- a/include/asm-h8300/system.h
+++ b/arch/h8300/include/asm/system.h
diff --git a/include/asm-h8300/target_time.h b/arch/h8300/include/asm/target_time.h
index 9f2a9aa1fe6f..9f2a9aa1fe6f 100644
--- a/include/asm-h8300/target_time.h
+++ b/arch/h8300/include/asm/target_time.h
diff --git a/include/asm-h8300/termbits.h b/arch/h8300/include/asm/termbits.h
index 31eca81db3f7..31eca81db3f7 100644
--- a/include/asm-h8300/termbits.h
+++ b/arch/h8300/include/asm/termbits.h
diff --git a/include/asm-h8300/termios.h b/arch/h8300/include/asm/termios.h
index 70eea64b4213..70eea64b4213 100644
--- a/include/asm-h8300/termios.h
+++ b/arch/h8300/include/asm/termios.h
diff --git a/include/asm-h8300/thread_info.h b/arch/h8300/include/asm/thread_info.h
index aafd4d322ec3..aafd4d322ec3 100644
--- a/include/asm-h8300/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
diff --git a/include/asm-h8300/timex.h b/arch/h8300/include/asm/timex.h
index 23e67013439f..23e67013439f 100644
--- a/include/asm-h8300/timex.h
+++ b/arch/h8300/include/asm/timex.h
diff --git a/include/asm-h8300/tlb.h b/arch/h8300/include/asm/tlb.h
index 3dea80ad9e6f..3dea80ad9e6f 100644
--- a/include/asm-h8300/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
diff --git a/include/asm-h8300/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
index 41c148a9208e..41c148a9208e 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/arch/h8300/include/asm/tlbflush.h
diff --git a/include/asm-h8300/topology.h b/arch/h8300/include/asm/topology.h
index fdc121924d4c..fdc121924d4c 100644
--- a/include/asm-h8300/topology.h
+++ b/arch/h8300/include/asm/topology.h
diff --git a/include/asm-h8300/traps.h b/arch/h8300/include/asm/traps.h
index 41cf6be02f68..41cf6be02f68 100644
--- a/include/asm-h8300/traps.h
+++ b/arch/h8300/include/asm/traps.h
diff --git a/include/asm-h8300/types.h b/arch/h8300/include/asm/types.h
index 12875190b156..12875190b156 100644
--- a/include/asm-h8300/types.h
+++ b/arch/h8300/include/asm/types.h
diff --git a/include/asm-h8300/uaccess.h b/arch/h8300/include/asm/uaccess.h
index 356068cd0879..356068cd0879 100644
--- a/include/asm-h8300/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
diff --git a/include/asm-h8300/ucontext.h b/arch/h8300/include/asm/ucontext.h
index 0bcf8f85fab9..0bcf8f85fab9 100644
--- a/include/asm-h8300/ucontext.h
+++ b/arch/h8300/include/asm/ucontext.h
diff --git a/include/asm-h8300/unaligned.h b/arch/h8300/include/asm/unaligned.h
index b8d06c70c2da..b8d06c70c2da 100644
--- a/include/asm-h8300/unaligned.h
+++ b/arch/h8300/include/asm/unaligned.h
diff --git a/include/asm-h8300/unistd.h b/arch/h8300/include/asm/unistd.h
index 99f3c3561ecb..99f3c3561ecb 100644
--- a/include/asm-h8300/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
diff --git a/include/asm-h8300/user.h b/arch/h8300/include/asm/user.h
index 14a9e18950f1..14a9e18950f1 100644
--- a/include/asm-h8300/user.h
+++ b/arch/h8300/include/asm/user.h
diff --git a/include/asm-h8300/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
index 19cfd62b11c3..19cfd62b11c3 100644
--- a/include/asm-h8300/virtconvert.h
+++ b/arch/h8300/include/asm/virtconvert.h
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3473e25231d9..e3dd9303643d 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,4 +93,8 @@ static inline unsigned long get_softint(void)
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+extern void *hardirq_stack[NR_CPUS];
+extern void *softirq_stack[NR_CPUS];
+#define __ARCH_HAS_DO_SOFTIRQ
+
 #endif
diff --git a/arch/sparc/include/asm/of_device.h b/arch/sparc/include/asm/of_device.h
index e5f5aedc2293..bba777a416d3 100644
--- a/arch/sparc/include/asm/of_device.h
+++ b/arch/sparc/include/asm/of_device.h
@@ -30,8 +30,7 @@ struct of_device
 extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
 extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
 
-/* These are just here during the transition */
-#include <linux/of_device.h>
+/* This is just here during the transition */
 #include <linux/of_platform.h>
 
 #endif /* __KERNEL__ */
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ba43d85e8dde..9b6689d9d570 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
 	       ino, virt_irq);
 }
 
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
 			     "i" (PSTATE_IE)
 			     : "memory");
 
+	orig_sp = set_hardirq_stack();
+
 	while (bucket_pa) {
 		struct irq_desc *desc;
 		unsigned long next_pa;
@@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
 		bucket_pa = next_pa;
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
+void do_softirq(void)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	if (local_softirq_pending()) {
+		void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+		__asm__ __volatile__("mov %%sp, %0\n\t"
+				     "mov %1, %%sp"
+				     : "=&r" (orig_sp)
+				     : "r" (sp));
+		__do_softirq();
+		__asm__ __volatile__("mov %0, %%sp"
+				     : : "r" (orig_sp));
+	}
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
new file mode 100644
index 000000000000..4248d969272f
--- /dev/null
+++ b/arch/sparc64/kernel/kstack.h
@@ -0,0 +1,60 @@
+#ifndef _KSTACK_H
+#define _KSTACK_H
+
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+/* SP must be STACK_BIAS adjusted already.  */
+static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
+{
+	unsigned long base = (unsigned long) tp;
+
+	if (sp >= (base + sizeof(struct thread_info)) &&
+	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		return true;
+
+	if (hardirq_stack[tp->cpu]) {
+		base = (unsigned long) hardirq_stack[tp->cpu];
+		if (sp >= base &&
+		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+			return true;
+		base = (unsigned long) softirq_stack[tp->cpu];
+		if (sp >= base &&
+		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+			return true;
+	}
+	return false;
+}
+
+/* Does "regs" point to a valid pt_regs trap frame?  */
+static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
+{
+	unsigned long base = (unsigned long) tp;
+	unsigned long addr = (unsigned long) regs;
+
+	if (addr >= base &&
+	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+		goto check_magic;
+
+	if (hardirq_stack[tp->cpu]) {
+		base = (unsigned long) hardirq_stack[tp->cpu];
+		if (addr >= base &&
+		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+			goto check_magic;
+		base = (unsigned long) softirq_stack[tp->cpu];
+		if (addr >= base &&
+		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+			goto check_magic;
+	}
+	return false;
+
+check_magic:
+	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
+		return true;
+	return false;
+
+}
+
+#endif /* _KSTACK_H */
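[Editor's note: the hunks that follow convert the sparc64 stack walkers to these helpers. A minimal sketch of the resulting walk step, illustrative only: next_frame is a hypothetical helper, while kstack_valid(), reg_window, and STACK_BIAS come from the code above.]

	/* Advance one saved frame, refusing frames outside every valid
	 * kernel stack.  "fp" must already be STACK_BIAS adjusted, as
	 * kstack_valid() requires.
	 */
	static unsigned long next_frame(struct thread_info *tp,
					unsigned long fp, unsigned long *pc)
	{
		struct reg_window *rw;

		if (!kstack_valid(tp, fp))
			return 0;		/* bogus frame pointer: stop */
		rw = (struct reg_window *) fp;
		*pc = rw->ins[7];		/* caller's return address */
		return rw->ins[6] + STACK_BIAS;	/* previous frame pointer */
	}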
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7f5debdc5fed..15f4178592e7 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -52,6 +52,8 @@
 #include <asm/irq_regs.h>
 #include <asm/smp.h>
 
+#include "kstack.h"
+
 static void sparc64_yield(int cpu)
 {
 	if (tlb_type != hypervisor)
@@ -235,19 +237,6 @@ void show_regs(struct pt_regs *regs)
 struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
 static DEFINE_SPINLOCK(global_reg_snapshot_lock);
 
-static bool kstack_valid(struct thread_info *tp, struct reg_window *rw)
-{
-	unsigned long thread_base, fp;
-
-	thread_base = (unsigned long) tp;
-	fp = (unsigned long) rw;
-
-	if (fp < (thread_base + sizeof(struct thread_info)) ||
-	    fp >= (thread_base + THREAD_SIZE))
-		return false;
-	return true;
-}
-
 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 			      int this_cpu)
 {
@@ -264,11 +253,11 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 
 		rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
-		if (kstack_valid(tp, rw)) {
+		if (kstack_valid(tp, (unsigned long) rw)) {
 			global_reg_snapshot[this_cpu].i7 = rw->ins[7];
 			rw = (struct reg_window *)
 				(rw->ins[6] + STACK_BIAS);
-			if (kstack_valid(tp, rw))
+			if (kstack_valid(tp, (unsigned long) rw))
 				global_reg_snapshot[this_cpu].rpc = rw->ins[7];
 		}
 	} else {
@@ -828,7 +817,7 @@ out:
 unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
-	unsigned long thread_info_base;
+	struct thread_info *tp;
 	struct reg_window *rw;
 	unsigned long ret = 0;
 	int count = 0;
@@ -837,14 +826,12 @@ unsigned long get_wchan(struct task_struct *task)
 	    task->state == TASK_RUNNING)
 		goto out;
 
-	thread_info_base = (unsigned long) task_stack_page(task);
+	tp = task_thread_info(task);
 	bias = STACK_BIAS;
 	fp = task_thread_info(task)->ksp + bias;
 
 	do {
-		/* Bogus frame pointer? */
-		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_info_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		rw = (struct reg_window *) fp;
 		pc = rw->ins[7];
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 27b81775a4de..743ccad61c60 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -858,9 +858,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
-#ifdef CONFIG_MAGIC_SYSRQ
 extern unsigned long xcall_fetch_glob_regs;
-#endif
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
 #ifdef CONFIG_KGDB
@@ -1005,12 +1003,10 @@ void kgdb_roundup_cpus(unsigned long flags)
 }
 #endif
 
-#ifdef CONFIG_MAGIC_SYSRQ
 void smp_fetch_global_regs(void)
 {
 	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
 }
-#endif
 
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index e9d7f0660f2e..4e21d4a57d3b 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -5,10 +5,12 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#include "kstack.h"
+
 void save_stack_trace(struct stack_trace *trace)
 {
-	unsigned long ksp, fp, thread_base;
 	struct thread_info *tp = task_thread_info(current);
+	unsigned long ksp, fp;
 
 	stack_trace_flush();
 
@@ -18,23 +20,18 @@ void save_stack_trace(struct stack_trace *trace)
 	);
 
 	fp = ksp + STACK_BIAS;
-	thread_base = (unsigned long) tp;
 	do {
 		struct sparc_stackf *sf;
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		if (!kstack_valid(tp, fp))
 			break;
 
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if (((unsigned long)regs <=
-		     (thread_base + THREAD_SIZE - sizeof(*regs))) &&
-		    (regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 404e8561e2d0..3d924121c796 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -39,6 +39,7 @@
 #include <asm/prom.h>
 
 #include "entry.h"
+#include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -2115,14 +2116,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
index 7735a7a60533..fad90ddb3a28 100644
--- a/arch/sparc64/lib/mcount.S
+++ b/arch/sparc64/lib/mcount.S
@@ -48,12 +48,45 @@ mcount:
 	sub	%g3, STACK_BIAS, %g3
 	cmp	%sp, %g3
 	bg,pt	%xcc, 1f
-	 sethi	%hi(panicstring), %g3
+	 nop
+	lduh	[%g6 + TI_CPU], %g1
+	sethi	%hi(hardirq_stack), %g3
+	or	%g3, %lo(hardirq_stack), %g3
+	sllx	%g1, 3, %g1
+	ldx	[%g3 + %g1], %g7
+	sub	%g7, STACK_BIAS, %g7
+	cmp	%sp, %g7
+	bleu,pt	%xcc, 2f
+	 sethi	%hi(THREAD_SIZE), %g3
+	add	%g7, %g3, %g7
+	cmp	%sp, %g7
+	blu,pn	%xcc, 1f
+2:	 sethi	%hi(softirq_stack), %g3
+	or	%g3, %lo(softirq_stack), %g3
+	ldx	[%g3 + %g1], %g7
+	cmp	%sp, %g7
+	bleu,pt	%xcc, 2f
+	 sethi	%hi(THREAD_SIZE), %g3
+	add	%g7, %g3, %g7
+	cmp	%sp, %g7
+	blu,pn	%xcc, 1f
+	 nop
+	/* If we are already on ovstack, don't hop onto it
+	 * again, we are already trying to output the stack overflow
+	 * message.
+	 */
 	sethi	%hi(ovstack), %g7		! cant move to panic stack fast enough
 	 or	%g7, %lo(ovstack), %g7
-	add	%g7, OVSTACKSIZE, %g7
+	add	%g7, OVSTACKSIZE, %g3
+	sub	%g3, STACK_BIAS + 192, %g3
 	sub	%g7, STACK_BIAS, %g7
-	mov	%g7, %sp
+	cmp	%sp, %g7
+	blu,pn	%xcc, 2f
+	 cmp	%sp, %g3
+	bleu,pn	%xcc, 1f
+	 nop
+2:	mov	%g3, %sp
+	sethi	%hi(panicstring), %g3
 	call	prom_printf
 	 or	%g3, %lo(panicstring), %o0
 	call	prom_halt
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3ecb03..217de3ea29e8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
49#include <asm/sstate.h> 49#include <asm/sstate.h>
50#include <asm/mdesc.h> 50#include <asm/mdesc.h>
51#include <asm/cpudata.h> 51#include <asm/cpudata.h>
52#include <asm/irq.h>
52 53
53#define MAX_PHYS_ADDRESS (1UL << 42UL) 54#define MAX_PHYS_ADDRESS (1UL << 42UL)
54#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) 55#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -1771,6 +1772,16 @@ void __init paging_init(void)
1771 if (tlb_type == hypervisor) 1772 if (tlb_type == hypervisor)
1772 sun4v_mdesc_init(); 1773 sun4v_mdesc_init();
1773 1774
1775 /* Once the OF device tree and MDESC have been setup, we know
1776 * the list of possible cpus. Therefore we can allocate the
1777 * IRQ stacks.
1778 */
1779 for_each_possible_cpu(i) {
1780 /* XXX Use node local allocations... XXX */
1781 softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
1782 hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
1783 }
1784
1774 /* Setup bootmem... */ 1785 /* Setup bootmem... */
1775 last_valid_pfn = end_pfn = bootmem_init(phys_base); 1786 last_valid_pfn = end_pfn = bootmem_init(phys_base);
1776 1787
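paging_init() can only size this allocation once the OF tree and MDESC have fixed the set of possible cpus, hence the placement of the loop. Each stack is both THREAD_SIZE bytes long and THREAD_SIZE-aligned, so a stack base can later be recovered by masking a stack pointer. A hedged userspace model of the same pattern, with aligned_alloc() standing in for lmb_alloc() plus __va():

#include <stdlib.h>

#define THREAD_SIZE (2 * 8192)          /* illustrative */
#define NR_CPUS     64

void *softirq_stack[NR_CPUS];
void *hardirq_stack[NR_CPUS];

/* One THREAD_SIZE-aligned block per possible cpu for each IRQ context;
 * alignment makes (sp & ~(THREAD_SIZE - 1)) yield the stack base. */
static void alloc_irq_stacks(int nr_possible)
{
        for (int i = 0; i < nr_possible; i++) {
                softirq_stack[i] = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
                hardirq_stack[i] = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        }
}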
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index ff1dc44d363e..86773e89dc1b 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -480,7 +480,6 @@ xcall_sync_tick:
480 b rtrap_xcall 480 b rtrap_xcall
481 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 481 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
482 482
483#ifdef CONFIG_MAGIC_SYSRQ
484 .globl xcall_fetch_glob_regs 483 .globl xcall_fetch_glob_regs
485xcall_fetch_glob_regs: 484xcall_fetch_glob_regs:
486 sethi %hi(global_reg_snapshot), %g1 485 sethi %hi(global_reg_snapshot), %g1
@@ -511,7 +510,6 @@ xcall_fetch_glob_regs:
511 membar #StoreStore 510 membar #StoreStore
512 stx %g3, [%g1 + GR_SNAP_THREAD] 511 stx %g3, [%g1 + GR_SNAP_THREAD]
513 retry 512 retry
514#endif /* CONFIG_MAGIC_SYSRQ */
515 513
516#ifdef DCACHE_ALIASING_POSSIBLE 514#ifdef DCACHE_ALIASING_POSSIBLE
517 .align 32 515 .align 32
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 22d7d050905d..de39e1f2ede5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -101,16 +101,13 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
101 */ 101 */
102static int iommu_completion_wait(struct amd_iommu *iommu) 102static int iommu_completion_wait(struct amd_iommu *iommu)
103{ 103{
104 int ret; 104 int ret, ready = 0;
105 unsigned status = 0;
105 struct iommu_cmd cmd; 106 struct iommu_cmd cmd;
106 volatile u64 ready = 0;
107 unsigned long ready_phys = virt_to_phys(&ready);
108 unsigned long i = 0; 107 unsigned long i = 0;
109 108
110 memset(&cmd, 0, sizeof(cmd)); 109 memset(&cmd, 0, sizeof(cmd));
111 cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK; 110 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
112 cmd.data[1] = upper_32_bits(ready_phys);
113 cmd.data[2] = 1; /* value written to 'ready' */
114 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); 111 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
115 112
116 iommu->need_sync = 0; 113 iommu->need_sync = 0;
@@ -122,9 +119,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
122 119
123 while (!ready && (i < EXIT_LOOP_COUNT)) { 120 while (!ready && (i < EXIT_LOOP_COUNT)) {
124 ++i; 121 ++i;
125 cpu_relax(); 122 /* wait for the bit to become one */
123 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
124 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
126 } 125 }
127 126
127 /* set bit back to zero */
128 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
129 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
130
128 if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) 131 if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
129 printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); 132 printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
130 133
@@ -161,7 +164,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
161 address &= PAGE_MASK; 164 address &= PAGE_MASK;
162 CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); 165 CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
163 cmd.data[1] |= domid; 166 cmd.data[1] |= domid;
164 cmd.data[2] = LOW_U32(address); 167 cmd.data[2] = lower_32_bits(address);
165 cmd.data[3] = upper_32_bits(address); 168 cmd.data[3] = upper_32_bits(address);
166 if (s) /* size bit - we flush more than one 4kb page */ 169 if (s) /* size bit - we flush more than one 4kb page */
167 cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; 170 cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
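iommu_completion_wait() stops asking the IOMMU to store a flag into host memory (the old COMPL_WAIT_STORE variant needed a coherent buffer and its physical address) and instead requests a completion-wait interrupt, polls the corresponding bit in the MMIO status register, and clears it by writing the status back with the bit masked off. A self-contained model of that poll/ack loop; the offset, mask, and loop bound are stand-ins, not the driver's real values:

#include <stdint.h>
#include <stdio.h>

#define MMIO_STATUS_OFFSET            0x2020      /* stand-in */
#define MMIO_STATUS_COM_WAIT_INT_MASK (1u << 2)   /* stand-in */
#define EXIT_LOOP_COUNT               10000000UL  /* stand-in */

/* Poll until the completion-wait bit appears in the status register,
 * then write the status back with the bit cleared, as the patch does. */
static int wait_for_completion_bit(volatile uint32_t *mmio)
{
        uint32_t status = 0;
        unsigned long i;

        for (i = 0; i < EXIT_LOOP_COUNT; i++) {
                status = mmio[MMIO_STATUS_OFFSET / 4];
                if (status & MMIO_STATUS_COM_WAIT_INT_MASK)
                        break;
        }
        mmio[MMIO_STATUS_OFFSET / 4] = status & ~MMIO_STATUS_COM_WAIT_INT_MASK;

        if (i == EXIT_LOOP_COUNT) {
                fprintf(stderr, "AMD IOMMU: Completion wait loop failed\n");
                return -1;
        }
        return 0;
}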
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index d9a9da597e79..a69cc0f52042 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -801,6 +801,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
801} 801}
802 802
803/* 803/*
804 * Init the device table to not allow DMA access for devices and
805 * suppress all page faults
806 */
807static void init_device_table(void)
808{
809 u16 devid;
810
811 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
812 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
813 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
814 set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
815 }
816}
817
818/*
804 * This function finally enables all IOMMUs found in the system after 819 * This function finally enables all IOMMUs found in the system after
805 * they have been initialized 820 * they have been initialized
806 */ 821 */
@@ -931,6 +946,9 @@ int __init amd_iommu_init(void)
931 if (amd_iommu_pd_alloc_bitmap == NULL) 946 if (amd_iommu_pd_alloc_bitmap == NULL)
932 goto free; 947 goto free;
933 948
949 /* init the device table */
950 init_device_table();
951
934 /* 952 /*
935 * let all alias entries point to itself 953 * let all alias entries point to itself
936 */ 954 */
@@ -954,15 +972,15 @@ int __init amd_iommu_init(void)
954 if (acpi_table_parse("IVRS", init_memory_definitions) != 0) 972 if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
955 goto free; 973 goto free;
956 974
957 ret = amd_iommu_init_dma_ops(); 975 ret = sysdev_class_register(&amd_iommu_sysdev_class);
958 if (ret) 976 if (ret)
959 goto free; 977 goto free;
960 978
961 ret = sysdev_class_register(&amd_iommu_sysdev_class); 979 ret = sysdev_register(&device_amd_iommu);
962 if (ret) 980 if (ret)
963 goto free; 981 goto free;
964 982
965 ret = sysdev_register(&device_amd_iommu); 983 ret = amd_iommu_init_dma_ops();
966 if (ret) 984 if (ret)
967 goto free; 985 goto free;
968 986
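init_device_table() makes the safe state the default: every possible devid gets a valid entry with translation enabled and page faults suppressed, so a device nobody attached to a domain simply gets no DMA instead of spraying fault logs. The same hunk also moves amd_iommu_init_dma_ops() after the sysdev registration so the DMA-ops hookup runs last. set_dev_entry_bit() is not shown in this diff; conceptually it sets one bit of a 256-bit table entry indexed by devid, roughly as below (bit positions are illustrative, not the real layout):

#include <stdint.h>

/* Model of an AMD IOMMU device table entry: 256 bits as four u64s. */
struct dev_table_entry { uint64_t data[4]; };

#define DEV_ENTRY_VALID          0     /* illustrative bit numbers */
#define DEV_ENTRY_TRANSLATION    1
#define DEV_ENTRY_NO_PAGE_FAULT  96

static struct dev_table_entry dev_table[65536];

static void set_dev_entry_bit(uint16_t devid, int bit)
{
        dev_table[devid].data[bit / 64] |= 1ULL << (bit % 64);
}

/* <= is deliberate: last_bdf is the highest valid devid.  A 32-bit
 * loop counter avoids wrapping when last_bdf is 0xffff. */
static void init_device_table(uint16_t last_bdf)
{
        for (uint32_t devid = 0; devid <= last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
                set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
        }
}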
diff --git a/crypto/digest.c b/crypto/digest.c
index ac0919460d14..5d3f1303da98 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -225,7 +225,7 @@ int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
225 struct ahash_tfm *crt = &tfm->crt_ahash; 225 struct ahash_tfm *crt = &tfm->crt_ahash;
226 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; 226 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
227 227
228 if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) 228 if (dalg->dia_digestsize > PAGE_SIZE / 8)
229 return -EINVAL; 229 return -EINVAL;
230 230
231 crt->init = digest_async_init; 231 crt->init = digest_async_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 59821a22d752..66368022e0bf 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -481,21 +481,31 @@ next_one:
481 481
482 for (k = 0, temp = 0; k < template[i].np; k++) { 482 for (k = 0, temp = 0; k < template[i].np; k++) {
483 printk(KERN_INFO "page %u\n", k); 483 printk(KERN_INFO "page %u\n", k);
484 q = &axbuf[IDX[k]]; 484 q = &xbuf[IDX[k]];
485 hexdump(q, template[i].tap[k]); 485
486 n = template[i].tap[k];
487 if (k == template[i].np - 1)
488 n += enc ? authsize : -authsize;
489 hexdump(q, n);
486 printk(KERN_INFO "%s\n", 490 printk(KERN_INFO "%s\n",
487 memcmp(q, template[i].result + temp, 491 memcmp(q, template[i].result + temp, n) ?
488 template[i].tap[k] -
489 (k < template[i].np - 1 || enc ?
490 0 : authsize)) ?
491 "fail" : "pass"); 492 "fail" : "pass");
492 493
493 for (n = 0; q[template[i].tap[k] + n]; n++) 494 q += n;
494 ; 495 if (k == template[i].np - 1 && !enc) {
496 if (memcmp(q, template[i].input +
497 temp + n, authsize))
498 n = authsize;
499 else
500 n = 0;
501 } else {
502 for (n = 0; q[n]; n++)
503 ;
504 }
495 if (n) { 505 if (n) {
496 printk("Result buffer corruption %u " 506 printk("Result buffer corruption %u "
497 "bytes:\n", n); 507 "bytes:\n", n);
498 hexdump(&q[template[i].tap[k]], n); 508 hexdump(q, n);
499 } 509 }
500 510
501 temp += template[i].tap[k]; 511 temp += template[i].tap[k];
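The reworked tcrypt loop sizes each expected chunk correctly: the final page gains the ICV on encryption and loses it on decryption (the "+= enc ? authsize : -authsize" adjustment), and on the final decrypt page the stashed input ICV is compared separately. The closing "for (n = 0; q[n]; n++)" scan relies on xbuf being pre-zeroed, so any non-zero byte past the expected output is an overrun. As a hypothetical helper making that idiom explicit:

#include <stddef.h>
#include <stdio.h>

/* Count non-zero bytes immediately after the expected output; the
 * test pages are zeroed beforehand, so non-zero here means the
 * transform wrote past its result. */
static size_t trailing_garbage(const unsigned char *past_end)
{
        size_t n = 0;

        while (past_end[n])
                n++;
        return n;
}

static void check_overrun(const unsigned char *q, size_t expected_len)
{
        size_t n = trailing_garbage(q + expected_len);

        if (n)
                printf("Result buffer corruption %zu bytes\n", n);
}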
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3d0833..2735bde73475 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
57obj-$(CONFIG_PARIDE) += block/paride/ 57obj-$(CONFIG_PARIDE) += block/paride/
58obj-$(CONFIG_TC) += tc/ 58obj-$(CONFIG_TC) += tc/
59obj-$(CONFIG_USB) += usb/ 59obj-$(CONFIG_USB) += usb/
60obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
60obj-$(CONFIG_PCI) += usb/ 61obj-$(CONFIG_PCI) += usb/
61obj-$(CONFIG_USB_GADGET) += usb/gadget/ 62obj-$(CONFIG_USB_GADGET) += usb/gadget/
62obj-$(CONFIG_SERIO) += input/serio/ 63obj-$(CONFIG_SERIO) += input/serio/
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4ebb5e..128202e18fc9 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/msr.h> 32#include <asm/msr.h>
33#include <asm/cpufeature.h> 33#include <asm/cpufeature.h>
34#include <asm/i387.h>
34 35
35 36
36#define PFX KBUILD_MODNAME ": " 37#define PFX KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@ enum {
67 * Another possible performance boost may come from simply buffering 68 * Another possible performance boost may come from simply buffering
68 * until we have 4 bytes, thus returning a u32 at a time, 69 * until we have 4 bytes, thus returning a u32 at a time,
69 * instead of the current u8-at-a-time. 70 * instead of the current u8-at-a-time.
71 *
72 * Padlock instructions can generate a spurious DNA fault, so
73 * we have to call them in the context of irq_ts_save/restore()
70 */ 74 */
71 75
72static inline u32 xstore(u32 *addr, u32 edx_in) 76static inline u32 xstore(u32 *addr, u32 edx_in)
73{ 77{
74 u32 eax_out; 78 u32 eax_out;
79 int ts_state;
80
81 ts_state = irq_ts_save();
75 82
76 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" 83 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
77 :"=m"(*addr), "=a"(eax_out) 84 :"=m"(*addr), "=a"(eax_out)
78 :"D"(addr), "d"(edx_in)); 85 :"D"(addr), "d"(edx_in));
79 86
87 irq_ts_restore(ts_state);
80 return eax_out; 88 return eax_out;
81} 89}
82 90
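xstore, like every PadLock instruction, can raise a spurious device-not-available fault while CR0.TS is set, so the call is now bracketed by irq_ts_save()/irq_ts_restore() from <asm/i387.h>. The discipline generalizes to any PadLock instruction; here is a userspace model with stubbed helpers (the real ones save CR0.TS, clear it if needed, and restore it afterwards):

#include <stdint.h>

/* Stubs modelling the <asm/i387.h> helpers' calling convention. */
static int  irq_ts_save(void)      { return 0; }
static void irq_ts_restore(int ts) { (void)ts; }

/* Stand-in for the xstore/xcrypt/xsha inline asm. */
static uint32_t padlock_op(uint32_t *addr, uint32_t edx_in)
{
        (void)edx_in;
        return *addr;
}

static uint32_t padlock_op_guarded(uint32_t *addr, uint32_t edx_in)
{
        int ts_state = irq_ts_save();   /* TS must be clear across the insn */
        uint32_t ret = padlock_op(addr, edx_in);

        irq_ts_restore(ts_state);
        return ret;
}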
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a166e566..bf2917d197a0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <asm/byteorder.h> 18#include <asm/byteorder.h>
19#include <asm/i387.h>
19#include "padlock.h" 20#include "padlock.h"
20 21
21/* Control word. */ 22/* Control word. */
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
141 asm volatile ("pushfl; popfl"); 142 asm volatile ("pushfl; popfl");
142} 143}
143 144
145/*
146 * While the padlock instructions don't use FP/SSE registers, they
147 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
148 * should be used only inside the irq_ts_save/restore() context
149 */
150
144static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, 151static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
145 void *control_word) 152 void *control_word)
146{ 153{
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
205static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 212static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
206{ 213{
207 struct aes_ctx *ctx = aes_ctx(tfm); 214 struct aes_ctx *ctx = aes_ctx(tfm);
215 int ts_state;
208 padlock_reset_key(); 216 padlock_reset_key();
217
218 ts_state = irq_ts_save();
209 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); 219 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
220 irq_ts_restore(ts_state);
210} 221}
211 222
212static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 223static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
213{ 224{
214 struct aes_ctx *ctx = aes_ctx(tfm); 225 struct aes_ctx *ctx = aes_ctx(tfm);
226 int ts_state;
215 padlock_reset_key(); 227 padlock_reset_key();
228
229 ts_state = irq_ts_save();
216 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); 230 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
231 irq_ts_restore(ts_state);
217} 232}
218 233
219static struct crypto_alg aes_alg = { 234static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
244 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 259 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
245 struct blkcipher_walk walk; 260 struct blkcipher_walk walk;
246 int err; 261 int err;
262 int ts_state;
247 263
248 padlock_reset_key(); 264 padlock_reset_key();
249 265
250 blkcipher_walk_init(&walk, dst, src, nbytes); 266 blkcipher_walk_init(&walk, dst, src, nbytes);
251 err = blkcipher_walk_virt(desc, &walk); 267 err = blkcipher_walk_virt(desc, &walk);
252 268
269 ts_state = irq_ts_save();
253 while ((nbytes = walk.nbytes)) { 270 while ((nbytes = walk.nbytes)) {
254 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 271 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
255 ctx->E, &ctx->cword.encrypt, 272 ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
257 nbytes &= AES_BLOCK_SIZE - 1; 274 nbytes &= AES_BLOCK_SIZE - 1;
258 err = blkcipher_walk_done(desc, &walk, nbytes); 275 err = blkcipher_walk_done(desc, &walk, nbytes);
259 } 276 }
277 irq_ts_restore(ts_state);
260 278
261 return err; 279 return err;
262} 280}
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
268 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 286 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
269 struct blkcipher_walk walk; 287 struct blkcipher_walk walk;
270 int err; 288 int err;
289 int ts_state;
271 290
272 padlock_reset_key(); 291 padlock_reset_key();
273 292
274 blkcipher_walk_init(&walk, dst, src, nbytes); 293 blkcipher_walk_init(&walk, dst, src, nbytes);
275 err = blkcipher_walk_virt(desc, &walk); 294 err = blkcipher_walk_virt(desc, &walk);
276 295
296 ts_state = irq_ts_save();
277 while ((nbytes = walk.nbytes)) { 297 while ((nbytes = walk.nbytes)) {
278 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 298 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
279 ctx->D, &ctx->cword.decrypt, 299 ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
281 nbytes &= AES_BLOCK_SIZE - 1; 301 nbytes &= AES_BLOCK_SIZE - 1;
282 err = blkcipher_walk_done(desc, &walk, nbytes); 302 err = blkcipher_walk_done(desc, &walk, nbytes);
283 } 303 }
284 304 irq_ts_restore(ts_state);
285 return err; 305 return err;
286} 306}
287 307
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
314 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 334 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
315 struct blkcipher_walk walk; 335 struct blkcipher_walk walk;
316 int err; 336 int err;
337 int ts_state;
317 338
318 padlock_reset_key(); 339 padlock_reset_key();
319 340
320 blkcipher_walk_init(&walk, dst, src, nbytes); 341 blkcipher_walk_init(&walk, dst, src, nbytes);
321 err = blkcipher_walk_virt(desc, &walk); 342 err = blkcipher_walk_virt(desc, &walk);
322 343
344 ts_state = irq_ts_save();
323 while ((nbytes = walk.nbytes)) { 345 while ((nbytes = walk.nbytes)) {
324 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, 346 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
325 walk.dst.virt.addr, ctx->E, 347 walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
329 nbytes &= AES_BLOCK_SIZE - 1; 351 nbytes &= AES_BLOCK_SIZE - 1;
330 err = blkcipher_walk_done(desc, &walk, nbytes); 352 err = blkcipher_walk_done(desc, &walk, nbytes);
331 } 353 }
354 irq_ts_restore(ts_state);
332 355
333 return err; 356 return err;
334} 357}
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
340 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 363 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
341 struct blkcipher_walk walk; 364 struct blkcipher_walk walk;
342 int err; 365 int err;
366 int ts_state;
343 367
344 padlock_reset_key(); 368 padlock_reset_key();
345 369
346 blkcipher_walk_init(&walk, dst, src, nbytes); 370 blkcipher_walk_init(&walk, dst, src, nbytes);
347 err = blkcipher_walk_virt(desc, &walk); 371 err = blkcipher_walk_virt(desc, &walk);
348 372
373 ts_state = irq_ts_save();
349 while ((nbytes = walk.nbytes)) { 374 while ((nbytes = walk.nbytes)) {
350 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, 375 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
351 ctx->D, walk.iv, &ctx->cword.decrypt, 376 ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
354 err = blkcipher_walk_done(desc, &walk, nbytes); 379 err = blkcipher_walk_done(desc, &walk, nbytes);
355 } 380 }
356 381
382 irq_ts_restore(ts_state);
357 return err; 383 return err;
358} 384}
359 385
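Note where the brackets land in padlock-aes.c: the single-block aes_encrypt/aes_decrypt paths wrap one xcrypt call, but the ecb/cbc routines take irq_ts_save() once around the whole blkcipher walk, so the CR0 bookkeeping is paid per request rather than per chunk. A minimal model of that shape, with every helper stubbed:

static int  irq_ts_save(void)      { return 0; }   /* stubs, as in the sketch above */
static void irq_ts_restore(int ts) { (void)ts; }

struct walk { int chunks_left; };

static int  walk_next(struct walk *w)   { return w->chunks_left-- > 0; }
static void crypt_chunk(struct walk *w) { (void)w; }

/* One TS bracket around the whole walk, mirroring ecb_aes_encrypt(). */
static int process_request(struct walk *w)
{
        int ts_state = irq_ts_save();

        while (walk_next(w))
                crypt_chunk(w);
        irq_ts_restore(ts_state);
        return 0;
}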
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680fa013..a7fbadebf623 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <asm/i387.h>
25#include "padlock.h" 26#include "padlock.h"
26 27
27#define SHA1_DEFAULT_FALLBACK "sha1-generic" 28#define SHA1_DEFAULT_FALLBACK "sha1-generic"
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
102 * PadLock microcode needs it that big. */ 103 * PadLock microcode needs it that big. */
103 char buf[128+16]; 104 char buf[128+16];
104 char *result = NEAREST_ALIGNED(buf); 105 char *result = NEAREST_ALIGNED(buf);
106 int ts_state;
105 107
106 ((uint32_t *)result)[0] = SHA1_H0; 108 ((uint32_t *)result)[0] = SHA1_H0;
107 ((uint32_t *)result)[1] = SHA1_H1; 109 ((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
109 ((uint32_t *)result)[3] = SHA1_H3; 111 ((uint32_t *)result)[3] = SHA1_H3;
110 ((uint32_t *)result)[4] = SHA1_H4; 112 ((uint32_t *)result)[4] = SHA1_H4;
111 113
114 /* prevent taking the spurious DNA fault with padlock. */
115 ts_state = irq_ts_save();
112 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
113 : "+S"(in), "+D"(result) 117 : "+S"(in), "+D"(result)
114 : "c"(count), "a"(0)); 118 : "c"(count), "a"(0));
119 irq_ts_restore(ts_state);
115 120
116 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 121 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
117} 122}
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
123 * PadLock microcode needs it that big. */ 128 * PadLock microcode needs it that big. */
124 char buf[128+16]; 129 char buf[128+16];
125 char *result = NEAREST_ALIGNED(buf); 130 char *result = NEAREST_ALIGNED(buf);
131 int ts_state;
126 132
127 ((uint32_t *)result)[0] = SHA256_H0; 133 ((uint32_t *)result)[0] = SHA256_H0;
128 ((uint32_t *)result)[1] = SHA256_H1; 134 ((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
133 ((uint32_t *)result)[6] = SHA256_H6; 139 ((uint32_t *)result)[6] = SHA256_H6;
134 ((uint32_t *)result)[7] = SHA256_H7; 140 ((uint32_t *)result)[7] = SHA256_H7;
135 141
142 /* prevent taking the spurious DNA fault with padlock. */
143 ts_state = irq_ts_save();
136 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 144 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
137 : "+S"(in), "+D"(result) 145 : "+S"(in), "+D"(result)
138 : "c"(count), "a"(0)); 146 : "c"(count), "a"(0));
147 irq_ts_restore(ts_state);
139 148
140 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 149 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
141} 150}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f42083..ee827a7f7c6a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@ struct talitos_private {
96 unsigned int exec_units; 96 unsigned int exec_units;
97 unsigned int desc_types; 97 unsigned int desc_types;
98 98
99 /* SEC Compatibility info */
100 unsigned long features;
101
99 /* next channel to be assigned next incoming descriptor */ 102 /* next channel to be assigned next incoming descriptor */
100 atomic_t last_chan; 103 atomic_t last_chan;
101 104
@@ -133,6 +136,9 @@ struct talitos_private {
133 struct hwrng rng; 136 struct hwrng rng;
134}; 137};
135 138
139/* .features flag */
140#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
141
136/* 142/*
137 * map virtual single (contiguous) pointer to h/w descriptor pointer 143 * map virtual single (contiguous) pointer to h/w descriptor pointer
138 */ 144 */
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
785 /* copy the generated ICV to dst */ 791 /* copy the generated ICV to dst */
786 if (edesc->dma_len) { 792 if (edesc->dma_len) {
787 icvdata = &edesc->link_tbl[edesc->src_nents + 793 icvdata = &edesc->link_tbl[edesc->src_nents +
788 edesc->dst_nents + 1]; 794 edesc->dst_nents + 2];
789 sg = sg_last(areq->dst, edesc->dst_nents); 795 sg = sg_last(areq->dst, edesc->dst_nents);
790 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, 796 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
791 icvdata, ctx->authsize); 797 icvdata, ctx->authsize);
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev,
814 /* auth check */ 820 /* auth check */
815 if (edesc->dma_len) 821 if (edesc->dma_len)
816 icvdata = &edesc->link_tbl[edesc->src_nents + 822 icvdata = &edesc->link_tbl[edesc->src_nents +
817 edesc->dst_nents + 1]; 823 edesc->dst_nents + 2];
818 else 824 else
819 icvdata = &edesc->link_tbl[0]; 825 icvdata = &edesc->link_tbl[0];
820 826
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
921 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 927 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
922 &edesc->link_tbl[0]); 928 &edesc->link_tbl[0]);
923 if (sg_count > 1) { 929 if (sg_count > 1) {
930 struct talitos_ptr *link_tbl_ptr =
931 &edesc->link_tbl[sg_count-1];
932 struct scatterlist *sg;
933 struct talitos_private *priv = dev_get_drvdata(dev);
934
924 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 935 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
925 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 936 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
926 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 937 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
927 edesc->dma_len, DMA_BIDIRECTIONAL); 938 edesc->dma_len, DMA_BIDIRECTIONAL);
939 /* If necessary for this SEC revision,
940 * add a link table entry for ICV.
941 */
942 if ((priv->features &
943 TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
944 (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
945 link_tbl_ptr->j_extent = 0;
946 link_tbl_ptr++;
947 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
948 link_tbl_ptr->len = cpu_to_be16(authsize);
949 sg = sg_last(areq->src, edesc->src_nents ? : 1);
950 link_tbl_ptr->ptr = cpu_to_be32(
951 (char *)sg_dma_address(sg)
952 + sg->length - authsize);
953 }
928 } else { 954 } else {
929 /* Only one segment now, so no link tbl needed */ 955 /* Only one segment now, so no link tbl needed */
930 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 956 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
944 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 970 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
945 } else { 971 } else {
946 struct talitos_ptr *link_tbl_ptr = 972 struct talitos_ptr *link_tbl_ptr =
947 &edesc->link_tbl[edesc->src_nents]; 973 &edesc->link_tbl[edesc->src_nents + 1];
948 struct scatterlist *sg;
949 974
950 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 975 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
951 edesc->dma_link_tbl + 976 edesc->dma_link_tbl +
952 edesc->src_nents); 977 edesc->src_nents + 1);
953 if (areq->src == areq->dst) { 978 if (areq->src == areq->dst) {
954 memcpy(link_tbl_ptr, &edesc->link_tbl[0], 979 memcpy(link_tbl_ptr, &edesc->link_tbl[0],
955 edesc->src_nents * sizeof(struct talitos_ptr)); 980 edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
957 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 982 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
958 link_tbl_ptr); 983 link_tbl_ptr);
959 } 984 }
985 /* Add an entry to the link table for ICV data */
960 link_tbl_ptr += sg_count - 1; 986 link_tbl_ptr += sg_count - 1;
961
962 /* handle case where sg_last contains the ICV exclusively */
963 sg = sg_last(areq->dst, edesc->dst_nents);
964 if (sg->length == ctx->authsize)
965 link_tbl_ptr--;
966
967 link_tbl_ptr->j_extent = 0; 987 link_tbl_ptr->j_extent = 0;
988 sg_count++;
968 link_tbl_ptr++; 989 link_tbl_ptr++;
969 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; 990 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
970 link_tbl_ptr->len = cpu_to_be16(authsize); 991 link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
973 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 994 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
974 edesc->dma_link_tbl + 995 edesc->dma_link_tbl +
975 edesc->src_nents + 996 edesc->src_nents +
976 edesc->dst_nents + 1); 997 edesc->dst_nents + 2);
977 998
978 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 999 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
979 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1000 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1040 1061
1041 /* 1062 /*
1042 * allocate space for base edesc plus the link tables, 1063 * allocate space for base edesc plus the link tables,
1043 * allowing for a separate entry for the generated ICV (+ 1), 1064 * allowing for two separate entries for ICV and generated ICV (+ 2),
1044 * and the ICV data itself 1065 * and the ICV data itself
1045 */ 1066 */
1046 alloc_len = sizeof(struct ipsec_esp_edesc); 1067 alloc_len = sizeof(struct ipsec_esp_edesc);
1047 if (src_nents || dst_nents) { 1068 if (src_nents || dst_nents) {
1048 dma_len = (src_nents + dst_nents + 1) * 1069 dma_len = (src_nents + dst_nents + 2) *
1049 sizeof(struct talitos_ptr) + ctx->authsize; 1070 sizeof(struct talitos_ptr) + ctx->authsize;
1050 alloc_len += dma_len; 1071 alloc_len += dma_len;
1051 } else { 1072 } else {
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req)
1104 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1125 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1105 if (edesc->dma_len) 1126 if (edesc->dma_len)
1106 icvdata = &edesc->link_tbl[edesc->src_nents + 1127 icvdata = &edesc->link_tbl[edesc->src_nents +
1107 edesc->dst_nents + 1]; 1128 edesc->dst_nents + 2];
1108 else 1129 else
1109 icvdata = &edesc->link_tbl[0]; 1130 icvdata = &edesc->link_tbl[0];
1110 1131
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev,
1480 goto err_out; 1501 goto err_out;
1481 } 1502 }
1482 1503
1504 if (of_device_is_compatible(np, "fsl,sec3.0"))
1505 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
1506
1483 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1507 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1484 GFP_KERNEL); 1508 GFP_KERNEL);
1485 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1509 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
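The talitos changes reserve two extra link-table entries per request (src_nents + dst_nents + 2) so the ICV always owns a full entry, and on "fsl,sec3.0" parts, whose source link-table length includes the extent, decrypt additionally appends an explicit ICV entry to the source table. Appending such an entry looks roughly like this (simplified talitos_ptr; the real struct also carries an extended-address byte and big-endian fields):

#include <stdint.h>

struct talitos_ptr {
        uint16_t len;
        uint8_t  j_extent;
        uint32_t ptr;       /* bus address */
};

#define DESC_PTR_LNKTBL_RETURN 0x02

/* Turn the current final entry into a non-terminating one and add a
 * terminating entry pointing at the last authsize bytes of payload. */
static void link_tbl_append_icv(struct talitos_ptr *last,
                                uint32_t payload_end_bus, uint16_t authsize)
{
        last->j_extent = 0;
        last[1].j_extent = DESC_PTR_LNKTBL_RETURN;
        last[1].len = authsize;             /* kernel: cpu_to_be16(authsize) */
        last[1].ptr = payload_end_bus - authsize;
}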
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 18355ae2155d..4655b794ebe3 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1593,7 +1593,7 @@ fail1:
1593 if (machine_is_omap_h2()) { 1593 if (machine_is_omap_h2()) {
1594 /* full speed signaling by default */ 1594 /* full speed signaling by default */
1595 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, 1595 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
1596 MC1_SPEED_REG); 1596 MC1_SPEED);
1597 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, 1597 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
1598 MC2_SPD_SUSP_CTRL); 1598 MC2_SPD_SUSP_CTRL);
1599 1599
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 66bafe308b0c..692a79ec2a22 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,10 +1,11 @@
1#ifndef _I8042_SPARCIO_H 1#ifndef _I8042_SPARCIO_H
2#define _I8042_SPARCIO_H 2#define _I8042_SPARCIO_H
3 3
4#include <linux/of_device.h>
5
4#include <asm/io.h> 6#include <asm/io.h>
5#include <asm/oplib.h> 7#include <asm/oplib.h>
6#include <asm/prom.h> 8#include <asm/prom.h>
7#include <asm/of_device.h>
8 9
9static int i8042_kbd_irq = -1; 10static int i8042_kbd_irq = -1;
10static int i8042_aux_irq = -1; 11static int i8042_aux_irq = -1;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b205f2..b468f904c7f8 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
40#define DP(__mask, __fmt, __args...) do { \ 40#define DP(__mask, __fmt, __args...) do { \
41 if (bp->msglevel & (__mask)) \ 41 if (bp->msglevel & (__mask)) \
42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
43 bp->dev?(bp->dev->name):"?", ##__args); \ 43 bp->dev ? (bp->dev->name) : "?", ##__args); \
44 } while (0) 44 } while (0)
45 45
46/* errors debug print */ 46/* errors debug print */
47#define BNX2X_DBG_ERR(__fmt, __args...) do { \ 47#define BNX2X_DBG_ERR(__fmt, __args...) do { \
48 if (bp->msglevel & NETIF_MSG_PROBE) \ 48 if (bp->msglevel & NETIF_MSG_PROBE) \
49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
50 bp->dev?(bp->dev->name):"?", ##__args); \ 50 bp->dev ? (bp->dev->name) : "?", ##__args); \
51 } while (0) 51 } while (0)
52 52
53/* for errors (never masked) */ 53/* for errors (never masked) */
54#define BNX2X_ERR(__fmt, __args...) do { \ 54#define BNX2X_ERR(__fmt, __args...) do { \
55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
56 bp->dev?(bp->dev->name):"?", ##__args); \ 56 bp->dev ? (bp->dev->name) : "?", ##__args); \
57 } while (0) 57 } while (0)
58 58
59/* before we have a dev->name use dev_info() */ 59/* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) 120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) 121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
122 122
123#define NIG_WR(reg, val) REG_WR(bp, reg, val) 123#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
124#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) 124#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
125#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
126
127
128#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
129
130#define for_each_nondefault_queue(bp, var) \
131 for (var = 1; var < bp->num_queues; var++)
132#define is_multi(bp) (bp->num_queues > 1)
133 125
134 126
135/* fast path */ 127/* fast path */
@@ -163,7 +155,7 @@ struct sw_rx_page {
163#define NUM_RX_SGE_PAGES 2 155#define NUM_RX_SGE_PAGES 2
164#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 156#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
165#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 157#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
166/* RX_SGE_CNT is promissed to be a power of 2 */ 158/* RX_SGE_CNT is promised to be a power of 2 */
167#define RX_SGE_MASK (RX_SGE_CNT - 1) 159#define RX_SGE_MASK (RX_SGE_CNT - 1)
168#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 160#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
169#define MAX_RX_SGE (NUM_RX_SGE - 1) 161#define MAX_RX_SGE (NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
258 250
259 unsigned long tx_pkt, 251 unsigned long tx_pkt,
260 rx_pkt, 252 rx_pkt,
261 rx_calls, 253 rx_calls;
262 rx_alloc_failed;
263 /* TPA related */ 254 /* TPA related */
264 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; 255 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
265 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; 256 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
275 266
276#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 267#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
277 268
269#define BNX2X_HAS_TX_WORK(fp) \
270 ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
271 (fp->tx_pkt_prod != fp->tx_pkt_cons))
272
273#define BNX2X_HAS_RX_WORK(fp) \
274 (fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
275
276#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
277
278 278
279/* MC hsi */ 279/* MC hsi */
280#define MAX_FETCH_BD 13 /* HW max BDs per packet */ 280#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
317#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 317#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
318 318
319 319
320/* This is needed for determening of last_max */ 320/* This is needed for determining of last_max */
321#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 321#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
322 322
323#define __SGE_MASK_SET_BIT(el, bit) \ 323#define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
386#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ 386#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
387 (TPA_TYPE_START | TPA_TYPE_END)) 387 (TPA_TYPE_START | TPA_TYPE_END))
388 388
389#define BNX2X_RX_SUM_OK(cqe) \ 389#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
390 (!(cqe->fast_path_cqe.status_flags & \ 390
391 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ 391#define BNX2X_IP_CSUM_ERR(cqe) \
392 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) 392 (!((cqe)->fast_path_cqe.status_flags & \
393 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
394 ((cqe)->fast_path_cqe.type_error_flags & \
395 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
396
397#define BNX2X_L4_CSUM_ERR(cqe) \
398 (!((cqe)->fast_path_cqe.status_flags & \
399 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
400 ((cqe)->fast_path_cqe.type_error_flags & \
401 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
402
403#define BNX2X_RX_CSUM_OK(cqe) \
404 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
393 405
394#define BNX2X_RX_SUM_FIX(cqe) \ 406#define BNX2X_RX_SUM_FIX(cqe) \
395 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ 407 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
396 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ 408 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
397 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) 409 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
398 410
399#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
400 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
401 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
402
403 411
404#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) 412#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
405#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) 413#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
647 655
648 u32 brb_drop_hi; 656 u32 brb_drop_hi;
649 u32 brb_drop_lo; 657 u32 brb_drop_lo;
658 u32 brb_truncate_hi;
659 u32 brb_truncate_lo;
650 660
651 u32 jabber_packets_received; 661 u32 jabber_packets_received;
652 662
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
663 u32 mac_discard; 673 u32 mac_discard;
664 674
665 u32 driver_xoff; 675 u32 driver_xoff;
676 u32 rx_err_discard_pkt;
677 u32 rx_skb_alloc_failed;
678 u32 hw_csum_err;
666}; 679};
667 680
668#define STATS_OFFSET32(stat_name) \ 681#define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@ struct bnx2x {
753 u16 def_att_idx; 766 u16 def_att_idx;
754 u32 attn_state; 767 u32 attn_state;
755 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; 768 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
756 u32 aeu_mask;
757 u32 nig_mask; 769 u32 nig_mask;
758 770
759 /* slow path ring */ 771 /* slow path ring */
@@ -772,7 +784,7 @@ struct bnx2x {
772 u8 stats_pending; 784 u8 stats_pending;
773 u8 set_mac_pending; 785 u8 set_mac_pending;
774 786
775 /* End of fileds used in the performance code paths */ 787 /* End of fields used in the performance code paths */
776 788
777 int panic; 789 int panic;
778 int msglevel; 790 int msglevel;
@@ -794,9 +806,6 @@ struct bnx2x {
794#define BP_FUNC(bp) (bp->func) 806#define BP_FUNC(bp) (bp->func)
795#define BP_E1HVN(bp) (bp->func >> 1) 807#define BP_E1HVN(bp) (bp->func >> 1)
796#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 808#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
797/* assorted E1HVN */
798#define IS_E1HMF(bp) (bp->e1hmf != 0)
799#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
800 809
801 int pm_cap; 810 int pm_cap;
802 int pcie_cap; 811 int pcie_cap;
@@ -821,6 +830,7 @@ struct bnx2x {
821 u32 mf_config; 830 u32 mf_config;
822 u16 e1hov; 831 u16 e1hov;
823 u8 e1hmf; 832 u8 e1hmf;
833#define IS_E1HMF(bp) (bp->e1hmf != 0)
824 834
825 u8 wol; 835 u8 wol;
826 836
@@ -836,7 +846,6 @@ struct bnx2x {
836 u16 rx_ticks_int; 846 u16 rx_ticks_int;
837 u16 rx_ticks; 847 u16 rx_ticks;
838 848
839 u32 stats_ticks;
840 u32 lin_cnt; 849 u32 lin_cnt;
841 850
842 int state; 851 int state;
@@ -852,6 +861,7 @@ struct bnx2x {
852#define BNX2X_STATE_ERROR 0xf000 861#define BNX2X_STATE_ERROR 0xf000
853 862
854 int num_queues; 863 int num_queues;
864#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
855 865
856 u32 rx_mode; 866 u32 rx_mode;
857#define BNX2X_RX_MODE_NONE 0 867#define BNX2X_RX_MODE_NONE 0
@@ -902,10 +912,17 @@ struct bnx2x {
902}; 912};
903 913
904 914
915#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
916
917#define for_each_nondefault_queue(bp, var) \
918 for (var = 1; var < bp->num_queues; var++)
919#define is_multi(bp) (bp->num_queues > 1)
920
921
905void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 922void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
906void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 923void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
907 u32 len32); 924 u32 len32);
908int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode); 925int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
909 926
910static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 927static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
911 int wait) 928 int wait)
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
976#define PCICFG_LINK_SPEED_SHIFT 16 993#define PCICFG_LINK_SPEED_SHIFT 16
977 994
978 995
979#define BNX2X_NUM_STATS 39 996#define BNX2X_NUM_STATS 42
980#define BNX2X_NUM_TESTS 8 997#define BNX2X_NUM_TESTS 8
981 998
982#define BNX2X_MAC_LOOPBACK 0 999#define BNX2X_MAC_LOOPBACK 0
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1007/* resolution of the rate shaping timer - 100 usec */ 1024/* resolution of the rate shaping timer - 100 usec */
1008#define RS_PERIODIC_TIMEOUT_USEC 100 1025#define RS_PERIODIC_TIMEOUT_USEC 100
1009/* resolution of fairness algorithm in usecs - 1026/* resolution of fairness algorithm in usecs -
1010 coefficient for clauclating the actuall t fair */ 1027 coefficient for calculating the actual t fair */
1011#define T_FAIR_COEF 10000000 1028#define T_FAIR_COEF 10000000
1012/* number of bytes in single QM arbitration cycle - 1029/* number of bytes in single QM arbitration cycle -
1013 coeffiecnt for calculating the fairness timer */ 1030 coefficient for calculating the fairness timer */
1014#define QM_ARB_BYTES 40000 1031#define QM_ARB_BYTES 40000
1015#define FAIR_MEM 2 1032#define FAIR_MEM 2
1016 1033
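The old ETH_RX_ERROR_FALGS lumped PHY decode errors together with both checksum errors; the new BNX2X_IP_CSUM_ERR/BNX2X_L4_CSUM_ERR macros only report a bad checksum when the hardware actually validated that layer, and BNX2X_RX_CSUM_OK requires both to be clean. A hedged sketch of the consuming side; the real call site lives in bnx2x_main.c's rx path, not in this header, so treat the surrounding names as assumptions:

/* Only trust the hardware checksum when neither layer reports a
 * validated-and-bad result; otherwise count it via the new
 * hw_csum_err statistic and leave the skb for software checksumming. */
if (bp->rx_csum) {
        if (likely(BNX2X_RX_CSUM_OK(cqe)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                bp->eth_stats.hw_csum_err++;
}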
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f69d27b..192fa981b930 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
9 9
10 10
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
12 (IS_E1H_OFFSET? 0x7000 : 0x1000) 12 (IS_E1H_OFFSET ? 0x7000 : 0x1000)
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_OFFSET(idx) \
14 (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
16 (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ 16 (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
17 * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ 17 ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
18 * 0x4))) 18 0x40) + (index * 0x4)))
19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
20 (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ 20 (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
21 * 0x100)) : (0x1900 + (function * 0x40))) 21 ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
23 (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ 23 (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
24 * 0x100)) : (0x1908 + (function * 0x40))) 24 ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
25#define CSTORM_FUNCTION_MODE_OFFSET \ 25#define CSTORM_FUNCTION_MODE_OFFSET \
26 (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) 26 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
27#define CSTORM_HC_BTR_OFFSET(port) \ 27#define CSTORM_HC_BTR_OFFSET(port) \
28 (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) 28 (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ 29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
30 (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ 30 (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ 31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
32 (index * 0x4))) 32 (index * 0x4)))
33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ 33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
34 (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ 34 (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ 35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
36 (index * 0x4))) 36 (index * 0x4)))
37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ 37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
38 (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ 38 (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
39 (0x1400 + (port * 0x280) + (cpu_id * 0x28))) 39 (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ 40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
41 (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ 41 (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
42 (0x1408 + (port * 0x280) + (cpu_id * 0x28))) 42 (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
43#define CSTORM_STATS_FLAGS_OFFSET(function) \ 43#define CSTORM_STATS_FLAGS_OFFSET(function) \
44 (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ 44 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
45 (function * 0x8))) 45 (function * 0x8)))
46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ 46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
47 (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) 47 (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \ 48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
49 (IS_E1H_OFFSET? 0xa000 : 0x1000) 49 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
50#define TSTORM_ASSERT_LIST_OFFSET(idx) \ 50#define TSTORM_ASSERT_LIST_OFFSET(idx) \
51 (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 51 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ 52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
53 (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ 53 (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
54 (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) 54 : (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
56 (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ 56 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
57 * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ 57 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
58 0x4))) 58 0x28) + (index * 0x4)))
59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
60 (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ 60 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
61 * 0xa0)) : (0x1400 + (function * 0x28))) 61 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
63 (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ 63 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
64 * 0xa0)) : (0x1408 + (function * 0x28))) 64 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
66 (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ 66 (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
67 (function * 0x8))) 67 (function * 0x8)))
68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
69 (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ 69 (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
70 (function * 0x38))) 70 (function * 0x38)))
71#define TSTORM_FUNCTION_MODE_OFFSET \ 71#define TSTORM_FUNCTION_MODE_OFFSET \
72 (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) 72 (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
73#define TSTORM_HC_BTR_OFFSET(port) \ 73#define TSTORM_HC_BTR_OFFSET(port) \
74 (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 74 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
76 (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 76 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
77 (function * 0x80))) 77 (function * 0x80)))
78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
80 (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ 80 (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
81 (function * 0x38))) 81 (function * 0x38)))
82#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
83 (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
84 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
82#define TSTORM_RX_PRODS_OFFSET(port, client_id) \ 85#define TSTORM_RX_PRODS_OFFSET(port, client_id) \
83 (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ 86 (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
84 (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) 87 : (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
85#define TSTORM_STATS_FLAGS_OFFSET(function) \ 88#define TSTORM_STATS_FLAGS_OFFSET(function) \
86 (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ 89 (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
87 (function * 0x8))) 90 (function * 0x8)))
88#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) 91#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
89#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) 92#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
90#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) 93#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
91#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 94#define USTORM_ASSERT_LIST_INDEX_OFFSET \
92 (IS_E1H_OFFSET? 0x8000 : 0x1000) 95 (IS_E1H_OFFSET ? 0x8000 : 0x1000)
93#define USTORM_ASSERT_LIST_OFFSET(idx) \ 96#define USTORM_ASSERT_LIST_OFFSET(idx) \
94 (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 97 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
95#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 98#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
96 (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ 99 (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
97 (0x5450 + (port * 0x1c8) + (clientId * 0x18))) 100 (0x5450 + (port * 0x1c8) + (clientId * 0x18)))
98#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
99 (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ 102 (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
100 * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ 103 ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
101 0x4))) 104 0x28) + (index * 0x4)))
102#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
103 (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ 106 (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
104 * 0xa0)) : (0x1900 + (function * 0x28))) 107 ((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
105#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
106 (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ 109 (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
107 * 0xa0)) : (0x1908 + (function * 0x28))) 110 ((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
108#define USTORM_FUNCTION_MODE_OFFSET \ 111#define USTORM_FUNCTION_MODE_OFFSET \
109 (IS_E1H_OFFSET? 0x2448 : 0xffffffff) 112 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
110#define USTORM_HC_BTR_OFFSET(port) \ 113#define USTORM_HC_BTR_OFFSET(port) \
111 (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) 114 (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
112#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 115#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
113 (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ 116 (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
114 (0x5448 + (port * 0x1c8) + (clientId * 0x18))) 117 (0x5448 + (port * 0x1c8) + (clientId * 0x18)))
115#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 118#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
116 (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \ 119 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
117 (function * 0x8))) 120 (function * 0x8)))
 #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
 #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
 #define XSTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET? 0x9000 : 0x1000)
+	(IS_E1H_OFFSET ? 0x9000 : 0x1000)
 #define XSTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
-	(IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
+	(IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
 #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
-	0x4)))
+	(IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+	0x28) + (index * 0x4)))
 #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1400 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
 #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1408 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
 #define XSTORM_E1HOV_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff)
+	(IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
 #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \
+	(IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
 	(function * 0x8)))
 #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \
+	(IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
 	(function * 0x70)))
 #define XSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET? 0x2ac8 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
 #define XSTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+	(IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+	(IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
+	0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \
+	(IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
 	(function * 0x70)))
 #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \
+	(IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
 	(function * 0x10)))
 #define XSTORM_SPQ_PROD_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \
+	(IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
 	(function * 0x10)))
 #define XSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \
+	(IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
 	(function * 0x8)))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
 
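Minimal sketch (not part of the patch): each of the dual-base macros above selects between the 57711 (E1H) and 57710 (E1) firmware memory layouts at run time. In the sketch, is_e1h stands in for the driver's IS_E1H_OFFSET chip-revision test; the bases and strides are copied from USTORM_SB_HOST_SB_ADDR_OFFSET.

    #include <stdio.h>

    /* resolve a per-port, per-CPU status-block address for either layout */
    static unsigned int ustorm_sb_host_sb_addr(int is_e1h,
                                               unsigned int port,
                                               unsigned int cpu_id)
    {
            return is_e1h ? 0x9000 + (port * 0x280) + (cpu_id * 0x28)
                          : 0x1400 + (port * 0x280) + (cpu_id * 0x28);
    }

    int main(void)
    {
            /* port 1, CPU 2 on an E1H part: 0x9000 + 0x280 + 0x50 = 0x92d0 */
            printf("0x%x\n", ustorm_sb_host_sb_addr(1, 1, 2));
            return 0;
    }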
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198d7dba..efd764427fa1 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@ struct doorbell {
 
 
 /*
- * IGU driver acknowlegement register
+ * IGU driver acknowledgement register
  */
 struct igu_ack_register {
 #if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
 };
 
 /*
- * structure for easy accessability to assembler
+ * structure for easy accessibility to assembler
  */
 struct eth_tx_bd_flags {
 	u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {
 
 
 /*
- * ethernet doorbell
+ * Ethernet doorbell
  */
 struct eth_tx_doorbell {
 #if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
 };
 
 /*
- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
  */
 union eth_ramrod_data {
 	struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
 };
 
 /*
- * ethernet slow path element
+ * Ethernet slow path element
  */
 union eth_specific_data {
 	u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
 };
 
 /*
- * ethernet slow path element
+ * Ethernet slow path element
  */
 struct eth_spe {
 	struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
 
 
 /*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
  */
 struct tstorm_eth_tpa_exist {
 #if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
 };
 
 /*
- * Eth statistics query sturcture for the eth_stats_quesry ramrod
+ * Eth statistics query structure for the eth_stats_query ramrod
  */
 struct eth_stats_query {
 	struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c7750789b62..130927cfc75b 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
 
 
 struct raw_op {
-	u32 op :8;
-	u32 offset :24;
+	u32 op:8;
+	u32 offset:24;
 	u32 raw_data;
 };
 
 struct op_read {
-	u32 op :8;
-	u32 offset :24;
+	u32 op:8;
+	u32 offset:24;
 	u32 pad;
 };
 
 struct op_write {
-	u32 op :8;
-	u32 offset :24;
+	u32 op:8;
+	u32 offset:24;
 	u32 val;
 };
 
 struct op_string_write {
-	u32 op :8;
-	u32 offset :24;
+	u32 op:8;
+	u32 offset:24;
 #ifdef __LITTLE_ENDIAN
 	u16 data_off;
 	u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
 };
 
 struct op_zero {
-	u32 op :8;
-	u32 offset :24;
+	u32 op:8;
+	u32 offset:24;
 	u32 len;
 };
 
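Stand-alone sketch (not driver code) of the packing the op structs above share: an 8-bit opcode and a 24-bit GRC offset occupy a single 32-bit word, keeping every init-table entry two words long. Which byte the opcode lands in is illustrative here; the driver relies on the bitfield declaration order and endianness guards shown above.

    #include <stdint.h>
    #include <stdio.h>

    #define RAW_OP_SHIFT   24
    #define RAW_OFF_MASK   0x00ffffffu

    /* pack an opcode and a 24-bit offset into one 32-bit word */
    static uint32_t pack_raw_op(uint8_t op, uint32_t offset)
    {
            return ((uint32_t)op << RAW_OP_SHIFT) | (offset & RAW_OFF_MASK);
    }

    int main(void)
    {
            uint32_t w = pack_raw_op(0x05, 0x10800); /* hypothetical opcode */

            printf("op=0x%02x offset=0x%06x\n",
                   w >> RAW_OP_SHIFT, w & RAW_OFF_MASK);
            return 0;
    }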
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
 /*********************************************************
  There are different blobs for each PRAM section.
  In addition, each blob write operation is divided into a few operations
- in order to decrease the amount of phys. contigious buffer needed.
+ in order to decrease the amount of phys. contiguous buffer needed.
  Thus, when we select a blob the address may be with some offset
  from the beginning of PRAM section.
  The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
 		len = op->str_wr.data_len;
 		data = data_base + op->str_wr.data_off;
 
-		/* carefull! it must be in order */
+		/* careful! it must be in order */
 		if (unlikely(op_type > OP_WB)) {
 
 			/* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
 	return crc_res;
 }
 
-/* regiesers addresses are not in order
+/* registers addresses are not in order
    so these arrays help simplify the code */
 static const int cm_start[E1H_FUNC_MAX][9] = {
 	{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
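Hypothetical sketch of the array-based lookup the comment above describes: because the per-function register bases sit at irregular addresses, a small table indexed by function id replaces open-coded address arithmetic. The base values below are placeholders, not the real MISC/TCM start constants.

    #include <stdio.h>

    #define FUNC_MAX 8

    /* one base address per PCI function; placeholder values only */
    static const int misc_func_start[FUNC_MAX] = {
            0x1000, 0x1400, 0x2000, 0x2400,
            0x3000, 0x3400, 0x4000, 0x4400,
    };

    int main(void)
    {
            int func = 3;

            printf("MISC base for func %d: 0x%x\n", func, misc_func_start[func]);
            return 0;
    }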
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 63019055e4bb..9755bf6b08dd 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
 	{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
 	{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
 	{OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
 	{OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
-	{OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e},
+	{OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
 	{OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
 	{OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
 	{OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = {
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
 	{OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
-	{OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0},
+	{OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
 	{OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
-#define USEM_COMMON_END 790
-#define USEM_PORT0_START 790
+#define USEM_COMMON_END 787
+#define USEM_PORT0_START 787
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
-#define USEM_PORT0_END 838
-#define USEM_PORT1_START 838
+#define USEM_PORT0_END 818
+#define USEM_PORT1_START 818
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20},
+	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
-#define USEM_PORT1_END 886
-#define USEM_FUNC0_START 886
+#define USEM_PORT1_END 849
+#define USEM_FUNC0_START 849
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
-#define USEM_FUNC0_END 888
-#define USEM_FUNC1_START 888
+#define USEM_FUNC0_END 851
+#define USEM_FUNC1_START 851
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
-#define USEM_FUNC1_END 890
-#define USEM_FUNC2_START 890
+#define USEM_FUNC1_END 853
+#define USEM_FUNC2_START 853
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
-#define USEM_FUNC2_END 892
-#define USEM_FUNC3_START 892
+#define USEM_FUNC2_END 855
+#define USEM_FUNC3_START 855
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
-#define USEM_FUNC3_END 894
-#define USEM_FUNC4_START 894
+#define USEM_FUNC3_END 857
+#define USEM_FUNC4_START 857
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
-#define USEM_FUNC4_END 896
-#define USEM_FUNC5_START 896
+#define USEM_FUNC4_END 859
+#define USEM_FUNC5_START 859
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
-#define USEM_FUNC5_END 898
-#define USEM_FUNC6_START 898
+#define USEM_FUNC5_END 861
+#define USEM_FUNC6_START 861
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
-#define USEM_FUNC6_END 900
-#define USEM_FUNC7_START 900
+#define USEM_FUNC6_END 863
+#define USEM_FUNC7_START 863
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
-#define USEM_FUNC7_END 902
-#define CSEM_COMMON_START 902
+#define USEM_FUNC7_END 865
+#define CSEM_COMMON_START 865
 	{OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
 	{OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
 	{OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
 	{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
 	{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
 	{OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
 	{OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
-	{OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca},
+	{OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
 	{OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
 	{OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
 	{OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
 	{OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
 	{OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
-	{OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc},
+	{OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
 	{OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
-#define CSEM_COMMON_END 981
-#define CSEM_PORT0_START 981
+#define CSEM_COMMON_END 944
+#define CSEM_PORT0_START 944
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
-#define CSEM_PORT0_END 993
-#define CSEM_PORT1_START 993
+#define CSEM_PORT0_END 956
+#define CSEM_PORT1_START 956
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
-#define CSEM_PORT1_END 1005
-#define CSEM_FUNC0_START 1005
+#define CSEM_PORT1_END 968
+#define CSEM_FUNC0_START 968
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
-#define CSEM_FUNC0_END 1007
-#define CSEM_FUNC1_START 1007
+#define CSEM_FUNC0_END 970
+#define CSEM_FUNC1_START 970
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
-#define CSEM_FUNC1_END 1009
-#define CSEM_FUNC2_START 1009
+#define CSEM_FUNC1_END 972
+#define CSEM_FUNC2_START 972
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
-#define CSEM_FUNC2_END 1011
-#define CSEM_FUNC3_START 1011
+#define CSEM_FUNC2_END 974
+#define CSEM_FUNC3_START 974
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
-#define CSEM_FUNC3_END 1013
-#define CSEM_FUNC4_START 1013
+#define CSEM_FUNC3_END 976
+#define CSEM_FUNC4_START 976
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
-#define CSEM_FUNC4_END 1015
-#define CSEM_FUNC5_START 1015
+#define CSEM_FUNC4_END 978
+#define CSEM_FUNC5_START 978
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
-#define CSEM_FUNC5_END 1017
-#define CSEM_FUNC6_START 1017
+#define CSEM_FUNC5_END 980
+#define CSEM_FUNC6_START 980
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
-#define CSEM_FUNC6_END 1019
-#define CSEM_FUNC7_START 1019
+#define CSEM_FUNC6_END 982
+#define CSEM_FUNC7_START 982
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
-#define CSEM_FUNC7_END 1021
-#define XPB_COMMON_START 1021
+#define CSEM_FUNC7_END 984
+#define XPB_COMMON_START 984
 	{OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
-#define XPB_COMMON_END 1022
-#define DQ_COMMON_START 1022
+#define XPB_COMMON_END 985
+#define DQ_COMMON_START 985
 	{OP_WR, DORQ_REG_MODE_ACT, 0x2},
 	{OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
 	{OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
 	{OP_WR, DORQ_REG_REGN, 0x7c1004},
 	{OP_WR, DORQ_REG_IF_EN, 0xf},
-#define DQ_COMMON_END 1040
-#define TIMERS_COMMON_START 1040
+#define DQ_COMMON_END 1003
+#define TIMERS_COMMON_START 1003
 	{OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
 	{OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
 	{OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = {
 	{OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
 	{OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
 	{OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
-#define TIMERS_COMMON_END 1062
-#define TIMERS_PORT0_START 1062
+#define TIMERS_COMMON_END 1025
+#define TIMERS_PORT0_START 1025
 	{OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
-#define TIMERS_PORT0_END 1063
-#define TIMERS_PORT1_START 1063
+#define TIMERS_PORT0_END 1026
+#define TIMERS_PORT1_START 1026
 	{OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
-#define TIMERS_PORT1_END 1064
-#define XSDM_COMMON_START 1064
+#define TIMERS_PORT1_END 1027
+#define XSDM_COMMON_START 1027
 	{OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
 	{OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
 	{OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
 	{OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
 	{OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
-#define XSDM_COMMON_END 1111
-#define QM_COMMON_START 1111
+#define XSDM_COMMON_END 1074
+#define QM_COMMON_START 1074
 	{OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
 	{OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
 	{OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
 	{OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
 	{OP_WR, QM_REG_CMINTEN, 0xff},
-#define QM_COMMON_END 1411
-#define PBF_COMMON_START 1411
+#define QM_COMMON_END 1374
+#define PBF_COMMON_START 1374
 	{OP_WR, PBF_REG_INIT, 0x1},
 	{OP_WR, PBF_REG_INIT_P4, 0x1},
 	{OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = {
 	{OP_WR, PBF_REG_INIT_P4, 0x0},
 	{OP_WR, PBF_REG_INIT, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
-#define PBF_COMMON_END 1418
-#define PBF_PORT0_START 1418
+#define PBF_COMMON_END 1381
+#define PBF_PORT0_START 1381
 	{OP_WR, PBF_REG_INIT_P0, 0x1},
 	{OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
 	{OP_WR, PBF_REG_INIT_P0, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
-#define PBF_PORT0_END 1422
-#define PBF_PORT1_START 1422
+#define PBF_PORT0_END 1385
+#define PBF_PORT1_START 1385
 	{OP_WR, PBF_REG_INIT_P1, 0x1},
 	{OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
 	{OP_WR, PBF_REG_INIT_P1, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
-#define PBF_PORT1_END 1426
-#define XCM_COMMON_START 1426
+#define PBF_PORT1_END 1389
+#define XCM_COMMON_START 1389
 	{OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
 	{OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
 	{OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
 	{OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
 	{OP_ZR, XCM_REG_XX_TABLE, 0x12},
-	{OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce},
+	{OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
 	{OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
 	{OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
 	{OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
 	{OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
 	{OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
-#define XCM_COMMON_END 1490
-#define XCM_PORT0_START 1490
+#define XCM_COMMON_END 1453
+#define XCM_PORT0_START 1453
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
-#define XCM_PORT0_END 1498
-#define XCM_PORT1_START 1498
+#define XCM_PORT0_END 1461
+#define XCM_PORT1_START 1461
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
-#define XCM_PORT1_END 1506
-#define XCM_FUNC0_START 1506
+#define XCM_PORT1_END 1469
+#define XCM_FUNC0_START 1469
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC0_END 1515
-#define XCM_FUNC1_START 1515
+#define XCM_FUNC0_END 1478
+#define XCM_FUNC1_START 1478
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC1_END 1524
-#define XCM_FUNC2_START 1524
+#define XCM_FUNC1_END 1487
+#define XCM_FUNC2_START 1487
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC2_END 1533
-#define XCM_FUNC3_START 1533
+#define XCM_FUNC2_END 1496
+#define XCM_FUNC3_START 1496
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC3_END 1542
-#define XCM_FUNC4_START 1542
+#define XCM_FUNC3_END 1505
+#define XCM_FUNC4_START 1505
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC4_END 1551
-#define XCM_FUNC5_START 1551
+#define XCM_FUNC4_END 1514
+#define XCM_FUNC5_START 1514
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC5_END 1560
-#define XCM_FUNC6_START 1560
+#define XCM_FUNC5_END 1523
+#define XCM_FUNC6_START 1523
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC6_END 1569
-#define XCM_FUNC7_START 1569
+#define XCM_FUNC6_END 1532
+#define XCM_FUNC7_START 1532
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = {
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC7_END 1578
-#define XSEM_COMMON_START 1578
+#define XCM_FUNC7_END 1541
+#define XSEM_COMMON_START 1541
 	{OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
 	{OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
 	{OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = {
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
 	{OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
 	{OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
-	{OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317},
+	{OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
 	{OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
 	{OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
 	{OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = {
 	{OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
 	{OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
 	{OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
-	{OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319},
+	{OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
 	{OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
-#define XSEM_COMMON_END 1688
-#define XSEM_PORT0_START 1688
+#define XSEM_COMMON_END 1651
+#define XSEM_PORT0_START 1651
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = {
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
-#define XSEM_PORT0_END 1720
-#define XSEM_PORT1_START 1720
+#define XSEM_PORT0_END 1683
+#define XSEM_PORT1_START 1683
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = {
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = {
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
-#define XSEM_PORT1_END 1752
-#define XSEM_FUNC0_START 1752
+#define XSEM_PORT1_END 1715
+#define XSEM_FUNC0_START 1715
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
-#define XSEM_FUNC0_END 1755
-#define XSEM_FUNC1_START 1755
+#define XSEM_FUNC0_END 1718
+#define XSEM_FUNC1_START 1718
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
-#define XSEM_FUNC1_END 1758
-#define XSEM_FUNC2_START 1758
+#define XSEM_FUNC1_END 1721
+#define XSEM_FUNC2_START 1721
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
-#define XSEM_FUNC2_END 1761
-#define XSEM_FUNC3_START 1761
+#define XSEM_FUNC2_END 1724
+#define XSEM_FUNC3_START 1724
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
-#define XSEM_FUNC3_END 1764
-#define XSEM_FUNC4_START 1764
+#define XSEM_FUNC3_END 1727
+#define XSEM_FUNC4_START 1727
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
-#define XSEM_FUNC4_END 1767
-#define XSEM_FUNC5_START 1767
+#define XSEM_FUNC4_END 1730
+#define XSEM_FUNC5_START 1730
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
-#define XSEM_FUNC5_END 1770
-#define XSEM_FUNC6_START 1770
+#define XSEM_FUNC5_END 1733
+#define XSEM_FUNC6_START 1733
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
-#define XSEM_FUNC6_END 1773
-#define XSEM_FUNC7_START 1773
+#define XSEM_FUNC6_END 1736
+#define XSEM_FUNC7_START 1736
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
-#define XSEM_FUNC7_END 1776
-#define CDU_COMMON_START 1776
+#define XSEM_FUNC7_END 1739
+#define CDU_COMMON_START 1739
 	{OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
 	{OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
 	{OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
 	{OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
-	{OP_WB_E1, CDU_REG_L1TT, 0x200033f},
+	{OP_WB_E1, CDU_REG_L1TT, 0x200033d},
 	{OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
-	{OP_WB_E1, CDU_REG_MATT, 0x20053f},
+	{OP_WB_E1, CDU_REG_MATT, 0x20053d},
 	{OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
 	{OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
-	{OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f},
+	{OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
 	{OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
-#define CDU_COMMON_END 1787
-#define DMAE_COMMON_START 1787
+#define CDU_COMMON_END 1750
+#define DMAE_COMMON_START 1750
 	{OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
 	{OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
 	{OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = {
2050 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, 2013 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
2051 {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, 2014 {OP_WR, DMAE_REG_PCI_IFEN, 0x1},
2052 {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, 2015 {OP_WR, DMAE_REG_GRC_IFEN, 0x1},
2053#define DMAE_COMMON_END 1794 2016#define DMAE_COMMON_END 1757
2054#define PXP_COMMON_START 1794 2017#define PXP_COMMON_START 1757
2055 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565}, 2018 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
2056 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, 2019 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
2057 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a}, 2020 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
2058 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, 2021 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
2059 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f}, 2022 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
2060#define PXP_COMMON_END 1799 2023#define PXP_COMMON_END 1762
2061#define CFC_COMMON_START 1799 2024#define CFC_COMMON_START 1762
2062 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, 2025 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
2063 {OP_WR, CFC_REG_CONTROL0, 0x10}, 2026 {OP_WR, CFC_REG_CONTROL0, 0x10},
2064 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, 2027 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
2065 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, 2028 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
2066#define CFC_COMMON_END 1803 2029#define CFC_COMMON_END 1766
2067#define HC_COMMON_START 1803 2030#define HC_COMMON_START 1766
2068 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, 2031 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
2069#define HC_COMMON_END 1804 2032#define HC_COMMON_END 1767
2070#define HC_PORT0_START 1804 2033#define HC_PORT0_START 1767
2071 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, 2034 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
2072 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, 2035 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
2073 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, 2036 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = {
2086 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2049 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2087 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2050 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2088 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2051 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2089#define HC_PORT0_END 1822 2052#define HC_PORT0_END 1785
2090#define HC_PORT1_START 1822 2053#define HC_PORT1_START 1785
2091 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, 2054 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
2092 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, 2055 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
2093 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, 2056 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = {
2106 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2069 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2107 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2070 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2108 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2071 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2109#define HC_PORT1_END 1840 2072#define HC_PORT1_END 1803
2110#define HC_FUNC0_START 1840 2073#define HC_FUNC0_START 1803
2111 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2074 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2112 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, 2075 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
2113 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2076 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = {
2123 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2086 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2124 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2087 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2125 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2088 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2126#define HC_FUNC0_END 1855 2089#define HC_FUNC0_END 1818
2127#define HC_FUNC1_START 1855 2090#define HC_FUNC1_START 1818
2128 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2091 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2129 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, 2092 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
2130 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2093 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = {
2140 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2103 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2141 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2104 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2142 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2105 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2143#define HC_FUNC1_END 1870 2106#define HC_FUNC1_END 1833
2144#define HC_FUNC2_START 1870 2107#define HC_FUNC2_START 1833
2145 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2108 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2146 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, 2109 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
2147 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2110 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = {
2157 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2120 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2158 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2121 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2159 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2122 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2160#define HC_FUNC2_END 1885 2123#define HC_FUNC2_END 1848
2161#define HC_FUNC3_START 1885 2124#define HC_FUNC3_START 1848
2162 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2125 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2163 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, 2126 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
2164 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2127 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = {
2174 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2137 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2175 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2138 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2176 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2139 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2177#define HC_FUNC3_END 1900 2140#define HC_FUNC3_END 1863
2178#define HC_FUNC4_START 1900 2141#define HC_FUNC4_START 1863
2179 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2142 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2180 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, 2143 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
2181 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2144 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = {
2191 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2154 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2192 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2155 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2193 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2156 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2194#define HC_FUNC4_END 1915 2157#define HC_FUNC4_END 1878
2195#define HC_FUNC5_START 1915 2158#define HC_FUNC5_START 1878
2196 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2159 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2197 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, 2160 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
2198 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2161 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = {
2208 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2171 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2209 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2172 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2210 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2173 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2211#define HC_FUNC5_END 1930 2174#define HC_FUNC5_END 1893
2212#define HC_FUNC6_START 1930 2175#define HC_FUNC6_START 1893
2213 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2176 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2214 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, 2177 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
2215 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2178 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = {
2225 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2188 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2226 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2189 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2227 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2190 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2228#define HC_FUNC6_END 1945 2191#define HC_FUNC6_END 1908
2229#define HC_FUNC7_START 1945 2192#define HC_FUNC7_START 1908
2230 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2193 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2231 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, 2194 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
2232 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2195 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = {
2242 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2205 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2243 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2206 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2244 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2207 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2245#define HC_FUNC7_END 1960 2208#define HC_FUNC7_END 1923
2246#define PXP2_COMMON_START 1960 2209#define PXP2_COMMON_START 1923
2247 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2210 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
2248 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, 2211 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
2249 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, 2212 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = {
2361 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, 2324 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
2362 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, 2325 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
2363 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2326 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
2364#define PXP2_COMMON_END 2077 2327#define PXP2_COMMON_END 2040
2365#define MISC_AEU_COMMON_START 2077 2328#define MISC_AEU_COMMON_START 2040
2366 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, 2329 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
2367 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, 2330 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
2368 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, 2331 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = {
2382 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, 2345 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
2383 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, 2346 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
2384 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, 2347 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
2385#define MISC_AEU_COMMON_END 2096 2348#define MISC_AEU_COMMON_END 2059
2386#define MISC_AEU_PORT0_START 2096 2349#define MISC_AEU_PORT0_START 2059
2387 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, 2350 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
2388 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, 2351 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
2389 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, 2352 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = {
2416 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, 2379 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
2417 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, 2380 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
2418 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, 2381 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
2419#define MISC_AEU_PORT0_END 2128 2382#define MISC_AEU_PORT0_END 2091
2420#define MISC_AEU_PORT1_START 2128 2383#define MISC_AEU_PORT1_START 2091
2421 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, 2384 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
2422 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, 2385 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
2423 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, 2386 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = {
2450 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, 2413 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
2451 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, 2414 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
2452 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, 2415 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
2453#define MISC_AEU_PORT1_END 2160 2416#define MISC_AEU_PORT1_END 2123
2454 2417
2455}; 2418};
2456 2419
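
The third field of each init_ops entry above appears to pack a word count in the high bits and an index into the init_data_* arrays in the low 16 bits; that would explain why every OP_SW/OP_WB value in this hunk drops by exactly 2 (0x10032d to 0x10032b, 0x200033f to 0x200033d, and so on) once two words are removed from init_data_e1 below, while the *_START/*_END defines shift by the 37 ops removed elsewhere in the table. A minimal decode sketch under that assumption (decode_sw_op is hypothetical, not a driver helper):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical decoder for the packed third field of an OP_SW/OP_WB
 * entry, assuming high bits = word count, low 16 bits = index into
 * the matching init_data_* array. */
static void decode_sw_op(uint32_t raw)
{
	unsigned int len = raw >> 16;     /* number of u32 words */
	unsigned int off = raw & 0xffff;  /* start index in init_data */

	printf("write %u words from init_data[0x%x]\n", len, off);
}

int main(void)
{
	decode_sw_op(0x10032b);  /* 16 words at 0x32b (was 0x32d) */
	decode_sw_op(0x20003e1); /* 512-word CDU L1TT block */
	return 0;
}
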
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = {
2560 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, 2523 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
2561 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, 2524 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
2562 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, 2525 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
2563 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000, 2526 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
2564 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2565 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2527 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2566 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2528 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2567 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2529 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2568 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2530 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2569 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2570 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2571 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
2572 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2531 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2532 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
2533 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2534 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2573 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2535 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2574 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2536 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2575 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2537 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2576 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2538 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2577 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080, 2539 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2578 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380, 2540 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
2579 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 2541 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
2580 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980, 2542 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
2581 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 2543 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
2582 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 2544 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
2583 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 2545 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
2584 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 2546 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
2547 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2585 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2548 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2586 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2549 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2587 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2550 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2588 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2551 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2589 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 2552 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
2590 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 2553 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2591 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2554 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2555 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2592 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2556 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2593 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2594 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2557 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2595 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2558 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2596 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2559 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2560 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
2598 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2599 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2600 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2601 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2602 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2603 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2604 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2606 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2607 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2608 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2561 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2610 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2562 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2611 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2563 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2612 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2564 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2613 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2565 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
2614 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2566 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
2615 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2567 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2616 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 2568 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
2617 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 2569 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2618 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 2570 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2619 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2571 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2620 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 2572 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2621 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 2573 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2622 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 2574 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2623 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2575 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2624 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 2576 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2625 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 2577 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
2626 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 2578 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
2627 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2579 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2628 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2580 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
2629 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2581 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
2630 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 2582 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2631 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2583 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2632 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 2584 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2633 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 2585 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2634 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 2586 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2635 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2587 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2636 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 2588 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
2637 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 2589 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2638 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 2590 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2639 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2591 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2640 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 2592 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2641 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2593 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2642 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2594 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2643 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2595 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2644 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2596 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2645 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
2646 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2598 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
2647 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2599 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
2648 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 2600 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
2649 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 2601 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2650 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 2602 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2651 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2603 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2652 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2604 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2653 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2654 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 2606 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2655 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2607 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2656 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2608 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2657 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
2658 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 2610 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
2659 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2611 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2660 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2612 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2661 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 2613 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
2662 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 2614 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = {
2678 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 2630 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
2679 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2631 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2680 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2632 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2681 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 2633 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
2682 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 2634 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
2683 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 2635 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2684 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 2636 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2685 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 2637 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
2686 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 2638 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
2687 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 2639 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2688 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 2640 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2689 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 2641 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
2690 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000 2642 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
2643 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2644 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
2645 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
2646 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
2647 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
2648 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
2649 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
2650 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
2651 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
2652 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
2653 0xcccccccc, 0x00002000
2691}; 2654};
2692 2655
2693static const u32 init_data_e1h[] = { 2656static const u32 init_data_e1h[] = {
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743db10d9..8b92c6ad0759 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -31,17 +31,16 @@
31 31
32/********************************************************/ 32/********************************************************/
33#define SUPPORT_CL73 0 /* Currently no */ 33#define SUPPORT_CL73 0 /* Currently no */
34#define ETH_HLEN 14 34#define ETH_HLEN 14
35#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 35#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
36#define ETH_MIN_PACKET_SIZE 60 36#define ETH_MIN_PACKET_SIZE 60
37#define ETH_MAX_PACKET_SIZE 1500 37#define ETH_MAX_PACKET_SIZE 1500
38#define ETH_MAX_JUMBO_PACKET_SIZE 9600 38#define ETH_MAX_JUMBO_PACKET_SIZE 9600
39#define MDIO_ACCESS_TIMEOUT 1000 39#define MDIO_ACCESS_TIMEOUT 1000
40#define BMAC_CONTROL_RX_ENABLE 2 40#define BMAC_CONTROL_RX_ENABLE 2
41#define MAX_MTU_SIZE 5000
42 41
43/***********************************************************/ 42/***********************************************************/
44/* Shortcut definitions */ 43/* Shortcut definitions */
45/***********************************************************/ 44/***********************************************************/
46 45
47#define NIG_STATUS_XGXS0_LINK10G \ 46#define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +79,12 @@
80 79
81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
84#define AUTONEG_PARALLEL \ 83#define AUTONEG_PARALLEL \
85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
86#define AUTONEG_SGMII_FIBER_AUTODET \ 85#define AUTONEG_SGMII_FIBER_AUTODET \
87 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 86 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
88#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 87#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
89 88
90#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ 89#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
91 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 90 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +201,10 @@ static void bnx2x_emac_init(struct link_params *params,
202 /* init emac - use read-modify-write */ 201 /* init emac - use read-modify-write */
203 /* self clear reset */ 202 /* self clear reset */
204 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 203 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
205 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); 204 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
206 205
207 timeout = 200; 206 timeout = 200;
208 do 207 do {
209 {
210 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 208 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
211 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); 209 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
212 if (!timeout) { 210 if (!timeout) {
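
The hunk above only reflows the do/while braces to kernel style, but the loop itself is the usual poll-until-self-clear pattern: write the self-clearing reset bit, then spin reading the mode register until the bit drops or the 200-iteration budget runs out. A standalone sketch of that shape (register access stubbed, EMAC_MODE_RESET bit value assumed):

#include <stdio.h>
#include <stdint.h>

#define EMAC_MODE_RESET 0x1 /* assumed bit value */

static int polls;
static uint32_t reg_read(void)
{
	/* pretend the self-clearing reset bit drops after three reads */
	return (polls++ < 3) ? EMAC_MODE_RESET : 0;
}

int main(void)
{
	int timeout = 200;
	uint32_t val;

	do {
		val = reg_read();
		if (!timeout) {
			printf("EMAC timeout!\n");
			return 1;
		}
		timeout--;
	} while (val & EMAC_MODE_RESET);

	printf("reset cleared with %d polls left\n", timeout);
	return 0;
}
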
@@ -214,18 +212,18 @@ static void bnx2x_emac_init(struct link_params *params,
214 return; 212 return;
215 } 213 }
216 timeout--; 214 timeout--;
217 }while (val & EMAC_MODE_RESET); 215 } while (val & EMAC_MODE_RESET);
218 216
219 /* Set mac address */ 217 /* Set mac address */
220 val = ((params->mac_addr[0] << 8) | 218 val = ((params->mac_addr[0] << 8) |
221 params->mac_addr[1]); 219 params->mac_addr[1]);
222 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); 220 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
223 221
224 val = ((params->mac_addr[2] << 24) | 222 val = ((params->mac_addr[2] << 24) |
225 (params->mac_addr[3] << 16) | 223 (params->mac_addr[3] << 16) |
226 (params->mac_addr[4] << 8) | 224 (params->mac_addr[4] << 8) |
227 params->mac_addr[5]); 225 params->mac_addr[5]);
228 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); 226 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
229} 227}
230 228
231static u8 bnx2x_emac_enable(struct link_params *params, 229static u8 bnx2x_emac_enable(struct link_params *params,
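
The two EMAC_WR calls in the hunk above split the six-byte station address across a pair of 32-bit registers: bytes 0 and 1 go into EMAC_REG_EMAC_MAC_MATCH, bytes 2 through 5 into the register four bytes after it. The packing arithmetic in isolation (example address; register layout as shown in the hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc }; /* example */
	uint32_t hi = (mac[0] << 8) | mac[1];
	uint32_t lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
		      (mac[4] << 8) | mac[5];

	printf("EMAC_MAC_MATCH     = 0x%08x\n", (unsigned)hi); /* 0x00000010 */
	printf("EMAC_MAC_MATCH + 4 = 0x%08x\n", (unsigned)lo); /* 0x18aabbcc */
	return 0;
}
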
@@ -286,7 +284,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
286 if (CHIP_REV_IS_SLOW(bp)) { 284 if (CHIP_REV_IS_SLOW(bp)) {
287 /* config GMII mode */ 285 /* config GMII mode */
288 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 286 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
289 EMAC_WR(EMAC_REG_EMAC_MODE, 287 EMAC_WR(bp, EMAC_REG_EMAC_MODE,
290 (val | EMAC_MODE_PORT_GMII)); 288 (val | EMAC_MODE_PORT_GMII));
291 } else { /* ASIC */ 289 } else { /* ASIC */
292 /* pause enable/disable */ 290 /* pause enable/disable */
@@ -298,17 +296,19 @@ static u8 bnx2x_emac_enable(struct link_params *params,
298 EMAC_RX_MODE_FLOW_EN); 296 EMAC_RX_MODE_FLOW_EN);
299 297
300 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 298 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
301 EMAC_TX_MODE_EXT_PAUSE_EN); 299 (EMAC_TX_MODE_EXT_PAUSE_EN |
300 EMAC_TX_MODE_FLOW_EN));
302 if (vars->flow_ctrl & FLOW_CTRL_TX) 301 if (vars->flow_ctrl & FLOW_CTRL_TX)
303 bnx2x_bits_en(bp, emac_base + 302 bnx2x_bits_en(bp, emac_base +
304 EMAC_REG_EMAC_TX_MODE, 303 EMAC_REG_EMAC_TX_MODE,
305 EMAC_TX_MODE_EXT_PAUSE_EN); 304 (EMAC_TX_MODE_EXT_PAUSE_EN |
305 EMAC_TX_MODE_FLOW_EN));
306 } 306 }
307 307
308 /* KEEP_VLAN_TAG, promiscuous */ 308 /* KEEP_VLAN_TAG, promiscuous */
309 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 309 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
310 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 310 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
311 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); 311 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
312 312
313 /* Set Loopback */ 313 /* Set Loopback */
314 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 314 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +316,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
316 val |= 0x810; 316 val |= 0x810;
317 else 317 else
318 val &= ~0x810; 318 val &= ~0x810;
319 EMAC_WR(EMAC_REG_EMAC_MODE, val); 319 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
320 320
321 /* enable emac for jumbo packets */ 321 /* enable emac for jumbo packets */
322 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE, 322 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
323 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 323 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
324 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 324 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
325 325
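
The RX_MTU_SIZE write above ORs a jumbo-enable flag with the maximum accepted frame length; with the defines at the top of this file that length is ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD = 9600 + (14 + 8) = 9622 bytes. Spelled out (the flag's bit position is an assumption here):

#include <stdio.h>

#define ETH_HLEN                   14
#define ETH_OVREHEAD               (ETH_HLEN + 8) /* 8 for CRC + VLAN */
#define ETH_MAX_JUMBO_PACKET_SIZE  9600
#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1U << 31)     /* assumed bit */

int main(void)
{
	unsigned int val = EMAC_RX_MTU_SIZE_JUMBO_ENA |
			   (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD);

	printf("RX_MTU_SIZE = 0x%08x (%u bytes + jumbo bit)\n",
	       val, val & 0xffff); /* 9622 bytes */
	return 0;
}
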
@@ -591,9 +591,9 @@ void bnx2x_link_status_update(struct link_params *params,
591 vars->flow_ctrl &= ~FLOW_CTRL_RX; 591 vars->flow_ctrl &= ~FLOW_CTRL_RX;
592 592
593 if (vars->phy_flags & PHY_XGXS_FLAG) { 593 if (vars->phy_flags & PHY_XGXS_FLAG) {
594 if (params->req_line_speed && 594 if (vars->line_speed &&
595 ((params->req_line_speed == SPEED_10) || 595 ((vars->line_speed == SPEED_10) ||
596 (params->req_line_speed == SPEED_100))) { 596 (vars->line_speed == SPEED_100))) {
597 vars->phy_flags |= PHY_SGMII_FLAG; 597 vars->phy_flags |= PHY_SGMII_FLAG;
598 } else { 598 } else {
599 vars->phy_flags &= ~PHY_SGMII_FLAG; 599 vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +645,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
645 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 645 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
646 NIG_REG_INGRESS_BMAC0_MEM; 646 NIG_REG_INGRESS_BMAC0_MEM;
647 u32 wb_data[2]; 647 u32 wb_data[2];
648 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 648 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
649 649
650 /* Only if the bmac is out of reset */ 650 /* Only if the bmac is out of reset */
651 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 651 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +670,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
670 u8 port = params->port; 670 u8 port = params->port;
671 u32 init_crd, crd; 671 u32 init_crd, crd;
672 u32 count = 1000; 672 u32 count = 1000;
673 u32 pause = 0;
674 673
675 /* disable port */ 674 /* disable port */
676 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 675 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +692,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
693 return -EINVAL; 692 return -EINVAL;
694 } 693 }
695 694
696 if (flow_ctrl & FLOW_CTRL_RX) 695 if (flow_ctrl & FLOW_CTRL_RX ||
697 pause = 1; 696 line_speed == SPEED_10 ||
698 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause); 697 line_speed == SPEED_100 ||
699 if (pause) { 698 line_speed == SPEED_1000 ||
699 line_speed == SPEED_2500) {
700 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
700 /* update threshold */ 701 /* update threshold */
701 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 702 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
702 /* update init credit */ 703 /* update init credit */
703 init_crd = 778; /* (800-18-4) */ 704 init_crd = 778; /* (800-18-4) */
704 705
705 } else { 706 } else {
706 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 707 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
707 ETH_OVREHEAD)/16; 708 ETH_OVREHEAD)/16;
708 709 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
709 /* update threshold */ 710 /* update threshold */
710 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 711 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
711 /* update init credit */ 712 /* update init credit */
712 switch (line_speed) { 713 switch (line_speed) {
713 case SPEED_10:
714 case SPEED_100:
715 case SPEED_1000:
716 init_crd = thresh + 55 - 22;
717 break;
718
719 case SPEED_2500:
720 init_crd = thresh + 138 - 22;
721 break;
722
723 case SPEED_10000: 714 case SPEED_10000:
724 init_crd = thresh + 553 - 22; 715 init_crd = thresh + 553 - 22;
725 break; 716 break;
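
The rewritten branch above forces the pause path not only when RX flow control is on but for every line speed below 10G, so the per-speed credit cases for 10/100/1000/2500 disappear and only SPEED_10000 keeps the threshold-based formula. Worked numbers from the constants visible in the hunk:

#include <stdio.h>

int main(void)
{
	unsigned int thresh = (9600 + 22) / 16;   /* = 601  */
	unsigned int crd_10g = thresh + 553 - 22; /* = 1132 */
	unsigned int crd_pause = 800 - 18 - 4;    /* = 778  */

	printf("thresh=%u init_crd(10G)=%u init_crd(pause)=%u\n",
	       thresh, crd_10g, crd_pause);
	return 0;
}
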
@@ -764,10 +755,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
764 emac_base = GRCBASE_EMAC0; 755 emac_base = GRCBASE_EMAC0;
765 break; 756 break;
766 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
767 emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1; 758 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
768 break; 759 break;
769 default: 760 default:
770 emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0; 761 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
771 break; 762 break;
772 } 763 }
773 return emac_base; 764 return emac_base;
@@ -1044,7 +1035,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
1044} 1035}
1045 1036
1046static void bnx2x_set_parallel_detection(struct link_params *params, 1037static void bnx2x_set_parallel_detection(struct link_params *params,
1047 u8 phy_flags) 1038 u8 phy_flags)
1048{ 1039{
1049 struct bnx2x *bp = params->bp; 1040 struct bnx2x *bp = params->bp;
1050 u16 control2; 1041 u16 control2;
@@ -1114,7 +1105,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1114 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1105 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1115 1106
1116 /* CL37 Autoneg Enabled */ 1107 /* CL37 Autoneg Enabled */
1117 if (params->req_line_speed == SPEED_AUTO_NEG) 1108 if (vars->line_speed == SPEED_AUTO_NEG)
1118 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; 1109 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
1119 else /* CL37 Autoneg Disabled */ 1110 else /* CL37 Autoneg Disabled */
1120 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1111 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1123,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1132 MDIO_REG_BANK_SERDES_DIGITAL, 1123 MDIO_REG_BANK_SERDES_DIGITAL,
1133 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1124 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1134 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; 1125 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
1135 if (params->req_line_speed == SPEED_AUTO_NEG) 1126 if (vars->line_speed == SPEED_AUTO_NEG)
1136 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1127 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1137 else 1128 else
1138 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1129 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1139,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1148 MDIO_REG_BANK_BAM_NEXT_PAGE, 1139 MDIO_REG_BANK_BAM_NEXT_PAGE,
1149 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1140 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1150 &reg_val); 1141 &reg_val);
1151 if (params->req_line_speed == SPEED_AUTO_NEG) { 1142 if (vars->line_speed == SPEED_AUTO_NEG) {
1152 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1143 /* Enable BAM aneg Mode and TetonII aneg Mode */
1153 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1144 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1154 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1145 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1155,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1164 reg_val); 1155 reg_val);
1165 1156
1166 /* Enable Clause 73 Aneg */ 1157 /* Enable Clause 73 Aneg */
1167 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1158 if ((vars->line_speed == SPEED_AUTO_NEG) &&
1168 (SUPPORT_CL73)) { 1159 (SUPPORT_CL73)) {
1169 /* Enable BAM Station Manager */ 1160 /* Enable BAM Station Manager */
1170 1161
@@ -1226,7 +1217,8 @@ static void bnx2x_set_autoneg(struct link_params *params,
1226} 1217}
1227 1218
1228/* program SerDes, forced speed */ 1219/* program SerDes, forced speed */
1229static void bnx2x_program_serdes(struct link_params *params) 1220static void bnx2x_program_serdes(struct link_params *params,
1221 struct link_vars *vars)
1230{ 1222{
1231 struct bnx2x *bp = params->bp; 1223 struct bnx2x *bp = params->bp;
1232 u16 reg_val; 1224 u16 reg_val;
@@ -1248,28 +1240,35 @@ static void bnx2x_program_serdes(struct link_params *params)
1248 1240
1249 /* program speed 1241 /* program speed
1250 - needed only if the speed is greater than 1G (2.5G or 10G) */ 1242 - needed only if the speed is greater than 1G (2.5G or 10G) */
1251 if (!((params->req_line_speed == SPEED_1000) || 1243 CL45_RD_OVER_CL22(bp, params->port,
1252 (params->req_line_speed == SPEED_100) ||
1253 (params->req_line_speed == SPEED_10))) {
1254 CL45_RD_OVER_CL22(bp, params->port,
1255 params->phy_addr, 1244 params->phy_addr,
1256 MDIO_REG_BANK_SERDES_DIGITAL, 1245 MDIO_REG_BANK_SERDES_DIGITAL,
1257 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 1246 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1258 /* clearing the speed value before setting the right speed */ 1247 /* clearing the speed value before setting the right speed */
1259 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK; 1248 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1249
1250 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
1251 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1252
1253 if (!((vars->line_speed == SPEED_1000) ||
1254 (vars->line_speed == SPEED_100) ||
1255 (vars->line_speed == SPEED_10))) {
1256
1260 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | 1257 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
1261 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); 1258 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1262 if (params->req_line_speed == SPEED_10000) 1259 if (vars->line_speed == SPEED_10000)
1263 reg_val |= 1260 reg_val |=
1264 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; 1261 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
1265 if (params->req_line_speed == SPEED_13000) 1262 if (vars->line_speed == SPEED_13000)
1266 reg_val |= 1263 reg_val |=
1267 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 1264 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1268 CL45_WR_OVER_CL22(bp, params->port, 1265 }
1266
1267 CL45_WR_OVER_CL22(bp, params->port,
1269 params->phy_addr, 1268 params->phy_addr,
1270 MDIO_REG_BANK_SERDES_DIGITAL, 1269 MDIO_REG_BANK_SERDES_DIGITAL,
1271 MDIO_SERDES_DIGITAL_MISC1, reg_val); 1270 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1272 } 1271
1273} 1272}
1274 1273
1275static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 1274static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
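
The bnx2x_program_serdes change above inverts the structure of the register update: instead of doing the read-modify-write only when a speed above 1G is forced, it now always reads MISC1, always clears the force-speed field, sets the force bits only for >1G, and always writes back, so a stale forced-speed setting is cleared when dropping to 1G or below. The shape of that refactor as a generic sketch (register access stubbed, mask values placeholders):

#include <stdint.h>
#include <stdio.h>

#define FORCE_SPEED_MASK 0x001f /* placeholder mask */
#define FORCE_SPEED_SEL  0x0020 /* placeholder bit  */

static uint16_t serdes_reg = 0x003f; /* stale forced-speed state */

static uint16_t reg_read(void)        { return serdes_reg; }
static void     reg_write(uint16_t v) { serdes_reg = v; }

static void program_speed(int speed_over_1g, uint16_t force_bits)
{
	uint16_t val = reg_read();

	/* always clear stale force-speed state first ... */
	val &= ~(FORCE_SPEED_MASK | FORCE_SPEED_SEL);

	/* ... then re-arm it only when a >1G speed is forced */
	if (speed_over_1g)
		val |= FORCE_SPEED_SEL | force_bits;

	reg_write(val); /* the write now happens unconditionally */
}

int main(void)
{
	program_speed(0, 0); /* dropping to <=1G now clears the field */
	printf("reg after: 0x%04x\n", serdes_reg); /* 0x0000 */
	return 0;
}
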
@@ -1295,48 +1294,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1295 MDIO_OVER_1G_UP3, 0); 1294 MDIO_OVER_1G_UP3, 0);
1296} 1295}
1297 1296
1298static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1297static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
1299 u32 *ieee_fc)
1300{ 1298{
1301 struct bnx2x *bp = params->bp; 1299 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1302 /* for AN, we are always publishing full duplex */
1303 u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1304
1305 /* resolve pause mode and advertisement 1300 /* resolve pause mode and advertisement
1306 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1301 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1307 1302
1308 switch (params->req_flow_ctrl) { 1303 switch (params->req_flow_ctrl) {
1309 case FLOW_CTRL_AUTO: 1304 case FLOW_CTRL_AUTO:
1310 if (params->mtu <= MAX_MTU_SIZE) { 1305 if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
1311 an_adv |= 1306 *ieee_fc |=
1312 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1307 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1313 } else { 1308 } else {
1314 an_adv |= 1309 *ieee_fc |=
1315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1310 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1316 } 1311 }
1317 break; 1312 break;
1318 case FLOW_CTRL_TX: 1313 case FLOW_CTRL_TX:
1319 an_adv |= 1314 *ieee_fc |=
1320 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1321 break; 1316 break;
1322 1317
1323 case FLOW_CTRL_RX: 1318 case FLOW_CTRL_RX:
1324 case FLOW_CTRL_BOTH: 1319 case FLOW_CTRL_BOTH:
1325 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1320 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1326 break; 1321 break;
1327 1322
1328 case FLOW_CTRL_NONE: 1323 case FLOW_CTRL_NONE:
1329 default: 1324 default:
1330 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1325 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1331 break; 1326 break;
1332 } 1327 }
1328}
1333 1329
1334 *ieee_fc = an_adv; 1330static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1331 u32 ieee_fc)
1332{
1333 struct bnx2x *bp = params->bp;
1334 /* for AN, we are always publishing full duplex */
1335 1335
1336 CL45_WR_OVER_CL22(bp, params->port, 1336 CL45_WR_OVER_CL22(bp, params->port,
1337 params->phy_addr, 1337 params->phy_addr,
1338 MDIO_REG_BANK_COMBO_IEEE0, 1338 MDIO_REG_BANK_COMBO_IEEE0,
1339 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv); 1339 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
1340} 1340}
1341 1341
1342static void bnx2x_restart_autoneg(struct link_params *params) 1342static void bnx2x_restart_autoneg(struct link_params *params)
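
The hunk above splits the old advertisement routine in two: bnx2x_calc_ieee_aneg_adv computes the Clause 37 pause advertisement from req_flow_ctrl (keyed off the new req_fc_auto_adv field rather than the MTU), and bnx2x_set_ieee_aneg_advertisment only performs the MDIO write. The mapping as a pure function, with the pause bit values assumed (PAUSE = bit 7, ASM_DIR = bit 8, full duplex = bit 5):

#include <stdio.h>
#include <stdint.h>

enum { FC_AUTO, FC_TX, FC_RX, FC_BOTH, FC_NONE };

#define ADV_FULL_DUPLEX 0x0020 /* assumed Clause 37 bit values */
#define ADV_PAUSE_ASYM  0x0100
#define ADV_PAUSE_BOTH  0x0180

static uint32_t calc_ieee_fc(int req_fc, int req_fc_auto_adv)
{
	uint32_t adv = ADV_FULL_DUPLEX; /* AN always advertises FD */

	switch (req_fc) {
	case FC_AUTO:
		adv |= (req_fc_auto_adv == FC_BOTH) ? ADV_PAUSE_BOTH
						    : ADV_PAUSE_ASYM;
		break;
	case FC_TX:
		adv |= ADV_PAUSE_ASYM;
		break;
	case FC_RX:
	case FC_BOTH:
		adv |= ADV_PAUSE_BOTH;
		break;
	default: /* FC_NONE */
		break;
	}
	return adv;
}

int main(void)
{
	printf("auto/both -> 0x%04x\n",
	       (unsigned)calc_ieee_fc(FC_AUTO, FC_BOTH)); /* 0x01a0 */
	return 0;
}
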
@@ -1382,7 +1382,8 @@ static void bnx2x_restart_autoneg(struct link_params *params)
1382 } 1382 }
1383} 1383}
1384 1384
1385static void bnx2x_initialize_sgmii_process(struct link_params *params) 1385static void bnx2x_initialize_sgmii_process(struct link_params *params,
1386 struct link_vars *vars)
1386{ 1387{
1387 struct bnx2x *bp = params->bp; 1388 struct bnx2x *bp = params->bp;
1388 u16 control1; 1389 u16 control1;
@@ -1406,7 +1407,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1406 control1); 1407 control1);
1407 1408
1408 /* if forced speed */ 1409 /* if forced speed */
1409 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 1410 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1410 /* set speed, disable autoneg */ 1411 /* set speed, disable autoneg */
1411 u16 mii_control; 1412 u16 mii_control;
1412 1413
@@ -1419,7 +1420,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1419 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 1420 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1420 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 1421 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
1421 1422
1422 switch (params->req_line_speed) { 1423 switch (vars->line_speed) {
1423 case SPEED_100: 1424 case SPEED_100:
1424 mii_control |= 1425 mii_control |=
1425 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; 1426 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1434,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1433 break; 1434 break;
1434 default: 1435 default:
1435 /* invalid speed for SGMII */ 1436 /* invalid speed for SGMII */
1436 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n", 1437 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1437 params->req_line_speed); 1438 vars->line_speed);
1438 break; 1439 break;
1439 } 1440 }
1440 1441
@@ -1460,20 +1461,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1460 */ 1461 */
1461 1462
1462static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 1463static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1463{ 1464{ /* LD LP */
1464 switch (pause_result) { /* ASYM P ASYM P */ 1465 switch (pause_result) { /* ASYM P ASYM P */
1465 case 0xb: /* 1 0 1 1 */ 1466 case 0xb: /* 1 0 1 1 */
1466 vars->flow_ctrl = FLOW_CTRL_TX; 1467 vars->flow_ctrl = FLOW_CTRL_TX;
1467 break; 1468 break;
1468 1469
1469 case 0xe: /* 1 1 1 0 */ 1470 case 0xe: /* 1 1 1 0 */
1470 vars->flow_ctrl = FLOW_CTRL_RX; 1471 vars->flow_ctrl = FLOW_CTRL_RX;
1471 break; 1472 break;
1472 1473
1473 case 0x5: /* 0 1 0 1 */ 1474 case 0x5: /* 0 1 0 1 */
1474 case 0x7: /* 0 1 1 1 */ 1475 case 0x7: /* 0 1 1 1 */
1475 case 0xd: /* 1 1 0 1 */ 1476 case 0xd: /* 1 1 0 1 */
1476 case 0xf: /* 1 1 1 1 */ 1477 case 0xf: /* 1 1 1 1 */
1477 vars->flow_ctrl = FLOW_CTRL_BOTH; 1478 vars->flow_ctrl = FLOW_CTRL_BOTH;
1478 break; 1479 break;
1479 1480
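
The switch above implements the pause resolution of IEEE 802.3 Table 28B-3 over a 4-bit code whose layout, per the LD/LP comment, is bit 3 = local ASM_DIR, bit 2 = local PAUSE, bit 1 = partner ASM_DIR, bit 0 = partner PAUSE. A sketch that tabulates the outcome for all sixteen codes:

#include <stdio.h>

/* bit 3 = LD ASM_DIR, bit 2 = LD PAUSE,
 * bit 1 = LP ASM_DIR, bit 0 = LP PAUSE */
static const char *resolve(unsigned int pr)
{
	switch (pr) {
	case 0xb:           return "TX only";
	case 0xe:           return "RX only";
	case 0x5: case 0x7:
	case 0xd: case 0xf: return "TX+RX";
	default:            return "none";
	}
}

int main(void)
{
	for (unsigned int pr = 0; pr < 16; pr++)
		printf("0x%x -> %s\n", pr, resolve(pr));
	return 0;
}
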
@@ -1531,6 +1532,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1532 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1532 pause_result); 1533 pause_result);
1533 bnx2x_pause_resolve(vars, pause_result); 1534 bnx2x_pause_resolve(vars, pause_result);
1535 if (vars->flow_ctrl == FLOW_CTRL_NONE &&
1536 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1537 bnx2x_cl45_read(bp, port,
1538 ext_phy_type,
1539 ext_phy_addr,
1540 MDIO_AN_DEVAD,
1541 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1542
1543 bnx2x_cl45_read(bp, port,
1544 ext_phy_type,
1545 ext_phy_addr,
1546 MDIO_AN_DEVAD,
1547 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1548 pause_result = (ld_pause &
1549 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1550 pause_result |= (lp_pause &
1551 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1552
1553 bnx2x_pause_resolve(vars, pause_result);
1554 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
1555 pause_result);
1556 }
1534 } 1557 }
1535 return ret; 1558 return ret;
1536} 1559}
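
For reference, the two shifts in the 8073 branch above pack the CL37 local and link-partner words into exactly that nibble. Assuming the usual combo-IEEE0 encoding where PAUSE is bit 7 and ASYM_PAUSE is bit 8 (so the ADV_PAUSE_BOTH mask is 0x0180), a worked sketch:

    static unsigned char pack_cl37_pause(unsigned short ld_pause,
                                         unsigned short lp_pause)
    {
            unsigned char pause_result;

            pause_result  = (ld_pause & 0x0180) >> 5; /* LD -> bits 3:2 */
            pause_result |= (lp_pause & 0x0180) >> 7; /* LP -> bits 1:0 */
            return pause_result;
    }

    /* e.g. ld_pause = 0x0180 (PAUSE | ASYM) and lp_pause = 0x0080
     * (PAUSE only) give 0xc | 0x1 = 0xd, which the table above
     * resolves to FLOW_CTRL_BOTH.
     */
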
@@ -1541,8 +1564,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1541 u32 gp_status) 1564 u32 gp_status)
1542{ 1565{
1543 struct bnx2x *bp = params->bp; 1566 struct bnx2x *bp = params->bp;
1544 u16 ld_pause; /* local driver */ 1567 u16 ld_pause; /* local driver */
1545 u16 lp_pause; /* link partner */ 1568 u16 lp_pause; /* link partner */
1546 u16 pause_result; 1569 u16 pause_result;
1547 1570
1548 vars->flow_ctrl = FLOW_CTRL_NONE; 1571 vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1596,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1573 (bnx2x_ext_phy_resove_fc(params, vars))) { 1596 (bnx2x_ext_phy_resove_fc(params, vars))) {
1574 return; 1597 return;
1575 } else { 1598 } else {
1576 vars->flow_ctrl = params->req_flow_ctrl; 1599 if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
1577 if (vars->flow_ctrl == FLOW_CTRL_AUTO) { 1600 vars->flow_ctrl = params->req_fc_auto_adv;
1578 if (params->mtu <= MAX_MTU_SIZE) 1601 else
1579 vars->flow_ctrl = FLOW_CTRL_BOTH; 1602 vars->flow_ctrl = params->req_flow_ctrl;
1580 else
1581 vars->flow_ctrl = FLOW_CTRL_TX;
1582 }
1583 } 1603 }
1584 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 1604 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1585} 1605}
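
The rewritten else-branch drops the old MTU heuristic for an explicit fallback: a fixed req_flow_ctrl wins outright, and FLOW_CTRL_AUTO now defers to the pre-computed req_fc_auto_adv. A sketch of that policy (the FLOW_CTRL_AUTO value here is a stand-in, not necessarily the driver's):

    #define FLOW_CTRL_AUTO  0x80    /* stand-in for the driver's value */

    static unsigned short pick_flow_ctrl(unsigned short req_flow_ctrl,
                                         unsigned short req_fc_auto_adv)
    {
            /* AUTO defers to what autoneg was told to advertise */
            return (req_flow_ctrl == FLOW_CTRL_AUTO) ?
                    req_fc_auto_adv : req_flow_ctrl;
    }
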
@@ -1590,6 +1610,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1590 u32 gp_status) 1610 u32 gp_status)
1591{ 1611{
1592 struct bnx2x *bp = params->bp; 1612 struct bnx2x *bp = params->bp;
1613
1593 u8 rc = 0; 1614 u8 rc = 0;
1594 vars->link_status = 0; 1615 vars->link_status = 0;
1595 1616
@@ -1690,7 +1711,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1690 1711
1691 vars->link_status |= LINK_STATUS_SERDES_LINK; 1712 vars->link_status |= LINK_STATUS_SERDES_LINK;
1692 1713
1693 if (params->req_line_speed == SPEED_AUTO_NEG) { 1714 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1715 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1716 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1717 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1718 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
1694 vars->autoneg = AUTO_NEG_ENABLED; 1719 vars->autoneg = AUTO_NEG_ENABLED;
1695 1720
1696 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 1721 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1730,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1705 1730
1706 } 1731 }
1707 if (vars->flow_ctrl & FLOW_CTRL_TX) 1732 if (vars->flow_ctrl & FLOW_CTRL_TX)
1708 vars->link_status |= 1733 vars->link_status |=
1709 LINK_STATUS_TX_FLOW_CONTROL_ENABLED; 1734 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1710 1735
1711 if (vars->flow_ctrl & FLOW_CTRL_RX) 1736 if (vars->flow_ctrl & FLOW_CTRL_RX)
1712 vars->link_status |= 1737 vars->link_status |=
1713 LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1738 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1714 1739
1715 } else { /* link_down */ 1740 } else { /* link_down */
1716 DP(NETIF_MSG_LINK, "phy link down\n"); 1741 DP(NETIF_MSG_LINK, "phy link down\n");
1717 1742
1718 vars->phy_link_up = 0; 1743 vars->phy_link_up = 0;
1719 vars->line_speed = 0; 1744
1720 vars->duplex = DUPLEX_FULL; 1745 vars->duplex = DUPLEX_FULL;
1721 vars->flow_ctrl = FLOW_CTRL_NONE; 1746 vars->flow_ctrl = FLOW_CTRL_NONE;
1722 vars->autoneg = AUTO_NEG_DISABLED; 1747 vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1842,15 @@ static u8 bnx2x_emac_program(struct link_params *params,
1817} 1842}
1818 1843
1819/*****************************************************************************/ 1844/*****************************************************************************/
1820/* External Phy section */ 1845/* External Phy section */
1821/*****************************************************************************/ 1846/*****************************************************************************/
1822static void bnx2x_hw_reset(struct bnx2x *bp) 1847static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
1823{ 1848{
1824 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1849 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1825 MISC_REGISTERS_GPIO_OUTPUT_LOW); 1850 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1826 msleep(1); 1851 msleep(1);
1827 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1852 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1828 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1853 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
1829} 1854}
1830 1855
1831static void bnx2x_ext_phy_reset(struct link_params *params, 1856static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1879,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1854 1879
1855 /* Restore normal power mode*/ 1880 /* Restore normal power mode*/
1856 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1881 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1857 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1882 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1883 params->port);
1858 1884
1859 /* HW reset */ 1885 /* HW reset */
1860 bnx2x_hw_reset(bp); 1886 bnx2x_hw_reset(bp, params->port);
1861 1887
1862 bnx2x_cl45_write(bp, params->port, 1888 bnx2x_cl45_write(bp, params->port,
1863 ext_phy_type, 1889 ext_phy_type,
@@ -1869,7 +1895,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1869 /* Unset Low Power Mode and SW reset */ 1895 /* Unset Low Power Mode and SW reset */
1870 /* Restore normal power mode*/ 1896 /* Restore normal power mode*/
1871 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1897 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1872 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1898 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1899 params->port);
1873 1900
1874 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 1901 DP(NETIF_MSG_LINK, "XGXS 8072\n");
1875 bnx2x_cl45_write(bp, params->port, 1902 bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1914,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1887 1914
1888 /* Restore normal power mode*/ 1915 /* Restore normal power mode*/
1889 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1916 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1890 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1917 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1918 params->port);
1891 1919
1892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1893 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1921 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1922 params->port);
1894 1923
1895 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 1924 DP(NETIF_MSG_LINK, "XGXS 8073\n");
1896 bnx2x_cl45_write(bp,
1897 params->port,
1898 ext_phy_type,
1899 ext_phy_addr,
1900 MDIO_PMA_DEVAD,
1901 MDIO_PMA_REG_CTRL,
1902 1<<15);
1903 } 1925 }
1904 break; 1926 break;
1905 1927
@@ -1908,10 +1930,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1908 1930
1909 /* Restore normal power mode*/ 1931 /* Restore normal power mode*/
1910 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1932 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1911 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1933 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1934 params->port);
1912 1935
1913 /* HW reset */ 1936 /* HW reset */
1914 bnx2x_hw_reset(bp); 1937 bnx2x_hw_reset(bp, params->port);
1915 1938
1916 break; 1939 break;
1917 1940
@@ -1934,7 +1957,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1934 1957
1935 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 1958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1936 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 1959 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1937 bnx2x_hw_reset(bp); 1960 bnx2x_hw_reset(bp, params->port);
1938 break; 1961 break;
1939 1962
1940 default: 1963 default:
@@ -2098,42 +2121,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2098 2121
2099} 2122}
2100 2123
2101static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) 2124static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2125 u8 ext_phy_addr)
2102{ 2126{
2103 struct bnx2x *bp = params->bp; 2127 u16 fw_ver1, fw_ver2;
2104 u8 port = params->port; 2128 /* Boot port from external ROM */
2105 u8 ext_phy_addr = ((params->ext_phy_config &
2106 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2107 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2108 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2109 u16 fw_ver1, fw_ver2, val;
2110 /* Need to wait 100ms after reset */
2111 msleep(100);
2112 /* Boot port from external ROM */
2113 /* EDC grst */ 2129 /* EDC grst */
2114 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2130 bnx2x_cl45_write(bp, port,
2131 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2132 ext_phy_addr,
2115 MDIO_PMA_DEVAD, 2133 MDIO_PMA_DEVAD,
2116 MDIO_PMA_REG_GEN_CTRL, 2134 MDIO_PMA_REG_GEN_CTRL,
2117 0x0001); 2135 0x0001);
2118 2136
2119 /* ucode reboot and rst */ 2137 /* ucode reboot and rst */
2120 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2138 bnx2x_cl45_write(bp, port,
2139 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2140 ext_phy_addr,
2121 MDIO_PMA_DEVAD, 2141 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_GEN_CTRL, 2142 MDIO_PMA_REG_GEN_CTRL,
2123 0x008c); 2143 0x008c);
2124 2144
2125 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2145 bnx2x_cl45_write(bp, port,
2146 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2147 ext_phy_addr,
2126 MDIO_PMA_DEVAD, 2148 MDIO_PMA_DEVAD,
2127 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 2149 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2128 2150
2129 /* Reset internal microprocessor */ 2151 /* Reset internal microprocessor */
2130 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2152 bnx2x_cl45_write(bp, port,
2153 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2154 ext_phy_addr,
2131 MDIO_PMA_DEVAD, 2155 MDIO_PMA_DEVAD,
2132 MDIO_PMA_REG_GEN_CTRL, 2156 MDIO_PMA_REG_GEN_CTRL,
2133 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2157 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2134 2158
2135 /* Release srst bit */ 2159 /* Release srst bit */
2136 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2160 bnx2x_cl45_write(bp, port,
2161 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2162 ext_phy_addr,
2137 MDIO_PMA_DEVAD, 2163 MDIO_PMA_DEVAD,
2138 MDIO_PMA_REG_GEN_CTRL, 2164 MDIO_PMA_REG_GEN_CTRL,
2139 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2165 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2168,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
2142 msleep(100); 2168 msleep(100);
2143 2169
2144 /* Clear ser_boot_ctl bit */ 2170 /* Clear ser_boot_ctl bit */
2145 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2171 bnx2x_cl45_write(bp, port,
2172 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2173 ext_phy_addr,
2146 MDIO_PMA_DEVAD, 2174 MDIO_PMA_DEVAD,
2147 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 2175 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2148 2176
2149 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2177 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2150 MDIO_PMA_DEVAD, 2178 ext_phy_addr,
2151 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 2179 MDIO_PMA_DEVAD,
2152 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2180 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2153 MDIO_PMA_DEVAD, 2181 bnx2x_cl45_read(bp, port,
2154 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 2182 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2183 ext_phy_addr,
2184 MDIO_PMA_DEVAD,
2185 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2155 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); 2186 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
2156 2187
2157 /* Only set bit 10 = 1 (Tx power down) */ 2188}
2158 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_TX_POWER_DOWN, &val);
2161 2189
2190static void bnx2x_bcm807x_force_10G(struct link_params *params)
2191{
2192 struct bnx2x *bp = params->bp;
2193 u8 port = params->port;
2194 u8 ext_phy_addr = ((params->ext_phy_config &
2195 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2196 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2197 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2198
2199 /* Force KR or KX */
2162 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2200 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2163 MDIO_PMA_DEVAD, 2201 MDIO_PMA_DEVAD,
2164 MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10)); 2202 MDIO_PMA_REG_CTRL,
2165 2203 0x2040);
2166 msleep(600);
2167 /* Release bit 10 (Release Tx power down) */
2168 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2204 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2169 MDIO_PMA_DEVAD, 2205 MDIO_PMA_DEVAD,
2170 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 2206 MDIO_PMA_REG_10G_CTRL2,
2171 2207 0x000b);
2208 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2209 MDIO_PMA_DEVAD,
2210 MDIO_PMA_REG_BCM_CTRL,
2211 0x0000);
2212 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2213 MDIO_AN_DEVAD,
2214 MDIO_AN_REG_CTRL,
2215 0x0000);
2172} 2216}
2173
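
Both the ROM-boot sequence and the force-10G path above are chains of Clause 45 MDIO writes issued through the driver's bnx2x_cl45_write() helper. As a rough sketch of what such a helper does (the mdio_frame() primitive and the opcodes below are hypothetical, not the driver's implementation): Clause 45 first latches the 16-bit register address in the target MMD with an address frame, then sends the data frame against the same device.

    enum mdio_op { MDIO_OP_ADDRESS, MDIO_OP_WRITE };    /* hypothetical */

    static void mdio_frame(unsigned char phy, unsigned char devad,
                           enum mdio_op op, unsigned short data);

    static void cl45_write(unsigned char phy, unsigned char devad,
                           unsigned short reg, unsigned short val)
    {
            mdio_frame(phy, devad, MDIO_OP_ADDRESS, reg); /* address cycle */
            mdio_frame(phy, devad, MDIO_OP_WRITE, val);   /* data cycle    */
    }
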
2174static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) 2217static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2175{ 2218{
2176 struct bnx2x *bp = params->bp; 2219 struct bnx2x *bp = params->bp;
@@ -2236,32 +2279,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2236 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2279 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2237 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); 2280 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
2238} 2281}
2239static void bnx2x_bcm807x_force_10G(struct link_params *params) 2282
2283static void bnx2x_8073_set_pause_cl37(struct link_params *params,
2284 struct link_vars *vars)
2240{ 2285{
2286
2241 struct bnx2x *bp = params->bp; 2287 struct bnx2x *bp = params->bp;
2242 u8 port = params->port; 2288 u16 cl37_val;
2243 u8 ext_phy_addr = ((params->ext_phy_config & 2289 u8 ext_phy_addr = ((params->ext_phy_config &
2244 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 2290 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2245 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 2291 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2246 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2292 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2247 2293
2248 /* Force KR or KX */ 2294 bnx2x_cl45_read(bp, params->port,
2249 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2295 ext_phy_type,
2250 MDIO_PMA_DEVAD, 2296 ext_phy_addr,
2251 MDIO_PMA_REG_CTRL, 2297 MDIO_AN_DEVAD,
2252 0x2040); 2298 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
2253 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2299
2254 MDIO_PMA_DEVAD, 2300 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2255 MDIO_PMA_REG_10G_CTRL2, 2301 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2256 0x000b); 2302
2257 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2303 if ((vars->ieee_fc &
2258 MDIO_PMA_DEVAD, 2304 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
2259 MDIO_PMA_REG_BCM_CTRL, 2305 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
2260 0x0000); 2306 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2261 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2307 }
2308 if ((vars->ieee_fc &
2309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2310 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2311 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2312 }
2313 if ((vars->ieee_fc &
2314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2316 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2317 }
2318 DP(NETIF_MSG_LINK,
2319 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
2320
2321 bnx2x_cl45_write(bp, params->port,
2322 ext_phy_type,
2323 ext_phy_addr,
2262 MDIO_AN_DEVAD, 2324 MDIO_AN_DEVAD,
2263 MDIO_AN_REG_CTRL, 2325 MDIO_AN_REG_CL37_FC_LD, cl37_val);
2264 0x0000); 2326 msleep(500);
2265} 2327}
2266 2328
2267static void bnx2x_ext_phy_set_pause(struct link_params *params, 2329static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2344,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2282 MDIO_AN_REG_ADV_PAUSE, &val); 2344 MDIO_AN_REG_ADV_PAUSE, &val);
2283 2345
2284 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 2346 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
2347
2285 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ 2348 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2286 2349
2287 if (vars->ieee_fc & 2350 if ((vars->ieee_fc &
2351 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2288 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 2352 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2289 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 2353 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
2290 } 2354 }
2291 if (vars->ieee_fc & 2355 if ((vars->ieee_fc &
2356 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2292 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 2357 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2293 val |= 2358 val |=
2294 MDIO_AN_REG_ADV_PAUSE_PAUSE; 2359 MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2367,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2302 MDIO_AN_REG_ADV_PAUSE, val); 2367 MDIO_AN_REG_ADV_PAUSE, val);
2303} 2368}
2304 2369
2370
2371static void bnx2x_init_internal_phy(struct link_params *params,
2372 struct link_vars *vars)
2373{
2374 struct bnx2x *bp = params->bp;
2375 u8 port = params->port;
2376 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2377 u16 bank, rx_eq;
2378
2379 rx_eq = ((params->serdes_config &
2380 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2381 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2382
2383 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
2384 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2385 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
2386 CL45_WR_OVER_CL22(bp, port,
2387 params->phy_addr,
2388 bank ,
2389 MDIO_RX0_RX_EQ_BOOST,
2390 ((rx_eq &
2391 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2392 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2393 }
2394
2395 /* forced speed requested? */
2396 if (vars->line_speed != SPEED_AUTO_NEG) {
2397 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2398
2399 /* disable autoneg */
2400 bnx2x_set_autoneg(params, vars);
2401
2402 /* program speed and duplex */
2403 bnx2x_program_serdes(params, vars);
2404
2405 } else { /* AN_mode */
2406 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2407
2408 /* AN enabled */
2409 bnx2x_set_brcm_cl37_advertisment(params);
2410
2411 /* program duplex & pause advertisement (for aneg) */
2412 bnx2x_set_ieee_aneg_advertisment(params,
2413 vars->ieee_fc);
2414
2415 /* enable autoneg */
2416 bnx2x_set_autoneg(params, vars);
2417
2418 /* enable and restart AN */
2419 bnx2x_restart_autoneg(params);
2420 }
2421
2422 } else { /* SGMII mode */
2423 DP(NETIF_MSG_LINK, "SGMII\n");
2424
2425 bnx2x_initialize_sgmii_process(params, vars);
2426 }
2427}
2428
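
The RX-equalizer loop in bnx2x_init_internal_phy() above leans on the RX0..RX_ALL banks being equally spaced, so one stride of (RX1 - RX0) visits every lane bank up to RX_ALL. A minimal sketch of the idiom (the bank addresses and the write helper are illustrative, not the driver's MDIO_REG_BANK_* constants):

    #define BANK_RX0        0x80b0      /* illustrative addresses */
    #define BANK_RX1        0x80c0
    #define BANK_RX_ALL     0x8130

    static void write_bank_reg(unsigned int bank, unsigned short val);

    static void set_all_rx_banks(unsigned short val)
    {
            unsigned int bank;

            /* stride = distance between two consecutive lane banks */
            for (bank = BANK_RX0; bank <= BANK_RX_ALL;
                 bank += (BANK_RX1 - BANK_RX0))
                    write_bank_reg(bank, val);
    }
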
2305static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) 2429static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2306{ 2430{
2307 struct bnx2x *bp = params->bp; 2431 struct bnx2x *bp = params->bp;
@@ -2343,7 +2467,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2343 2467
2344 switch (ext_phy_type) { 2468 switch (ext_phy_type) {
2345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2346 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2347 break; 2470 break;
2348 2471
2349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2542,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2419 ext_phy_type, 2542 ext_phy_type,
2420 ext_phy_addr, 2543 ext_phy_addr,
2421 MDIO_AN_DEVAD, 2544 MDIO_AN_DEVAD,
2422 MDIO_AN_REG_CL37_FD, 2545 MDIO_AN_REG_CL37_FC_LP,
2423 0x0020); 2546 0x0020);
2424 /* Enable CL37 AN */ 2547 /* Enable CL37 AN */
2425 bnx2x_cl45_write(bp, params->port, 2548 bnx2x_cl45_write(bp, params->port,
@@ -2458,54 +2581,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2458 rx_alarm_ctrl_val = 0x400; 2581 rx_alarm_ctrl_val = 0x400;
2459 lasi_ctrl_val = 0x0004; 2582 lasi_ctrl_val = 0x0004;
2460 } else { 2583 } else {
2461 /* In 8073, port1 is directed through emac0 and
2462 * port0 is directed through emac1
2463 */
2464 rx_alarm_ctrl_val = (1<<2); 2584 rx_alarm_ctrl_val = (1<<2);
2465 /*lasi_ctrl_val = 0x0005;*/
2466 lasi_ctrl_val = 0x0004; 2585 lasi_ctrl_val = 0x0004;
2467 } 2586 }
2468 2587
2469 /* Wait for soft reset to get cleared upto 1 sec */ 2588 /* enable LASI */
2470 for (cnt = 0; cnt < 1000; cnt++) { 2589 bnx2x_cl45_write(bp, params->port,
2471 bnx2x_cl45_read(bp, params->port, 2590 ext_phy_type,
2472 ext_phy_type, 2591 ext_phy_addr,
2473 ext_phy_addr, 2592 MDIO_PMA_DEVAD,
2474 MDIO_PMA_DEVAD, 2593 MDIO_PMA_REG_RX_ALARM_CTRL,
2475 MDIO_PMA_REG_CTRL, 2594 rx_alarm_ctrl_val);
2476 &ctrl); 2595
2477 if (!(ctrl & (1<<15))) 2596 bnx2x_cl45_write(bp, params->port,
2478 break; 2597 ext_phy_type,
2479 msleep(1); 2598 ext_phy_addr,
2480 } 2599 MDIO_PMA_DEVAD,
2481 DP(NETIF_MSG_LINK, 2600 MDIO_PMA_REG_LASI_CTRL,
2482 "807x control reg 0x%x (after %d ms)\n", 2601 lasi_ctrl_val);
2483 ctrl, cnt); 2602
2603 bnx2x_8073_set_pause_cl37(params, vars);
2484 2604
2485 if (ext_phy_type == 2605 if (ext_phy_type ==
2486 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ 2606 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
2487 bnx2x_bcm8072_external_rom_boot(params); 2607 bnx2x_bcm8072_external_rom_boot(params);
2488 } else { 2608 } else {
2489 bnx2x_bcm8073_external_rom_boot(params); 2609
2490 /* In case of 8073 with long xaui lines, 2610 /* In case of 8073 with long xaui lines,
2491 don't set the 8073 xaui low power*/ 2611 don't set the 8073 xaui low power*/
2492 bnx2x_bcm8073_set_xaui_low_power_mode(params); 2612 bnx2x_bcm8073_set_xaui_low_power_mode(params);
2493 } 2613 }
2494 2614
2495 /* enable LASI */ 2615 bnx2x_cl45_read(bp, params->port,
2496 bnx2x_cl45_write(bp, params->port, 2616 ext_phy_type,
2497 ext_phy_type, 2617 ext_phy_addr,
2498 ext_phy_addr, 2618 MDIO_PMA_DEVAD,
2499 MDIO_PMA_DEVAD, 2619 0xca13,
2500 MDIO_PMA_REG_RX_ALARM_CTRL, 2620 &tmp1);
2501 rx_alarm_ctrl_val);
2502
2503 bnx2x_cl45_write(bp, params->port,
2504 ext_phy_type,
2505 ext_phy_addr,
2506 MDIO_PMA_DEVAD,
2507 MDIO_PMA_REG_LASI_CTRL,
2508 lasi_ctrl_val);
2509 2621
2510 bnx2x_cl45_read(bp, params->port, 2622 bnx2x_cl45_read(bp, params->port,
2511 ext_phy_type, 2623 ext_phy_type,
@@ -2519,12 +2631,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2519 /* If this is forced speed, set to KR or KX 2631 /* If this is forced speed, set to KR or KX
2520 * (all other are not supported) 2632 * (all other are not supported)
2521 */ 2633 */
2522 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 2634 if (params->loopback_mode == LOOPBACK_EXT) {
2523 if (params->req_line_speed == SPEED_10000) { 2635 bnx2x_bcm807x_force_10G(params);
2524 bnx2x_bcm807x_force_10G(params); 2636 DP(NETIF_MSG_LINK,
2525 DP(NETIF_MSG_LINK, 2637 "Forced speed 10G on 807X\n");
2526 "Forced speed 10G on 807X\n"); 2638 break;
2527 break; 2639 } else {
2640 bnx2x_cl45_write(bp, params->port,
2641 ext_phy_type, ext_phy_addr,
2642 MDIO_PMA_DEVAD,
2643 MDIO_PMA_REG_BCM_CTRL,
2644 0x0002);
2645 }
2646 if (params->req_line_speed != SPEED_AUTO_NEG) {
2647 if (params->req_line_speed == SPEED_10000) {
2648 val = (1<<7);
2528 } else if (params->req_line_speed == 2649 } else if (params->req_line_speed ==
2529 SPEED_2500) { 2650 SPEED_2500) {
2530 val = (1<<5); 2651 val = (1<<5);
@@ -2539,11 +2660,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2539 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2660 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2540 val |= (1<<7); 2661 val |= (1<<7);
2541 2662
2663 /* Note that 2.5G works only when
2664 used with 1G advertisement */
2542 if (params->speed_cap_mask & 2665 if (params->speed_cap_mask &
2543 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 2666 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
2667 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
2544 val |= (1<<5); 2668 val |= (1<<5);
2545 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); 2669 DP(NETIF_MSG_LINK,
2546 /*val = ((1<<5)|(1<<7));*/ 2670 "807x autoneg val = 0x%x\n", val);
2547 } 2671 }
2548 2672
2549 bnx2x_cl45_write(bp, params->port, 2673 bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2678,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2554 2678
2555 if (ext_phy_type == 2679 if (ext_phy_type ==
2556 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2680 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2557 /* Disable 2.5Ghz */ 2681
2558 bnx2x_cl45_read(bp, params->port, 2682 bnx2x_cl45_read(bp, params->port,
2559 ext_phy_type, 2683 ext_phy_type,
2560 ext_phy_addr, 2684 ext_phy_addr,
2561 MDIO_AN_DEVAD, 2685 MDIO_AN_DEVAD,
2562 0x8329, &tmp1); 2686 0x8329, &tmp1);
2563/* SUPPORT_SPEED_CAPABILITY 2687
2564 (Due to the nature of the link order, its not 2688 if (((params->speed_cap_mask &
2565 possible to enable 2.5G within the autoneg 2689 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
2566 capabilities) 2690 (params->req_line_speed ==
2567 if (params->speed_cap_mask & 2691 SPEED_AUTO_NEG)) ||
2568 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 2692 (params->req_line_speed ==
2569*/ 2693 SPEED_2500)) {
2570 if (params->req_line_speed == SPEED_2500) {
2571 u16 phy_ver; 2694 u16 phy_ver;
2572 /* Allow 2.5G for A1 and above */ 2695 /* Allow 2.5G for A1 and above */
2573 bnx2x_cl45_read(bp, params->port, 2696 bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2698,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2575 ext_phy_addr, 2698 ext_phy_addr,
2576 MDIO_PMA_DEVAD, 2699 MDIO_PMA_DEVAD,
2577 0xc801, &phy_ver); 2700 0xc801, &phy_ver);
2578 2701 DP(NETIF_MSG_LINK, "Add 2.5G\n");
2579 if (phy_ver > 0) 2702 if (phy_ver > 0)
2580 tmp1 |= 1; 2703 tmp1 |= 1;
2581 else 2704 else
2582 tmp1 &= 0xfffe; 2705 tmp1 &= 0xfffe;
2583 } 2706 } else {
2584 else 2707 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
2585 tmp1 &= 0xfffe; 2708 tmp1 &= 0xfffe;
2709 }
2586 2710
2587 bnx2x_cl45_write(bp, params->port, 2711 bnx2x_cl45_write(bp, params->port,
2588 ext_phy_type, 2712 ext_phy_type,
2589 ext_phy_addr, 2713 ext_phy_addr,
2590 MDIO_AN_DEVAD, 2714 MDIO_AN_DEVAD,
2591 0x8329, tmp1); 2715 0x8329, tmp1);
2592 } 2716 }
2593 /* Add support for CL37 (passive mode) I */ 2717
2594 bnx2x_cl45_write(bp, params->port, 2718 /* Add support for CL37 (passive mode) II */
2719
2720 bnx2x_cl45_read(bp, params->port,
2595 ext_phy_type, 2721 ext_phy_type,
2596 ext_phy_addr, 2722 ext_phy_addr,
2597 MDIO_AN_DEVAD, 2723 MDIO_AN_DEVAD,
2598 MDIO_AN_REG_CL37_CL73, 0x040c); 2724 MDIO_AN_REG_CL37_FC_LD,
2599 /* Add support for CL37 (passive mode) II */ 2725 &tmp1);
2726
2600 bnx2x_cl45_write(bp, params->port, 2727 bnx2x_cl45_write(bp, params->port,
2601 ext_phy_type, 2728 ext_phy_type,
2602 ext_phy_addr, 2729 ext_phy_addr,
2603 MDIO_AN_DEVAD, 2730 MDIO_AN_DEVAD,
2604 MDIO_AN_REG_CL37_FD, 0x20); 2731 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
2732 ((params->req_duplex == DUPLEX_FULL) ?
2733 0x20 : 0x40)));
2734
2605 /* Add support for CL37 (passive mode) III */ 2735 /* Add support for CL37 (passive mode) III */
2606 bnx2x_cl45_write(bp, params->port, 2736 bnx2x_cl45_write(bp, params->port,
2607 ext_phy_type, 2737 ext_phy_type,
2608 ext_phy_addr, 2738 ext_phy_addr,
2609 MDIO_AN_DEVAD, 2739 MDIO_AN_DEVAD,
2610 MDIO_AN_REG_CL37_AN, 0x1000); 2740 MDIO_AN_REG_CL37_AN, 0x1000);
2611 /* Restart autoneg */
2612 msleep(500);
2613 2741
2614 if (ext_phy_type == 2742 if (ext_phy_type ==
2615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2743 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2616 2744 /* The SNR will improve about 2db by changing
2617 /* The SNR will improve about 2db by changing the
2618 BW and FFE main tap. The rest of the commands are executed 2745 BW and FFE main tap. The rest of the commands are executed
2619 after link is up*/ 2746 after link is up*/
2620 /* Change FFE main cursor to 5 in EDC register */ 2747 /*Change FFE main cursor to 5 in EDC register*/
2621 if (bnx2x_8073_is_snr_needed(params)) 2748 if (bnx2x_8073_is_snr_needed(params))
2622 bnx2x_cl45_write(bp, params->port, 2749 bnx2x_cl45_write(bp, params->port,
2623 ext_phy_type, 2750 ext_phy_type,
@@ -2626,25 +2753,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2626 MDIO_PMA_REG_EDC_FFE_MAIN, 2753 MDIO_PMA_REG_EDC_FFE_MAIN,
2627 0xFB0C); 2754 0xFB0C);
2628 2755
2629 /* Enable FEC (Forward Error Correction) 2756 /* Enable FEC (Forward Error Correction)
2630 Request in the AN */ 2757 Request in the AN */
2631 bnx2x_cl45_read(bp, params->port, 2758 bnx2x_cl45_read(bp, params->port,
2632 ext_phy_type, 2759 ext_phy_type,
2633 ext_phy_addr, 2760 ext_phy_addr,
2634 MDIO_AN_DEVAD, 2761 MDIO_AN_DEVAD,
2635 MDIO_AN_REG_ADV2, &tmp1); 2762 MDIO_AN_REG_ADV2, &tmp1);
2636 2763
2637 tmp1 |= (1<<15); 2764 tmp1 |= (1<<15);
2765
2766 bnx2x_cl45_write(bp, params->port,
2767 ext_phy_type,
2768 ext_phy_addr,
2769 MDIO_AN_DEVAD,
2770 MDIO_AN_REG_ADV2, tmp1);
2638 2771
2639 bnx2x_cl45_write(bp, params->port,
2640 ext_phy_type,
2641 ext_phy_addr,
2642 MDIO_AN_DEVAD,
2643 MDIO_AN_REG_ADV2, tmp1);
2644 } 2772 }
2645 2773
2646 bnx2x_ext_phy_set_pause(params, vars); 2774 bnx2x_ext_phy_set_pause(params, vars);
2647 2775
2776 /* Restart autoneg */
2777 msleep(500);
2648 bnx2x_cl45_write(bp, params->port, 2778 bnx2x_cl45_write(bp, params->port,
2649 ext_phy_type, 2779 ext_phy_type,
2650 ext_phy_addr, 2780 ext_phy_addr,
@@ -2701,10 +2831,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2701 } 2831 }
2702 2832
2703 } else { /* SerDes */ 2833 } else { /* SerDes */
2704/* ext_phy_addr = ((bp->ext_phy_config & 2834
2705 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2706 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2707*/
2708 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 2835 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2709 switch (ext_phy_type) { 2836 switch (ext_phy_type) {
2710 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2837 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2853,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2726 2853
2727 2854
2728static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, 2855static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2729 struct link_vars *vars) 2856 struct link_vars *vars)
2730{ 2857{
2731 struct bnx2x *bp = params->bp; 2858 struct bnx2x *bp = params->bp;
2732 u32 ext_phy_type; 2859 u32 ext_phy_type;
@@ -2767,6 +2894,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2767 MDIO_PMA_REG_RX_SD, &rx_sd); 2894 MDIO_PMA_REG_RX_SD, &rx_sd);
2768 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); 2895 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2769 ext_phy_link_up = (rx_sd & 0x1); 2896 ext_phy_link_up = (rx_sd & 0x1);
2897 if (ext_phy_link_up)
2898 vars->line_speed = SPEED_10000;
2770 break; 2899 break;
2771 2900
2772 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2901 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2939,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2810 */ 2939 */
2811 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || 2940 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
2812 (val2 & (1<<1))); 2941 (val2 & (1<<1)));
2942 if (ext_phy_link_up) {
2943 if (val2 & (1<<1))
2944 vars->line_speed = SPEED_1000;
2945 else
2946 vars->line_speed = SPEED_10000;
2947 }
2948
2813 /* clear LASI indication*/ 2949 /* clear LASI indication*/
2814 bnx2x_cl45_read(bp, params->port, ext_phy_type, 2950 bnx2x_cl45_read(bp, params->port, ext_phy_type,
2815 ext_phy_addr, 2951 ext_phy_addr,
@@ -2820,6 +2956,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2820 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2821 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2822 { 2958 {
2959 u16 link_status = 0;
2960 u16 an1000_status = 0;
2823 if (ext_phy_type == 2961 if (ext_phy_type ==
2824 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { 2962 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
2825 bnx2x_cl45_read(bp, params->port, 2963 bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2984,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2846 MDIO_PMA_DEVAD, 2984 MDIO_PMA_DEVAD,
2847 MDIO_PMA_REG_LASI_STATUS, &val1); 2985 MDIO_PMA_REG_LASI_STATUS, &val1);
2848 2986
2849 bnx2x_cl45_read(bp, params->port,
2850 ext_phy_type,
2851 ext_phy_addr,
2852 MDIO_PMA_DEVAD,
2853 MDIO_PMA_REG_LASI_STATUS, &val2);
2854 DP(NETIF_MSG_LINK, 2987 DP(NETIF_MSG_LINK,
2855 "8703 LASI status 0x%x->0x%x\n", 2988 "8703 LASI status 0x%x\n",
2856 val1, val2); 2989 val1);
2857 } 2990 }
2858 2991
2859 /* clear the interrupt LASI status register */ 2992 /* clear the interrupt LASI status register */
@@ -2869,20 +3002,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2869 MDIO_PCS_REG_STATUS, &val1); 3002 MDIO_PCS_REG_STATUS, &val1);
2870 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", 3003 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
2871 val2, val1); 3004 val2, val1);
2872 /* Check the LASI */ 3005 /* Clear MSG-OUT */
2873 bnx2x_cl45_read(bp, params->port, 3006 bnx2x_cl45_read(bp, params->port,
2874 ext_phy_type, 3007 ext_phy_type,
2875 ext_phy_addr, 3008 ext_phy_addr,
2876 MDIO_PMA_DEVAD, 3009 MDIO_PMA_DEVAD,
2877 MDIO_PMA_REG_RX_ALARM, &val2); 3010 0xca13,
3011 &val1);
3012
3013 /* Check the LASI */
2878 bnx2x_cl45_read(bp, params->port, 3014 bnx2x_cl45_read(bp, params->port,
2879 ext_phy_type, 3015 ext_phy_type,
2880 ext_phy_addr, 3016 ext_phy_addr,
2881 MDIO_PMA_DEVAD, 3017 MDIO_PMA_DEVAD,
2882 MDIO_PMA_REG_RX_ALARM, 3018 MDIO_PMA_REG_RX_ALARM, &val2);
2883 &val1); 3019
2884 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", 3020 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
2885 val2, val1); 3021
2886 /* Check the link status */ 3022 /* Check the link status */
2887 bnx2x_cl45_read(bp, params->port, 3023 bnx2x_cl45_read(bp, params->port,
2888 ext_phy_type, 3024 ext_phy_type,
@@ -2905,29 +3041,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2905 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); 3041 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
2906 if (ext_phy_type == 3042 if (ext_phy_type ==
2907 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 3043 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2908 u16 an1000_status = 0; 3044
2909 if (ext_phy_link_up && 3045 if (ext_phy_link_up &&
2910 ( 3046 ((params->req_line_speed !=
2911 (params->req_line_speed != SPEED_10000) 3047 SPEED_10000))) {
2912 )) {
2913 if (bnx2x_bcm8073_xaui_wa(params) 3048 if (bnx2x_bcm8073_xaui_wa(params)
2914 != 0) { 3049 != 0) {
2915 ext_phy_link_up = 0; 3050 ext_phy_link_up = 0;
2916 break; 3051 break;
2917 } 3052 }
2918 bnx2x_cl45_read(bp, params->port, 3053 }
3054 bnx2x_cl45_read(bp, params->port,
2919 ext_phy_type, 3055 ext_phy_type,
2920 ext_phy_addr, 3056 ext_phy_addr,
2921 MDIO_XS_DEVAD, 3057 MDIO_AN_DEVAD,
2922 0x8304, 3058 0x8304,
2923 &an1000_status); 3059 &an1000_status);
2924 bnx2x_cl45_read(bp, params->port, 3060 bnx2x_cl45_read(bp, params->port,
2925 ext_phy_type, 3061 ext_phy_type,
2926 ext_phy_addr, 3062 ext_phy_addr,
2927 MDIO_XS_DEVAD, 3063 MDIO_AN_DEVAD,
2928 0x8304, 3064 0x8304,
2929 &an1000_status); 3065 &an1000_status);
2930 } 3066
2931 /* Check the link status on 1.1.2 */ 3067 /* Check the link status on 1.1.2 */
2932 bnx2x_cl45_read(bp, params->port, 3068 bnx2x_cl45_read(bp, params->port,
2933 ext_phy_type, 3069 ext_phy_type,
@@ -2943,8 +3079,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2943 "an_link_status=0x%x\n", 3079 "an_link_status=0x%x\n",
2944 val2, val1, an1000_status); 3080 val2, val1, an1000_status);
2945 3081
2946 ext_phy_link_up = (((val1 & 4) == 4) || 3082 ext_phy_link_up = (((val1 & 4) == 4) ||
2947 (an1000_status & (1<<1))); 3083 (an1000_status & (1<<1)));
2948 if (ext_phy_link_up && 3084 if (ext_phy_link_up &&
2949 bnx2x_8073_is_snr_needed(params)) { 3085 bnx2x_8073_is_snr_needed(params)) {
2950 /* The SNR will improve about 2db by 3086 /* The SNR will improve about 2db by
@@ -2968,8 +3104,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2968 MDIO_PMA_REG_CDR_BANDWIDTH, 3104 MDIO_PMA_REG_CDR_BANDWIDTH,
2969 0x0333); 3105 0x0333);
2970 3106
3107
3108 }
3109 bnx2x_cl45_read(bp, params->port,
3110 ext_phy_type,
3111 ext_phy_addr,
3112 MDIO_PMA_DEVAD,
3113 0xc820,
3114 &link_status);
3115
3116 /* Bits 0..2 --> speed detected,
3117 bits 13..15--> link is down */
3118 if ((link_status & (1<<2)) &&
3119 (!(link_status & (1<<15)))) {
3120 ext_phy_link_up = 1;
3121 vars->line_speed = SPEED_10000;
3122 DP(NETIF_MSG_LINK,
3123 "port %x: External link"
3124 " up in 10G\n", params->port);
3125 } else if ((link_status & (1<<1)) &&
3126 (!(link_status & (1<<14)))) {
3127 ext_phy_link_up = 1;
3128 vars->line_speed = SPEED_2500;
3129 DP(NETIF_MSG_LINK,
3130 "port %x: External link"
3131 " up in 2.5G\n", params->port);
3132 } else if ((link_status & (1<<0)) &&
3133 (!(link_status & (1<<13)))) {
3134 ext_phy_link_up = 1;
3135 vars->line_speed = SPEED_1000;
3136 DP(NETIF_MSG_LINK,
3137 "port %x: External link"
3138 " up in 1G\n", params->port);
3139 } else {
3140 ext_phy_link_up = 0;
3141 DP(NETIF_MSG_LINK,
3142 "port %x: External link"
3143 " is down\n", params->port);
3144 }
3145 } else {
3146 /* See if 1G link is up for the 8072 */
3147 bnx2x_cl45_read(bp, params->port,
3148 ext_phy_type,
3149 ext_phy_addr,
3150 MDIO_AN_DEVAD,
3151 0x8304,
3152 &an1000_status);
3153 bnx2x_cl45_read(bp, params->port,
3154 ext_phy_type,
3155 ext_phy_addr,
3156 MDIO_AN_DEVAD,
3157 0x8304,
3158 &an1000_status);
3159 if (an1000_status & (1<<1)) {
3160 ext_phy_link_up = 1;
3161 vars->line_speed = SPEED_1000;
3162 DP(NETIF_MSG_LINK,
3163 "port %x: External link"
3164 " up in 1G\n", params->port);
3165 } else if (ext_phy_link_up) {
3166 ext_phy_link_up = 1;
3167 vars->line_speed = SPEED_10000;
3168 DP(NETIF_MSG_LINK,
3169 "port %x: External link"
3170 " up in 10G\n", params->port);
2971 } 3171 }
2972 } 3172 }
3173
3174
2973 break; 3175 break;
2974 } 3176 }
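
The 0xc820 read above follows the layout in the comment: bits 2/1/0 flag a detected speed (10G/2.5G/1G) and bits 15/14/13 flag that the matching link is down, so a speed counts as up only when its detect bit is set and its down bit is clear. A condensed sketch of that decode, with plain integers standing in for the SPEED_* constants:

    /* returns 1 and fills *speed when any rate reports link up */
    static int decode_8073_link(unsigned short status, int *speed)
    {
            if ((status & (1 << 2)) && !(status & (1 << 15))) {
                    *speed = 10000;                 /* 10G  */
                    return 1;
            }
            if ((status & (1 << 1)) && !(status & (1 << 14))) {
                    *speed = 2500;                  /* 2.5G */
                    return 1;
            }
            if ((status & (1 << 0)) && !(status & (1 << 13))) {
                    *speed = 1000;                  /* 1G   */
                    return 1;
            }
            return 0;                               /* down */
    }
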
2975 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 3177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
3006 MDIO_AN_DEVAD, 3208 MDIO_AN_DEVAD,
3007 MDIO_AN_REG_MASTER_STATUS, 3209 MDIO_AN_REG_MASTER_STATUS,
3008 &val2); 3210 &val2);
3211 vars->line_speed = SPEED_10000;
3009 DP(NETIF_MSG_LINK, 3212 DP(NETIF_MSG_LINK,
3010 "SFX7101 AN status 0x%x->Master=%x\n", 3213 "SFX7101 AN status 0x%x->Master=%x\n",
3011 val2, 3214 val2,
@@ -3100,7 +3303,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
3100 * link management 3303 * link management
3101 */ 3304 */
3102static void bnx2x_link_int_ack(struct link_params *params, 3305static void bnx2x_link_int_ack(struct link_params *params,
3103 struct link_vars *vars, u16 is_10g) 3306 struct link_vars *vars, u8 is_10g)
3104{ 3307{
3105 struct bnx2x *bp = params->bp; 3308 struct bnx2x *bp = params->bp;
3106 u8 port = params->port; 3309 u8 port = params->port;
@@ -3181,7 +3384,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
3181} 3384}
3182 3385
3183 3386
3184static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) 3387static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
3388 u32 ext_phy_type)
3185{ 3389{
3186 u32 cnt = 0; 3390 u32 cnt = 0;
3187 u16 ctrl = 0; 3391 u16 ctrl = 0;
@@ -3192,12 +3396,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3192 3396
3193 /* take ext phy out of reset */ 3397 /* take ext phy out of reset */
3194 bnx2x_set_gpio(bp, 3398 bnx2x_set_gpio(bp,
3195 MISC_REGISTERS_GPIO_2, 3399 MISC_REGISTERS_GPIO_2,
3196 MISC_REGISTERS_GPIO_HIGH); 3400 MISC_REGISTERS_GPIO_HIGH,
3401 port);
3197 3402
3198 bnx2x_set_gpio(bp, 3403 bnx2x_set_gpio(bp,
3199 MISC_REGISTERS_GPIO_1, 3404 MISC_REGISTERS_GPIO_1,
3200 MISC_REGISTERS_GPIO_HIGH); 3405 MISC_REGISTERS_GPIO_HIGH,
3406 port);
3201 3407
3202 /* wait for 5ms */ 3408 /* wait for 5ms */
3203 msleep(5); 3409 msleep(5);
@@ -3205,7 +3411,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3205 for (cnt = 0; cnt < 1000; cnt++) { 3411 for (cnt = 0; cnt < 1000; cnt++) {
3206 msleep(1); 3412 msleep(1);
3207 bnx2x_cl45_read(bp, port, 3413 bnx2x_cl45_read(bp, port,
3208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 3414 ext_phy_type,
3209 ext_phy_addr, 3415 ext_phy_addr,
3210 MDIO_PMA_DEVAD, 3416 MDIO_PMA_DEVAD,
3211 MDIO_PMA_REG_CTRL, 3417 MDIO_PMA_REG_CTRL,
@@ -3217,13 +3423,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3217 } 3423 }
3218} 3424}
3219 3425
3220static void bnx2x_turn_off_sf(struct bnx2x *bp) 3426static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
3221{ 3427{
3222 /* put sf to reset */ 3428 /* put sf to reset */
3223 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
3224 bnx2x_set_gpio(bp, 3429 bnx2x_set_gpio(bp,
3225 MISC_REGISTERS_GPIO_2, 3430 MISC_REGISTERS_GPIO_1,
3226 MISC_REGISTERS_GPIO_LOW); 3431 MISC_REGISTERS_GPIO_LOW,
3432 port);
3433 bnx2x_set_gpio(bp,
3434 MISC_REGISTERS_GPIO_2,
3435 MISC_REGISTERS_GPIO_LOW,
3436 port);
3227} 3437}
3228 3438
3229u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 3439u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3463,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3253 3463
3254 /* Take ext phy out of reset */ 3464 /* Take ext phy out of reset */
3255 if (!driver_loaded) 3465 if (!driver_loaded)
3256 bnx2x_turn_on_sf(bp, params->port, ext_phy_addr); 3466 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3467 ext_phy_type);
3257 3468
3258 /* wait for 1ms */ 3469 /* wait for 1ms */
3259 msleep(1); 3470 msleep(1);
@@ -3276,11 +3487,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3276 version[4] = '\0'; 3487 version[4] = '\0';
3277 3488
3278 if (!driver_loaded) 3489 if (!driver_loaded)
3279 bnx2x_turn_off_sf(bp); 3490 bnx2x_turn_off_sf(bp, params->port);
3280 break; 3491 break;
3281 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 3492 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 3493 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3283 { 3494 {
3495 /* Take ext phy out of reset */
3496 if (!driver_loaded)
3497 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3498 ext_phy_type);
3499
3284 bnx2x_cl45_read(bp, params->port, ext_phy_type, 3500 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3285 ext_phy_addr, 3501 ext_phy_addr,
3286 MDIO_PMA_DEVAD, 3502 MDIO_PMA_DEVAD,
@@ -3333,7 +3549,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
3333 struct bnx2x *bp = params->bp; 3549 struct bnx2x *bp = params->bp;
3334 3550
3335 if (is_10g) { 3551 if (is_10g) {
3336 u32 md_devad; 3552 u32 md_devad;
3337 3553
3338 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 3554 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3339 3555
@@ -3553,6 +3769,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3553 u16 hw_led_mode, u32 chip_id) 3769 u16 hw_led_mode, u32 chip_id)
3554{ 3770{
3555 u8 rc = 0; 3771 u8 rc = 0;
3772 u32 tmp;
3773 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3556 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 3774 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
3557 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 3775 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
3558 speed, hw_led_mode); 3776 speed, hw_led_mode);
@@ -3561,6 +3779,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3561 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3779 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3562 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3780 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3563 SHARED_HW_CFG_LED_MAC1); 3781 SHARED_HW_CFG_LED_MAC1);
3782
3783 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3784 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3564 break; 3785 break;
3565 3786
3566 case LED_MODE_OPER: 3787 case LED_MODE_OPER:
@@ -3572,6 +3793,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3572 LED_BLINK_RATE_VAL); 3793 LED_BLINK_RATE_VAL);
3573 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3794 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3574 port*4, 1); 3795 port*4, 1);
3796 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3797 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3798 (tmp & (~EMAC_LED_OVERRIDE)));
3799
3575 if (!CHIP_IS_E1H(bp) && 3800 if (!CHIP_IS_E1H(bp) &&
3576 ((speed == SPEED_2500) || 3801 ((speed == SPEED_2500) ||
3577 (speed == SPEED_1000) || 3802 (speed == SPEED_1000) ||
@@ -3622,7 +3847,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3622 struct bnx2x *bp = params->bp; 3847 struct bnx2x *bp = params->bp;
3623 u8 port = params->port; 3848 u8 port = params->port;
3624 u8 rc = 0; 3849 u8 rc = 0;
3625 3850 u8 non_ext_phy;
3851 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3626 /* Activate the external PHY */ 3852 /* Activate the external PHY */
3627 bnx2x_ext_phy_reset(params, vars); 3853 bnx2x_ext_phy_reset(params, vars);
3628 3854
@@ -3644,10 +3870,6 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3644 bnx2x_set_swap_lanes(params); 3870 bnx2x_set_swap_lanes(params);
3645 } 3871 }
3646 3872
3647 /* Set Parallel Detect */
3648 if (params->req_line_speed == SPEED_AUTO_NEG)
3649 bnx2x_set_parallel_detection(params, vars->phy_flags);
3650
3651 if (vars->phy_flags & PHY_XGXS_FLAG) { 3873 if (vars->phy_flags & PHY_XGXS_FLAG) {
3652 if (params->req_line_speed && 3874 if (params->req_line_speed &&
3653 ((params->req_line_speed == SPEED_100) || 3875 ((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3879,33 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3657 vars->phy_flags &= ~PHY_SGMII_FLAG; 3879 vars->phy_flags &= ~PHY_SGMII_FLAG;
3658 } 3880 }
3659 } 3881 }
3882 /* In case of external phy existence, the line speed would be the
3883 line speed linked up by the external phy. In case it is direct only,
3884 then the line_speed during initialization will be equal to the
3885 req_line_speed*/
3886 vars->line_speed = params->req_line_speed;
3660 3887
3661 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 3888 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
3662 u16 bank, rx_eq;
3663
3664 rx_eq = ((params->serdes_config &
3665 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3666 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3667 3889
3668 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); 3890 /* init ext phy and enable link state int */
3669 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; 3891 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
3670 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { 3892 (params->loopback_mode == LOOPBACK_XGXS_10) ||
3671 CL45_WR_OVER_CL22(bp, port, 3893 (params->loopback_mode == LOOPBACK_EXT_PHY));
3672 params->phy_addr, 3894
3673 bank , 3895 if (non_ext_phy ||
3674 MDIO_RX0_RX_EQ_BOOST, 3896 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
3675 ((rx_eq & 3897 if (params->req_line_speed == SPEED_AUTO_NEG)
3676 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | 3898 bnx2x_set_parallel_detection(params, vars->phy_flags);
3677 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); 3899 bnx2x_init_internal_phy(params, vars);
3678 }
3679
3680 /* forced speed requested? */
3681 if (params->req_line_speed != SPEED_AUTO_NEG) {
3682 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3683
3684 /* disable autoneg */
3685 bnx2x_set_autoneg(params, vars);
3686
3687 /* program speed and duplex */
3688 bnx2x_program_serdes(params);
3689 vars->ieee_fc =
3690 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3691
3692 } else { /* AN_mode */
3693 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3694
3695 /* AN enabled */
3696 bnx2x_set_brcm_cl37_advertisment(params);
3697
3698 /* program duplex & pause advertisement (for aneg) */
3699 bnx2x_set_ieee_aneg_advertisment(params,
3700 &vars->ieee_fc);
3701
3702 /* enable autoneg */
3703 bnx2x_set_autoneg(params, vars);
3704
3705 /* enable and restart AN */
3706 bnx2x_restart_autoneg(params);
3707 }
3708
3709 } else { /* SGMII mode */
3710 DP(NETIF_MSG_LINK, "SGMII\n");
3711
3712 bnx2x_initialize_sgmii_process(params);
3713 } 3900 }
3714 3901
3715 /* init ext phy and enable link state int */ 3902 if (!non_ext_phy)
3716 rc |= bnx2x_ext_phy_init(params, vars); 3903 rc |= bnx2x_ext_phy_init(params, vars);
3717 3904
3718 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 3905 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3719 (NIG_STATUS_XGXS0_LINK10G | 3906 (NIG_STATUS_XGXS0_LINK10G |
3720 NIG_STATUS_XGXS0_LINK_STATUS | 3907 NIG_STATUS_XGXS0_LINK_STATUS |
3721 NIG_STATUS_SERDES0_LINK_STATUS)); 3908 NIG_STATUS_SERDES0_LINK_STATUS));
3722 3909
3723 return rc; 3910 return rc;
3724 3911
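
The reworked initialization hinges on the non_ext_phy test above: the internal XGXS/SerDes is programmed directly when no external PHY is fitted, or when a loopback mode keeps the link inside the chip. The same predicate restated as a standalone sketch, reusing the macros that appear in this patch:

    static int is_non_ext_phy(unsigned int ext_phy_type,
                              unsigned int loopback_mode)
    {
            return (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
                   (loopback_mode == LOOPBACK_XGXS_10) ||
                   (loopback_mode == LOOPBACK_EXT_PHY);
    }
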
@@ -3730,15 +3917,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3730 struct bnx2x *bp = params->bp; 3917 struct bnx2x *bp = params->bp;
3731 3918
3732 u32 val; 3919 u32 val;
3733 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 3920 DP(NETIF_MSG_LINK, "Phy Initialization started \n");
3734 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", 3921 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
3735 params->req_line_speed, params->req_flow_ctrl); 3922 params->req_line_speed, params->req_flow_ctrl);
3736 vars->link_status = 0; 3923 vars->link_status = 0;
3924 vars->phy_link_up = 0;
3925 vars->link_up = 0;
3926 vars->line_speed = 0;
3927 vars->duplex = DUPLEX_FULL;
3928 vars->flow_ctrl = FLOW_CTRL_NONE;
3929 vars->mac_type = MAC_TYPE_NONE;
3930
3737 if (params->switch_cfg == SWITCH_CFG_1G) 3931 if (params->switch_cfg == SWITCH_CFG_1G)
3738 vars->phy_flags = PHY_SERDES_FLAG; 3932 vars->phy_flags = PHY_SERDES_FLAG;
3739 else 3933 else
3740 vars->phy_flags = PHY_XGXS_FLAG; 3934 vars->phy_flags = PHY_XGXS_FLAG;
3741 3935
3936
3742 /* disable attentions */ 3937 /* disable attentions */
3743 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 3938 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
3744 (NIG_MASK_XGXS0_LINK_STATUS | 3939 (NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4089,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3894 } 4089 }
3895 4090
3896 bnx2x_link_initialize(params, vars); 4091 bnx2x_link_initialize(params, vars);
4092 msleep(30);
3897 bnx2x_link_int_enable(params); 4093 bnx2x_link_int_enable(params);
3898 } 4094 }
3899 return 0; 4095 return 0;
@@ -3943,39 +4139,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3943 /* HW reset */ 4139 /* HW reset */
3944 4140
3945 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4141 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3946 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4142 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4143 port);
3947 4144
3948 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4145 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3949 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4146 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4147 port);
3950 4148
3951 DP(NETIF_MSG_LINK, "reset external PHY\n"); 4149 DP(NETIF_MSG_LINK, "reset external PHY\n");
3952 } else { 4150 } else if (ext_phy_type ==
3953 4151 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3954 u8 ext_phy_addr = ((ext_phy_config & 4152 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
3955 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3956 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3957
3958 /* SW reset */
3959 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_CTRL,
3962 1<<15);
3963
3964 /* Set Low Power Mode */
3965 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3966 MDIO_PMA_DEVAD,
3967 MDIO_PMA_REG_CTRL,
3968 1<<11);
3969
3970
3971 if (ext_phy_type ==
3972 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3973 DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
3974 "low power mode\n", 4153 "low power mode\n",
3975 port); 4154 port);
3976 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4155 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3977 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4156 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3978 } 4157 port);
3979 } 4158 }
3980 } 4159 }
3981 /* reset the SerDes/XGXS */ 4160 /* reset the SerDes/XGXS */
@@ -3995,6 +4174,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3995 return 0; 4174 return 0;
3996} 4175}
3997 4176
4177static u8 bnx2x_update_link_down(struct link_params *params,
4178 struct link_vars *vars)
4179{
4180 struct bnx2x *bp = params->bp;
4181 u8 port = params->port;
4182 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
4183 bnx2x_set_led(bp, port, LED_MODE_OFF,
4184 0, params->hw_led_mode,
4185 params->chip_id);
4186
4187 /* indicate no mac active */
4188 vars->mac_type = MAC_TYPE_NONE;
4189
4190 /* update shared memory */
4191 vars->link_status = 0;
4192 vars->line_speed = 0;
4193 bnx2x_update_mng(params, vars->link_status);
4194
4195 /* activate nig drain */
4196 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
4197
4198 /* reset BigMac */
4199 bnx2x_bmac_rx_disable(bp, params->port);
4200 REG_WR(bp, GRCBASE_MISC +
4201 MISC_REGISTERS_RESET_REG_2_CLEAR,
4202 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4203 return 0;
4204}
4205
4206static u8 bnx2x_update_link_up(struct link_params *params,
4207 struct link_vars *vars,
4208 u8 link_10g, u32 gp_status)
4209{
4210 struct bnx2x *bp = params->bp;
4211 u8 port = params->port;
4212 u8 rc = 0;
4213 vars->link_status |= LINK_STATUS_LINK_UP;
4214 if (link_10g) {
4215 bnx2x_bmac_enable(params, vars, 0);
4216 bnx2x_set_led(bp, port, LED_MODE_OPER,
4217 SPEED_10000, params->hw_led_mode,
4218 params->chip_id);
4219
4220 } else {
4221 bnx2x_emac_enable(params, vars, 0);
4222 rc = bnx2x_emac_program(params, vars->line_speed,
4223 vars->duplex);
4224
4225 /* AN complete? */
4226 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
4227 if (!(vars->phy_flags &
4228 PHY_SGMII_FLAG))
4229 bnx2x_set_sgmii_tx_driver(params);
4230 }
4231 }
4232
4233 /* PBF - link up */
4234 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
4235 vars->line_speed);
4236
4237 /* disable drain */
4238 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
4239
4240 /* update shared memory */
4241 bnx2x_update_mng(params, vars->link_status);
4242 return rc;
4243}
3998/* This function should be called upon link interrupt */ 4244/* This function should be called upon link interrupt */
3999/* In case vars->link_up, driver needs to 4245/* In case vars->link_up, driver needs to
4000 1. Update the pbf 4246 1. Update the pbf
@@ -4012,10 +4258,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4012{ 4258{
4013 struct bnx2x *bp = params->bp; 4259 struct bnx2x *bp = params->bp;
4014 u8 port = params->port; 4260 u8 port = params->port;
4015 u16 i;
4016 u16 gp_status; 4261 u16 gp_status;
4017 u16 link_10g; 4262 u8 link_10g;
4018 u8 rc = 0; 4263 u8 ext_phy_link_up, rc = 0;
4264 u32 ext_phy_type;
4019 4265
4020 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", 4266 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
4021 port, 4267 port,
@@ -4031,15 +4277,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4031 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
4032 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4278 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
4033 4279
4280 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4034 4281
4035 /* avoid fast toggling */ 4282 /* Check external link change only for non-direct */
4036 for (i = 0; i < 10; i++) { 4283 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
4037 msleep(10); 4284
4038 CL45_RD_OVER_CL22(bp, port, params->phy_addr, 4285 /* Read gp_status */
4039 MDIO_REG_BANK_GP_STATUS, 4286 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
4040 MDIO_GP_STATUS_TOP_AN_STATUS1, 4287 MDIO_REG_BANK_GP_STATUS,
4041 &gp_status); 4288 MDIO_GP_STATUS_TOP_AN_STATUS1,
4042 } 4289 &gp_status);
4043 4290
4044 rc = bnx2x_link_settings_status(params, vars, gp_status); 4291 rc = bnx2x_link_settings_status(params, vars, gp_status);
4045 if (rc != 0) 4292 if (rc != 0)
@@ -4055,73 +4302,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4055 4302
4056 bnx2x_link_int_ack(params, vars, link_10g); 4303 bnx2x_link_int_ack(params, vars, link_10g);
4057 4304
 4305 /* In case the external phy link is up and the internal link is down
 4306 (not initialized yet, probably right after link initialization), it needs
 4307 to be initialized.
 4308 Note that after a link down-up as a result of a cable plug,
 4309 the xgxs link would probably come up again without the need to
 4310 initialize it */
4311
4312 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
4313 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
4314 (ext_phy_link_up && !vars->phy_link_up))
4315 bnx2x_init_internal_phy(params, vars);
4316
4058 /* link is up only if both local phy and external phy are up */ 4317 /* link is up only if both local phy and external phy are up */
4059 vars->link_up = (vars->phy_link_up && 4318 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
4060 bnx2x_ext_phy_is_link_up(params, vars));
4061 4319
4062 if (!vars->phy_link_up && 4320 if (vars->link_up)
4063 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) { 4321 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
4064 bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */ 4322 else
4323 rc = bnx2x_update_link_down(params, vars);
4324
4325 return rc;
4326}
4327
4328static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4329{
4330 u8 ext_phy_addr[PORT_MAX];
4331 u16 val;
4332 s8 port;
4333
4334 /* PART1 - Reset both phys */
4335 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4336 /* Extract the ext phy address for the port */
4337 u32 ext_phy_config = REG_RD(bp, shmem_base +
4338 offsetof(struct shmem_region,
4339 dev_info.port_hw_config[port].external_phy_config));
4340
4341 /* disable attentions */
4342 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
4343 (NIG_MASK_XGXS0_LINK_STATUS |
4344 NIG_MASK_XGXS0_LINK10G |
4345 NIG_MASK_SERDES0_LINK_STATUS |
4346 NIG_MASK_MI_INT));
4347
4348 ext_phy_addr[port] =
4349 ((ext_phy_config &
4350 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
4351 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
4352
4353 /* Need to take the phy out of low power mode in order
4354 to write to access its registers */
4355 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4356 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
4357
4358 /* Reset the phy */
4359 bnx2x_cl45_write(bp, port,
4360 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4361 ext_phy_addr[port],
4362 MDIO_PMA_DEVAD,
4363 MDIO_PMA_REG_CTRL,
4364 1<<15);
4065 } 4365 }
4066 4366
4067 if (vars->link_up) { 4367 /* Add delay of 150ms after reset */
4068 vars->link_status |= LINK_STATUS_LINK_UP; 4368 msleep(150);
4069 if (link_10g) {
4070 bnx2x_bmac_enable(params, vars, 0);
4071 bnx2x_set_led(bp, port, LED_MODE_OPER,
4072 SPEED_10000, params->hw_led_mode,
4073 params->chip_id);
4074 4369
4075 } else { 4370 /* PART2 - Download firmware to both phys */
4076 bnx2x_emac_enable(params, vars, 0); 4371 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4077 rc = bnx2x_emac_program(params, vars->line_speed, 4372 u16 fw_ver1;
4078 vars->duplex);
4079 4373
4080 /* AN complete? */ 4374 bnx2x_bcm8073_external_rom_boot(bp, port,
4081 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 4375 ext_phy_addr[port]);
4082 if (!(vars->phy_flags & 4376
4083 PHY_SGMII_FLAG)) 4377 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4084 bnx2x_set_sgmii_tx_driver(params); 4378 ext_phy_addr[port],
4085 } 4379 MDIO_PMA_DEVAD,
4380 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
4381 if (fw_ver1 == 0) {
4382 DP(NETIF_MSG_LINK,
4383 "bnx2x_8073_common_init_phy port %x "
4384 "fw Download failed\n", port);
4385 return -EINVAL;
4086 } 4386 }
4087 4387
4088 /* PBF - link up */ 4388 /* Only set bit 10 = 1 (Tx power down) */
4089 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 4389 bnx2x_cl45_read(bp, port,
4090 vars->line_speed); 4390 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4391 ext_phy_addr[port],
4392 MDIO_PMA_DEVAD,
4393 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4091 4394
4092 /* disable drain */ 4395 /* Phase1 of TX_POWER_DOWN reset */
4093 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 4396 bnx2x_cl45_write(bp, port,
4397 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4398 ext_phy_addr[port],
4399 MDIO_PMA_DEVAD,
4400 MDIO_PMA_REG_TX_POWER_DOWN,
4401 (val | 1<<10));
4402 }
4094 4403
4095 /* update shared memory */ 4404 /* Toggle Transmitter: Power down and then up with 600ms
4096 bnx2x_update_mng(params, vars->link_status); 4405 delay between */
4406 msleep(600);
4097 4407
4098 } else { /* link down */ 4408 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
4099 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port); 4409 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4100 bnx2x_set_led(bp, port, LED_MODE_OFF, 4410 /* Phase2 of POWER_DOWN_RESET*/
4101 0, params->hw_led_mode, 4411 /* Release bit 10 (Release Tx power down) */
4102 params->chip_id); 4412 bnx2x_cl45_read(bp, port,
4413 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4414 ext_phy_addr[port],
4415 MDIO_PMA_DEVAD,
4416 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4103 4417
4104 /* indicate no mac active */ 4418 bnx2x_cl45_write(bp, port,
4105 vars->mac_type = MAC_TYPE_NONE; 4419 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4420 ext_phy_addr[port],
4421 MDIO_PMA_DEVAD,
4422 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
4423 msleep(15);
4106 4424
4107 /* update shared memory */ 4425 /* Read modify write the SPI-ROM version select register */
4108 vars->link_status = 0; 4426 bnx2x_cl45_read(bp, port,
4109 bnx2x_update_mng(params, vars->link_status); 4427 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4428 ext_phy_addr[port],
4429 MDIO_PMA_DEVAD,
4430 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
4431 bnx2x_cl45_write(bp, port,
4432 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4433 ext_phy_addr[port],
4434 MDIO_PMA_DEVAD,
4435 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
4110 4436
4111 /* activate nig drain */ 4437 /* set GPIO2 back to LOW */
4112 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4438 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4439 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
4440 }
4441 return 0;
4113 4442
4114 /* reset BigMac */ 4443}
4115 bnx2x_bmac_rx_disable(bp, params->port);
4116 REG_WR(bp, GRCBASE_MISC +
4117 MISC_REGISTERS_RESET_REG_2_CLEAR,
4118 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4119 4444
4445u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4446{
4447 u8 rc = 0;
4448 u32 ext_phy_type;
4449
4450 DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
4451
4452 /* Read the ext_phy_type for arbitrary port(0) */
4453 ext_phy_type = XGXS_EXT_PHY_TYPE(
4454 REG_RD(bp, shmem_base +
4455 offsetof(struct shmem_region,
4456 dev_info.port_hw_config[0].external_phy_config)));
4457
4458 switch (ext_phy_type) {
4459 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4460 {
4461 rc = bnx2x_8073_common_init_phy(bp, shmem_base);
4462 break;
4463 }
4464 default:
4465 DP(NETIF_MSG_LINK,
4466 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
4467 ext_phy_type);
4468 break;
4120 } 4469 }
4121 4470
4122 return rc; 4471 return rc;
4123} 4472}
4124 4473
4474
4475
4125static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) 4476static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
4126{ 4477{
4127 u16 val, cnt; 4478 u16 val, cnt;
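
Annotation: bnx2x_8073_common_init_phy() above deliberately runs as three loops over both ports (reset everything, then download firmware, then finish the Tx power-down toggle) instead of one loop doing all steps per port; the 150ms and 600ms settling delays are then paid once, not once per port. A compact, standalone sketch of the pattern (NPORTS and the phase functions are illustrative, not driver API):

    #include <stdio.h>

    #define NPORTS 2

    static void reset_phy(int p)   { printf("reset port %d\n", p); }
    static void load_fw(int p)     { printf("load fw port %d\n", p); }
    static void finish_init(int p) { printf("finish port %d\n", p); }
    static void settle_ms(int ms)  { printf("wait %d ms once\n", ms); }

    int main(void)
    {
    	int p;

    	for (p = NPORTS - 1; p >= 0; p--)	/* phase 1: reset all first */
    		reset_phy(p);
    	settle_ms(150);				/* one shared delay */

    	for (p = NPORTS - 1; p >= 0; p--)	/* phase 2: firmware for all */
    		load_fw(p);
    	settle_ms(600);

    	for (p = NPORTS - 1; p >= 0; p--)	/* phase 3: finalize */
    		finish_init(p);
    	return 0;
    }
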
@@ -4154,7 +4505,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
 }
 #define RESERVED_SIZE 256
 /* max application is 160K bytes - data at end of RAM */
-#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE
+#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
 
 /* Header is 14 bytes */
 #define HEADER_SIZE 14
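
Annotation: the MAX_APP_SIZE change above is the classic unparenthesized-macro fix. The existing use `MAX_APP_SIZE+HEADER_SIZE` happens to survive either way because it is all left-associative addition and subtraction, but any multiplication or division around the macro would bind wrongly. A small standalone demonstration (not driver code):

    #include <stdio.h>

    #define RESERVED_SIZE 256
    #define BAD_MAX_APP_SIZE  160*1024 - RESERVED_SIZE	/* old form */
    #define MAX_APP_SIZE     (160*1024 - RESERVED_SIZE)	/* fixed form */

    int main(void)
    {
    	/* 2*BAD_... expands to 2*160*1024 - 256, not twice the size */
    	printf("%d vs %d\n", 2*BAD_MAX_APP_SIZE, 2*MAX_APP_SIZE);
    	return 0;
    }
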
@@ -4192,12 +4543,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
 		size = MAX_APP_SIZE+HEADER_SIZE;
 	}
 	DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
 	DP(NETIF_MSG_LINK, "          %c%c\n", data[0x150], data[0x151]);
 	/* Put the DSP in download mode by setting FLASH_CFG[2] to 1
 	   and issuing a reset.*/
 
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-		       MISC_REGISTERS_GPIO_HIGH);
+		       MISC_REGISTERS_GPIO_HIGH, port);
 
 	bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
 
@@ -4429,7 +4780,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
 	}
 
 	/* DSP Remove Download Mode */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+		       MISC_REGISTERS_GPIO_LOW, port);
 
 	bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
 
@@ -4437,7 +4789,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
 	for (cnt = 0; cnt < 100; cnt++)
 		msleep(5);
 
-	bnx2x_hw_reset(bp);
+	bnx2x_hw_reset(bp, port);
 
 	for (cnt = 0; cnt < 100; cnt++)
 		msleep(5);
@@ -4473,7 +4825,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
 				MDIO_PMA_REG_7101_VER2,
 				&image_revision2);
 
 	if (data[0x14e] != (image_revision2&0xFF) ||
 	    data[0x14f] != ((image_revision2&0xFF00)>>8) ||
 	    data[0x150] != (image_revision1&0xFF) ||
 	    data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4860,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 		/* Take ext phy out of reset */
 		if (!driver_loaded)
-			bnx2x_turn_on_sf(bp, port, ext_phy_addr);
+			bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
 		rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
 						  data, size);
 		if (!driver_loaded)
-			bnx2x_turn_off_sf(bp);
+			bnx2x_turn_off_sf(bp, port);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37ac95de..86d54a17b411 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@ struct link_params {
 #define LOOPBACK_BMAC		2
 #define LOOPBACK_XGXS_10	3
 #define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5
 
 	u16 req_duplex;
 	u16 req_flow_ctrl;
+	u16 req_fc_auto_adv;	/* Should be set to TX / BOTH when
+				   req_flow_ctrl is set to AUTO */
 	u16 req_line_speed; /* Also determine AutoNeg */
 
 	/* Device parameters */
 	u8 mac_addr[6];
-	u16 mtu;
+
 
 
 	/* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
 		    u8 phy_addr, u8 devad, u16 reg, u16 val);
 
 /* Reads the link_status from the shmem,
-   and update the link vars accordinaly */
+   and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
 			      struct link_vars *output);
 /* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
-   to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
+   to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
 u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
 		 u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
    otherwise link is down*/
 u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
 
+/* One-time initialization for external phy after power up */
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
 
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd25953..3e7dc171cdf1 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -60,8 +60,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.6"
-#define DRV_MODULE_RELDATE	"2008/06/23"
+#define DRV_MODULE_VERSION	"1.45.17"
+#define DRV_MODULE_RELDATE	"2008/08/13"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +76,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
-static int disable_tpa;
-static int nomcp;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 static int use_multi;
 
+module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
 module_param(poll, int, 0);
 module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
@@ -237,17 +235,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 	while (*wb_comp != DMAE_COMP_VAL) {
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 
 	mutex_unlock(&bp->dmae_mutex);
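
Annotation: both DMAE wait loops (this hunk and the read_dmae one below) are reordered so the budget check comes before the delay: the old code always slept once before looking at cnt, and would sleep one extra period on the very iteration that declares the timeout. The corrected shape, sketched as a standalone poll helper (poll_until, done and budget are illustrative names):

    #include <stdbool.h>

    /* returns true if *done became nonzero within 'budget' polls */
    static bool poll_until(volatile int *done, int budget, void (*delay)(void))
    {
    	while (!*done) {
    		if (!budget)		/* check the budget first ... */
    			return false;	/* ... so timeout costs no extra sleep */
    		budget--;
    		delay();		/* then pay the wait */
    	}
    	return true;
    }
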
@@ -310,17 +307,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +499,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	int i;
 	u16 j, start, end;
 
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	for_each_queue(bp, i) {
@@ -513,17 +512,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR("          rx_comp_prod(%x)  rx_comp_cons(%x)"
-			  "  *rx_cons_sb(%x)  *rx_bd_cons_sb(%x)"
-			  "  rx_sge_prod(%x)  last_max_sge(%x)\n",
-			  fp->rx_comp_prod, fp->rx_comp_cons,
-			  le16_to_cpu(*fp->rx_cons_sb),
-			  le16_to_cpu(*fp->rx_bd_cons_sb),
-			  fp->rx_sge_prod, fp->last_max_sge);
-		BNX2X_ERR("          fp_c_idx(%x)  fp_u_idx(%x)"
-			  "  bd data(%x,%x)  rx_alloc_failed(%lx)\n",
-			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
-			  hw_prods->bds_prod, fp->rx_alloc_failed);
+		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
+			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
+			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
+			  fp->rx_bd_prod, fp->rx_bd_cons,
+			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
+			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
+			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
+			  fp->status_blk->c_status_block.status_block_index,
+			  fp->fp_u_idx,
+			  fp->status_blk->u_status_block.status_block_index,
+			  hw_prods->packets_prod, hw_prods->bds_prod);
 
 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
 		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +555,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
-		start = 0;
-		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
 		for (j = start; j < end; j++) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +584,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 }
 
 static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +683,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
 	igu_ack.status_block_index = index;
@@ -694,9 +694,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 		   (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
 		   (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 
-	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
-	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 }
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
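
Annotation: the interrupt-ack path here stops poking the IGU through BAR_IGU_INTMEM and writes the HC command registers instead, whose addresses are plain base + per-port stride + command offset arithmetic. A hedged sketch of that address computation (the constants below are placeholders, not the real register map):

    #include <stdint.h>

    #define HC_COMMAND_BASE	0x1000	/* placeholder base address */
    #define PORT_STRIDE		32	/* one command bank per port */
    #define CMD_INT_ACK		0x4	/* placeholder command offset */

    static uint32_t hc_cmd_addr(int port, uint32_t command)
    {
    	/* per-port register bank, commands at fixed offsets within it */
    	return HC_COMMAND_BASE + (uint32_t)port * PORT_STRIDE + command;
    }
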
@@ -716,36 +716,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
-	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
-	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
 
-	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
-	   result, BAR_IGU_INTMEM + igu_addr);
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
 
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
-	if (result == 0) {
-		BNX2X_ERR("read %x from IGU\n", result);
-		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
-	}
-#endif
 	return result;
 }
 
@@ -898,6 +877,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		netif_tx_lock(bp->dev);
 
 		if (netif_queue_stopped(bp->dev) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_wake_queue(bp->dev);
 
@@ -905,6 +885,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	}
 }
 
+
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
 {
@@ -960,6 +941,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1151,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	memset(fp->sge_mask, 0xff,
 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
 
-	/* Clear the two last indeces in the page to 1:
-	   these are the indeces that correspond to the "next" element,
+	/* Clear the two last indices in the page to 1:
+	   these are the indices that correspond to the "next" element,
 	   hence will never be indicated and should be removed from
 	   the calculations. */
 	bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1243,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		   where we are and drop the whole packet */
 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 		if (unlikely(err)) {
-			fp->rx_alloc_failed++;
+			bp->eth_stats.rx_skb_alloc_failed++;
 			return err;
 		}
 
@@ -1297,14 +1279,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
 			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
-	/* if alloc failed drop the packet and keep the buffer in the bin */
 	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
-		/* else fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > bp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1334,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		fp->tpa_pool[queue].skb = new_skb;
 
 	} else {
+		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		fp->rx_alloc_failed++;
+		bp->eth_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1372,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 	int rx_pkt = 0;
-	u16 queue;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1456,7 +1437,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if ((!fp->disable_tpa) &&
 			    (TPA_TYPE(cqe_fp_flags) !=
 			     (TPA_TYPE_START | TPA_TYPE_END))) {
-				queue = cqe->fast_path_cqe.queue_index;
+				u16 queue = cqe->fast_path_cqe.queue_index;
 
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
 					DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1484,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			/* is this an error packet? */
 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-			/* do we sometimes forward error packets anyway? */
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  flags %x  rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				/* TBD make sure MC counts this as a drop */
+				bp->eth_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1524,7 +1504,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR  packet dropped "
 					   "because of alloc failure\n");
-					fp->rx_alloc_failed++;
+					bp->eth_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1550,7 +1530,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  packet dropped because "
 				   "of alloc failure\n");
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1559,10 +1539,12 @@ reuse_rx:
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
 			skb->ip_summed = CHECKSUM_NONE;
-			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-			/* TBD do we pass bad csum packets in promisc */
+			if (bp->rx_csum) {
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					bp->eth_stats.hw_csum_err++;
+			}
 		}
 
 #ifdef BCM_VLAN
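
Annotation: the checksum block above now distinguishes "offload disabled" from "offload ran and failed": CHECKSUM_UNNECESSARY is set only when the hardware verdict is good, and a failed verdict bumps hw_csum_err instead of being silently ignored. The decision, sketched with simplified types (names are illustrative):

    enum csum { CSUM_NONE, CSUM_UNNECESSARY };

    struct stats { unsigned long hw_csum_err; };

    static enum csum rx_csum(int offload_on, int hw_ok, struct stats *st)
    {
    	enum csum c = CSUM_NONE;	/* default: let the stack verify */

    	if (offload_on) {
    		if (hw_ok)
    			c = CSUM_UNNECESSARY;	/* hardware already checked */
    		else
    			st->hw_csum_err++;	/* count it, keep CSUM_NONE */
    	}
    	return c;
    }
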
@@ -1615,6 +1597,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
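
Annotation: the fast-path MSI-X handler gains the same guard the slow-path handlers already have: while intr_sem is raised (driver reconfiguring), the handler claims the interrupt but does no work. The pattern, sketched with C11 atomics in place of the kernel's atomic_t (illustrative only):

    #include <stdatomic.h>

    static atomic_int intr_sem;	/* > 0 while the driver holds interrupts off */

    static int irq_handler(void)
    {
    	if (atomic_load(&intr_sem) != 0)
    		return 1;	/* like IRQ_HANDLED: claim it, touch nothing */

    	/* ... normal fast-path processing ... */
    	return 1;
    }
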
@@ -1648,17 +1636,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
 
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
 	mask = 0x2 << bp->fp[0].sb_id;
 	if (status & mask) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1687,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  * General service functions
  */
 
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 	int cnt;
 
 	/* Validating that the resource is within range */
@@ -1714,8 +1703,15 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (lock_status & resource_bit) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 		   lock_status, resource_bit);
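
Annotation: the HW-lock helpers switch from one DRIVER_CONTROL register per port to one per PCI function; functions 0..5 map onto the first register bank and functions 6..7 continue at DRIVER_CONTROL_7. The selection logic, as a standalone sketch (register addresses below are placeholders):

    #include <stdint.h>

    #define DRIVER_CONTROL_1	0x500	/* placeholder, entries 8 bytes apart */
    #define DRIVER_CONTROL_7	0x600	/* placeholder second bank */

    static uint32_t lock_reg_for_func(int func)
    {
    	/* funcs 0..5 in the first bank, 6..7 in the second */
    	if (func <= 5)
    		return DRIVER_CONTROL_1 + func * 8;
    	return DRIVER_CONTROL_7 + (func - 6) * 8;
    }
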
@@ -1725,9 +1721,8 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	/* Try for 1 second every 5ms */
 	for (cnt = 0; cnt < 200; cnt++) {
 		/* Try to acquire the lock */
-		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
-		       resource_bit);
-		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
 		if (lock_status & resource_bit)
 			return 0;
 
@@ -1737,11 +1732,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
 	return -EAGAIN;
 }
 
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1747,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (!(lock_status & resource_bit)) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EFAULT;
 	}
 
-	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
 	return 0;
 }
 
 /* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
@@ -1772,25 +1775,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 }
 
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 	int gpio_shift = gpio_num +
 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 	u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1804,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 	/* read GPIO and mask except the float bits */
 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
@@ -1822,7 +1825,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 		break;
 
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 		   gpio_num, gpio_shift);
 		/* set FLOAT */
@@ -1834,7 +1837,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 
 	return 0;
 }
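
Annotation: bnx2x_set_gpio() now takes the port explicitly instead of deriving it from bp, but keeps the swap rule: when the port-swap strap is set and the override strap is active, the GPIO bank really belongs to the other port, hence the XOR. A standalone sketch of the mapping (the two strap variables are stand-ins for the NIG register reads):

    static int swap_set, swap_override;	/* stand-ins for the two NIG straps */

    static int effective_gpio_port(int port)
    {
    	/* the && yields 0 or 1; XOR flips the port exactly when both
    	   swap bits are active */
    	return (swap_set && swap_override) ^ port;
    }
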
@@ -1850,19 +1853,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 	/* read SPIO and mask except the float bits */
 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
 	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 		/* clear FLOAT and set CLR */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 		break;
 
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 		/* clear FLOAT and set SET */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1883,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 	}
 
 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
 	return 0;
 }
@@ -1940,46 +1943,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
 
 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 {
-	u8 rc;
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
 
-	/* Initialize link parameters structure variables */
-	bp->link_params.mtu = bp->dev->mtu;
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (IS_E1HMF(bp))
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+		else if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
 
-	bnx2x_phy_hw_lock(bp);
-	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	if (bp->link_vars.link_up)
-		bnx2x_link_report(bp);
+		if (bp->link_vars.link_up)
+			bnx2x_link_report(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
 
-	return rc;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	return -EINVAL;
 }
 
 static void bnx2x_link_set(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not setting link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not resetting link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
 	u8 rc;
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	return rc;
 }
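
Annotation: bnx2x_initial_phy_init() above now refuses to touch the link when the bootcode (MCP) is absent, and seeds req_fc_auto_adv before PHY init: the comment recommends dropping RX flow control for jumbo frames, which the code implements by advertising TX-only pause once the MTU passes 5000, except in E1H multi-function mode. The selection, sketched with simplified names:

    enum fc { FC_TX, FC_BOTH };

    static enum fc pick_fc_auto_adv(int multi_function, int mtu)
    {
    	if (multi_function)
    		return FC_BOTH;	/* MF mode keeps both directions */
    	if (mtu > 5000)
    		return FC_TX;	/* jumbo frames: don't advertise RX pause */
    	return FC_BOTH;
    }
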
@@ -1991,7 +2011,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
       sum of vn_min_rates
         or
       0 - if all the min_rates are 0.
-     In the later case fainess algorithm should be deactivated.
+     In the later case fairness algorithm should be deactivated.
       If not all min_rates are zero then those that are zeroes will
       be set to 1.
  */
@@ -2114,7 +2134,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 		/* If FAIRNESS is enabled (not all min rates are zeroes) and
 		   if current min rate is zero - set it to 1.
-		   This is a requirment of the algorithm. */
+		   This is a requirement of the algorithm. */
 		if ((vn_min_rate == 0) && wsum)
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2223,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2357,7 +2377,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 }
 
 /* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
 	u32 i, j, val;
 	int rc = 0;
@@ -2374,15 +2394,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
 		msleep(5);
 	}
 	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire nvram interface\n");
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 		rc = -EBUSY;
 	}
 
 	return rc;
 }
 
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
 	u32 val = 0;
 
@@ -2395,7 +2415,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 		rc |= 1;
@@ -2426,26 +2445,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 {
 	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
 
-	if (~bp->aeu_mask & (asserted & 0xff))
-		BNX2X_ERR("IGU ERROR\n");
 	if (bp->attn_state & asserted)
 		BNX2X_ERR("IGU ERROR\n");
 
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
 	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
-	   bp->aeu_mask, asserted);
-	bp->aeu_mask &= ~(asserted & 0xff);
-	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	REG_WR(bp, aeu_addr, bp->aeu_mask);
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
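
Annotation: this hunk (and the matching deasserted hunk below) drops the driver-private bp->aeu_mask shadow. Since both ports, and the MCP, can update the AEU mask, the value is now re-read from the register and modified under the shared HW lock: a plain read-modify-write critical section. Sketch, with stand-ins for the lock and the register:

    #include <stdint.h>

    static uint32_t aeu_reg;	/* stands in for the AEU mask register */

    static void hw_lock(void)   { /* acquire the cross-agent lock */ }
    static void hw_unlock(void) { /* release it */ }

    static void mask_asserted_bits(uint32_t asserted)
    {
    	uint32_t mask;

    	hw_lock();		/* other port or MCP may write too */
    	mask = aeu_reg;		/* always re-read, never cache */
    	mask &= ~(asserted & 0xff);
    	aeu_reg = mask;
    	hw_unlock();
    }
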
@@ -2500,9 +2524,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	} /* if hardwired */
 
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
-	   asserted, BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
 	if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2554,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure attention */
 
-		/* The PHY reset is controled by GPIO 1 */
+		/* The PHY reset is controlled by GPIO 1 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
-		/* Low power mode is controled by GPIO 2 */
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+		/* Low power mode is controlled by GPIO 2 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 		/* mark the failure */
 		bp->link_params.ext_phy_config &=
 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	int index;
 	u32 reg_addr;
 	u32 val;
+	u32 aeu_mask;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
-	bnx2x_lock_alr(bp);
+	bnx2x_acquire_alr(bp);
 
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 					  HW_PRTY_ASSERT_SET_1) ||
 				(attn.sig[2] & group_mask.sig[2] &
 					  HW_PRTY_ASSERT_SET_2))
 				BNX2X_ERR("FATAL HW block parity attention\n");
 		}
 	}
 
-	bnx2x_unlock_alr(bp);
+	bnx2x_release_alr(bp);
 
-	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
 	val = ~deasserted;
-/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
-	   val, BAR_IGU_INTMEM + reg_addr); */
-	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
 
-	if (bp->aeu_mask & (deasserted & 0xff))
-		BNX2X_ERR("IGU BUG!\n");
 	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU BUG!\n");
+		BNX2X_ERR("IGU ERROR\n");
 
 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
-	bp->aeu_mask |= (deasserted & 0xff);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
-	REG_WR(bp, reg_addr, bp->aeu_mask);
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return;
 	}
 
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
 	/* HW attentions */
 	if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 			/* underflow */ \
 			d_hi = m_hi - s_hi; \
 			if (d_hi > 0) { \
 				/* we can 'loan' 1 */ \
 				d_hi--; \
 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
 			} else { \
 				/* m_hi <= s_hi */ \
 				d_hi = 0; \
 				d_lo = 0; \
 			} \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 d_hi = 0; \ 2918 d_hi = 0; \
2891 d_lo = 0; \ 2919 d_lo = 0; \
2892 } else { \ 2920 } else { \
2893 /* m_hi >= s_hi */ \ 2921 /* m_hi >= s_hi */ \
2894 d_hi = m_hi - s_hi; \ 2922 d_hi = m_hi - s_hi; \
2895 d_lo = m_lo - s_lo; \ 2923 d_lo = m_lo - s_lo; \
2896 } \ 2924 } \
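The macro in the hunks above computes a 64-bit statistics delta from split 32-bit hi/lo counter halves, borrowing one from the high word when the low word underflows and clamping to zero when the minuend is smaller. A minimal standalone sketch of the same borrow logic, with illustrative names that are not the driver's:

#include <limits.h>
#include <stdint.h>

/* d = m - s over hi/lo 32-bit halves, clamped to zero when m < s */
void diff64(uint32_t m_hi, uint32_t m_lo, uint32_t s_hi, uint32_t s_lo,
	    uint32_t *d_hi, uint32_t *d_lo)
{
	if (m_lo < s_lo) {
		/* low word underflows */
		if (m_hi > s_hi) {
			/* we can 'loan' 1 from the high word */
			*d_hi = m_hi - s_hi - 1;
			*d_lo = m_lo + (UINT_MAX - s_lo) + 1;
		} else {
			/* m_hi <= s_hi: the full value would go negative */
			*d_hi = 0;
			*d_lo = 0;
		}
	} else {
		if (m_hi < s_hi) {
			*d_hi = 0;
			*d_lo = 0;
		} else {
			/* m_hi >= s_hi: plain per-word subtraction */
			*d_hi = m_hi - s_hi;
			*d_lo = m_lo - s_lo;
		}
	}
}

For example, diff64(1, 0, 0, 1, &hi, &lo) yields hi = 0 and lo = UINT_MAX, i.e. 2^32 - 1.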
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
2963 * Init service functions 2991 * Init service functions
2964 */ 2992 */
2965 2993
2966static void bnx2x_storm_stats_init(struct bnx2x *bp)
2967{
2968 int func = BP_FUNC(bp);
2969
2970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2971 REG_WR(bp, BAR_XSTRORM_INTMEM +
2972 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2973
2974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2975 REG_WR(bp, BAR_TSTRORM_INTMEM +
2976 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2977
2978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2979 REG_WR(bp, BAR_CSTRORM_INTMEM +
2980 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2981
2982 REG_WR(bp, BAR_XSTRORM_INTMEM +
2983 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2984 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2985 REG_WR(bp, BAR_XSTRORM_INTMEM +
2986 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2987 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2988
2989 REG_WR(bp, BAR_TSTRORM_INTMEM +
2990 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2992 REG_WR(bp, BAR_TSTRORM_INTMEM +
2993 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2995}
2996
2997static void bnx2x_storm_stats_post(struct bnx2x *bp) 2994static void bnx2x_storm_stats_post(struct bnx2x *bp)
2998{ 2995{
2999 if (!bp->stats_pending) { 2996 if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
3032 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); 3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3033 bp->port.old_nig_stats.brb_discard = 3030 bp->port.old_nig_stats.brb_discard =
3034 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); 3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, 3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); 3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3037 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, 3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 
 	might_sleep();
 	while (*stats_comp != DMAE_COMP_VAL) {
-		msleep(1);
 		if (!cnt) {
 			BNX2X_ERR("timeout waiting for stats finished\n");
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	return 1;
 }
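The hunk above moves the msleep() to the bottom of the wait loop: the retry budget is now checked before sleeping, so an exhausted budget reports the timeout immediately instead of paying one extra tick on the way out. A userspace sketch of the reordered loop; sleep_ms() and the other names are illustrative, not driver API:

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

int wait_comp(volatile uint32_t *comp, uint32_t done_val, int budget)
{
	while (*comp != done_val) {
		if (!budget) {		/* fail fast: no sleep before reporting */
			fprintf(stderr, "timeout waiting for completion\n");
			break;
		}
		budget--;
		sleep_ms(1);		/* sleep only when another pass is coming */
	}
	return 1;
}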
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3451 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3452 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3453 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3455 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3456 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3457 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); 3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3458 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
3536 3534
3537 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, 3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3538 new->brb_discard - old->brb_discard); 3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3539 3539
3540 UPDATE_STAT64_NIG(egress_mac_pkt0, 3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets); 3541 etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
3713 nstats->rx_length_errors = 3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo + 3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received; 3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + 3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 estats->brb_truncate_discard;
3718 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; 3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3719 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; 3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3720 nstats->rx_fifo_errors = old_tclient->no_buff_discard; 3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
3783 bp->fp->rx_comp_cons), 3782 bp->fp->rx_comp_cons),
3784 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); 3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3785 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", 3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp->dev)? "Xoff" : "Xon", 3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3787 estats->driver_xoff, estats->brb_drop_lo); 3786 estats->driver_xoff, estats->brb_drop_lo);
3788 printk(KERN_DEBUG "tstats: checksum_discard %u " 3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u " 3788 "packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3994 3993
3995 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + 3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997 sizeof(struct ustorm_def_status_block)/4); 3996 sizeof(struct ustorm_status_block)/4);
3998 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + 3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4000 sizeof(struct cstorm_def_status_block)/4); 3999 sizeof(struct cstorm_status_block)/4);
4001} 4000}
4002 4001
4003static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, 4002static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4004 struct host_status_block *sb, dma_addr_t mapping) 4003 dma_addr_t mapping, int sb_id)
4005{ 4004{
4006 int port = BP_PORT(bp); 4005 int port = BP_PORT(bp);
4007 int func = BP_FUNC(bp); 4006 int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4077 atten_status_block); 4076 atten_status_block);
4078 def_sb->atten_status_block.status_block_id = sb_id; 4077 def_sb->atten_status_block.status_block_id = sb_id;
4079 4078
4080 bp->def_att_idx = 0;
4081 bp->attn_state = 0; 4079 bp->attn_state = 0;
4082 4080
4083 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4094 reg_offset + 0xc + 0x10*index); 4092 reg_offset + 0xc + 0x10*index);
4095 } 4093 }
4096 4094
4097 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4098 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4099
4100 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4101 HC_REG_ATTN_MSG0_ADDR_L); 4096 HC_REG_ATTN_MSG0_ADDR_L);
4102 4097
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4114 u_def_status_block); 4109 u_def_status_block);
4115 def_sb->u_def_status_block.status_block_id = sb_id; 4110 def_sb->u_def_status_block.status_block_id = sb_id;
4116 4111
4117 bp->def_u_idx = 0;
4118
4119 REG_WR(bp, BAR_USTRORM_INTMEM + 4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4120 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4121 REG_WR(bp, BAR_USTRORM_INTMEM + 4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4122 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4123 U64_HI(section)); 4116 U64_HI(section));
4124 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + 4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4125 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4126 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4127 BNX2X_BTR);
4128 4119
4129 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) 4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4130 REG_WR16(bp, BAR_USTRORM_INTMEM + 4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4135 c_def_status_block); 4126 c_def_status_block);
4136 def_sb->c_def_status_block.status_block_id = sb_id; 4127 def_sb->c_def_status_block.status_block_id = sb_id;
4137 4128
4138 bp->def_c_idx = 0;
4139
4140 REG_WR(bp, BAR_CSTRORM_INTMEM + 4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4142 REG_WR(bp, BAR_CSTRORM_INTMEM + 4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4144 U64_HI(section)); 4133 U64_HI(section));
4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + 4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4147 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4148 BNX2X_BTR);
4149 4136
4150 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) 4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4151 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4156 t_def_status_block); 4143 t_def_status_block);
4157 def_sb->t_def_status_block.status_block_id = sb_id; 4144 def_sb->t_def_status_block.status_block_id = sb_id;
4158 4145
4159 bp->def_t_idx = 0;
4160
4161 REG_WR(bp, BAR_TSTRORM_INTMEM + 4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + 4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4165 U64_HI(section)); 4150 U64_HI(section));
4166 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + 4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4169 BNX2X_BTR);
4170 4153
4171 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) 4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_TSTRORM_INTMEM + 4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4177 x_def_status_block); 4160 x_def_status_block);
4178 def_sb->x_def_status_block.status_block_id = sb_id; 4161 def_sb->x_def_status_block.status_block_id = sb_id;
4179 4162
4180 bp->def_x_idx = 0;
4181
4182 REG_WR(bp, BAR_XSTRORM_INTMEM + 4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + 4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4186 U64_HI(section)); 4167 U64_HI(section));
4187 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + 4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4189 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4190 BNX2X_BTR);
4191 4170
4192 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) 4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_XSTRORM_INTMEM + 4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4195 4174
4196 bp->stats_pending = 0; 4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4197 4177
4198 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4199} 4179}
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4192 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4193 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4201 bp->rx_ticks ? 0 : 1);
4218 4202
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4206 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4207 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4210 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4211 bp->tx_ticks ? 0 : 1);
4228 } 4212 }
4229} 4213}
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4240static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4241{
4258 int func = BP_FUNC(bp); 4242 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4246 int i, j;
4261 4247
4262 bp->rx_buf_use_size = bp->dev->mtu; 4248 bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4270 bp->dev->mtu + ETH_OVREHEAD); 4256 bp->dev->mtu + ETH_OVREHEAD);
4271 4257
4272 for_each_queue(bp, j) { 4258 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4260
4261 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4262 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4264 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4339 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4342 fp->disable_tpa = 1;
4358 ring_prod = 0; 4343 ring_prod = 0;
4359 break; 4344 break;
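The failure branch above is a graceful-degradation pattern: when a TPA pool allocation fails mid-loop, the driver frees the entries allocated so far, disables the feature for that queue, and lets bring-up continue. A userspace sketch of the same cleanup-on-partial-failure shape, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Fill a pool of max buffers; on failure free what was allocated and
 * tell the caller to disable the feature instead of failing bring-up. */
int fill_pool(void **pool, int max, size_t buf_size)
{
	int i;

	for (i = 0; i < max; i++) {
		pool[i] = malloc(buf_size);
		if (!pool[i]) {
			fprintf(stderr, "alloc %d failed, disabling feature\n", i);
			while (i--) {	/* cleanup already allocated elements */
				free(pool[i]);
				pool[i] = NULL;
			}
			return -1;
		}
	}
	return 0;
}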
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4348 fp->rx_sge_prod = ring_prod;
4364 4349
4365 /* Allocate BDs and initialize BD ring */ 4350 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4351 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4352 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4353 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4355 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4356 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4357 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4358 break;
4374 } 4359 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4360 ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4482 }
4498 4483
4499 context->cstorm_st_context.sb_index_number = 4484 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4485 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4486 context->cstorm_st_context.status_block_id = sb_id;
4502 4487
4503 context->xstorm_ag_context.cdu_reserved = 4488 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4520 int i;
4536 4521
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4524 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4526#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4564 int func = BP_FUNC(bp);
4580 int i; 4565 int i;
4581 4566
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4568
4584 switch (mode) { 4569 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4570 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,35 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4602 bnx2x_set_client_config(bp);
4618} 4603}
4619 4604
4620static void bnx2x_init_internal(struct bnx2x *bp) 4605static void bnx2x_init_internal_common(struct bnx2x *bp)
4606{
4607 int i;
4608
4609 /* Zero this manually as its initialization is
4610 currently missing in the initTool */
4611 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4614}
4615
4616static void bnx2x_init_internal_port(struct bnx2x *bp)
4617{
4618 int port = BP_PORT(bp);
4619
4620 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4624}
4625
4626static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4627{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4628 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4629 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4630 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4631 int func = BP_FUNC(bp);
4626 int i; 4632 int i;
4633 u16 max_agg_size;
4627 4634
4628 if (is_multi(bp)) { 4635 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4636 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4643,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4644 (*(u32 *)&tstorm_config));
4638 4645
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4646 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4647 bnx2x_set_storm_rx_mode(bp);
4644 4648
4649 /* reset xstorm per client statistics */
4650 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4653 i*4, 0);
4654 }
4655 /* reset tstorm per client statistics */
4656 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4659 i*4, 0);
4660 }
4661
4662 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4663 stats_flags.collect_eth = 1;
4646 4664
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4666 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4667 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4668 ((u32 *)&stats_flags)[1]);
4651 4669
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4670 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4671 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4672 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4673 ((u32 *)&stats_flags)[1]);
4656 4674
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4676 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4678 ((u32 *)&stats_flags)[1]);
4661 4679
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4680 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4681 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683 REG_WR(bp, BAR_XSTRORM_INTMEM +
4684 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686
4687 REG_WR(bp, BAR_TSTRORM_INTMEM +
4688 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690 REG_WR(bp, BAR_TSTRORM_INTMEM +
4691 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4693
4665 if (CHIP_IS_E1H(bp)) { 4694 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4695 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4705,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4705 bp->e1hov);
4677 } 4706 }
4678 4707
4679 /* Zero this manualy as its initialization is 4708 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4709 max_agg_size = min((u32)(bp->rx_buf_use_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4710 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4711 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4712 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4713 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4714
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4715 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4716 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4719,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4719 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4720 U64_HI(fp->rx_comp_mapping));
4695 4721
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4722 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4723 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4724 max_agg_size);
4702 } 4725 }
4703} 4726}
4704 4727
4705static void bnx2x_nic_init(struct bnx2x *bp) 4728static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4729{
4730 switch (load_code) {
4731 case FW_MSG_CODE_DRV_LOAD_COMMON:
4732 bnx2x_init_internal_common(bp);
4733 /* no break */
4734
4735 case FW_MSG_CODE_DRV_LOAD_PORT:
4736 bnx2x_init_internal_port(bp);
4737 /* no break */
4738
4739 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740 bnx2x_init_internal_func(bp);
4741 break;
4742
4743 default:
4744 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4745 break;
4746 }
4747}
4748
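The new bnx2x_init_internal() dispatches on the MCP load response with deliberate fall-through, so the first function loaded on the chip (COMMON) also runs port- and function-level init, and the first function on a port (PORT) also runs function-level init. A compact sketch of that cascade with stubbed-out init steps; the names are illustrative:

#include <stdio.h>

enum load_scope { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_common(void) { puts("chip-wide init"); }
static void init_port(void)   { puts("port init"); }
static void init_func(void)   { puts("function init"); }

static void init_internal(enum load_scope scope)
{
	switch (scope) {
	case LOAD_COMMON:
		init_common();
		/* no break: COMMON implies PORT and FUNCTION */
	case LOAD_PORT:
		init_port();
		/* no break: PORT implies FUNCTION */
	case LOAD_FUNCTION:
		init_func();
		break;
	default:
		fprintf(stderr, "unknown load scope %d\n", scope);
		break;
	}
}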
4749static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4750{
4707 int i; 4751 int i;
4708 4752
@@ -4717,19 +4761,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4761 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4762 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4763 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4764 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4765 FP_SB_ID(fp));
4766 bnx2x_update_fpsb_idx(fp);
4722 } 4767 }
4723 4768
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4769 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4770 DEF_SB_ID);
4771 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4772 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4773 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4774 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4775 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4776 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4777 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4778 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4779 bnx2x_int_enable(bp);
4735} 4780}
@@ -4878,7 +4923,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
 	/* Write 0 to parser credits for CFC search request */
 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4978,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
 	/* Write 0 to parser credits for CFC search request */
 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5045,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
 
 	DP(NETIF_MSG_HW, "done\n");
 
@@ -5089,11 +5134,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5134 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5135#endif
5091 5136
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5138#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5203,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5203 }
5164 5204
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206 /* set NIC mode */
5207 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5208 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5210
@@ -5333,6 +5375,13 @@ static int bnx2x_init_common(struct bnx2x *bp)
5333 ((u32 *)&tmp)[1]); 5375 ((u32 *)&tmp)[1]);
5334 } 5376 }
5335 5377
5378 if (!BP_NOMCP(bp)) {
5379 bnx2x_acquire_phy_lock(bp);
5380 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381 bnx2x_release_phy_lock(bp);
5382 } else
5383 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5384
5336 return 0; 5385 return 0;
5337} 5386}
5338 5387
@@ -5638,18 +5687,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 	int func = BP_FUNC(bp);
 	u32 seq = ++bp->fw_seq;
 	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
 	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
-	/* let the FW do it's magic ... */
-	msleep(100); /* TBD */
+	do {
+		/* let the FW do it's magic ... */
+		msleep(delay);
 
-	if (CHIP_REV_IS_SLOW(bp))
-		msleep(900);
+		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
 
-	rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
-	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+		/* Give the FW up to 2 second (200*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
 
 	/* is this a reply to our command? */
 	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
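The rework above replaces a fixed sleep (100 ms, or 1 s on slow emulation) with a bounded poll of the firmware mailbox: the driver posts (command | seq), then reads back until the firmware echoes the same sequence number, for at most 200 polls (~2 s at 10 ms each). A userspace sketch of the handshake with the mailboxes modeled as plain words; the mask value and names are assumptions, not the driver's definitions:

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <time.h>

#define SEQ_MASK 0x0000ffff	/* assumed layout of the sequence field */

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

uint32_t fw_command(volatile uint32_t *drv_mb, volatile uint32_t *fw_mb,
		    uint32_t command, uint32_t seq)
{
	uint32_t rc;
	int cnt = 1;

	*drv_mb = command | seq;
	do {
		sleep_ms(10);			/* let the FW do its work */
		rc = *fw_mb;
	} while (((rc & SEQ_MASK) != seq) && (cnt++ < 200));

	if ((rc & SEQ_MASK) != seq)
		return 0;			/* timeout: no valid reply */
	return rc;				/* reply to our command */
}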
@@ -5713,6 +5767,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5767 NUM_RCQ_BD);
5714 5768
5715 /* SGE ring */ 5769 /* SGE ring */
5770 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5771 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5772 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5773 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5945,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5890 dev_kfree_skb(skb); 5945 dev_kfree_skb(skb);
5891 } 5946 }
5892 if (!fp->disable_tpa) 5947 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5950 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5951 }
5896} 5952}
@@ -5976,8 +6032,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6032 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6033 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6034 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6035 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6036 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6037 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6038 return -EBUSY;
5983 } 6039 }
@@ -6004,7 +6060,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6004 * Init service functions 6060 * Init service functions
6005 */ 6061 */
6006 6062
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6063static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6064{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6065 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6066 int port = BP_PORT(bp);
@@ -6026,11 +6082,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6082 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6083 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6084 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6085 if (set)
6086 config->config_table[0].target_table_entry.flags = 0;
6087 else
6088 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6089 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6090 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6091
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6092 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6093 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6094 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6095 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6096 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6100,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6100 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6101 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6102 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6103 if (set)
6104 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6105 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6106 else
6107 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6108 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6109 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6110
@@ -6050,12 +6113,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6113 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6114}
6052 6115
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6116static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6117{
6055 struct mac_configuration_cmd_e1h *config = 6118 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6119 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6120
6058 if (bp->state != BNX2X_STATE_OPEN) { 6121 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6122 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6123 return;
6061 } 6124 }
@@ -6079,9 +6142,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6142 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6143 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6144 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6145 if (set)
6146 config->config_table[0].flags = BP_PORT(bp);
6147 else
6148 config->config_table[0].flags =
6149 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6150
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6151 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6152 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6153 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6154 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6155 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6174,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 			bnx2x_rx_int(bp->fp, 10);
 			/* if index is different from 0
 			 * the reply for some commands will
-			 * be on the none default queue
+			 * be on the non default queue
 			 */
 			if (idx)
 				bnx2x_rx_int(&bp->fp[idx], 10);
 		}
-		mb(); /* state is changed by bnx2x_sp_event() */
 
+		mb(); /* state is changed by bnx2x_sp_event() */
 		if (*state_p == state)
 			return 0;
 
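The hunk above keeps the mb() immediately before the state comparison: the state word is written by bnx2x_sp_event() in another context, so the polling reader must order its load against that writer before testing it. In portable C11 the same intent is expressed as an acquire load, as in this sketch with illustrative names:

#include <stdatomic.h>

/* Re-check a state word written by another context; the acquire load
 * plays the role the driver gives mb() before the comparison. */
int state_reached(_Atomic int *state_p, int wanted)
{
	return atomic_load_explicit(state_p, memory_order_acquire) == wanted;
}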
@@ -6167,7 +6235,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6235{
6168 u32 load_code; 6236 u32 load_code;
6169 int i, rc; 6237 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6238#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6239 if (unlikely(bp->panic))
6173 return -EPERM; 6240 return -EPERM;
@@ -6183,22 +6250,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6250 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6251 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6252 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6253 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6254 return -EBUSY;
6188 } 6255 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6256 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6257 return -EBUSY; /* other port in diagnostic mode */
6191 6258
6192 } else { 6259 } else {
6260 int port = BP_PORT(bp);
6261
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6262 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6263 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6264 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6265 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6266 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6267 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6268 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6269 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6270 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6271 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6272 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6273 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6316,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6316 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6317 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6318
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6319 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6320 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6321 if (rc) {
@@ -6276,17 +6342,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 goto load_error; 6342 goto load_error;
6277 } 6343 }
6278 6344
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6345 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6346 bnx2x_nic_init(bp, load_code);
6284 6347
6285 /* Send LOAD_DONE command to MCP */ 6348 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6349 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6350 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6351 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6352 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6353 rc = -EBUSY;
6291 goto load_int_disable; 6354 goto load_int_disable;
6292 } 6355 }
@@ -6301,11 +6364,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 for_each_queue(bp, i) 6364 for_each_queue(bp, i)
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6365 napi_enable(&bnx2x_fp(bp, i, napi));
6303 6366
6367 /* Enable interrupt handling */
6368 atomic_set(&bp->intr_sem, 0);
6369
6304 rc = bnx2x_setup_leading(bp); 6370 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6371 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6372 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1;
6308#endif
6309 goto load_stop_netif; 6373 goto load_stop_netif;
6310 } 6374 }
6311 6375
@@ -6323,9 +6387,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6323 } 6387 }
6324 6388
6325 if (CHIP_IS_E1(bp)) 6389 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6390 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6391 else
6328 bnx2x_set_mac_addr_e1h(bp); 6392 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6393
6330 if (bp->port.pmf) 6394 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6395 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6403,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6403 break;
6340 6404
6341 case LOAD_OPEN: 6405 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6406 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6407 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6408 if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6441,7 @@ load_int_disable:
6378 /* Free SKBs, SGEs, TPA pool and driver internals */ 6441 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp); 6442 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i) 6443 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6444 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6445load_error:
6384 bnx2x_free_mem(bp); 6446 bnx2x_free_mem(bp);
6385 6447
@@ -6411,7 +6473,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6473 return rc;
6412} 6474}
6413 6475
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6476static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6477{
6416 u16 dsb_sp_prod_idx; 6478 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6479 /* if the other port is handling traffic,
@@ -6429,7 +6491,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6491 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6492 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6493 if (rc) /* timeout */
6432 return; 6494 return rc;
6433 6495
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6496 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6497
@@ -6441,20 +6503,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
 	   so there is not much to do if this times out
 	 */
 	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
-		msleep(1);
 		if (!cnt) {
 			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
 			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
 			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
 #ifdef BNX2X_STOP_ON_ERROR
 			bnx2x_panic();
+#else
+			rc = -EBUSY;
 #endif
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
 	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+	return rc;
 }
 
6460static void bnx2x_reset_func(struct bnx2x *bp) 6526static void bnx2x_reset_func(struct bnx2x *bp)
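bnx2x_stop_leading() now returns a status: when the bounded wait on the default status block producer times out, the non-debug build surfaces -EBUSY so bnx2x_nic_unload() can abort through unload_error instead of carrying on silently. A userspace sketch of that shape, with the producer modeled as a C11 atomic and illustrative names:

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Wait (bounded) for another context to advance a producer index;
 * surface a timeout as -EBUSY so the caller can abort the unload. */
int wait_prod_change(_Atomic uint16_t *prod, uint16_t old_idx)
{
	int cnt = 500, rc = 0;

	while (atomic_load(prod) == old_idx) {
		if (!cnt) {
			rc = -EBUSY;
			break;
		}
		cnt--;
		sleep_ms(1);
	}
	return rc;
}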
@@ -6496,7 +6562,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
 	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
 	if (val)
 		DP(NETIF_MSG_IFDOWN,
-		   "BRB1 is not empty %d blooks are occupied\n", val);
+		   "BRB1 is not empty %d blocks are occupied\n", val);
 
 	/* TODO: Close Doorbell port? */
 }
@@ -6536,11 +6602,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
 	}
 }
 
-/* msut be called with rtnl_lock */
+/* must be called with rtnl_lock */
 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 {
+	int port = BP_PORT(bp);
 	u32 reset_code = 0;
-	int i, cnt;
+	int i, cnt, rc;
6544 6611
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6612 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6613
@@ -6557,22 +6624,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6624 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6625 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6626
6560 /* Wait until all fast path tasks complete */ 6627 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6628 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6630
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6631 cnt = 1000;
6573 smp_rmb(); 6632 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6633 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6634
6635 if (!netif_running(bp->dev))
6636 bnx2x_tx_int(fp, 1000);
6637
6576 if (!cnt) { 6638 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6639 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6640 i);
@@ -6584,14 +6646,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6646#endif
6585 } 6647 }
6586 cnt--; 6648 cnt--;
6649 msleep(1);
6587 smp_rmb(); 6650 smp_rmb();
6588 } 6651 }
6589 } 6652 }
6590 6653
6591 /* Wait until all slow path tasks complete */ 6654 /* Give HW time to discard old tx messages */
6592 cnt = 1000; 6655 msleep(1);
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595 6656
6596 for_each_queue(bp, i) 6657 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi)); 6658 napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6662,79 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6601 /* Release IRQs */ 6662 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6663 bnx2x_free_irq(bp);
6603 6664
6604 if (bp->flags & NO_WOL_FLAG) 6665 if (unload_mode == UNLOAD_NORMAL)
6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6670 if (CHIP_IS_E1H(bp))
6671 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6672
6607 else if (bp->wol) { 6673 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6674 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6675 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6676 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6677 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6678 preserve entry 0 which is used by the PMF */
6679 u8 entry = (BP_E1HVN(bp) + 1)*8;
6680
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6681 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6682 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6683
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6684 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6685 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6686 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6687
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6689
6624 } else 6690 } else
6625 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6691 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6626 6692
6693 if (CHIP_IS_E1(bp)) {
6694 struct mac_configuration_cmd *config =
6695 bnx2x_sp(bp, mcast_config);
6696
6697 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699 for (i = 0; i < config->hdr.length_6b; i++)
6700 CAM_INVALIDATE(config->config_table[i]);
6701
6702 config->hdr.length_6b = i;
6703 if (CHIP_REV_IS_SLOW(bp))
6704 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705 else
6706 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707 config->hdr.client_id = BP_CL_ID(bp);
6708 config->hdr.reserved1 = 0;
6709
6710 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714 } else { /* E1H */
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (CHIP_IS_E1H(bp))
6722 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6723
6627 /* Close multi and leading connections 6724 /* Close multi and leading connections
6628 Completions for ramrods are collected in a synchronous way */ 6725 Completions for ramrods are collected in a synchronous way */
6629 for_each_nondefault_queue(bp, i) 6726 for_each_nondefault_queue(bp, i)
6630 if (bnx2x_stop_multi(bp, i)) 6727 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6728 goto unload_error;
6632 6729
6633 if (CHIP_IS_E1H(bp)) 6730 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6731 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6732 BNX2X_ERR("Stop leading failed!\n");
6733#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6734 return -EBUSY;
6642 } 6735#else
6736 goto unload_error;
6643#endif 6737#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6738 }
6651 6739
6652unload_error: 6740unload_error:
@@ -6656,12 +6744,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6744 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6745 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6746 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6747 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6748 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6749 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6750 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6751 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6752 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6753 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6754 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6755 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6769,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6769 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6770 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6771 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6772 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6773 bnx2x_free_mem(bp);
6687 6774
6688 bp->state = BNX2X_STATE_CLOSED; 6775 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6820,93 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		/* Check if it is the UNDI driver
 		 * UNDI driver initializes CID offset for normal bell to 0x7
 		 */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-			/* save our func and fw_seq */
+			/* save our func */
 			int func = BP_FUNC(bp);
-			u16 fw_seq = bp->fw_seq;
+			u32 swap_en;
+			u32 swap_val;
 
 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
 
 			/* try unload UNDI on port 0 */
 			bp->func = 0;
-			bp->fw_seq = (SHMEM_RD(bp,
-					func_mb[bp->func].drv_mb_header) &
-					DRV_MSG_SEQ_NUMBER_MASK);
-
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
 			reset_code = bnx2x_fw_command(bp, reset_code);
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
 			/* if UNDI is loaded on the other port */
 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 
+				/* send "DONE" for previous unload */
+				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+				/* unload UNDI on port 1 */
 				bp->func = 1;
-				bp->fw_seq = (SHMEM_RD(bp,
-					func_mb[bp->func].drv_mb_header) &
-					DRV_MSG_SEQ_NUMBER_MASK);
-
-				bnx2x_fw_command(bp,
-						 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
-				bnx2x_fw_command(bp,
-						 DRV_MSG_CODE_UNLOAD_DONE);
-
-				/* restore our func and fw_seq */
-				bp->func = func;
-				bp->fw_seq = fw_seq;
+				bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+					DRV_MSG_SEQ_NUMBER_MASK);
+				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+				bnx2x_fw_command(bp, reset_code);
 			}
 
+			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
+				    HC_REG_CONFIG_0), 0x1000);
+
+			/* close input traffic and wait for it */
+			/* Do not rcv packets to BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			/* Do not direct rcv packets that are not for MCP to
+			 * the BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			/* clear AEU */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			msleep(10);
+
+			/* save NIG port swap info */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 			/* reset device */
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-			       0xd3ffff7f);
+			       0xd3ffffff);
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 			       0x1403);
+			/* take the NIG out of reset and restore swap values */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
+			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+			/* send unload done to the MCP */
+			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+			/* restore our func and fw_seq */
+			bp->func = func;
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
 		}
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
 	u32 val, val2, val3, val4, id;
+	u16 pmc;
 
 	/* Get the chip revision id and number. */
 	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6964,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
 			  " please upgrade BC\n", BNX2X_BC_VER, val);
 	}
-	BNX2X_DEV_INFO("%sWoL Capable\n",
-		       (bp->flags & NO_WOL_FLAG)? "Not " : "");
+
+	if (BP_E1HVN(bp) == 0) {
+		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+	} else {
+		/* no WOL capability for E1HVN != 0 */
+		bp->flags |= NO_WOL_FLAG;
+	}
+	BNX2X_DEV_INFO("%sWoL capable\n",
+		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");
 
 	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
 	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7406,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 		bp->mf_config =
 			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 
-		val =
-		   (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
-		    FUNC_MF_CFG_E1HOV_TAG_MASK);
+		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
+		       FUNC_MF_CFG_E1HOV_TAG_MASK);
 		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 
 			bp->e1hov = val;
@@ -7324,7 +7455,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
 	if (BP_NOMCP(bp)) {
 		/* only supposed to happen on emulation/FPGA */
-		BNX2X_ERR("warning rendom MAC workaround active\n");
+		BNX2X_ERR("warning random MAC workaround active\n");
 		random_ether_addr(bp->dev->dev_addr);
 		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 	}
@@ -7337,8 +7468,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	int func = BP_FUNC(bp);
 	int rc;
 
-	if (nomcp)
-		bp->flags |= NO_MCP_FLAG;
+	/* Disable interrupt handling until HW is initialized */
+	atomic_set(&bp->intr_sem, 1);
 
 	mutex_init(&bp->port.phy_mutex);
 
@@ -7377,8 +7508,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	bp->tx_ticks = 50;
 	bp->rx_ticks = 25;
 
-	bp->stats_ticks = 1000000 & 0xffff00;
-
 	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
 	bp->current_interval = (poll ? poll : bp->timer_interval);
 
@@ -7628,25 +7757,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
 			       struct ethtool_drvinfo *info)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	char phy_fw_ver[PHY_FW_VER_LEN];
+	u8 phy_fw_ver[PHY_FW_VER_LEN];
 
 	strcpy(info->driver, DRV_MODULE_NAME);
 	strcpy(info->version, DRV_MODULE_VERSION);
 
 	phy_fw_ver[0] = '\0';
 	if (bp->port.pmf) {
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
 					     (bp->state != BNX2X_STATE_CLOSED),
 					     phy_fw_ver, PHY_FW_VER_LEN);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 	}
 
-	snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
-		 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
-		 BCM_5710_FW_REVISION_VERSION,
-		 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
-		 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
+	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+		 (bp->common.bc_ver & 0xff0000) >> 16,
+		 (bp->common.bc_ver & 0xff00) >> 8,
+		 (bp->common.bc_ver & 0xff),
+		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
 	strcpy(info->bus_info, pci_name(bp->pdev));
 	info->n_stats = BNX2X_NUM_STATS;
 	info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8226,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
 	if (eeprom->magic == 0x00504859)
 		if (bp->port.pmf) {
 
-			bnx2x_phy_hw_lock(bp);
+			bnx2x_acquire_phy_lock(bp);
 			rc = bnx2x_flash_download(bp, BP_PORT(bp),
 						bp->link_params.ext_phy_config,
 						(bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8238,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
 				rc |= bnx2x_phy_init(&bp->link_params,
 						     &bp->link_vars);
 			}
-			bnx2x_phy_hw_unlock(bp);
+			bnx2x_release_phy_lock(bp);
 
 		} else /* Only the PMF can access the PHY */
 			return -EINVAL;
@@ -8128,7 +8257,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
 
 	coal->rx_coalesce_usecs = bp->rx_ticks;
 	coal->tx_coalesce_usecs = bp->tx_ticks;
-	coal->stats_block_coalesce_usecs = bp->stats_ticks;
 
 	return 0;
 }
@@ -8146,44 +8274,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
 	if (bp->tx_ticks > 0x3000)
 		bp->tx_ticks = 0x3000;
 
-	bp->stats_ticks = coal->stats_block_coalesce_usecs;
-	if (bp->stats_ticks > 0xffff00)
-		bp->stats_ticks = 0xffff00;
-	bp->stats_ticks &= 0xffff00;
-
 	if (netif_running(dev))
 		bnx2x_update_coalesce(bp);
 
 	return 0;
 }
 
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int changed = 0;
-	int rc = 0;
-
-	if (data & ETH_FLAG_LRO) {
-		if (!(dev->features & NETIF_F_LRO)) {
-			dev->features |= NETIF_F_LRO;
-			bp->flags |= TPA_ENABLE_FLAG;
-			changed = 1;
-		}
-
-	} else if (dev->features & NETIF_F_LRO) {
-		dev->features &= ~NETIF_F_LRO;
-		bp->flags &= ~TPA_ENABLE_FLAG;
-		changed = 1;
-	}
-
-	if (changed && netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
-}
-
 static void bnx2x_get_ringparam(struct net_device *dev,
 				struct ethtool_ringparam *ering)
 {
@@ -8266,7 +8362,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
 
 	if (epause->autoneg) {
 		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+			DP(NETIF_MSG_LINK, "autoneg not supported\n");
 			return -EINVAL;
 		}
 
@@ -8285,6 +8381,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
 	return 0;
 }
 
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int changed = 0;
+	int rc = 0;
+
+	/* TPA requires Rx CSUM offloading */
+	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+		if (!(dev->features & NETIF_F_LRO)) {
+			dev->features |= NETIF_F_LRO;
+			bp->flags |= TPA_ENABLE_FLAG;
+			changed = 1;
+		}
+
+	} else if (dev->features & NETIF_F_LRO) {
+		dev->features &= ~NETIF_F_LRO;
+		bp->flags &= ~TPA_ENABLE_FLAG;
+		changed = 1;
+	}
+
+	if (changed && netif_running(dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+	return rc;
+}
+
 static u32 bnx2x_get_rx_csum(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8419,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
 
 	bp->rx_csum = data;
-	return 0;
+
+	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
+	   TPA'ed packets will be discarded due to wrong TCP CSUM */
+	if (!data) {
+		u32 flags = ethtool_op_get_flags(dev);
+
+		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+	}
+
+	return rc;
 }
 
 static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8469,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 {
 	int idx, i, rc = -ENODEV;
 	u32 wr_val = 0;
+	int port = BP_PORT(bp);
 	static const struct {
 		u32 offset0;
 		u32 offset1;
@@ -8400,7 +8535,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 
 	for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 		u32 offset, mask, save_val, val;
-		int port = BP_PORT(bp);
 
 		offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
 		mask = reg_tbl[i].mask;
@@ -8446,16 +8580,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	static const struct {
 		char *name;
 		u32 offset;
-		u32 mask;
+		u32 e1_mask;
+		u32 e1h_mask;
 	} prty_tbl[] = {
-		{ "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
-		{ "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
-		{ "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
-		{ "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
-		{ "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
-		{ "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
+		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
+		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
+		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
 
-		{ NULL, 0xffffffff, 0 }
+		{ NULL, 0xffffffff, 0, 0 }
 	};
 
 	if (!netif_running(bp->dev))
@@ -8469,7 +8604,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	/* Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if (val & ~(prty_tbl[i].mask)) {
+		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_BMAC;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 
 	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, link_up);
 
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
 	if (!netif_running(dev))
 		return;
 
-	/* offline tests are not suppoerted in MF mode */
+	/* offline tests are not supported in MF mode */
 	if (IS_E1HMF(bp))
 		etest->flags &= ~ETH_TEST_FL_OFFLINE;
 
@@ -8827,76 +8963,99 @@ static const struct {
 	long offset;
 	int size;
 	u32 flags;
-	char string[ETH_GSTRING_LEN];
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+	u8 string[ETH_GSTRING_LEN];
 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
-	{ STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
-	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
-	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
+/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_FUNC, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
-				8, 1, "rx_ucast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
-				8, 1, "rx_mcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
-				8, 1, "rx_bcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
 	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-				8, 1, "tx_packets" },
+				8, STATS_FLAGS_FUNC, "tx_packets" },
 	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-				8, 0, "tx_mac_errors" },
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-				8, 0, "tx_carrier_errors" },
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
-				8, 0, "rx_crc_errors" },
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
-				8, 0, "rx_align_errors" },
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
-				8, 0, "tx_single_collisions" },
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
-				8, 0, "tx_multi_collisions" },
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
-				8, 0, "tx_deferred" },
+				8, STATS_FLAGS_PORT, "tx_deferred" },
 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
-				8, 0, "tx_excess_collisions" },
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
-				8, 0, "tx_late_collisions" },
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
-				8, 0, "tx_total_collisions" },
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
 	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-				8, 0, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
 	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-				8, 0, "rx_undersize_packets" },
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
 	{ STATS_OFFSET32(jabber_packets_received),
-				4, 1, "rx_oversize_packets" },
+				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
-				8, 0, "tx_64_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
-				8, 0, "tx_65_to_127_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
-				8, 0, "tx_128_to_255_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
-				8, 0, "tx_256_to_511_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
-				8, 0, "tx_512_to_1023_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
-				8, 0, "tx_1024_to_1522_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
-				8, 0, "tx_1523_to_9022_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
-				8, 0, "rx_xon_frames" },
+				8, STATS_FLAGS_PORT, "rx_xon_frames" },
 	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
-				8, 0, "rx_xoff_frames" },
-	{ STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
-	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
+				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
+	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xon_frames" },
+	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
 	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-				8, 0, "rx_mac_ctrl_frames" },
-	{ STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
-	{ STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
-	{ STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
-	{ STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
-/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(no_buff_discard),
+				4, STATS_FLAGS_FUNC, "rx_discards" },
+	{ STATS_OFFSET32(xxoverflow_discard),
+				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "brb_truncate" },
+/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
+/* 42 */{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
 };
 
+#define IS_NOT_E1HMF_STAT(bp, i) \
+		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+			if (IS_NOT_E1HMF_STAT(bp, i))
 				continue;
 			strcpy(buf + j*ETH_GSTRING_LEN,
 			       bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
 	int i, num_stats = 0;
 
 	for (i = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 		num_stats++;
 	}
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 	int i, j;
 
 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 
 		if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 				       PCI_PM_CTRL_PME_STATUS));
 
 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-		/* delay required during transition out of D3hot */
+			/* delay required during transition out of D3hot */
 			msleep(20);
 		break;
 
@@ -9104,17 +9263,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 
 	bnx2x_update_fpsb_idx(fp);
 
-	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
+	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
-	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
-	rmb(); /* bnx2x_has_work() reads the status block */
+	rmb(); /* BNX2X_HAS_WORK() reads the status block */
 
 	/* must not complete if we consumed full budget */
-	if ((work_done < budget) && !bnx2x_has_work(fp)) {
+	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 poll_panic:
@@ -9131,7 +9289,7 @@ poll_panic:
 
 
 /* we split the first BD into headers and data BDs
- * to ease the pain of our fellow micocode engineers
+ * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
  * So far this has only been observed to happen
  * in Other Operating Systems(TM)
@@ -9238,7 +9396,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 	/* Check if LSO packet needs to be copied:
 	   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
 	int wnd_size = MAX_FETCH_BD - 3;
-	/* Number of widnows to check */
+	/* Number of windows to check */
 	int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 	int wnd_idx = 0;
 	int frag_idx = 0;
@@ -9340,7 +9498,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 
-	/* First, check if we need to linearaize the skb
+	/* First, check if we need to linearize the skb
 	   (due to FW restrictions) */
 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
 		/* Statistics of linearization */
@@ -9349,7 +9507,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 			   "silently dropping this SKB\n");
 			dev_kfree_skb_any(skb);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 	}
 
@@ -9372,7 +9530,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 	tx_bd->general_data = (UNICAST_ADDRESS <<
 			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
-	tx_bd->general_data |= 1; /* header nbd */
+	/* header nbd */
+	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = fp->tx_bd_prod;
@@ -9451,7 +9610,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
 	tx_bd->nbd = cpu_to_le16(nbd);
 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 
@@ -9721,9 +9880,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev)) {
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp);
+			bnx2x_set_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp);
+			bnx2x_set_mac_addr_e1h(bp, 1);
 	}
 
 	return 0;
@@ -9734,6 +9893,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
 	int err;
 
 	switch (cmd) {
@@ -9749,7 +9909,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
 				      DEFAULT_PHY_DEV_ADDR,
 				      (data->reg_num & 0x1f), &mii_regval);
 		data->val_out = mii_regval;
@@ -9765,7 +9925,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
 				       DEFAULT_PHY_DEV_ADDR,
 				       (data->reg_num & 0x1f), data->val_in);
 		mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10301,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(dev);
 
-	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 
 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
@@ -10174,7 +10334,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	bnx2x_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 
-	rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 
 	rtnl_unlock();
 
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a9946724..a67b0c358ae4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * The registers description starts with the regsister Access type followed
+ * The registers description starts with the register Access type followed
  * by size in bits. For example [RW 32]. The access types are:
  * R  - Read only
  * RC - Clear on read
@@ -49,7 +49,7 @@
 /* [RW 10] Write client 0: Assert pause threshold. */
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_0				0x60068
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_1				0x6006c
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
 #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				0x60094
 /* [RW 1] Reset the design by software. */
 #define BRB1_REG_SOFT_RESET					0x600dc
@@ -740,6 +740,7 @@
 #define HC_REG_ATTN_MSG1_ADDR_L					0x108020
 #define HC_REG_ATTN_NUM_P0					0x108038
 #define HC_REG_ATTN_NUM_P1					0x10803c
+#define HC_REG_COMMAND_REG					0x108180
 #define HC_REG_CONFIG_0						0x108000
 #define HC_REG_CONFIG_1						0x108004
 #define HC_REG_FUNC_NUM_P0					0x1080ac
@@ -1372,6 +1373,23 @@
    be asserted). */
 #define MISC_REG_DRIVER_CONTROL_16				0xa5f0
 #define MISC_REG_DRIVER_CONTROL_16_SIZE				2
+/* [RW 32] The following driver registers(1...16) represent 16 drivers and
+   32 clients. Each client can be controlled by one driver only. One in each
+   bit represent that this driver control the appropriate client (Ex: bit 5
+   is set means this driver control client number 5). addr1 = set; addr0 =
+   clear; read from both addresses will give the same result = status. write
+   to address 1 will set a request to control all the clients that their
+   appropriate bit (in the write command) is set. if the client is free (the
+   appropriate bit in all the other drivers is clear) one will be written to
+   that driver register; if the client isn't free the bit will remain zero.
+   if the appropriate bit is set (the driver request to gain control on a
+   client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
+   interrupt will be asserted). write to address 0 will set a request to
+   free all the clients that their appropriate bit (in the write command) is
+   set. if the appropriate bit is clear (the driver request to free a client
+   it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
+   be asserted). */
+#define MISC_REG_DRIVER_CONTROL_7				0xa3c8
 /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
    only. */
 #define MISC_REG_E1HMF_MODE					0xa5f8
@@ -1394,13 +1412,13 @@
 #define MISC_REG_GPIO						0xa490
 /* [R 28] this field hold the last information that caused reserved
    attention. bits [19:0] - address; [22:20] function; [23] reserved;
-   [27:24] the master thatcaused the attention - according to the following
+   [27:24] the master that caused the attention - according to the following
    encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
    dbu; 8 = dmae */
 #define MISC_REG_GRC_RSV_ATTN					0xa3c0
 /* [R 28] this field hold the last information that caused timeout
    attention. bits [19:0] - address; [22:20] function; [23] reserved;
-   [27:24] the master thatcaused the attention - according to the following
+   [27:24] the master that caused the attention - according to the following
    encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
    dbu; 8 = dmae */
 #define MISC_REG_GRC_TIMEOUT_ATTN				0xa3c4
@@ -1677,6 +1695,7 @@
 /* [RW 8] init credit counter for port0 in LLH */
 #define NIG_REG_LLH0_XCM_INIT_CREDIT				0x10554
 #define NIG_REG_LLH0_XCM_MASK					0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK				0x10248
 /* [RW 1] send to BRB1 if no match on any of RMP rules. */
 #define NIG_REG_LLH1_BRB1_NOT_MCP				0x102dc
 /* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
 /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
    for port0 */
 #define NIG_REG_STAT0_BRB_DISCARD				0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE				0x105f8
 /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
    between 1024 and 1522 bytes for port0 */
 #define NIG_REG_STAT0_EGRESS_MAC_PKT0				0x10750
@@ -2298,7 +2320,7 @@
 /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
    -128k */
 #define PXP2_REG_RQ_QM_P_SIZE					0x120050
-/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */
+/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
 #define PXP2_REG_RQ_RBC_DONE					0x1201b0
 /* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
    001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
 /* [RW 2] 0 - 128B;  - 256B;  - 512B;  - 1024B; when the payload in the
    buffer reaches this number has_payload will be asserted */
 #define PXP2_REG_WR_DMAE_MPS					0x1205ec
-/* [RW 10] if Number of entries in dmae fifo will be higer than this
+/* [RW 10] if Number of entries in dmae fifo will be higher than this
    threshold then has_payload indication will be asserted; the default value
    should be equal to &gt; write MBS size! */
 #define PXP2_REG_WR_DMAE_TH					0x120368
@@ -2427,7 +2449,7 @@
 /* [RW 2] 0 - 128B;  - 256B;  - 512B;  - 1024B; when the payload in the
    buffer reaches this number has_payload will be asserted */
 #define PXP2_REG_WR_TSDM_MPS					0x1205d4
-/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
+/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
    threshold then has_payload indication will be asserted; the default value
    should be equal to &gt; write MBS size! */
 #define PXP2_REG_WR_USDMDP_TH					0x120348
@@ -3294,12 +3316,12 @@
 #define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE		0
 #define CFC_DEBUG1_REG_WRITE_AC					(0x1<<4)
 #define CFC_DEBUG1_REG_WRITE_AC_SIZE				4
-/* [R 1] debug only: This bit indicates wheter indicates that external
+/* [R 1] debug only: This bit indicates whether indicates that external
    buffer was wrapped (oldest data was thrown); Relevant only when
    ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
 #define DBG_REG_WRAP_ON_EXT_BUFFER				0xc124
 #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE				1
-/* [R 1] debug only: This bit indicates wheter the internal buffer was
+/* [R 1] debug only: This bit indicates whether the internal buffer was
    wrapped (oldest data was thrown) Relevant only when
    ~dbg_registers_debug_target=0 (internal buffer) */
 #define DBG_REG_WRAP_ON_INT_BUFFER				0xc128
@@ -4944,6 +4966,7 @@
 #define EMAC_RX_MODE_PROMISCUOUS				(1L<<8)
 #define EMAC_RX_MTU_SIZE_JUMBO_ENA				(1L<<31)
 #define EMAC_TX_MODE_EXT_PAUSE_EN				(1L<<3)
+#define EMAC_TX_MODE_FLOW_EN					(1L<<4)
 #define MISC_REGISTERS_GPIO_0					0
 #define MISC_REGISTERS_GPIO_1					1
 #define MISC_REGISTERS_GPIO_2					2
@@ -4959,6 +4982,7 @@
 #define MISC_REGISTERS_GPIO_PORT_SHIFT				4
 #define MISC_REGISTERS_GPIO_SET_POS				8
 #define MISC_REGISTERS_RESET_REG_1_CLEAR			0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG			(0x1<<7)
 #define MISC_REGISTERS_RESET_REG_1_SET				0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			0x598
 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			(0x1<<0)
@@ -4993,7 +5017,9 @@
 #define HW_LOCK_MAX_RESOURCE_VALUE				31
 #define HW_LOCK_RESOURCE_8072_MDIO				0
 #define HW_LOCK_RESOURCE_GPIO					1
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK				3
 #define HW_LOCK_RESOURCE_SPIO					2
+#define HW_LOCK_RESOURCE_UNDI					5
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			(1<<18)
 #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT			(1<<31)
 #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT			(1<<9)
@@ -5144,59 +5170,73 @@
 #define GRCBASE_MISC_AEU	GRCBASE_MISC
 
 
-/*the offset of the configuration space in the pci core register*/
+/* offset of configuration space in the pci core register */
 #define PCICFG_OFFSET					0x2000
 #define PCICFG_VENDOR_ID_OFFSET				0x00
 #define PCICFG_DEVICE_ID_OFFSET				0x02
 #define PCICFG_COMMAND_OFFSET				0x04
+#define PCICFG_COMMAND_IO_SPACE				(1<<0)
+#define PCICFG_COMMAND_MEM_SPACE			(1<<1)
+#define PCICFG_COMMAND_BUS_MASTER			(1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES			(1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES			(1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP			(1<<5)
+#define PCICFG_COMMAND_PERR_ENA				(1<<6)
+#define PCICFG_COMMAND_STEPPING				(1<<7)
+#define PCICFG_COMMAND_SERR_ENA				(1<<8)
+#define PCICFG_COMMAND_FAST_B2B				(1<<9)
+#define PCICFG_COMMAND_INT_DISABLE			(1<<10)
+#define PCICFG_COMMAND_RESERVED				(0x1f<<11)
 #define PCICFG_STATUS_OFFSET				0x06
 #define PCICFG_REVESION_ID				0x08
 #define PCICFG_CACHE_LINE_SIZE				0x0c
 #define PCICFG_LATENCY_TIMER				0x0d
 #define PCICFG_BAR_1_LOW				0x10
 #define PCICFG_BAR_1_HIGH				0x14
 #define PCICFG_BAR_2_LOW				0x18
 #define PCICFG_BAR_2_HIGH				0x1c
 #define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET		0x2c
 #define PCICFG_SUBSYSTEM_ID_OFFSET			0x2e
 #define PCICFG_INT_LINE					0x3c
 #define PCICFG_INT_PIN					0x3d
-#define PCICFG_PM_CSR_OFFSET				0x4c
-#define PCICFG_GRC_ADDRESS				0x78
-#define PCICFG_GRC_DATA					0x80
+#define PCICFG_PM_CAPABILITY				0x48
+#define PCICFG_PM_CAPABILITY_VERSION			(0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK			(1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED			(1<<20)
+#define PCICFG_PM_CAPABILITY_DSI			(1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT		(0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT			(1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT			(1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0			(1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1			(1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2			(1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT		(1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD		(1<<31)
+#define PCICFG_PM_CSR_OFFSET				0x4c
+#define PCICFG_PM_CSR_STATE				(0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE			(1<<8)
+#define PCICFG_PM_CSR_PME_STATUS			(1<<15)
+#define PCICFG_GRC_ADDRESS				0x78
+#define PCICFG_GRC_DATA					0x80
 #define PCICFG_DEVICE_CONTROL				0xb4
 #define PCICFG_LINK_CONTROL				0xbc
 
-#define PCICFG_COMMAND_IO_SPACE				(1<<0)
-#define PCICFG_COMMAND_MEM_SPACE			(1<<1)
-#define PCICFG_COMMAND_BUS_MASTER			(1<<2)
-#define PCICFG_COMMAND_SPECIAL_CYCLES			(1<<3)
-#define PCICFG_COMMAND_MWI_CYCLES			(1<<4)
-#define PCICFG_COMMAND_VGA_SNOOP			(1<<5)
-#define PCICFG_COMMAND_PERR_ENA				(1<<6)
-#define PCICFG_COMMAND_STEPPING				(1<<7)
-#define PCICFG_COMMAND_SERR_ENA				(1<<8)
-#define PCICFG_COMMAND_FAST_B2B				(1<<9)
-#define PCICFG_COMMAND_INT_DISABLE			(1<<10)
-#define PCICFG_COMMAND_RESERVED				(0x1f<<11)
-
-#define PCICFG_PM_CSR_STATE				(0x3<<0)
-#define PCICFG_PM_CSR_PME_STATUS			(1<<15)
 
 #define BAR_USTRORM_INTMEM				0x400000
 #define BAR_CSTRORM_INTMEM				0x410000
 #define BAR_XSTRORM_INTMEM				0x420000
 #define BAR_TSTRORM_INTMEM				0x430000
 
+/* for accessing the IGU in case of status block ACK */
 #define BAR_IGU_INTMEM					0x440000
 
 #define BAR_DOORBELL_OFFSET				0x800000
 
 #define BAR_ME_REGISTER					0x450000
 
-
-#define GRC_CONFIG_2_SIZE_REG		0x408 /* config_2 offset */
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG				0x408
 #define PCI_CONFIG_2_BAR1_SIZE				(0xfL<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_DISABLED			(0L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_64K			(1L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_128K			(2L<<0)
@@ -5213,11 +5253,11 @@
-#define PCI_CONFIG_2_BAR1_SIZE_256M		(13L<<0)
-#define PCI_CONFIG_2_BAR1_SIZE_512M		(14L<<0)
-#define PCI_CONFIG_2_BAR1_SIZE_1G		(15L<<0)
-#define PCI_CONFIG_2_BAR1_64ENA			(1L<<4)
-#define PCI_CONFIG_2_EXP_ROM_RETRY		(1L<<5)
-#define PCI_CONFIG_2_CFG_CYCLE_RETRY		(1L<<6)
-#define PCI_CONFIG_2_FIRST_CFG_DONE		(1L<<7)
-#define PCI_CONFIG_2_EXP_ROM_SIZE		(0xffL<<8)
-#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED	(0L<<8)
-#define PCI_CONFIG_2_EXP_ROM_SIZE_2K		(1L<<8)
-#define PCI_CONFIG_2_EXP_ROM_SIZE_4K		(2L<<8)
+#define PCI_CONFIG_2_BAR1_SIZE_256M			(13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M			(14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G			(15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA				(1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY			(1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY			(1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE			(1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE			(0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED		(0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K			(1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K			(2L<<8)
@@ -5234,46 +5274,44 @@
 #define PCI_CONFIG_2_EXP_ROM_SIZE_8M			(13L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_16M			(14L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_32M			(15L<<8)
 #define PCI_CONFIG_2_BAR_PREFETCH			(1L<<16)
 #define PCI_CONFIG_2_RESERVED0				(0x7fffL<<17)
 
 /* config_3 offset */
-#define GRC_CONFIG_3_SIZE_REG				(0x40c)
+#define GRC_CONFIG_3_SIZE_REG				0x40c
 #define PCI_CONFIG_3_STICKY_BYTE			(0xffL<<0)
 #define PCI_CONFIG_3_FORCE_PME				(1L<<24)
 #define PCI_CONFIG_3_PME_STATUS				(1L<<25)
 #define PCI_CONFIG_3_PME_ENABLE				(1L<<26)
 #define PCI_CONFIG_3_PM_STATE				(0x3L<<27)
 #define PCI_CONFIG_3_VAUX_PRESET			(1L<<30)
 #define PCI_CONFIG_3_PCI_POWER				(1L<<31)
-
-/* config_2 offset */
-#define GRC_CONFIG_2_SIZE_REG				0x408
 
 #define GRC_BAR2_CONFIG					0x4e0
 #define PCI_CONFIG_2_BAR2_SIZE				(0xfL<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_DISABLED			(0L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_64K			(1L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_128K			(2L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_256K			(3L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_512K			(4L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_1M			(5L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_2M			(6L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_4M			(7L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_8M			(8L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_16M			(9L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_32M			(10L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_64M			(11L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_128M			(12L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_256M			(13L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_512M			(14L<<0)
 #define PCI_CONFIG_2_BAR2_SIZE_1G			(15L<<0)
 #define PCI_CONFIG_2_BAR2_64ENA				(1L<<4)
+
+#define PCI_PM_DATA_A					0x410
+#define PCI_PM_DATA_B					0x414
+#define PCI_ID_VAL1					0x434
+#define PCI_ID_VAL2					0x438
 
-#define PCI_PM_DATA_A					(0x410)
-#define PCI_PM_DATA_B					(0x414)
-#define PCI_ID_VAL1					(0x434)
-#define PCI_ID_VAL2					(0x438)
 
 #define MDIO_REG_BANK_CL73_IEEEB0			0x0
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL		0x0
@@ -5522,6 +5560,8 @@ The other bits are reserved and should be zero*/
5522#define MDIO_PMA_REG_GEN_CTRL 0xca10 5560#define MDIO_PMA_REG_GEN_CTRL 0xca10
5523#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 5561#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
5524#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a 5562#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
5563#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
5564#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
5525#define MDIO_PMA_REG_ROM_VER1 0xca19 5565#define MDIO_PMA_REG_ROM_VER1 0xca19
5526#define MDIO_PMA_REG_ROM_VER2 0xca1a 5566#define MDIO_PMA_REG_ROM_VER2 0xca1a
5527#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b 5567#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
@@ -5576,7 +5616,8 @@ The other bits are reserved and should be zero*/
5576#define MDIO_AN_REG_LINK_STATUS 0x8304 5616#define MDIO_AN_REG_LINK_STATUS 0x8304
5577#define MDIO_AN_REG_CL37_CL73 0x8370 5617#define MDIO_AN_REG_CL37_CL73 0x8370
5578#define MDIO_AN_REG_CL37_AN 0xffe0 5618#define MDIO_AN_REG_CL37_AN 0xffe0
5579#define MDIO_AN_REG_CL37_FD 0xffe4 5619#define MDIO_AN_REG_CL37_FC_LD 0xffe4
5620#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5580 5621
5581 5622
5582#define IGU_FUNC_BASE 0x0400 5623#define IGU_FUNC_BASE 0x0400
@@ -5600,4 +5641,13 @@ The other bits are reserved and should be zero*/
5600#define IGU_INT_NOP 2 5641#define IGU_INT_NOP 2
5601#define IGU_INT_NOP2 3 5642#define IGU_INT_NOP2 3
5602 5643
5644#define COMMAND_REG_INT_ACK 0x0
5645#define COMMAND_REG_PROD_UPD 0x4
5646#define COMMAND_REG_ATTN_BITS_UPD 0x8
5647#define COMMAND_REG_ATTN_BITS_SET 0xc
5648#define COMMAND_REG_ATTN_BITS_CLR 0x10
5649#define COMMAND_REG_COALESCE_NOW 0x14
5650#define COMMAND_REG_SIMD_MASK 0x18
5651#define COMMAND_REG_SIMD_NOMASK 0x1c
5652
5603 5653
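These bnx2x register definitions follow one convention throughout: the wide macro (e.g. PCI_CONFIG_2_BAR2_SIZE) is the field mask, and the suffixed variants (PCI_CONFIG_2_BAR2_SIZE_64K through _1G) are the enumerated codes already shifted into position. A minimal sketch of how such a field is rewritten (the helper name and uint32_t typing are illustrative, not from the driver; the macros are the ones defined above):

	#include <stdint.h>

	/* Replace the 4-bit BAR2 size code in a GRC_BAR2_CONFIG value;
	 * size_code is one of the PCI_CONFIG_2_BAR2_SIZE_* macros.
	 */
	static uint32_t grc_bar2_set_size(uint32_t reg, uint32_t size_code)
	{
		reg &= ~PCI_CONFIG_2_BAR2_SIZE;		/* clear bits 3:0 */
		reg |= size_code & PCI_CONFIG_2_BAR2_SIZE;
		return reg;
	}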
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 73a86d09bba8..9c129248466c 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -7,13 +7,13 @@
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/of_device.h>
10 11
11#include <asm/system.h> 12#include <asm/system.h>
12#include <asm/sbus.h> 13#include <asm/sbus.h>
13#include <asm/dma.h> 14#include <asm/dma.h>
14#include <asm/oplib.h> 15#include <asm/oplib.h>
15#include <asm/prom.h> 16#include <asm/prom.h>
16#include <asm/of_device.h>
17#include <asm/bpp.h> 17#include <asm/bpp.h>
18#include <asm/irq.h> 18#include <asm/irq.h>
19 19
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index aeeec5588afd..e41766d08035 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -17,11 +17,11 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/of_device.h>
20 21
21#include <asm/hypervisor.h> 22#include <asm/hypervisor.h>
22#include <asm/spitfire.h> 23#include <asm/spitfire.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/of_device.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26 26
27#if defined(CONFIG_MAGIC_SYSRQ) 27#if defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 15ee497e1c78..29b4458abf74 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -32,11 +32,11 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/of_device.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
38#include <asm/prom.h> 39#include <asm/prom.h>
39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index e24e68235088..a378464f9292 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -35,11 +35,11 @@
35#include <linux/serial_reg.h> 35#include <linux/serial_reg.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/of_device.h>
38 39
39#include <asm/io.h> 40#include <asm/io.h>
40#include <asm/irq.h> 41#include <asm/irq.h>
41#include <asm/prom.h> 42#include <asm/prom.h>
42#include <asm/of_device.h>
43 43
44#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 44#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
45#define SUPPORT_SYSRQ 45#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0f3d69b86d67..3cb4c8aee13f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -32,11 +32,11 @@
32#include <linux/serio.h> 32#include <linux/serio.h>
33#endif 33#endif
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/of_device.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
38#include <asm/prom.h> 39#include <asm/prom.h>
39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823cdf62a..bcefbddeba50 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@ config USB
95 95
96source "drivers/usb/core/Kconfig" 96source "drivers/usb/core/Kconfig"
97 97
98source "drivers/usb/mon/Kconfig"
99
98source "drivers/usb/host/Kconfig" 100source "drivers/usb/host/Kconfig"
99 101
102source "drivers/usb/musb/Kconfig"
103
100source "drivers/usb/class/Kconfig" 104source "drivers/usb/class/Kconfig"
101 105
102source "drivers/usb/storage/Kconfig" 106source "drivers/usb/storage/Kconfig"
103 107
104source "drivers/usb/image/Kconfig" 108source "drivers/usb/image/Kconfig"
105 109
106source "drivers/usb/mon/Kconfig"
107
108comment "USB port drivers" 110comment "USB port drivers"
109 depends on USB 111 depends on USB
110 112
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd0d77c..9aea43a8c4ad 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
602 offd = le32_to_cpu(buf[offb++]); 602 offd = le32_to_cpu(buf[offb++]);
603 if (offd >= size) { 603 if (offd >= size) {
604 if (printk_ratelimit()) 604 if (printk_ratelimit())
605 usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n", 605 usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
606 offd, cm); 606 offd, cm);
607 ret = -EIO; 607 ret = -EIO;
608 goto cleanup; 608 goto cleanup;
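The one-character format fix above is easy to misread: "#%x" prints a literal '#' followed by bare hex digits, while "%#x" requests printf's alternate form with a "0x" prefix. A standalone illustration (plain C, values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		printf("#%x\n", 26);	/* old form: "#1a", no 0x prefix */
		printf("%#x\n", 26);	/* new form: "0x1a" */
		return 0;
	}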
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b1871f23..efc4373ededb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
51 */ 51 */
52 52
53#undef DEBUG 53#undef DEBUG
54#undef VERBOSE_DEBUG
54 55
55#include <linux/kernel.h> 56#include <linux/kernel.h>
56#include <linux/errno.h> 57#include <linux/errno.h>
@@ -70,6 +71,9 @@
70 71
71#include "cdc-acm.h" 72#include "cdc-acm.h"
72 73
74
75#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */
76
73/* 77/*
74 * Version Information 78 * Version Information
75 */ 79 */
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex);
85 89
86#define ACM_READY(acm) (acm && acm->dev && acm->used) 90#define ACM_READY(acm) (acm && acm->dev && acm->used)
87 91
92#ifdef VERBOSE_DEBUG
93#define verbose 1
94#else
95#define verbose 0
96#endif
97
88/* 98/*
89 * Functions for ACM control messages. 99 * Functions for ACM control messages.
90 */ 100 */
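The verbose macro introduced above is the usual compile-time flag idiom: it is always defined, as 1 or 0, so call sites such as the tx logging in acm_write_bulk() below stay type-checked in every build, while the compiler folds the constant out of the condition when VERBOSE_DEBUG is unset. A minimal sketch of the same idiom outside the driver (function and names hypothetical):

	#include <stdio.h>

	#ifdef VERBOSE_DEBUG
	#define verbose 1
	#else
	#define verbose 0
	#endif

	static void report_tx(int status, int sent, int asked)
	{
		/* with verbose == 0 this reduces to the two runtime checks */
		if (verbose || status || sent != asked)
			printf("tx %d/%d, status %d\n", sent, asked, status);
	}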
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm)
136static int acm_wb_is_avail(struct acm *acm) 146static int acm_wb_is_avail(struct acm *acm)
137{ 147{
138 int i, n; 148 int i, n;
149 unsigned long flags;
139 150
140 n = ACM_NW; 151 n = ACM_NW;
152 spin_lock_irqsave(&acm->write_lock, flags);
141 for (i = 0; i < ACM_NW; i++) { 153 for (i = 0; i < ACM_NW; i++) {
142 n -= acm->wb[i].use; 154 n -= acm->wb[i].use;
143 } 155 }
156 spin_unlock_irqrestore(&acm->write_lock, flags);
144 return n; 157 return n;
145} 158}
146 159
147static inline int acm_wb_is_used(struct acm *acm, int wbn)
148{
149 return acm->wb[wbn].use;
150}
151
152/* 160/*
153 * Finish write. 161 * Finish write.
154 */ 162 */
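The new spin_lock_irqsave() pair in acm_wb_is_avail() matters because wb[i].use is flipped by acm_write_start() and acm_write_done() while holding write_lock; summing the slots without that lock can interleave with a writer and report a count that was never true at any instant. A userspace analogue of the fixed shape, with the spinlock replaced by a mutex (types and names hypothetical):

	#include <pthread.h>

	struct slot { int use; };

	/* The snapshot is consistent only because we hold the same lock
	 * the producers and consumers take while flipping slot->use.
	 */
	static int free_slots(struct slot *wb, int n, pthread_mutex_t *lock)
	{
		int i, avail = n;

		pthread_mutex_lock(lock);
		for (i = 0; i < n; i++)
			avail -= wb[i].use;
		pthread_mutex_unlock(lock);
		return avail;
	}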
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
157 unsigned long flags; 165 unsigned long flags;
158 166
159 spin_lock_irqsave(&acm->write_lock, flags); 167 spin_lock_irqsave(&acm->write_lock, flags);
160 acm->write_ready = 1;
161 wb->use = 0; 168 wb->use = 0;
162 acm->transmitting--; 169 acm->transmitting--;
163 spin_unlock_irqrestore(&acm->write_lock, flags); 170 spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
190static int acm_write_start(struct acm *acm, int wbn) 197static int acm_write_start(struct acm *acm, int wbn)
191{ 198{
192 unsigned long flags; 199 unsigned long flags;
193 struct acm_wb *wb; 200 struct acm_wb *wb = &acm->wb[wbn];
194 int rc; 201 int rc;
195 202
196 spin_lock_irqsave(&acm->write_lock, flags); 203 spin_lock_irqsave(&acm->write_lock, flags);
197 if (!acm->dev) { 204 if (!acm->dev) {
205 wb->use = 0;
198 spin_unlock_irqrestore(&acm->write_lock, flags); 206 spin_unlock_irqrestore(&acm->write_lock, flags);
199 return -ENODEV; 207 return -ENODEV;
200 } 208 }
201 209
202 if (!acm->write_ready) {
203 spin_unlock_irqrestore(&acm->write_lock, flags);
204 return 0; /* A white lie */
205 }
206
207 wb = &acm->wb[wbn];
208 if(acm_wb_is_avail(acm) <= 1)
209 acm->write_ready = 0;
210
211 dbg("%s susp_count: %d", __func__, acm->susp_count); 210 dbg("%s susp_count: %d", __func__, acm->susp_count);
212 if (acm->susp_count) { 211 if (acm->susp_count) {
213 acm->old_ready = acm->write_ready;
214 acm->delayed_wb = wb; 212 acm->delayed_wb = wb;
215 acm->write_ready = 0;
216 schedule_work(&acm->waker); 213 schedule_work(&acm->waker);
217 spin_unlock_irqrestore(&acm->write_lock, flags); 214 spin_unlock_irqrestore(&acm->write_lock, flags);
218 return 0; /* A white lie */ 215 return 0; /* A white lie */
219 } 216 }
220 usb_mark_last_busy(acm->dev); 217 usb_mark_last_busy(acm->dev);
221 218
222 if (!acm_wb_is_used(acm, wbn)) {
223 spin_unlock_irqrestore(&acm->write_lock, flags);
224 return 0;
225 }
226
227 rc = acm_start_wb(acm, wb); 219 rc = acm_start_wb(acm, wb);
228 spin_unlock_irqrestore(&acm->write_lock, flags); 220 spin_unlock_irqrestore(&acm->write_lock, flags);
229 221
@@ -488,22 +480,28 @@ urbs:
488/* data interface wrote those outgoing bytes */ 480/* data interface wrote those outgoing bytes */
489static void acm_write_bulk(struct urb *urb) 481static void acm_write_bulk(struct urb *urb)
490{ 482{
491 struct acm *acm;
492 struct acm_wb *wb = urb->context; 483 struct acm_wb *wb = urb->context;
484 struct acm *acm = wb->instance;
493 485
494 dbg("Entering acm_write_bulk with status %d", urb->status); 486 if (verbose || urb->status
487 || (urb->actual_length != urb->transfer_buffer_length))
488 dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
489 urb->actual_length,
490 urb->transfer_buffer_length,
491 urb->status);
495 492
496 acm = wb->instance;
497 acm_write_done(acm, wb); 493 acm_write_done(acm, wb);
498 if (ACM_READY(acm)) 494 if (ACM_READY(acm))
499 schedule_work(&acm->work); 495 schedule_work(&acm->work);
496 else
497 wake_up_interruptible(&acm->drain_wait);
500} 498}
501 499
502static void acm_softint(struct work_struct *work) 500static void acm_softint(struct work_struct *work)
503{ 501{
504 struct acm *acm = container_of(work, struct acm, work); 502 struct acm *acm = container_of(work, struct acm, work);
505 dbg("Entering acm_softint."); 503
506 504 dev_vdbg(&acm->data->dev, "tx work\n");
507 if (!ACM_READY(acm)) 505 if (!ACM_READY(acm))
508 return; 506 return;
509 tty_wakeup(acm->tty); 507 tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work)
512static void acm_waker(struct work_struct *waker) 510static void acm_waker(struct work_struct *waker)
513{ 511{
514 struct acm *acm = container_of(waker, struct acm, waker); 512 struct acm *acm = container_of(waker, struct acm, waker);
515 long flags;
516 int rv; 513 int rv;
517 514
518 rv = usb_autopm_get_interface(acm->control); 515 rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker)
524 acm_start_wb(acm, acm->delayed_wb); 521 acm_start_wb(acm, acm->delayed_wb);
525 acm->delayed_wb = NULL; 522 acm->delayed_wb = NULL;
526 } 523 }
527 spin_lock_irqsave(&acm->write_lock, flags);
528 acm->write_ready = acm->old_ready;
529 spin_unlock_irqrestore(&acm->write_lock, flags);
530 usb_autopm_put_interface(acm->control); 524 usb_autopm_put_interface(acm->control);
531} 525}
532 526
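Read together with the acm_write_start() hunk above, the autosuspend write path is now a single handoff; a comment-style restatement (all names from this patch):

	/* While susp_count != 0, acm_write_start() parks the pending
	 * buffer in acm->delayed_wb and schedules acm_waker(); the waker
	 * autoresumes the interface and submits the parked buffer.  The
	 * old write_ready/old_ready shadow state is gone entirely.
	 */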
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm)
628 kfree(acm); 622 kfree(acm);
629} 623}
630 624
625static int acm_tty_chars_in_buffer(struct tty_struct *tty);
626
631static void acm_tty_close(struct tty_struct *tty, struct file *filp) 627static void acm_tty_close(struct tty_struct *tty, struct file *filp)
632{ 628{
633 struct acm *acm = tty->driver_data; 629 struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
642 if (acm->dev) { 638 if (acm->dev) {
643 usb_autopm_get_interface(acm->control); 639 usb_autopm_get_interface(acm->control);
644 acm_set_control(acm, acm->ctrlout = 0); 640 acm_set_control(acm, acm->ctrlout = 0);
641
642 /* try letting the last writes drain naturally */
643 wait_event_interruptible_timeout(acm->drain_wait,
644 (ACM_NW == acm_wb_is_avail(acm))
645 || !acm->dev,
646 ACM_CLOSE_TIMEOUT * HZ);
647
645 usb_kill_urb(acm->ctrlurb); 648 usb_kill_urb(acm->ctrlurb);
646 for (i = 0; i < ACM_NW; i++) 649 for (i = 0; i < ACM_NW; i++)
647 usb_kill_urb(acm->wb[i].urb); 650 usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
697 * Do not let the line discipline know that we have a reserve, 700
698 * or it might get too enthusiastic. 701 * or it might get too enthusiastic.
699 */ 702 */
700 return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0; 703 return acm_wb_is_avail(acm) ? acm->writesize : 0;
701} 704}
702 705
703static int acm_tty_chars_in_buffer(struct tty_struct *tty) 706static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@ skip_normal_probe:
1072 acm->urb_task.data = (unsigned long) acm; 1075 acm->urb_task.data = (unsigned long) acm;
1073 INIT_WORK(&acm->work, acm_softint); 1076 INIT_WORK(&acm->work, acm_softint);
1074 INIT_WORK(&acm->waker, acm_waker); 1077 INIT_WORK(&acm->waker, acm_waker);
1078 init_waitqueue_head(&acm->drain_wait);
1075 spin_lock_init(&acm->throttle_lock); 1079 spin_lock_init(&acm->throttle_lock);
1076 spin_lock_init(&acm->write_lock); 1080 spin_lock_init(&acm->write_lock);
1077 spin_lock_init(&acm->read_lock); 1081 spin_lock_init(&acm->read_lock);
1078 mutex_init(&acm->mutex); 1082 mutex_init(&acm->mutex);
1079 acm->write_ready = 1;
1080 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); 1083 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
1081 1084
1082 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); 1085 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@ skip_normal_probe:
1108 rcv->instance = acm; 1111 rcv->instance = acm;
1109 } 1112 }
1110 for (i = 0; i < num_rx_buf; i++) { 1113 for (i = 0; i < num_rx_buf; i++) {
1111 struct acm_rb *buf = &(acm->rb[i]); 1114 struct acm_rb *rb = &(acm->rb[i]);
1112 1115
1113 if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) { 1116 rb->base = usb_buffer_alloc(acm->dev, readsize,
1117 GFP_KERNEL, &rb->dma);
1118 if (!rb->base) {
1114 dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); 1119 dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
1115 goto alloc_fail7; 1120 goto alloc_fail7;
1116 } 1121 }
@@ -1172,6 +1177,7 @@ skip_countries:
1172 acm_set_line(acm, &acm->line); 1177 acm_set_line(acm, &acm->line);
1173 1178
1174 usb_driver_claim_interface(&acm_driver, data_interface, acm); 1179 usb_driver_claim_interface(&acm_driver, data_interface, acm);
1180 usb_set_intfdata(data_interface, acm);
1175 1181
1176 usb_get_intf(control_interface); 1182 usb_get_intf(control_interface);
1177 tty_register_device(acm_tty_driver, minor, &control_interface->dev); 1183 tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf)
1221 struct acm *acm = usb_get_intfdata(intf); 1227 struct acm *acm = usb_get_intfdata(intf);
1222 struct usb_device *usb_dev = interface_to_usbdev(intf); 1228 struct usb_device *usb_dev = interface_to_usbdev(intf);
1223 1229
1224 mutex_lock(&open_mutex); 1230 /* sibling interface is already cleaning up */
1225 if (!acm || !acm->dev) { 1231 if (!acm)
1226 mutex_unlock(&open_mutex);
1227 return; 1232 return;
1228 } 1233
1234 mutex_lock(&open_mutex);
1229 if (acm->country_codes){ 1235 if (acm->country_codes){
1230 device_remove_file(&acm->control->dev, 1236 device_remove_file(&acm->control->dev,
1231 &dev_attr_wCountryCodes); 1237 &dev_attr_wCountryCodes);
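The rewritten entry check in acm_disconnect() pairs with the usb_set_intfdata(data_interface, acm) call added to probe above: both the control and the data interface now carry the same acm pointer, so disconnect runs once per interface. A comment-style restatement of the race being closed (inferring the teardown order from the new "sibling interface is already cleaning up" comment):

	/* The first disconnect to run tears the device down; by the time
	 * its sibling interface's disconnect is called, intfdata is NULL,
	 * and the early return keeps it from touching open_mutex or the
	 * already-freed state.
	 */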
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaaab7c5..1f95e7aa1b66 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@ struct acm {
106 struct list_head spare_read_bufs; 106 struct list_head spare_read_bufs;
107 struct list_head filled_read_bufs; 107 struct list_head filled_read_bufs;
108 int write_used; /* number of non-empty write buffers */ 108 int write_used; /* number of non-empty write buffers */
109 int write_ready; /* write urb is not running */
110 int old_ready;
111 int processing; 109 int processing;
112 int transmitting; 110 int transmitting;
113 spinlock_t write_lock; 111 spinlock_t write_lock;
@@ -115,6 +113,7 @@ struct acm {
115 struct usb_cdc_line_coding line; /* bits, stop, parity */ 113 struct usb_cdc_line_coding line; /* bits, stop, parity */
116 struct work_struct work; /* work queue entry for line discipline waking up */ 114 struct work_struct work; /* work queue entry for line discipline waking up */
117 struct work_struct waker; 115 struct work_struct waker;
116 wait_queue_head_t drain_wait; /* close processing */
118 struct tasklet_struct urb_task; /* rx processing */ 117 struct tasklet_struct urb_task; /* rx processing */
119 spinlock_t throttle_lock; /* synchronize throttling and read callback */ 118
120 unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ 119 unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
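The drain_wait queue added above completes a three-part pattern, every piece of which appears elsewhere in this patch:

	init_waitqueue_head(&acm->drain_wait);		/* probe */

	wake_up_interruptible(&acm->drain_wait);	/* acm_write_bulk() */

	wait_event_interruptible_timeout(acm->drain_wait,	/* acm_tty_close() */
			(ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
			ACM_CLOSE_TIMEOUT * HZ);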
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e14a5c5..2be37fe466f2 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -774,7 +774,6 @@ void usb_deregister(struct usb_driver *driver)
774} 774}
775EXPORT_SYMBOL_GPL(usb_deregister); 775EXPORT_SYMBOL_GPL(usb_deregister);
776 776
777
778/* Forced unbinding of a USB interface driver, either because 777/* Forced unbinding of a USB interface driver, either because
779 * it doesn't support pre_reset/post_reset/reset_resume or 778 * it doesn't support pre_reset/post_reset/reset_resume or
780 * because it doesn't support suspend/resume. 779 * because it doesn't support suspend/resume.
@@ -821,6 +820,8 @@ void usb_rebind_intf(struct usb_interface *intf)
821 dev_warn(&intf->dev, "rebind failed: %d\n", rc); 820 dev_warn(&intf->dev, "rebind failed: %d\n", rc);
822} 821}
823 822
823#ifdef CONFIG_PM
824
824#define DO_UNBIND 0 825#define DO_UNBIND 0
825#define DO_REBIND 1 826#define DO_REBIND 1
826 827
@@ -872,8 +873,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
872 } 873 }
873} 874}
874 875
875#ifdef CONFIG_PM
876
877/* Caller has locked udev's pm_mutex */ 876/* Caller has locked udev's pm_mutex */
878static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) 877static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
879{ 878{
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 586d6f1376cf..286b4431a097 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1091 continue; 1091 continue;
1092 dev_dbg(&dev->dev, "unregistering interface %s\n", 1092 dev_dbg(&dev->dev, "unregistering interface %s\n",
1093 dev_name(&interface->dev)); 1093 dev_name(&interface->dev));
1094 device_del(&interface->dev);
1095 usb_remove_sysfs_intf_files(interface); 1094 usb_remove_sysfs_intf_files(interface);
1095 device_del(&interface->dev);
1096 } 1096 }
1097 1097
1098 /* Now that the interfaces are unbound, nobody should 1098 /* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b1116a..acc95b2ac6f8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@ config USB_LH7A40X
284 default USB_GADGET 284 default USB_GADGET
285 select USB_GADGET_SELECTED 285 select USB_GADGET_SELECTED
286 286
287# built in ../musb along with host support
288config USB_GADGET_MUSB_HDRC
289 boolean "Inventra HDRC USB Peripheral (TI, ...)"
290 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
291 select USB_GADGET_DUALSPEED
292 select USB_GADGET_SELECTED
293 help
294 This OTG-capable silicon IP is used in dual designs including
295 the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
296
287config USB_GADGET_OMAP 297config USB_GADGET_OMAP
288 boolean "OMAP USB Device Controller" 298 boolean "OMAP USB Device Controller"
289 depends on ARCH_OMAP 299 depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406af9ee..7600a0c78753 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
542 req->req.context = dum; 542 req->req.context = dum;
543 req->req.complete = fifo_complete; 543 req->req.complete = fifo_complete;
544 544
545 list_add_tail(&req->queue, &ep->queue);
545 spin_unlock (&dum->lock); 546 spin_unlock (&dum->lock);
546 _req->actual = _req->length; 547 _req->actual = _req->length;
547 _req->status = 0; 548 _req->status = 0;
548 _req->complete (_ep, _req); 549 _req->complete (_ep, _req);
549 spin_lock (&dum->lock); 550 spin_lock (&dum->lock);
550 } 551 } else
551 list_add_tail (&req->queue, &ep->queue); 552 list_add_tail(&req->queue, &ep->queue);
552 spin_unlock_irqrestore (&dum->lock, flags); 553 spin_unlock_irqrestore (&dum->lock, flags);
553 554
554 /* real hardware would likely enable transfers here, in case 555 /* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf27895..5ee1590b8e9c 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@ struct f_acm {
47 u8 ctrl_id, data_id; 47 u8 ctrl_id, data_id;
48 u8 port_num; 48 u8 port_num;
49 49
50 struct usb_descriptor_header **fs_function; 50 u8 pending;
51
52 /* lock is mostly for pending and notify_req ... they get accessed
53 * by callbacks both from tty (open/close/break) under its spinlock,
54 * and notify_req.complete() which can't use that lock.
55 */
56 spinlock_t lock;
57
51 struct acm_ep_descs fs; 58 struct acm_ep_descs fs;
52 struct usb_descriptor_header **hs_function;
53 struct acm_ep_descs hs; 59 struct acm_ep_descs hs;
54 60
55 struct usb_ep *notify; 61 struct usb_ep *notify;
56 struct usb_endpoint_descriptor *notify_desc; 62 struct usb_endpoint_descriptor *notify_desc;
63 struct usb_request *notify_req;
57 64
58 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ 65 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
66
67 /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
59 u16 port_handshake_bits; 68 u16 port_handshake_bits;
60#define RS232_RTS (1 << 1) /* unused with full duplex */ 69#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
61#define RS232_DTR (1 << 0) /* host is ready for data r/w */ 70#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
71
72 /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
73 u16 serial_state;
74#define ACM_CTRL_OVERRUN (1 << 6)
75#define ACM_CTRL_PARITY (1 << 5)
76#define ACM_CTRL_FRAMING (1 << 4)
77#define ACM_CTRL_RI (1 << 3)
78#define ACM_CTRL_BRK (1 << 2)
79#define ACM_CTRL_DSR (1 << 1)
80#define ACM_CTRL_DCD (1 << 0)
62}; 81};
63 82
64static inline struct f_acm *func_to_acm(struct usb_function *f) 83static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f)
66 return container_of(f, struct f_acm, port.func); 85 return container_of(f, struct f_acm, port.func);
67} 86}
68 87
88static inline struct f_acm *port_to_acm(struct gserial *p)
89{
90 return container_of(p, struct f_acm, port);
91}
92
69/*-------------------------------------------------------------------------*/ 93/*-------------------------------------------------------------------------*/
70 94
71/* notification endpoint uses smallish and infrequent fixed-size messages */ 95/* notification endpoint uses smallish and infrequent fixed-size messages */
72 96
73#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ 97#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
74#define GS_NOTIFY_MAXPACKET 8 98#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
75 99
76/* interface and class descriptors: */ 100/* interface and class descriptors: */
77 101
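The bump of GS_NOTIFY_MAXPACKET from 8 to 10 is exactly the size of a SerialState message: the 8-byte CDC notification header plus a 2-byte little-endian bitmap of the ACM_CTRL_* bits (CDC 1.1 section 6.3.5). A sketch that makes the arithmetic explicit (the macro names here are hypothetical; struct usb_cdc_notification comes from <linux/usb/cdc.h>):

	#include <linux/usb/cdc.h>

	#define SERIAL_STATE_BYTES	2

	/* 8-byte header + 2-byte wData payload == 10 == GS_NOTIFY_MAXPACKET */
	#define SERIAL_STATE_NOTIFY_LEN \
		(sizeof(struct usb_cdc_notification) + SERIAL_STATE_BYTES)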
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
117 .bLength = sizeof(acm_descriptor), 141 .bLength = sizeof(acm_descriptor),
118 .bDescriptorType = USB_DT_CS_INTERFACE, 142 .bDescriptorType = USB_DT_CS_INTERFACE,
119 .bDescriptorSubType = USB_CDC_ACM_TYPE, 143 .bDescriptorSubType = USB_CDC_ACM_TYPE,
120 .bmCapabilities = (1 << 1), 144 .bmCapabilities = USB_CDC_CAP_LINE,
121}; 145};
122 146
123static struct usb_cdc_union_desc acm_union_desc __initdata = { 147static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
277 301
278 /* composite driver infrastructure handles everything except 302 /* composite driver infrastructure handles everything except
279 * CDC class messages; interface activation uses set_alt(). 303 * CDC class messages; interface activation uses set_alt().
304 *
305 * Note CDC spec table 4 lists the ACM request profile. It requires
306 * encapsulated command support ... we don't handle any, and respond
307 * to them by stalling. Options include get/set/clear comm features
308 * (not that useful) and SEND_BREAK.
280 */ 309 */
281 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { 310 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
282 311
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
312 value = 0; 341 value = 0;
313 342
314 /* FIXME we should not allow data to flow until the 343 /* FIXME we should not allow data to flow until the
315 * host sets the RS232_DTR bit; and when it clears 344 * host sets the ACM_CTRL_DTR bit; and when it clears
316 * that bit, we should return to that no-flow state. 345 * that bit, we should return to that no-flow state.
317 */ 346 */
318 acm->port_handshake_bits = w_value; 347 acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
350 /* we know alt == 0, so this is an activation or a reset */ 379 /* we know alt == 0, so this is an activation or a reset */
351 380
352 if (intf == acm->ctrl_id) { 381 if (intf == acm->ctrl_id) {
353 /* REVISIT this may need more work when we start to
354 * send notifications ...
355 */
356 if (acm->notify->driver_data) { 382 if (acm->notify->driver_data) {
357 VDBG(cdev, "reset acm control interface %d\n", intf); 383 VDBG(cdev, "reset acm control interface %d\n", intf);
358 usb_ep_disable(acm->notify); 384 usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f)
397 423
398/*-------------------------------------------------------------------------*/ 424/*-------------------------------------------------------------------------*/
399 425
426/**
427 * acm_cdc_notify - issue CDC notification to host
428 * @acm: wraps host to be notified
429 * @type: notification type
430 * @value: wValue field; refer to the CDC specs.
431 * @data: data to be sent
432 * @length: size of data
433 * Context: irqs blocked, acm->lock held, acm->notify_req non-null
434 *
435 * Returns zero on success or a negative errno.
436 *
437 * See section 6.3.5 of the CDC 1.1 specification for information
438 * about the only notification we issue: SerialState change.
439 */
440static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
441 void *data, unsigned length)
442{
443 struct usb_ep *ep = acm->notify;
444 struct usb_request *req;
445 struct usb_cdc_notification *notify;
446 const unsigned len = sizeof(*notify) + length;
447 void *buf;
448 int status;
449
450 req = acm->notify_req;
451 acm->notify_req = NULL;
452 acm->pending = false;
453
454 req->length = len;
455 notify = req->buf;
456 buf = notify + 1;
457
458 notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
459 | USB_RECIP_INTERFACE;
460 notify->bNotificationType = type;
461 notify->wValue = cpu_to_le16(value);
462 notify->wIndex = cpu_to_le16(acm->ctrl_id);
463 notify->wLength = cpu_to_le16(length);
464 memcpy(buf, data, length);
465
466 status = usb_ep_queue(ep, req, GFP_ATOMIC);
467 if (status < 0) {
468 ERROR(acm->port.func.config->cdev,
469 "acm ttyGS%d can't notify serial state, %d\n",
470 acm->port_num, status);
471 acm->notify_req = req;
472 }
473
474 return status;
475}
476
477static int acm_notify_serial_state(struct f_acm *acm)
478{
479 struct usb_composite_dev *cdev = acm->port.func.config->cdev;
480 int status;
481
482 spin_lock(&acm->lock);
483 if (acm->notify_req) {
484 DBG(cdev, "acm ttyGS%d serial state %04x\n",
485 acm->port_num, acm->serial_state);
486 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
487 0, &acm->serial_state, sizeof(acm->serial_state));
488 } else {
489 acm->pending = true;
490 status = 0;
491 }
492 spin_unlock(&acm->lock);
493 return status;
494}
495
496static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
497{
498 struct f_acm *acm = req->context;
499 u8 doit = false;
500
501 /* on this call path we do NOT hold the port spinlock,
502 * which is why ACM needs its own spinlock
503 */
504 spin_lock(&acm->lock);
505 if (req->status != -ESHUTDOWN)
506 doit = acm->pending;
507 acm->notify_req = req;
508 spin_unlock(&acm->lock);
509
510 if (doit)
511 acm_notify_serial_state(acm);
512}
513
514/* connect == the TTY link is open */
515
516static void acm_connect(struct gserial *port)
517{
518 struct f_acm *acm = port_to_acm(port);
519
520 acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
521 acm_notify_serial_state(acm);
522}
523
524static void acm_disconnect(struct gserial *port)
525{
526 struct f_acm *acm = port_to_acm(port);
527
528 acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
529 acm_notify_serial_state(acm);
530}
531
532static int acm_send_break(struct gserial *port, int duration)
533{
534 struct f_acm *acm = port_to_acm(port);
535 u16 state;
536
537 state = acm->serial_state;
538 state &= ~ACM_CTRL_BRK;
539 if (duration)
540 state |= ACM_CTRL_BRK;
541
542 acm->serial_state = state;
543 return acm_notify_serial_state(acm);
544}
545
546/*-------------------------------------------------------------------------*/
547
400/* ACM function driver setup/binding */ 548/* ACM function driver setup/binding */
401static int __init 549static int __init
402acm_bind(struct usb_configuration *c, struct usb_function *f) 550acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
445 acm->notify = ep; 593 acm->notify = ep;
446 ep->driver_data = cdev; /* claim */ 594 ep->driver_data = cdev; /* claim */
447 595
596 /* allocate notification */
597 acm->notify_req = gs_alloc_req(ep,
598 sizeof(struct usb_cdc_notification) + 2,
599 GFP_KERNEL);
600 if (!acm->notify_req)
601 goto fail;
602
603 acm->notify_req->complete = acm_cdc_notify_complete;
604 acm->notify_req->context = acm;
605
448 /* copy descriptors, and track endpoint copies */ 606 /* copy descriptors, and track endpoint copies */
449 f->descriptors = usb_copy_descriptors(acm_fs_function); 607 f->descriptors = usb_copy_descriptors(acm_fs_function);
608 if (!f->descriptors)
609 goto fail;
450 610
451 acm->fs.in = usb_find_endpoint(acm_fs_function, 611 acm->fs.in = usb_find_endpoint(acm_fs_function,
452 f->descriptors, &acm_fs_in_desc); 612 f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
478 f->hs_descriptors, &acm_hs_notify_desc); 638 f->hs_descriptors, &acm_hs_notify_desc);
479 } 639 }
480 640
481 /* FIXME provide a callback for triggering notifications */
482
483 DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", 641 DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
484 acm->port_num, 642 acm->port_num,
485 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", 643 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
488 return 0; 646 return 0;
489 647
490fail: 648fail:
649 if (acm->notify_req)
650 gs_free_req(acm->notify, acm->notify_req);
651
491 /* we might as well release our claims on endpoints */ 652 /* we might as well release our claims on endpoints */
492 if (acm->notify) 653 if (acm->notify)
493 acm->notify->driver_data = NULL; 654 acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@ fail:
504static void 665static void
505acm_unbind(struct usb_configuration *c, struct usb_function *f) 666acm_unbind(struct usb_configuration *c, struct usb_function *f)
506{ 667{
668 struct f_acm *acm = func_to_acm(f);
669
507 if (gadget_is_dualspeed(c->cdev->gadget)) 670 if (gadget_is_dualspeed(c->cdev->gadget))
508 usb_free_descriptors(f->hs_descriptors); 671 usb_free_descriptors(f->hs_descriptors);
509 usb_free_descriptors(f->descriptors); 672 usb_free_descriptors(f->descriptors);
510 kfree(func_to_acm(f)); 673 gs_free_req(acm->notify, acm->notify_req);
674 kfree(acm);
511} 675}
512 676
513/* Some controllers can't support CDC ACM ... */ 677/* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
571 if (!acm) 735 if (!acm)
572 return -ENOMEM; 736 return -ENOMEM;
573 737
738 spin_lock_init(&acm->lock);
739
574 acm->port_num = port_num; 740 acm->port_num = port_num;
575 741
742 acm->port.connect = acm_connect;
743 acm->port.disconnect = acm_disconnect;
744 acm->port.send_break = acm_send_break;
745
576 acm->port.func.name = "acm"; 746 acm->port.func.name = "acm";
577 acm->port.func.strings = acm_strings; 747 acm->port.func.strings = acm_strings;
578 /* descriptors are per-instance copies */ 748 /* descriptors are per-instance copies */
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d7693a..a2b5c092bda0 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@ struct f_ecm {
63 63
64 char ethaddr[14]; 64 char ethaddr[14];
65 65
66 struct usb_descriptor_header **fs_function;
67 struct ecm_ep_descs fs; 66 struct ecm_ep_descs fs;
68 struct usb_descriptor_header **hs_function;
69 struct ecm_ep_descs hs; 67 struct ecm_ep_descs hs;
70 68
71 struct usb_ep *notify; 69 struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0f13fd..659b3d9671c4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@ struct f_rndis {
85 u8 ethaddr[ETH_ALEN]; 85 u8 ethaddr[ETH_ALEN];
86 int config; 86 int config;
87 87
88 struct usb_descriptor_header **fs_function;
89 struct rndis_ep_descs fs; 88 struct rndis_ep_descs fs;
90 struct usb_descriptor_header **hs_function;
91 struct rndis_ep_descs hs; 89 struct rndis_ep_descs hs;
92 90
93 struct usb_ep *notify; 91 struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9aaed5..fe5674db344b 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@ struct f_gser {
36 u8 data_id; 36 u8 data_id;
37 u8 port_num; 37 u8 port_num;
38 38
39 struct usb_descriptor_header **fs_function;
40 struct gser_descs fs; 39 struct gser_descs fs;
41 struct usb_descriptor_header **hs_function;
42 struct gser_descs hs; 40 struct gser_descs hs;
43}; 41};
44 42
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a0523f..acb8d233aa1d 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@ struct f_gether {
66 66
67 char ethaddr[14]; 67 char ethaddr[14];
68 68
69 struct usb_descriptor_header **fs_function;
70 struct geth_descs fs; 69 struct geth_descs fs;
71 struct usb_descriptor_header **hs_function;
72 struct geth_descs hs; 70 struct geth_descs hs;
73}; 71};
74 72
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8fef2b2..17d9905101b7 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
11 * Some are available on 2.4 kernels; several are available, but not 11 * Some are available on 2.4 kernels; several are available, but not
12 * yet pushed in the 2.6 mainline tree. 12 * yet pushed in the 2.6 mainline tree.
13 */ 13 */
14
15#ifndef __GADGET_CHIPS_H
16#define __GADGET_CHIPS_H
17
14#ifdef CONFIG_USB_GADGET_NET2280 18#ifdef CONFIG_USB_GADGET_NET2280
15#define gadget_is_net2280(g) !strcmp("net2280", (g)->name) 19#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
16#else 20#else
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
237 /* Everything else is *presumably* fine ... */ 241 /* Everything else is *presumably* fine ... */
238 return true; 242 return true;
239} 243}
244
245#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 376e80c07530..574c53831a05 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -54,6 +54,7 @@
54 54
55#include <mach/dma.h> 55#include <mach/dma.h>
56#include <mach/usb.h> 56#include <mach/usb.h>
57#include <mach/control.h>
57 58
58#include "omap_udc.h" 59#include "omap_udc.h"
59 60
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s)
2310 u32 trans; 2311 u32 trans;
2311 char *ctrl_name; 2312 char *ctrl_name;
2312 2313
2313 tmp = OTG_REV_REG; 2314 tmp = omap_readl(OTG_REV);
2314 if (cpu_is_omap24xx()) { 2315 if (cpu_is_omap24xx()) {
2315 ctrl_name = "control_devconf"; 2316 ctrl_name = "control_devconf";
2316 trans = CONTROL_DEVCONF_REG; 2317 trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
2317 } else { 2318 } else {
2318 ctrl_name = "tranceiver_ctrl"; 2319 ctrl_name = "tranceiver_ctrl";
2319 trans = omap_readw(USB_TRANSCEIVER_CTRL); 2320 trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..53d59287f2bc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
52 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. 52 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
53 */ 53 */
54 54
55#define PREFIX "ttyGS"
56
55/* 57/*
56 * gserial is the lifecycle interface, used by USB functions 58 * gserial is the lifecycle interface, used by USB functions
57 * gs_port is the I/O nexus, used by the tty driver 59 * gs_port is the I/O nexus, used by the tty driver
58 * tty_struct links to the tty/filesystem framework 60 * tty_struct links to the tty/filesystem framework
59 * 61 *
60 * gserial <---> gs_port ... links will be null when the USB link is 62 * gserial <---> gs_port ... links will be null when the USB link is
61 * inactive; managed by gserial_{connect,disconnect}(). Each gserial 63
64 * instance can wrap its own USB control protocol.
62 * gserial->ioport == usb_ep->driver_data ... gs_port 65 * gserial->ioport == usb_ep->driver_data ... gs_port
63 * gs_port->port_usb ... gserial 66 * gs_port->port_usb ... gserial
64 * 67 *
@@ -100,6 +103,8 @@ struct gs_port {
100 wait_queue_head_t close_wait; /* wait for last close */ 103 wait_queue_head_t close_wait; /* wait for last close */
101 104
102 struct list_head read_pool; 105 struct list_head read_pool;
106 struct list_head read_queue;
107 unsigned n_read;
103 struct tasklet_struct push; 108 struct tasklet_struct push;
104 109
105 struct list_head write_pool; 110 struct list_head write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
177/* 182/*
178 * gs_buf_data_avail 183 * gs_buf_data_avail
179 * 184 *
180 * Return the number of bytes of data available in the circular 185 * Return the number of bytes of data written into the circular
181 * buffer. 186 * buffer.
182 */ 187 */
183static unsigned gs_buf_data_avail(struct gs_buf *gb) 188static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
278 * Allocate a usb_request and its buffer. Returns a pointer to the 283 * Allocate a usb_request and its buffer. Returns a pointer to the
279 * usb_request or NULL if there is an error. 284 * usb_request or NULL if there is an error.
280 */ 285 */
281static struct usb_request * 286struct usb_request *
282gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) 287gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
283{ 288{
284 struct usb_request *req; 289 struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
302 * 307 *
303 * Free a usb_request and its buffer. 308 * Free a usb_request and its buffer.
304 */ 309 */
305static void gs_free_req(struct usb_ep *ep, struct usb_request *req) 310void gs_free_req(struct usb_ep *ep, struct usb_request *req)
306{ 311{
307 kfree(req->buf); 312 kfree(req->buf);
308 usb_ep_free_request(ep, req); 313 usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
367 req->length = len; 372 req->length = len;
368 list_del(&req->list); 373 list_del(&req->list);
369 374
370#ifdef VERBOSE_DEBUG 375 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
371 pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n", 376 port->port_num, len, *((u8 *)req->buf),
372 __func__, in->name, len, *((u8 *)req->buf),
373 *((u8 *)req->buf+1), *((u8 *)req->buf+2)); 377 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
374#endif
375 378
376 /* Drop lock while we call out of driver; completions 379 /* Drop lock while we call out of driver; completions
377 * could be issued while we do so. Disconnection may 380 * could be issued while we do so. Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
401 return status; 404 return status;
402} 405}
403 406
404static void gs_rx_push(unsigned long _port)
405{
406 struct gs_port *port = (void *)_port;
407 struct tty_struct *tty = port->port_tty;
408
409 /* With low_latency, tty_flip_buffer_push() doesn't put its
410 * real work through a workqueue, so the ldisc has a better
411 * chance to keep up with peak USB data rates.
412 */
413 if (tty) {
414 tty_flip_buffer_push(tty);
415 wake_up_interruptible(&tty->read_wait);
416 }
417}
418
419/*
420 * gs_recv_packet
421 *
422 * Called for each USB packet received. Reads the packet
423 * header and stuffs the data in the appropriate tty buffer.
424 * Returns 0 if successful, or a negative error number.
425 *
426 * Called during USB completion routine, on interrupt time.
427 * With port_lock.
428 */
429static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
430{
431 unsigned len;
432 struct tty_struct *tty;
433
434 /* I/O completions can continue for a while after close(), until the
435 * request queue empties. Just discard any data we receive, until
436 * something reopens this TTY ... as if there were no HW flow control.
437 */
438 tty = port->port_tty;
439 if (tty == NULL) {
440 pr_vdebug("%s: ttyGS%d, after close\n",
441 __func__, port->port_num);
442 return -EIO;
443 }
444
445 len = tty_insert_flip_string(tty, packet, size);
446 if (len > 0)
447 tasklet_schedule(&port->push);
448 if (len < size)
449 pr_debug("%s: ttyGS%d, drop %d bytes\n",
450 __func__, port->port_num, size - len);
451 return 0;
452}
453
454/* 407/*
455 * Context: caller owns port_lock, and port_usb is set 408 * Context: caller owns port_lock, and port_usb is set
456 */ 409 */
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
469 int status; 422 int status;
470 struct tty_struct *tty; 423 struct tty_struct *tty;
471 424
472 /* no more rx if closed or throttled */ 425 /* no more rx if closed */
473 tty = port->port_tty; 426 tty = port->port_tty;
474 if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) 427 if (!tty)
475 break; 428 break;
476 429
477 req = list_entry(pool->next, struct usb_request, list); 430 req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
500 return started; 453 return started;
501} 454}
502 455
503static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) 456/*
457 * RX tasklet takes data out of the RX queue and hands it up to the TTY
458 * layer until it refuses to take any more data (or is throttled back).
459 * Then it issues reads for any further data.
460 *
461 * If the RX queue becomes full enough that no usb_request is queued,
462 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
463 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
464 * can be buffered before the TTY layer's buffers (currently 64 KB).
465 */
466static void gs_rx_push(unsigned long _port)
504{ 467{
505 int status; 468 struct gs_port *port = (void *)_port;
506 struct gs_port *port = ep->driver_data; 469 struct tty_struct *tty;
470 struct list_head *queue = &port->read_queue;
471 bool disconnect = false;
472 bool do_push = false;
507 473
508 spin_lock(&port->port_lock); 474 /* hand any queued data to the tty */
509 list_add(&req->list, &port->read_pool); 475 spin_lock_irq(&port->port_lock);
476 tty = port->port_tty;
477 while (!list_empty(queue)) {
478 struct usb_request *req;
510 479
511 switch (req->status) { 480 req = list_first_entry(queue, struct usb_request, list);
512 case 0:
513 /* normal completion */
514 status = gs_recv_packet(port, req->buf, req->actual);
515 if (status && status != -EIO)
516 pr_debug("%s: %s %s err %d\n",
517 __func__, "recv", ep->name, status);
518 gs_start_rx(port);
519 break;
520 481
521 case -ESHUTDOWN: 482 /* discard data if tty was closed */
522 /* disconnect */ 483 if (!tty)
523 pr_vdebug("%s: %s shutdown\n", __func__, ep->name); 484 goto recycle;
524 break;
525 485
526 default: 486 /* leave data queued if tty was rx throttled */
527 /* presumably a transient fault */ 487 if (test_bit(TTY_THROTTLED, &tty->flags))
528 pr_warning("%s: unexpected %s status %d\n", 488 break;
529 __func__, ep->name, req->status); 489
530 gs_start_rx(port); 490 switch (req->status) {
531 break; 491 case -ESHUTDOWN:
492 disconnect = true;
493 pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
494 break;
495
496 default:
497 /* presumably a transient fault */
498 pr_warning(PREFIX "%d: unexpected RX status %d\n",
499 port->port_num, req->status);
500 /* FALLTHROUGH */
501 case 0:
502 /* normal completion */
503 break;
504 }
505
506 /* push data to (open) tty */
507 if (req->actual) {
508 char *packet = req->buf;
509 unsigned size = req->actual;
510 unsigned n;
511 int count;
512
513 /* we may have pushed part of this packet already... */
514 n = port->n_read;
515 if (n) {
516 packet += n;
517 size -= n;
518 }
519
520 count = tty_insert_flip_string(tty, packet, size);
521 if (count)
522 do_push = true;
523 if (count != size) {
524 /* stop pushing; TTY layer can't handle more */
525 port->n_read += count;
526 pr_vdebug(PREFIX "%d: rx block %d/%d\n",
527 port->port_num,
528 count, req->actual);
529 break;
530 }
531 port->n_read = 0;
532 }
533recycle:
534 list_move(&req->list, &port->read_pool);
532 } 535 }
536
537 /* Push from tty to ldisc; this is immediate with low_latency, and
538 * may trigger callbacks to this driver ... so drop the spinlock.
539 */
540 if (tty && do_push) {
541 spin_unlock_irq(&port->port_lock);
542 tty_flip_buffer_push(tty);
543 wake_up_interruptible(&tty->read_wait);
544 spin_lock_irq(&port->port_lock);
545
546 /* tty may have been closed */
547 tty = port->port_tty;
548 }
549
550
551 /* We want our data queue to become empty ASAP, keeping data
552 * in the tty and ldisc (not here). If we couldn't push any
553 * this time around, there may be trouble unless there's an
554 * implicit tty_unthrottle() call on its way...
555 *
556 * REVISIT we should probably add a timer to keep the tasklet
557 * from starving ... but it's not clear that case ever happens.
558 */
559 if (!list_empty(queue) && tty) {
560 if (!test_bit(TTY_THROTTLED, &tty->flags)) {
561 if (do_push)
562 tasklet_schedule(&port->push);
563 else
564 pr_warning(PREFIX "%d: RX not scheduled?\n",
565 port->port_num);
566 }
567 }
568
569 /* If we're still connected, refill the USB RX queue. */
570 if (!disconnect && port->port_usb)
571 gs_start_rx(port);
572
573 spin_unlock_irq(&port->port_lock);
574}
575
576static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
577{
578 struct gs_port *port = ep->driver_data;
579
580 /* Queue all received data until the tty layer is ready for it. */
581 spin_lock(&port->port_lock);
582 list_add_tail(&req->list, &port->read_queue);
583 tasklet_schedule(&port->push);
533 spin_unlock(&port->port_lock); 584 spin_unlock(&port->port_lock);
534} 585}
535 586
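The reworked receive path is easier to follow as one picture; a comment-style summary (all names from this patch):

	/* gs_read_complete()        completion, interrupt context:
	 *	list_add_tail(&req->list, &port->read_queue);
	 *	tasklet_schedule(&port->push);
	 *
	 * gs_rx_push()              tasklet:
	 *	tty_insert_flip_string()   may take only part of a packet;
	 *	                           port->n_read remembers the offset
	 *	tty_flip_buffer_push()     called with the spinlock dropped
	 *	gs_start_rx()              requeues drained requests
	 *
	 * Throttling falls out naturally: a throttled tty leaves data on
	 * read_queue, the OUT endpoint eventually NAKs, and
	 * gs_unthrottle() only needs to reschedule the tasklet.
	 */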
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
625 } 676 }
626 677
627 /* queue read requests */ 678 /* queue read requests */
679 port->n_read = 0;
628 started = gs_start_rx(port); 680 started = gs_start_rx(port);
629 681
630 /* unblock any pending writes into our circular buffer */ 682 /* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
633 } else { 685 } else {
634 gs_free_requests(ep, head); 686 gs_free_requests(ep, head);
635 gs_free_requests(port->port_usb->in, &port->write_pool); 687 gs_free_requests(port->port_usb->in, &port->write_pool);
688 status = -EIO;
636 } 689 }
637 690
638 return started ? 0 : status; 691 return status;
639} 692}
640 693
641/*-------------------------------------------------------------------------*/ 694/*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
736 789
737 /* if connected, start the I/O stream */ 790 /* if connected, start the I/O stream */
738 if (port->port_usb) { 791 if (port->port_usb) {
792 struct gserial *gser = port->port_usb;
793
739 pr_debug("gs_open: start ttyGS%d\n", port->port_num); 794 pr_debug("gs_open: start ttyGS%d\n", port->port_num);
740 gs_start_io(port); 795 gs_start_io(port);
741 796
742 /* REVISIT for ACM, issue "network connected" event */ 797 if (gser->connect)
798 gser->connect(gser);
743 } 799 }
744 800
745 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); 801 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
766static void gs_close(struct tty_struct *tty, struct file *file) 822static void gs_close(struct tty_struct *tty, struct file *file)
767{ 823{
768 struct gs_port *port = tty->driver_data; 824 struct gs_port *port = tty->driver_data;
825 struct gserial *gser;
769 826
770 spin_lock_irq(&port->port_lock); 827 spin_lock_irq(&port->port_lock);
771 828
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
785 port->openclose = true; 842 port->openclose = true;
786 port->open_count = 0; 843 port->open_count = 0;
787 844
788 if (port->port_usb) 845 gser = port->port_usb;
789 /* REVISIT for ACM, issue "network disconnected" event */; 846 if (gser && gser->disconnect)
847 gser->disconnect(gser);
790 848
791 /* wait for circular write buffer to drain, disconnect, or at 849 /* wait for circular write buffer to drain, disconnect, or at
792 * most GS_CLOSE_TIMEOUT seconds; then discard the rest 850 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
793 */ 851 */
794 if (gs_buf_data_avail(&port->port_write_buf) > 0 852 if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
795 && port->port_usb) {
796 spin_unlock_irq(&port->port_lock); 853 spin_unlock_irq(&port->port_lock);
797 wait_event_interruptible_timeout(port->drain_wait, 854 wait_event_interruptible_timeout(port->drain_wait,
798 gs_writes_finished(port), 855 gs_writes_finished(port),
799 GS_CLOSE_TIMEOUT * HZ); 856 GS_CLOSE_TIMEOUT * HZ);
800 spin_lock_irq(&port->port_lock); 857 spin_lock_irq(&port->port_lock);
858 gser = port->port_usb;
801 } 859 }
802 860
803 /* Iff we're disconnected, there can be no I/O in flight so it's 861 /* Iff we're disconnected, there can be no I/O in flight so it's
804 * ok to free the circular buffer; else just scrub it. And don't 862 * ok to free the circular buffer; else just scrub it. And don't
805 * let the push tasklet fire again until we're re-opened. 863 * let the push tasklet fire again until we're re-opened.
806 */ 864 */
807 if (port->port_usb == NULL) 865 if (gser == NULL)
808 gs_buf_free(&port->port_write_buf); 866 gs_buf_free(&port->port_write_buf);
809 else 867 else
810 gs_buf_clear(&port->port_write_buf); 868 gs_buf_clear(&port->port_write_buf);
811 869
812 tasklet_kill(&port->push);
813
814 tty->driver_data = NULL; 870 tty->driver_data = NULL;
815 port->port_tty = NULL; 871 port->port_tty = NULL;
816 872
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
911{ 967{
912 struct gs_port *port = tty->driver_data; 968 struct gs_port *port = tty->driver_data;
913 unsigned long flags; 969 unsigned long flags;
914 unsigned started = 0;
915 970
916 spin_lock_irqsave(&port->port_lock, flags); 971 spin_lock_irqsave(&port->port_lock, flags);
917 if (port->port_usb) 972 if (port->port_usb) {
918 started = gs_start_rx(port); 973 /* Kickstart read queue processing. We don't do xon/xoff,
974 * rts/cts, or other handshaking with the host, but if the
975 * read queue backs up enough we'll be NAKing OUT packets.
976 */
977 tasklet_schedule(&port->push);
978 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
979 }
919 spin_unlock_irqrestore(&port->port_lock, flags); 980 spin_unlock_irqrestore(&port->port_lock, flags);
981}
982
983static int gs_break_ctl(struct tty_struct *tty, int duration)
984{
985 struct gs_port *port = tty->driver_data;
986 int status = 0;
987 struct gserial *gser;
988
989 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
990 port->port_num, duration);
920 991
921 pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n", 992 spin_lock_irq(&port->port_lock);
922 port->port_num, started); 993 gser = port->port_usb;
994 if (gser && gser->send_break)
995 status = gser->send_break(gser, duration);
996 spin_unlock_irq(&port->port_lock);
997
998 return status;
923} 999}
924 1000
925static const struct tty_operations gs_tty_ops = { 1001static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
931 .write_room = gs_write_room, 1007 .write_room = gs_write_room,
932 .chars_in_buffer = gs_chars_in_buffer, 1008 .chars_in_buffer = gs_chars_in_buffer,
933 .unthrottle = gs_unthrottle, 1009 .unthrottle = gs_unthrottle,
1010 .break_ctl = gs_break_ctl,
934}; 1011};
935 1012
936/*-------------------------------------------------------------------------*/ 1013/*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
953 tasklet_init(&port->push, gs_rx_push, (unsigned long) port); 1030 tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
954 1031
955 INIT_LIST_HEAD(&port->read_pool); 1032 INIT_LIST_HEAD(&port->read_pool);
1033 INIT_LIST_HEAD(&port->read_queue);
956 INIT_LIST_HEAD(&port->write_pool); 1034 INIT_LIST_HEAD(&port->write_pool);
957 1035
958 port->port_num = port_num; 1036 port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
997 1075
998 gs_tty_driver->owner = THIS_MODULE; 1076 gs_tty_driver->owner = THIS_MODULE;
999 gs_tty_driver->driver_name = "g_serial"; 1077 gs_tty_driver->driver_name = "g_serial";
1000 gs_tty_driver->name = "ttyGS"; 1078 gs_tty_driver->name = PREFIX;
1001 /* uses dynamically assigned dev_t values */ 1079 /* uses dynamically assigned dev_t values */
1002 1080
1003 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; 1081 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
1104 ports[i].port = NULL; 1182 ports[i].port = NULL;
1105 mutex_unlock(&ports[i].lock); 1183 mutex_unlock(&ports[i].lock);
1106 1184
1185 tasklet_kill(&port->push);
1186
1107 /* wait for old opens to finish */ 1187 /* wait for old opens to finish */
1108 wait_event(port->close_wait, gs_closed(port)); 1188 wait_event(port->close_wait, gs_closed(port));
1109 1189
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
1175 1255
1176 /* REVISIT if waiting on "carrier detect", signal. */ 1256 /* REVISIT if waiting on "carrier detect", signal. */
1177 1257
1178 /* REVISIT for ACM, issue "network connection" status notification: 1258 /* if it's already open, start I/O ... and notify the serial
1179 * connected if open_count, else disconnected. 1259 * protocol about open/close status (connect/disconnect).
1180 */ 1260 */
1181
1182 /* if it's already open, start I/O */
1183 if (port->open_count) { 1261 if (port->open_count) {
1184 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); 1262 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1185 gs_start_io(port); 1263 gs_start_io(port);
1264 if (gser->connect)
1265 gser->connect(gser);
1266 } else {
1267 if (gser->disconnect)
1268 gser->disconnect(gser);
1186 } 1269 }
1187 1270
1188 spin_unlock_irqrestore(&port->port_lock, flags); 1271 spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
1241 if (port->open_count == 0 && !port->openclose) 1324 if (port->open_count == 0 && !port->openclose)
1242 gs_buf_free(&port->port_write_buf); 1325 gs_buf_free(&port->port_write_buf);
1243 gs_free_requests(gser->out, &port->read_pool); 1326 gs_free_requests(gser->out, &port->read_pool);
1327 gs_free_requests(gser->out, &port->read_queue);
1244 gs_free_requests(gser->in, &port->write_pool); 1328 gs_free_requests(gser->in, &port->write_pool);
1245 spin_unlock_irqrestore(&port->port_lock, flags); 1329 spin_unlock_irqrestore(&port->port_lock, flags);
1246} 1330}
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b561138f90e..af3910d01aea 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
23 * style I/O using the USB peripheral endpoints listed here, including 23 * style I/O using the USB peripheral endpoints listed here, including
24 * hookups to sysfs and /dev for each logical "tty" device. 24 * hookups to sysfs and /dev for each logical "tty" device.
25 * 25 *
26 * REVISIT need TTY --> USB event flow too, so ACM can report open/close 26 * REVISIT at least ACM could support tiocmget() if needed.
27 * as carrier detect events. Model after ECM. There's more ACM state too.
28 * 27 *
29 * REVISIT someday, allow multiplexing several TTYs over these endpoints. 28 * REVISIT someday, allow multiplexing several TTYs over these endpoints.
30 */ 29 */
@@ -41,8 +40,17 @@ struct gserial {
41 40
42 /* REVISIT avoid this CDC-ACM support harder ... */ 41 /* REVISIT avoid this CDC-ACM support harder ... */
43 struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ 42 struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
43
44 /* notification callbacks */
45 void (*connect)(struct gserial *p);
46 void (*disconnect)(struct gserial *p);
47 int (*send_break)(struct gserial *p, int duration);
44}; 48};
45 49
50/* utilities to allocate/free request and buffer */
51struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
52void gs_free_req(struct usb_ep *, struct usb_request *req);
53
46/* port setup/teardown is handled by gadget driver */ 54/* port setup/teardown is handled by gadget driver */
47int gserial_setup(struct usb_gadget *g, unsigned n_ports); 55int gserial_setup(struct usb_gadget *g, unsigned n_ports);
48void gserial_cleanup(void); 56void gserial_cleanup(void);
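Together these callbacks provide the TTY-to-USB event flow that the removed REVISIT comment asked for: gs_open(), gs_close() and gserial_connect() report open/close transitions, and gs_break_ctl() forwards break requests. A hypothetical function driver would wire them up before activating the port; this is only a sketch (the my_* names are illustrative, the real CDC-ACM hookup lives in f_acm.c):

static void my_connect(struct gserial *p)
{
	/* e.g. raise DCD/DSR toward the host via a notification */
}

static void my_disconnect(struct gserial *p)
{
	/* e.g. drop DCD/DSR toward the host */
}

static int my_send_break(struct gserial *p, int duration)
{
	/* nonzero duration starts a break, 0 ends it (tty convention) */
	return 0;
}

	/* during function bind, before gserial_connect(): */
	gser->connect = my_connect;
	gser->disconnect = my_disconnect;
	gser->send_break = my_send_break;
	status = gserial_connect(gser, port_num);

All three pointers may be left NULL; u_serial checks them before every call.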
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2adb929..d22a84f86a33 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
126 * doesn't quite work because some people have to enforce 32-bit access 126 * doesn't quite work because some people have to enforce 32-bit access
127 */ 127 */
128static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, 128static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
129 __u32 __iomem *dst, u32 offset, u32 len) 129 __u32 __iomem *dst, u32 len)
130{ 130{
131 struct usb_hcd *hcd = priv_to_hcd(priv);
132 u32 val; 131 u32 val;
133 u8 *buff8; 132 u8 *buff8;
134 133
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
136 printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); 135 printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
137 return; 136 return;
138 } 137 }
139 isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
140 /* XXX
141 * 90nsec delay, the spec says something how this could be avoided.
142 */
143 mdelay(1);
144 138
145 while (len >= 4) { 139 while (len >= 4) {
146 *src = __raw_readl(dst); 140 *src = __raw_readl(dst);
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
987 printk(KERN_ERR "qh is 0\n"); 981 printk(KERN_ERR "qh is 0\n");
988 continue; 982 continue;
989 } 983 }
990 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, 984 isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
991 atl_regs, sizeof(ptd)); 985 HC_MEMORY_REG);
986 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
987 HC_MEMORY_REG);
988 /*
989 * write bank1 address twice to ensure the 90ns delay (time
990 * between BANK0 write and the priv_read_copy() call is at
991 * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
992 */
993 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
994 HC_MEMORY_REG);
995
996 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
997 ISP_BANK(0), sizeof(ptd));
992 998
993 dw1 = le32_to_cpu(ptd.dw1); 999 dw1 = le32_to_cpu(ptd.dw1);
994 dw2 = le32_to_cpu(ptd.dw2); 1000 dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1091 case IN_PID: 1097 case IN_PID:
1092 priv_read_copy(priv, 1098 priv_read_copy(priv,
1093 priv->atl_ints[queue_entry].data_buffer, 1099 priv->atl_ints[queue_entry].data_buffer,
1094 usb_hcd->regs + payload, payload, 1100 usb_hcd->regs + payload + ISP_BANK(1),
1095 length); 1101 length);
1096 1102
1097 case OUT_PID: 1103 case OUT_PID:
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1122 } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { 1128 } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
1123 /* short BULK received */ 1129 /* short BULK received */
1124 1130
1125 printk(KERN_ERR "short bulk, %d instead %zu\n", length,
1126 qtd->length);
1127 if (urb->transfer_flags & URB_SHORT_NOT_OK) { 1131 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1128 urb->status = -EREMOTEIO; 1132 urb->status = -EREMOTEIO;
1129 printk(KERN_ERR "not okey\n"); 1133 isp1760_dbg(priv, "short bulk, %d instead of %zu "
1134 "with URB_SHORT_NOT_OK flag.\n",
1135 length, qtd->length);
1130 } 1136 }
1131 1137
1132 if (urb->status == -EINPROGRESS) 1138 if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
1206 continue; 1212 continue;
1207 } 1213 }
1208 1214
1209 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs, 1215 isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
1210 int_regs, sizeof(ptd)); 1216 HC_MEMORY_REG);
1217 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
1218 HC_MEMORY_REG);
1219 /*
1220 * write bank1 address twice to ensure the 90ns delay (time
1221 * between BANK0 write and the priv_read_copy() call is at
1222 * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
1223 */
1224 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
1225 HC_MEMORY_REG);
1226
1227 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
1228 ISP_BANK(0), sizeof(ptd));
1211 dw1 = le32_to_cpu(ptd.dw1); 1229 dw1 = le32_to_cpu(ptd.dw1);
1212 dw3 = le32_to_cpu(ptd.dw3); 1230 dw3 = le32_to_cpu(ptd.dw3);
1213 check_int_err_status(le32_to_cpu(ptd.dw4)); 1231 check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
1242 case IN_PID: 1260 case IN_PID:
1243 priv_read_copy(priv, 1261 priv_read_copy(priv,
1244 priv->int_ints[queue_entry].data_buffer, 1262 priv->int_ints[queue_entry].data_buffer,
1245 usb_hcd->regs + payload , payload, 1263 usb_hcd->regs + payload + ISP_BANK(1),
1246 length); 1264 length);
1247 case OUT_PID: 1265 case OUT_PID:
1248 1266
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1615 return -EPIPE; 1633 return -EPIPE;
1616 } 1634 }
1617 1635
1618 isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); 1636 return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
1619 return 0;
1620} 1637}
1621 1638
1622static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, 1639static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd86993c..4377277667d9 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void);
54#define BUFFER_MAP 0x7 54#define BUFFER_MAP 0x7
55 55
56#define HC_MEMORY_REG 0x33c 56#define HC_MEMORY_REG 0x33c
57#define ISP_BANK(x) ((x) << 16)
58
57#define HC_PORT1_CTRL 0x374 59#define HC_PORT1_CTRL 0x374
58#define PORT1_POWER (3 << 3) 60#define PORT1_POWER (3 << 3)
59#define PORT1_INIT1 (1 << 7) 61#define PORT1_INIT1 (1 << 7)
@@ -119,6 +121,9 @@ struct inter_packet_info {
119typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, 121typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
120 struct isp1760_qtd *qtd); 122 struct isp1760_qtd *qtd);
121 123
124#define isp1760_dbg(priv, fmt, args...) \
125 dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
126
122#define isp1760_info(priv, fmt, args...) \ 127#define isp1760_info(priv, fmt, args...) \
123 dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) 128 dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
124 129
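With the bank programming moved out of priv_read_copy() (which is why it lost its offset parameter and the fixed mdelay(1)), every PTD fetch in do_atl_int() and do_intl_int() follows one idiom: program both banks, repeat the bank-1 write to burn the required settle time, then copy through the banked windows. Condensed from the hunks above (ptd_offs stands in for atl_regs or int_regs):

	/* select the PTD registers via bank 0, the payload via bank 1 */
	isp1760_writel(ptd_offs + ISP_BANK(0), usb_hcd->regs + HC_MEMORY_REG);
	isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);
	/* repeated on purpose: guarantees the >90ns settle time */
	isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);

	priv_read_copy(priv, (u32 *)&ptd,
			usb_hcd->regs + ptd_offs + ISP_BANK(0), sizeof(ptd));

	/* IN data is then copied through the bank-1 window */
	priv_read_copy(priv, data_buffer,
			usb_hcd->regs + payload + ISP_BANK(1), length);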
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc47941d01..89901962cbfd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd);
86static int ohci_restart (struct ohci_hcd *ohci); 86static int ohci_restart (struct ohci_hcd *ohci);
87#endif 87#endif
88 88
89#ifdef CONFIG_PCI
90static void quirk_amd_pll(int state);
91static void amd_iso_dev_put(void);
92#else
93static inline void quirk_amd_pll(int state)
94{
95 return;
96}
97static inline void amd_iso_dev_put(void)
98{
99 return;
100}
101#endif
102
103
89#include "ohci-hub.c" 104#include "ohci-hub.c"
90#include "ohci-dbg.c" 105#include "ohci-dbg.c"
91#include "ohci-mem.c" 106#include "ohci-mem.c"
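The stubs exist so that common OHCI code can call the AMD quirk hooks unconditionally; when CONFIG_PCI is off, quirk_amdiso() (added to ohci.h in this same patch) is a constant 0 and both hooks are empty inlines, so the compiler drops call sites like this one entirely (a sketch of the shape used later in ohci-q.c):

	if (quirk_amdiso(ohci))		/* constant 0 without PCI */
		quirk_amd_pll(0);	/* empty inline stub without PCI */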
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci)
483 int ret; 498 int ret;
484 struct usb_hcd *hcd = ohci_to_hcd(ohci); 499 struct usb_hcd *hcd = ohci_to_hcd(ohci);
485 500
501 if (distrust_firmware)
502 ohci->flags |= OHCI_QUIRK_HUB_POWER;
503
486 disable (ohci); 504 disable (ohci);
487 ohci->regs = hcd->regs; 505 ohci->regs = hcd->regs;
488 506
@@ -689,7 +707,8 @@ retry:
689 temp |= RH_A_NOCP; 707 temp |= RH_A_NOCP;
690 temp &= ~(RH_A_POTPGT | RH_A_NPS); 708 temp &= ~(RH_A_POTPGT | RH_A_NPS);
691 ohci_writel (ohci, temp, &ohci->regs->roothub.a); 709 ohci_writel (ohci, temp, &ohci->regs->roothub.a);
692 } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) { 710 } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
711 (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
693 /* hub power always on; required for AMD-756 and some 712 /* hub power always on; required for AMD-756 and some
694 * Mac platforms. ganged overcurrent reporting, if any. 713 * Mac platforms. ganged overcurrent reporting, if any.
695 */ 714 */
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
882 901
883 if (quirk_zfmicro(ohci)) 902 if (quirk_zfmicro(ohci))
884 del_timer(&ohci->unlink_watchdog); 903 del_timer(&ohci->unlink_watchdog);
904 if (quirk_amdiso(ohci))
905 amd_iso_dev_put();
885 906
886 remove_debug_files (ohci); 907 remove_debug_files (ohci);
887 ohci_mem_cleanup (ohci); 908 ohci_mem_cleanup (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b56739221d11..439beb784f3e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -483,6 +483,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
483 length++; 483 length++;
484 } 484 }
485 485
486 /* Some broken controllers never turn off RHSC in the interrupt
487 * status register. For their sake we won't re-enable RHSC
488 * interrupts if the flag is already set.
489 */
490 if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
491 changed = 1;
492
486 /* look at each port */ 493 /* look at each port */
487 for (i = 0; i < ohci->num_ports; i++) { 494 for (i = 0; i < ohci->num_ports; i++) {
488 u32 status = roothub_portstatus (ohci, i); 495 u32 status = roothub_portstatus (ohci, i);
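The intent, per the comment: when the RHSC latch is stuck on, report a root-hub change on every poll instead of re-enabling an interrupt that would fire forever; usbcore then keeps checking the ports from timer context. In outline (a sketch; changed is the function's local result flag, and the function ultimately returns nonzero only when it is set):

	if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
		changed = 1;	/* stuck RHSC: claim a change, keep polling */
	...
	return changed ? length : 0;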
@@ -572,8 +579,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
572 return 0; 579 return 0;
573} 580}
574 581
575static void start_hnp(struct ohci_hcd *ohci);
576
577#else 582#else
578 583
579#define ohci_start_port_reset NULL 584#define ohci_start_port_reset NULL
@@ -760,7 +765,7 @@ static int ohci_hub_control (
760#ifdef CONFIG_USB_OTG 765#ifdef CONFIG_USB_OTG
761 if (hcd->self.otg_port == (wIndex + 1) 766 if (hcd->self.otg_port == (wIndex + 1)
762 && hcd->self.b_hnp_enable) 767 && hcd->self.b_hnp_enable)
763 start_hnp(ohci); 768 ohci->start_hnp(ohci);
764 else 769 else
765#endif 770#endif
766 ohci_writel (ohci, RH_PS_PSS, 771 ohci_writel (ohci, RH_PS_PSS,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 94dfca02f7e1..3d532b709670 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
225 dev_err(hcd->self.controller, "can't find transceiver\n"); 225 dev_err(hcd->self.controller, "can't find transceiver\n");
226 return -ENODEV; 226 return -ENODEV;
227 } 227 }
228 ohci->start_hnp = start_hnp;
228 } 229 }
229#endif 230#endif
230 231
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
260 omap_cfg_reg(W4_USB_HIGHZ); 261 omap_cfg_reg(W4_USB_HIGHZ);
261 } 262 }
262 ohci_writel(ohci, rh, &ohci->regs->roothub.a); 263 ohci_writel(ohci, rh, &ohci->regs->roothub.a);
263 distrust_firmware = 0; 264 ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
264 } else if (machine_is_nokia770()) { 265 } else if (machine_is_nokia770()) {
265 /* We require a self-powered hub, which should have 266 /* We require a self-powered hub, which should have
266 * plenty of power. */ 267 * plenty of power. */
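start_hnp() is defined only by this OMAP bus glue, so the cross-file forward declaration that ohci-hub.c used to carry is replaced by a method pointer that OMAP installs when an OTG transceiver is present; other platforms leave it NULL and never take the OTG branch. The resulting hookup, condensed from the two hunks:

	/* ohci-omap.c, ohci_omap_init(): */
	ohci->start_hnp = start_hnp;

	/* ohci-hub.c, port-suspend path: */
#ifdef	CONFIG_USB_OTG
	if (hcd->self.otg_port == (wIndex + 1) && hcd->self.b_hnp_enable)
		ohci->start_hnp(ohci);
	else
#endif
		ohci_writel(ohci, RH_PS_PSS,
				&ohci->regs->roothub.portstatus[wIndex]);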
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc912e16..083e8df0a817 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
18#error "This file is PCI bus glue. CONFIG_PCI must be defined." 18#error "This file is PCI bus glue. CONFIG_PCI must be defined."
19#endif 19#endif
20 20
21#include <linux/pci.h>
22#include <linux/io.h>
23
24
25/* constants used to work around PM-related transfer
26 * glitches in some AMD 700 series southbridges
27 */
28#define AB_REG_BAR 0xf0
29#define AB_INDX(addr) ((addr) + 0x00)
30#define AB_DATA(addr) ((addr) + 0x04)
31#define AX_INDXC 0x30
32#define AX_DATAC 0x34
33
34#define NB_PCIE_INDX_ADDR 0xe0
35#define NB_PCIE_INDX_DATA 0xe4
36#define PCIE_P_CNTL 0x10040
37#define BIF_NB 0x10002
38
39static struct pci_dev *amd_smbus_dev;
40static struct pci_dev *amd_hb_dev;
41static int amd_ohci_iso_count;
42
21/*-------------------------------------------------------------------------*/ 43/*-------------------------------------------------------------------------*/
22 44
23static int broken_suspend(struct usb_hcd *hcd) 45static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
143 return 0; 165 return 0;
144} 166}
145 167
168static int ohci_quirk_amd700(struct usb_hcd *hcd)
169{
170 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
171 u8 rev = 0;
172
173 if (!amd_smbus_dev)
174 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
175 PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
176 if (!amd_smbus_dev)
177 return 0;
178
179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
180 if ((rev > 0x3b) || (rev < 0x30)) {
181 pci_dev_put(amd_smbus_dev);
182 amd_smbus_dev = NULL;
183 return 0;
184 }
185
186 amd_ohci_iso_count++;
187
188 if (!amd_hb_dev)
189 amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
190
191 ohci->flags |= OHCI_QUIRK_AMD_ISO;
192 ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
193
194 return 0;
195}
196
197/*
198 * The hardware normally enables the A-link power management feature, which
199 * lets the system lower the power consumption in idle states.
200 *
201 * Assume the system is configured to have USB 1.1 ISO transfers going
202 * to or from a USB device. Without this quirk, that stream may stutter
203 * or have breaks occasionally. For transfers going to speakers, this
204 * makes a very audible mess...
205 *
206 * That audio playback corruption is due to the audio stream getting
207 * interrupted occasionally when the link goes into a lower power state.
208 * This USB quirk prevents the link from going into that lower power state
209 * during audio playback or other ISO operations.
210 */
211static void quirk_amd_pll(int on)
212{
213 u32 addr;
214 u32 val;
215 u32 bit = (on > 0) ? 1 : 0;
216
217 pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
218
219 /* BIT names/meanings are NDA-protected, sorry ... */
220
221 outl(AX_INDXC, AB_INDX(addr));
222 outl(0x40, AB_DATA(addr));
223 outl(AX_DATAC, AB_INDX(addr));
224 val = inl(AB_DATA(addr));
225 val &= ~((1 << 3) | (1 << 4) | (1 << 9));
226 val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
227 outl(val, AB_DATA(addr));
228
229 if (amd_hb_dev) {
230 addr = PCIE_P_CNTL;
231 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
232
233 pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
234 val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
235 val |= bit | (bit << 3) | (bit << 12);
236 val |= ((!bit) << 4) | ((!bit) << 9);
237 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
238
239 addr = BIF_NB;
240 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
241
242 pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
243 val &= ~(1 << 8);
244 val |= bit << 8;
245 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
246 }
247}
248
249static void amd_iso_dev_put(void)
250{
251 amd_ohci_iso_count--;
252 if (amd_ohci_iso_count == 0) {
253 if (amd_smbus_dev) {
254 pci_dev_put(amd_smbus_dev);
255 amd_smbus_dev = NULL;
256 }
257 if (amd_hb_dev) {
258 pci_dev_put(amd_hb_dev);
259 amd_hb_dev = NULL;
260 }
261 }
262
263}
264
146/* List of quirks for OHCI */ 265/* List of quirks for OHCI */
147static const struct pci_device_id ohci_pci_quirks[] = { 266static const struct pci_device_id ohci_pci_quirks[] = {
148 { 267 {
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = {
181 PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), 300 PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
182 .driver_data = (unsigned long) broken_suspend, 301 .driver_data = (unsigned long) broken_suspend,
183 }, 302 },
303 {
304 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
305 .driver_data = (unsigned long)ohci_quirk_amd700,
306 },
307 {
308 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
309 .driver_data = (unsigned long)ohci_quirk_amd700,
310 },
311 {
312 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
313 .driver_data = (unsigned long)ohci_quirk_amd700,
314 },
315
184 /* FIXME for some of the early AMD 760 southbridges, OHCI 316 /* FIXME for some of the early AMD 760 southbridges, OHCI
185 * won't work at all. blacklist them. 317 * won't work at all. blacklist them.
186 */ 318 */
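quirk_amd_pll() drives two indexed register files: the southbridge's A-link registers through an I/O-port index/data pair (relative to the base read from AB_REG_BAR) and, when the RS780 host bridge is found, its PCIe core registers through the NB_PCIE_INDX_ADDR/NB_PCIE_INDX_DATA config-space pair. Hypothetical read helpers showing the two idioms in isolation (the patch open-codes the matching read-modify-write sequences):

static u32 ab_indexed_read(u32 ab_base, u32 reg)
{
	u32 val;

	outl(AX_INDXC, AB_INDX(ab_base));	/* select the register file */
	outl(reg, AB_DATA(ab_base));		/* select the register, e.g. 0x40 */
	outl(AX_DATAC, AB_INDX(ab_base));	/* switch the pair to data mode */
	val = inl(AB_DATA(ab_base));
	return val;
}

static u32 nb_indexed_read(struct pci_dev *hb, u32 reg)
{
	u32 val;

	pci_write_config_dword(hb, NB_PCIE_INDX_ADDR, reg);	/* PCIE_P_CNTL, BIF_NB */
	pci_read_config_dword(hb, NB_PCIE_INDX_DATA, &val);
	return val;
}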
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c557953..c2d80f80448b 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@ __acquires(ohci->lock)
49 switch (usb_pipetype (urb->pipe)) { 49 switch (usb_pipetype (urb->pipe)) {
50 case PIPE_ISOCHRONOUS: 50 case PIPE_ISOCHRONOUS:
51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; 51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
53 && quirk_amdiso(ohci))
54 quirk_amd_pll(1);
52 break; 55 break;
53 case PIPE_INTERRUPT: 56 case PIPE_INTERRUPT:
54 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; 57 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@ static void td_submit_urb (
677 data + urb->iso_frame_desc [cnt].offset, 680 data + urb->iso_frame_desc [cnt].offset,
678 urb->iso_frame_desc [cnt].length, urb, cnt); 681 urb->iso_frame_desc [cnt].length, urb, cnt);
679 } 682 }
683 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
684 && quirk_amdiso(ohci))
685 quirk_amd_pll(0);
680 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 686 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
681 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; 687 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
682 break; 688 break;
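Taken together, the two ohci-q.c hooks bracket all isochronous traffic: A-link power management is switched off when the first ISO URB is queued and switched back on when the last one completes. Condensed (bandwidth_isoc_reqs is usbcore's count of queued ISO URBs on the bus):

	/* td_submit_urb(), ISO case, before the count is bumped: */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& quirk_amdiso(ohci))
		quirk_amd_pll(0);	/* first ISO URB: PM off */

	/* finish_urb(), ISO case, after the count is dropped: */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& quirk_amdiso(ohci))
		quirk_amd_pll(1);	/* last ISO URB done: PM back on */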
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544ddc7849..faf622eafce7 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@ struct ohci_hcd {
371 * other external transceivers should be software-transparent 371 * other external transceivers should be software-transparent
372 */ 372 */
373 struct otg_transceiver *transceiver; 373 struct otg_transceiver *transceiver;
374 void (*start_hnp)(struct ohci_hcd *ohci);
374 375
375 /* 376 /*
376 * memory management for queue data structures 377 * memory management for queue data structures
@@ -399,6 +400,8 @@ struct ohci_hcd {
399#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ 400#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
400#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ 401#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
401#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ 402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
402 // there are also chip quirks/bugs in init logic 405 // there are also chip quirks/bugs in init logic
403 406
404 struct work_struct nec_work; /* Worker for NEC quirk */ 407 struct work_struct nec_work; /* Worker for NEC quirk */
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
426{ 429{
427 return ohci->flags & OHCI_QUIRK_ZFMICRO; 430 return ohci->flags & OHCI_QUIRK_ZFMICRO;
428} 431}
432static inline int quirk_amdiso(struct ohci_hcd *ohci)
433{
434 return ohci->flags & OHCI_QUIRK_AMD_ISO;
435}
429#else 436#else
430static inline int quirk_nec(struct ohci_hcd *ohci) 437static inline int quirk_nec(struct ohci_hcd *ohci)
431{ 438{
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
435{ 442{
436 return 0; 443 return 0;
437} 444}
445static inline int quirk_amdiso(struct ohci_hcd *ohci)
446{
447 return 0;
448}
438#endif 449#endif
439 450
440/* convert between an hcd pointer and the corresponding ohci_hcd */ 451/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dddb120..ea7126f99cab 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
964 disable_irq_nrdy(r8a66597, pipenum); 964 disable_irq_nrdy(r8a66597, pipenum);
965} 965}
966 966
967static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
968{
969 mod_timer(&r8a66597->rh_timer,
970 jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
971}
972
973static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
974 int connect)
975{
976 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
977
978 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
979 rh->scount = R8A66597_MAX_SAMPLING;
980 if (connect)
981 rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
982 else
983 rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
984 rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
985
986 r8a66597_root_hub_start_polling(r8a66597);
987}
988
967/* this function must be called with interrupt disabled */ 989/* this function must be called with interrupt disabled */
968static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 990static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
969 u16 syssts) 991 u16 syssts)
970{ 992{
971 if (syssts == SE0) { 993 if (syssts == SE0) {
994 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
972 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 995 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
973 return; 996 return;
974 } 997 }
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
1002{ 1025{
1003 struct r8a66597_device *dev = r8a66597->root_hub[port].dev; 1026 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
1004 1027
1005 r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
1006 r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
1007
1008 disable_r8a66597_pipe_all(r8a66597, dev); 1028 disable_r8a66597_pipe_all(r8a66597, dev);
1009 free_usb_address(r8a66597, dev); 1029 free_usb_address(r8a66597, dev);
1010 1030
1011 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 1031 start_root_hub_sampling(r8a66597, port, 0);
1012} 1032}
1013 1033
1014/* this function must be called with interrupt disabled */ 1034/* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
1551 } 1571 }
1552} 1572}
1553 1573
1554static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
1555{
1556 mod_timer(&r8a66597->rh_timer,
1557 jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
1558}
1559
1560static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
1561{
1562 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
1563
1564 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
1565 rh->scount = R8A66597_MAX_SAMPLING;
1566 r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
1567 | (1 << USB_PORT_FEAT_C_CONNECTION);
1568 r8a66597_root_hub_start_polling(r8a66597);
1569}
1570
1571static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) 1574static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1572{ 1575{
1573 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); 1576 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1594 r8a66597_bclr(r8a66597, ATTCHE, INTENB2); 1597 r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
1595 1598
1596 /* start usb bus sampling */ 1599 /* start usb bus sampling */
1597 start_root_hub_sampling(r8a66597, 1); 1600 start_root_hub_sampling(r8a66597, 1, 1);
1598 } 1601 }
1599 if (mask2 & DTCH) { 1602 if (mask2 & DTCH) {
1600 r8a66597_write(r8a66597, ~DTCH, INTSTS2); 1603 r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1609 r8a66597_bclr(r8a66597, ATTCHE, INTENB1); 1612 r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
1610 1613
1611 /* start usb bus sampling */ 1614 /* start usb bus sampling */
1612 start_root_hub_sampling(r8a66597, 0); 1615 start_root_hub_sampling(r8a66597, 0, 1);
1613 } 1616 }
1614 if (mask1 & DTCH) { 1617 if (mask1 & DTCH) {
1615 r8a66597_write(r8a66597, ~DTCH, INTSTS1); 1618 r8a66597_write(r8a66597, ~DTCH, INTSTS1);
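Both hotplug directions now funnel through the one sampling helper: the new connect argument decides whether the CONNECTION feature is set (attach interrupt) or cleared (disconnect path), while C_CONNECTION is latched either way so the hub thread notices. The call sites, condensed:

	/* ATTCH interrupt, ports 0 and 1: */
	start_root_hub_sampling(r8a66597, port, 1);	/* device attached */

	/* r8a66597_usb_disconnect(): */
	disable_r8a66597_pipe_all(r8a66597, dev);
	free_usb_address(r8a66597, dev);
	start_root_hub_sampling(r8a66597, port, 0);	/* device gone */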
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c9a11a..4ea50e0abcbb 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@ config USB_ADUTUX
42 To compile this driver as a module, choose M here. The module 42 To compile this driver as a module, choose M here. The module
43 will be called adutux. 43 will be called adutux.
44 44
45config USB_AUERSWALD
46 tristate "USB Auerswald ISDN support"
47 depends on USB
48 help
49 Say Y here if you want to connect an Auerswald USB ISDN Device
50 to your computer's USB port.
51
52 To compile this driver as a module, choose M here: the
53 module will be called auerswald.
54
55config USB_RIO500 45config USB_RIO500
56 tristate "USB Diamond Rio500 support" 46 tristate "USB Diamond Rio500 support"
57 depends on USB 47 depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091cb5ec0..45b4e12afb08 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
5 5
6obj-$(CONFIG_USB_ADUTUX) += adutux.o 6obj-$(CONFIG_USB_ADUTUX) += adutux.o
7obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o 7obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
8obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
9obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o 8obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
10obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o 9obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
11obj-$(CONFIG_USB_CYTHERM) += cytherm.o 10obj-$(CONFIG_USB_CYTHERM) += cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5510e7..000000000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
1/*****************************************************************************/
2/*
3 * auerswald.c -- Auerswald PBX/System Telephone usb driver.
4 *
5 * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de)
6 *
7 * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
8 * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24 /*****************************************************************************/
25
26/* Standard Linux module include files */
27#include <asm/uaccess.h>
28#include <asm/byteorder.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/wait.h>
33#include <linux/usb.h>
34#include <linux/mutex.h>
35
36/*-------------------------------------------------------------------*/
37/* Debug support */
38#ifdef DEBUG
39#define dump( adr, len) \
40do { \
41 unsigned int u; \
42 printk (KERN_DEBUG); \
43 for (u = 0; u < len; u++) \
44 printk (" %02X", adr[u] & 0xFF); \
45 printk ("\n"); \
46} while (0)
47#else
48#define dump( adr, len)
49#endif
50
51/*-------------------------------------------------------------------*/
52/* Version Information */
53#define DRIVER_VERSION "0.9.11"
54#define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>"
55#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
56
57/*-------------------------------------------------------------------*/
58/* Private declarations for Auerswald USB driver */
59
60/* Auerswald Vendor ID */
61#define ID_AUERSWALD 0x09BF
62
63#define AUER_MINOR_BASE 112 /* auerswald driver minor number */
64
65/* we can have up to this number of devices plugged in at once */
66#define AUER_MAX_DEVICES 16
67
68
69/* Number of read buffers for each device */
70#define AU_RBUFFERS 10
71
72/* Number of chain elements for each control chain */
73#define AUCH_ELEMENTS 20
74
75/* Number of retries in communication */
76#define AU_RETRIES 10
77
78/*-------------------------------------------------------------------*/
79/* vendor specific protocol */
80/* Header Byte */
81#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
82#define AUH_DIRECT 0x00 /* data is for USB device */
83#define AUH_INDIRECT 0x80 /* USB device is relay */
84
85#define AUH_SPLITMASK 0x40 /* mask for split bit */
86#define AUH_UNSPLIT 0x00 /* data block is full-size */
87#define AUH_SPLIT 0x40 /* data block is part of a larger one,
88 split-byte follows */
89
90#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
91#define AUH_TYPESIZE 0x40 /* different types */
92#define AUH_DCHANNEL 0x00 /* D channel data */
93#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
94#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
95/* 0x03..0x0F reserved for driver internal use */
96#define AUH_COMMAND 0x10 /* Command channel */
97#define AUH_BPROT 0x11 /* Configuration block protocol */
98#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
99#define AUH_TAPI 0x13 /* telephone api data (ATD) */
100/* 0x14..0x3F reserved for other protocols */
101#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
102#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
103
104#define AUH_SIZE 1 /* Size of Header Byte */
105
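For illustration, decoding the first byte of a received block with these masks might look like the following sketch (the driver's real dispatch and split-frame reassembly live further down):

	u8 hdr = bufp[0];
	unsigned int service = hdr & AUH_TYPEMASK;	/* AUH_COMMAND, AUH_TAPI, ... */

	if ((hdr & AUH_INDIRMASK) == AUH_INDIRECT)
		;	/* the device is relaying for another unit */
	if ((hdr & AUH_SPLITMASK) == AUH_SPLIT)
		;	/* an AUS_* split byte follows the header */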
106/* Split Byte. Only present if split bit in header byte set.*/
107#define AUS_STARTMASK 0x80 /* mask for first block of split frame */
108#define AUS_FIRST 0x80 /* first block */
109#define AUS_FOLLOW 0x00 /* following block */
110
111#define AUS_ENDMASK 0x40 /* mask for last block of split frame */
112#define AUS_END 0x40 /* last block */
113#define AUS_NOEND 0x00 /* not the last block */
114
115#define AUS_LENMASK 0x3F /* mask for block length information */
116
117/* Request types */
118#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
119#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
120
121/* Vendor Requests */
122#define AUV_GETINFO 0x00 /* GetDeviceInfo */
123#define AUV_WBLOCK 0x01 /* Write Block */
124#define AUV_RBLOCK 0x02 /* Read Block */
125#define AUV_CHANNELCTL 0x03 /* Channel Control */
126#define AUV_DUMMY 0x04 /* Dummy Out for retry */
127
128/* Device Info Types */
129#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
130#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
131#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
132
133/* Interrupt endpoint definitions */
134#define AU_IRQENDP 1 /* Endpoint number */
135#define AU_IRQCMDID 16 /* Command-block ID */
136#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
137#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
138
139/* Device String Descriptors */
140#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
141#define AUSI_DEVICE 2 /* Name of the Device */
142#define AUSI_SERIALNR 3 /* Serial Number */
143#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
144
145#define AUSI_DLEN 100 /* Max. Length of Device Description */
146
147#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
148
149/*-------------------------------------------------------------------*/
150/* External data structures / Interface */
151typedef struct
152{
153 char __user *buf; /* return buffer for string contents */
154 unsigned int bsize; /* size of return buffer */
155} audevinfo_t,*paudevinfo_t;
156
157/* IO controls */
158#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
159#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
160#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
161#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
162#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
163#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
164#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if transmit channel ready to send */
165/* 'U' 0xF7..0xFF reserved */
166
167/*-------------------------------------------------------------------*/
168/* Internal data structures */
169
170/* ..................................................................*/
171/* urb chain element */
172struct auerchain; /* forward for circular reference */
173typedef struct
174{
175 struct auerchain *chain; /* pointer to the chain to which this element belongs */
176 struct urb * urbp; /* pointer to attached urb */
177 void *context; /* saved URB context */
178 usb_complete_t complete; /* saved URB completion function */
179 struct list_head list; /* to include element into a list */
180} auerchainelement_t,*pauerchainelement_t;
181
182/* urb chain */
183typedef struct auerchain
184{
185 pauerchainelement_t active; /* element which is submitted to urb */
186 spinlock_t lock; /* protection against interrupts */
187 struct list_head waiting_list; /* list of waiting elements */
188 struct list_head free_list; /* list of available elements */
189} auerchain_t,*pauerchain_t;
190
191/* urb blocking completion helper struct */
192typedef struct
193{
194 wait_queue_head_t wqh; /* wait for completion */
195 unsigned int done; /* completion flag */
196} auerchain_chs_t,*pauerchain_chs_t;
197
198/* ...................................................................*/
199/* buffer element */
200struct auerbufctl; /* forward */
201typedef struct
202{
203 char *bufp; /* reference to allocated data buffer */
204 unsigned int len; /* number of characters in data buffer */
205 unsigned int retries; /* for urb retries */
206 struct usb_ctrlrequest *dr; /* for setup data in control messages */
207 struct urb * urbp; /* USB urb */
208 struct auerbufctl *list; /* pointer to list */
209 struct list_head buff_list; /* reference to next buffer in list */
210} auerbuf_t,*pauerbuf_t;
211
212/* buffer list control block */
213typedef struct auerbufctl
214{
215 spinlock_t lock; /* protection in interrupt */
216 struct list_head free_buff_list;/* free buffers */
217 struct list_head rec_buff_list; /* buffers with receive data */
218} auerbufctl_t,*pauerbufctl_t;
219
220/* ...................................................................*/
221/* service context */
222struct auerscon; /* forward */
223typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
224typedef void (*auer_disconn_t) (struct auerscon*);
225typedef struct auerscon
226{
227 unsigned int id; /* protocol service id AUH_xxxx */
228 auer_dispatch_t dispatch; /* dispatch read buffer */
229 auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
230} auerscon_t,*pauerscon_t;
231
232/* ...................................................................*/
233/* USB device context */
234typedef struct
235{
236 struct mutex mutex; /* protection in user context */
237 char name[20]; /* name of the /dev/usb entry */
238 unsigned int dtindex; /* index in the device table */
239 struct usb_device * usbdev; /* USB device handle */
240 int open_count; /* count the number of open character channels */
241 char dev_desc[AUSI_DLEN];/* for storing a textual description */
242 unsigned int maxControlLength; /* max. length of control packet (without header) */
243 struct urb * inturbp; /* interrupt urb */
244 char * intbufp; /* data buffer for interrupt urb */
245 unsigned int irqsize; /* size of interrupt endpoint 1 */
246 struct auerchain controlchain; /* for chaining of control messages */
247 auerbufctl_t bufctl; /* Buffer control for control transfers */
248 pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
249 unsigned int version; /* Version of the device */
250 wait_queue_head_t bufferwait; /* wait for a control buffer */
251} auerswald_t,*pauerswald_t;
252
253/* ................................................................... */
254/* character device context */
255typedef struct
256{
257 struct mutex mutex; /* protection in user context */
258 pauerswald_t auerdev; /* context pointer of assigned device */
259 auerbufctl_t bufctl; /* controls the buffer chain */
260 auerscon_t scontext; /* service context */
261 wait_queue_head_t readwait; /* for synchronous reading */
262 struct mutex readmutex; /* protection against multiple reads */
263 pauerbuf_t readbuf; /* buffer held for partial reading */
264 unsigned int readoffset; /* current offset in readbuf */
265 unsigned int removed; /* is != 0 if device is removed */
266} auerchar_t,*pauerchar_t;
267
268
269/*-------------------------------------------------------------------*/
270/* Forwards */
271static void auerswald_ctrlread_complete (struct urb * urb);
272static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
273static struct usb_driver auerswald_driver;
274
275
276/*-------------------------------------------------------------------*/
277/* USB chain helper functions */
278/* -------------------------- */
279
280/* completion function for chained urbs */
281static void auerchain_complete (struct urb * urb)
282{
283 unsigned long flags;
284 int result;
285
286 /* get pointer to element and to chain */
287 pauerchainelement_t acep = urb->context;
288 pauerchain_t acp = acep->chain;
289
290 /* restore original entries in urb */
291 urb->context = acep->context;
292 urb->complete = acep->complete;
293
294 dbg ("auerchain_complete called");
295
296 /* call original completion function
297 NOTE: this function may lead to more urbs submitted into the chain.
298 (no chain lock held while calling complete()!)
299 acp->active != NULL is protecting us against recursion.*/
300 urb->complete (urb);
301
302 /* detach element from chain data structure */
303 spin_lock_irqsave (&acp->lock, flags);
304 if (acp->active != acep) /* paranoia debug check */
305 dbg ("auerchain_complete: completion on non-active element called!");
306 else
307 acp->active = NULL;
308
309 /* add the used chain element to the list of free elements */
310 list_add_tail (&acep->list, &acp->free_list);
311 acep = NULL;
312
313 /* is there a new element waiting in the chain? */
314 if (!acp->active && !list_empty (&acp->waiting_list)) {
315 /* yes: get the entry */
316 struct list_head *tmp = acp->waiting_list.next;
317 list_del (tmp);
318 acep = list_entry (tmp, auerchainelement_t, list);
319 acp->active = acep;
320 }
321 spin_unlock_irqrestore (&acp->lock, flags);
322
323 /* submit the new urb */
324 if (acep) {
325 urb = acep->urbp;
326 dbg ("auerchain_complete: submitting next urb from chain");
327 urb->status = 0; /* needed! */
328 result = usb_submit_urb(urb, GFP_ATOMIC);
329
330 /* check for submit errors */
331 if (result) {
332 urb->status = result;
333 dbg("auerchain_complete: usb_submit_urb with error code %d", result);
334 /* and do error handling via *this* completion function (recursive) */
335 auerchain_complete( urb);
336 }
337 } else {
338 /* simple return without submitting a new urb.
339 The empty chain is detected with acp->active == NULL. */
340 }
341}
342
343
344/* submit function for chained urbs
345 this function may be called from completion context or from user space!
346 early = 1 -> submit in front of chain
347*/
348static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
349{
350 int result;
351 unsigned long flags;
352 pauerchainelement_t acep = NULL;
353
354 dbg ("auerchain_submit_urb called");
355
356 /* try to get a chain element */
357 spin_lock_irqsave (&acp->lock, flags);
358 if (!list_empty (&acp->free_list)) {
359 /* yes: get the entry */
360 struct list_head *tmp = acp->free_list.next;
361 list_del (tmp);
362 acep = list_entry (tmp, auerchainelement_t, list);
363 }
364 spin_unlock_irqrestore (&acp->lock, flags);
365
366 /* if no chain element available: return with error */
367 if (!acep) {
368 return -ENOMEM;
369 }
370
371 /* fill in the new chain element values */
372 acep->chain = acp;
373 acep->context = urb->context;
374 acep->complete = urb->complete;
375 acep->urbp = urb;
376 INIT_LIST_HEAD (&acep->list);
377
378 /* modify urb */
379 urb->context = acep;
380 urb->complete = auerchain_complete;
381 urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */
382
383 /* add element to chain - or start it immediately */
384 spin_lock_irqsave (&acp->lock, flags);
385 if (acp->active) {
386 /* there is traffic in the chain, simply add the element to the chain */
387 if (early) {
388 dbg ("adding new urb to head of chain");
389 list_add (&acep->list, &acp->waiting_list);
390 } else {
391 dbg ("adding new urb to end of chain");
392 list_add_tail (&acep->list, &acp->waiting_list);
393 }
394 acep = NULL;
395 } else {
396 /* the chain is empty. Prepare restart */
397 acp->active = acep;
398 }
399 /* The spinlock must be released before usb_submit_urb! */
400 spin_unlock_irqrestore (&acp->lock, flags);
401
402 /* Submit urb if immediate restart */
403 if (acep) {
404 dbg("submitting urb immediate");
405 urb->status = 0; /* needed! */
406 result = usb_submit_urb(urb, GFP_ATOMIC);
407 /* check for submit errors */
408 if (result) {
409 urb->status = result;
410 dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
411 /* and do error handling via completion function */
412 auerchain_complete( urb);
413 }
414 }
415
416 return 0;
417}
418
419/* submit function for chained urbs
420 this function may be called from completion context or from user space!
421*/
422static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
423{
424 return auerchain_submit_urb_list (acp, urb, 0);
425}
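The chain therefore serializes URBs per device: a submission either starts immediately when the chain is idle or queues behind the active one, and each completion pulls the next waiting element from inside auerchain_complete(). A minimal caller sketch (the chain temporarily hijacks urb->context and urb->complete, restoring them before the caller's completion runs):

	usb_fill_control_urb(urb, cp->usbdev, pipe, (unsigned char *) dr,
			data, size, my_complete, my_context);
	if (auerchain_submit_urb(&cp->controlchain, urb))
		;	/* -ENOMEM: all AUCH_ELEMENTS chain slots are busy */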
426
427/* cancel an urb which is submitted to the chain
428 the result is 0 if the urb is cancelled, or -EINPROGRESS if
429 the function is successfully started.
430*/
431static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
432{
433 unsigned long flags;
434 struct urb * urbp;
435 pauerchainelement_t acep;
436 struct list_head *tmp;
437
438 dbg ("auerchain_unlink_urb called");
439
440 /* search the chain of waiting elements */
441 spin_lock_irqsave (&acp->lock, flags);
442 list_for_each (tmp, &acp->waiting_list) {
443 acep = list_entry (tmp, auerchainelement_t, list);
444 if (acep->urbp == urb) {
445 list_del (tmp);
446 urb->context = acep->context;
447 urb->complete = acep->complete;
448 list_add_tail (&acep->list, &acp->free_list);
449 spin_unlock_irqrestore (&acp->lock, flags);
450 dbg ("unlink waiting urb");
451 urb->status = -ENOENT;
452 urb->complete (urb);
453 return 0;
454 }
455 }
456 /* not found. */
457 spin_unlock_irqrestore (&acp->lock, flags);
458
459 /* get the active urb */
460 acep = acp->active;
461 if (acep) {
462 urbp = acep->urbp;
463
464 /* check if we have to cancel the active urb */
465 if (urbp == urb) {
466 /* note that there is a race condition between the check above
467 and the unlink() call because no lock is held. This race is harmless,
468 because the usb module will detect the unlink() after completion.
469 We can't use the acp->lock here because the completion function
470 wants to grab it.
471 */
472 dbg ("unlink active urb");
473 return usb_unlink_urb (urbp);
474 }
475 }
476
477 /* not found anyway
478 ... is some kind of success
479 */
480 dbg ("urb to unlink not found in chain");
481 return 0;
482}
483
484/* cancel all urbs which are in the chain.
485 this function must not be called from interrupt or completion handler.
486*/
487static void auerchain_unlink_all (pauerchain_t acp)
488{
489 unsigned long flags;
490 struct urb * urbp;
491 pauerchainelement_t acep;
492
493 dbg ("auerchain_unlink_all called");
494
495 /* clear the chain of waiting elements */
496 spin_lock_irqsave (&acp->lock, flags);
497 while (!list_empty (&acp->waiting_list)) {
498 /* get the next entry */
499 struct list_head *tmp = acp->waiting_list.next;
500 list_del (tmp);
501 acep = list_entry (tmp, auerchainelement_t, list);
502 urbp = acep->urbp;
503 urbp->context = acep->context;
504 urbp->complete = acep->complete;
505 list_add_tail (&acep->list, &acp->free_list);
506 spin_unlock_irqrestore (&acp->lock, flags);
507 dbg ("unlink waiting urb");
508 urbp->status = -ENOENT;
509 urbp->complete (urbp);
510 spin_lock_irqsave (&acp->lock, flags);
511 }
512 spin_unlock_irqrestore (&acp->lock, flags);
513
514 /* clear the active urb */
515 acep = acp->active;
516 if (acep) {
517 urbp = acep->urbp;
518 dbg ("unlink active urb");
519 usb_kill_urb (urbp);
520 }
521}
522
523
524/* free the chain.
525 this function must not be called from interrupt or completion handler.
526*/
527static void auerchain_free (pauerchain_t acp)
528{
529 unsigned long flags;
530 pauerchainelement_t acep;
531
532 dbg ("auerchain_free called");
533
534 /* first, cancel all pending urbs */
535 auerchain_unlink_all (acp);
536
537 /* free the elements */
538 spin_lock_irqsave (&acp->lock, flags);
539 while (!list_empty (&acp->free_list)) {
540 /* get the next entry */
541 struct list_head *tmp = acp->free_list.next;
542 list_del (tmp);
543 spin_unlock_irqrestore (&acp->lock, flags);
544 acep = list_entry (tmp, auerchainelement_t, list);
545 kfree (acep);
546 spin_lock_irqsave (&acp->lock, flags);
547 }
548 spin_unlock_irqrestore (&acp->lock, flags);
549}
550
551
552/* Init the chain control structure */
553static void auerchain_init (pauerchain_t acp)
554{
555 /* init the chain data structure */
556 acp->active = NULL;
557 spin_lock_init (&acp->lock);
558 INIT_LIST_HEAD (&acp->waiting_list);
559 INIT_LIST_HEAD (&acp->free_list);
560}
561
562/* setup a chain.
563 It is assumed that there is no concurrency while setting up the chain
564 requirement: auerchain_init()
565*/
566static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
567{
568 pauerchainelement_t acep;
569
570 dbg ("auerchain_setup called with %d elements", numElements);
571
572 /* fill the list of free elements */
573 for (;numElements; numElements--) {
574 acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
575 if (!acep)
576 goto ac_fail;
577 INIT_LIST_HEAD (&acep->list);
578 list_add_tail (&acep->list, &acp->free_list);
579 }
580 return 0;
581
582ac_fail:/* free the elements */
583 while (!list_empty (&acp->free_list)) {
584 /* get the next entry */
585 struct list_head *tmp = acp->free_list.next;
586 list_del (tmp);
587 acep = list_entry (tmp, auerchainelement_t, list);
588 kfree (acep);
589 }
590 return -ENOMEM;
591}
592
593
594/* completion handler for synchronous chained URBs */
595static void auerchain_blocking_completion (struct urb *urb)
596{
597 pauerchain_chs_t pchs = urb->context;
598 pchs->done = 1;
599 wmb();
600 wake_up (&pchs->wqh);
601}
602
603
604/* Starts chained urb and waits for completion or timeout */
605static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
606{
607 auerchain_chs_t chs;
608 int status;
609
610 dbg ("auerchain_start_wait_urb called");
611 init_waitqueue_head (&chs.wqh);
612 chs.done = 0;
613
614 urb->context = &chs;
615 status = auerchain_submit_urb (acp, urb);
616 if (status)
617 /* something went wrong */
618 return status;
619
620 timeout = wait_event_timeout(chs.wqh, chs.done, timeout);
621
622 if (!timeout && !chs.done) {
623 if (urb->status != -EINPROGRESS) { /* No callback?!! */
624 dbg ("auerchain_start_wait_urb: raced timeout");
625 status = urb->status;
626 } else {
627 dbg ("auerchain_start_wait_urb: timeout");
628 auerchain_unlink_urb (acp, urb); /* remove urb safely */
629 status = -ETIMEDOUT;
630 }
631 } else
632 status = urb->status;
633
634 if (status >= 0)
635 *actual_length = urb->actual_length;
636
637 return status;
638}
639
640
641/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
642 acp: pointer to the auerchain
643 dev: pointer to the usb device to send the message to
644 pipe: endpoint "pipe" to send the message to
645 request: USB message request value
646 requesttype: USB message request type value
647 value: USB message value
648 index: USB message index value
649 data: pointer to the data to send
650 size: length in bytes of the data to send
651 timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
652
653 This function sends a simple control message to a specified endpoint
654 and waits for the message to complete, or timeout.
655
656 If successful, it returns the transferred length, otherwise a negative error number.
657
658 Don't use this function from within an interrupt context, like a
659 bottom half handler. If you need an asynchronous message, or need to send
660 a message from within interrupt context, use auerchain_submit_urb()
661*/
662static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
663 __u16 value, __u16 index, void *data, __u16 size, int timeout)
664{
665 int ret;
666 struct usb_ctrlrequest *dr;
667 struct urb *urb;
668 int uninitialized_var(length);
669
670 dbg ("auerchain_control_msg");
671 dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
672 if (!dr)
673 return -ENOMEM;
674 urb = usb_alloc_urb (0, GFP_KERNEL);
675 if (!urb) {
676 kfree (dr);
677 return -ENOMEM;
678 }
679
680 dr->bRequestType = requesttype;
681 dr->bRequest = request;
682 dr->wValue = cpu_to_le16 (value);
683 dr->wIndex = cpu_to_le16 (index);
684 dr->wLength = cpu_to_le16 (size);
685
686 usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
687 auerchain_blocking_completion, NULL);
688 ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
689
690 usb_free_urb (urb);
691 kfree (dr);
692
693 if (ret < 0)
694 return ret;
695 else
696 return length;
697}
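Usage mirrors usb_control_msg(); a device-info query built from the definitions above might look like this sketch (buffer sizing is illustrative, and whether the info type rides in wValue or wIndex follows the vendor protocol):

	u8 buf[4];	/* illustrative size */
	int ret;

	ret = auerchain_control_msg(&cp->controlchain, cp->usbdev,
			usb_rcvctrlpipe(cp->usbdev, 0),
			AUV_GETINFO, AUT_RREQ,
			0, AUDI_MBCTRANS, buf, sizeof(buf), HZ * 2);
	if (ret < 0)
		dbg("GetDeviceInfo failed, error %d", ret);
	/* ret >= 0 is the number of bytes actually transferred */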
698
699
700/*-------------------------------------------------------------------*/
701/* Buffer List helper functions */
702
703/* free a single auerbuf */
704static void auerbuf_free (pauerbuf_t bp)
705{
706 kfree(bp->bufp);
707 kfree(bp->dr);
708 usb_free_urb(bp->urbp);
709 kfree(bp);
710}
711
712/* free the buffers from an auerbuf list */
713static void auerbuf_free_list (struct list_head *q)
714{
715 struct list_head *tmp;
716 struct list_head *p;
717 pauerbuf_t bp;
718
719 dbg ("auerbuf_free_list");
720 for (p = q->next; p != q;) {
721 bp = list_entry (p, auerbuf_t, buff_list);
722 tmp = p->next;
723 list_del (p);
724 p = tmp;
725 auerbuf_free (bp);
726 }
727}
728
729/* init the members of a list control block */
730static void auerbuf_init (pauerbufctl_t bcp)
731{
732 dbg ("auerbuf_init");
733 spin_lock_init (&bcp->lock);
734 INIT_LIST_HEAD (&bcp->free_buff_list);
735 INIT_LIST_HEAD (&bcp->rec_buff_list);
736}
737
738/* free all buffers from an auerbuf chain */
739static void auerbuf_free_buffers (pauerbufctl_t bcp)
740{
741 unsigned long flags;
742 dbg ("auerbuf_free_buffers");
743
744 spin_lock_irqsave (&bcp->lock, flags);
745
746 auerbuf_free_list (&bcp->free_buff_list);
747 auerbuf_free_list (&bcp->rec_buff_list);
748
749 spin_unlock_irqrestore (&bcp->lock, flags);
750}
751
752/* setup a list of buffers */
753/* requirement: auerbuf_init() */
754static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
755{
756 pauerbuf_t bep = NULL;
757
758 dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
759
760 /* fill the list of free elements */
761 for (;numElements; numElements--) {
762 bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
763 if (!bep)
764 goto bl_fail;
765 bep->list = bcp;
766 INIT_LIST_HEAD (&bep->buff_list);
767 bep->bufp = kmalloc (bufsize, GFP_KERNEL);
768 if (!bep->bufp)
769 goto bl_fail;
770 bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
771 if (!bep->dr)
772 goto bl_fail;
773 bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
774 if (!bep->urbp)
775 goto bl_fail;
776 list_add_tail (&bep->buff_list, &bcp->free_buff_list);
777 }
778 return 0;
779
780bl_fail:/* not enough memory. Free allocated elements */
781 dbg ("auerbuf_setup: no more memory");
782 auerbuf_free(bep);
783 auerbuf_free_buffers (bcp);
784 return -ENOMEM;
785}
786
787/* insert a used buffer into the free list */
788static void auerbuf_releasebuf( pauerbuf_t bp)
789{
790 unsigned long flags;
791 pauerbufctl_t bcp = bp->list;
792 bp->retries = 0;
793
794 dbg ("auerbuf_releasebuf called");
795 spin_lock_irqsave (&bcp->lock, flags);
796 list_add_tail (&bp->buff_list, &bcp->free_buff_list);
797 spin_unlock_irqrestore (&bcp->lock, flags);
798}
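/* Typical pool lifecycle (sketch, mirroring auerswald_probe() and
 * auerchar_open() below):
 *
 *	auerbuf_init(&cp->bufctl);
 *	ret = auerbuf_setup(&cp->bufctl, AU_RBUFFERS,
 *			    cp->maxControlLength + AUH_SIZE);
 *	// take a buffer from free_buff_list, use it ...
 *	auerbuf_releasebuf(bp);
 *	// ... and on teardown:
 *	auerbuf_free_buffers(&cp->bufctl);
 */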
799
800
801/*-------------------------------------------------------------------*/
802/* Completion handlers */
803
804/* Values of urb->status or results of usb_submit_urb():
8050 Initial, OK
806-EINPROGRESS during submission until end
807-ENOENT if urb is unlinked
808-ETIME Device did not respond
809-ENOMEM Memory Overflow
810-ENODEV Specified USB-device or bus doesn't exist
811-ENXIO URB already queued
812-EINVAL a) Invalid transfer type specified (or not supported)
813 b) Invalid interrupt interval (0 < n < 256)
814-EAGAIN a) Specified ISO start frame too early
815 b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again.
816-EFBIG Too many ISO frames requested (currently 900 for uhci)
817-EPIPE Specified pipe-handle/Endpoint is already stalled
818-EMSGSIZE Endpoint message size is zero, do interface/alternate setting
819-EPROTO a) Bitstuff error
820 b) Unknown USB error
821-EILSEQ CRC mismatch
822-ENOSR Buffer error
823-EREMOTEIO Short packet detected
824-EXDEV ISO transfer only partially completed; look at individual frame status for details
825-EINVAL ISO madness, if this happens: Log off and go home
826-EOVERFLOW babble
827*/
828
829/* check if a status code allows a retry */
830static int auerswald_status_retry (int status)
831{
832 switch (status) {
833 case 0:
834 case -ETIME:
835 case -EOVERFLOW:
836 case -EAGAIN:
837 case -EPIPE:
838 case -EPROTO:
839 case -EILSEQ:
840 case -ENOSR:
841 case -EREMOTEIO:
842 return 1; /* do a retry */
843 }
844 return 0; /* no retry possible */
845}
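/* Both control-read completion handlers below use this predicate: on a
 * retryable status they resubmit a retry request through the chain,
 * otherwise they release the buffer and give up.
 */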
846
847/* Completion of asynchronous write block */
848static void auerchar_ctrlwrite_complete (struct urb * urb)
849{
850 pauerbuf_t bp = urb->context;
851 pauerswald_t cp = container_of(bp->list, auerswald_t, bufctl); /* from bufctl member back to device context */
852 dbg ("auerchar_ctrlwrite_complete called");
853
854 /* reuse the buffer */
855 auerbuf_releasebuf (bp);
856 /* Wake up all processes waiting for a buffer */
857 wake_up (&cp->bufferwait);
858}
859
860/* Completion handler for dummy retry packet */
861static void auerswald_ctrlread_wretcomplete (struct urb * urb)
862{
863 pauerbuf_t bp = urb->context;
864 pauerswald_t cp;
865 int ret;
866 int status = urb->status;
867
868 dbg ("auerswald_ctrlread_wretcomplete called");
869 dbg ("complete with status: %d", status);
870 cp = container_of(bp->list, auerswald_t, bufctl);
871
872 /* check if it is possible to advance */
873 if (!auerswald_status_retry(status) || !cp->usbdev) {
874 /* reuse the buffer */
875 err ("control dummy: transmission error %d, can not retry", status);
876 auerbuf_releasebuf (bp);
877 /* Wake up all processes waiting for a buffer */
878 wake_up (&cp->bufferwait);
879 return;
880 }
881
882 /* fill the control message */
883 bp->dr->bRequestType = AUT_RREQ;
884 bp->dr->bRequest = AUV_RBLOCK;
885 bp->dr->wLength = bp->dr->wValue; /* temporarily stored */
886 bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
887 /* bp->dr->wIndex (channel id) remains unchanged */
888 usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
889 (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
890 auerswald_ctrlread_complete,bp);
891
892 /* submit the control msg as the next packet */
893 ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
894 if (ret) {
895 dbg ("auerswald_ctrlread_wretcomplete: nonzero result of auerchain_submit_urb_list %d", ret);
896 bp->urbp->status = ret;
897 auerswald_ctrlread_complete (bp->urbp);
898 }
899}
900
901/* completion handler for receiving of control messages */
902static void auerswald_ctrlread_complete (struct urb * urb)
903{
904 unsigned int serviceid;
905 pauerswald_t cp;
906 pauerscon_t scp;
907 pauerbuf_t bp = urb->context;
908 int status = urb->status;
909 int ret;
910
911 dbg ("auerswald_ctrlread_complete called");
912
913 cp = container_of(bp->list, auerswald_t, bufctl);
914
915 /* check if there is valid data in this urb */
916 if (status) {
917 dbg ("complete with non-zero status: %d", status);
918 /* should we do a retry? */
919 if (!auerswald_status_retry(status)
920 || !cp->usbdev
921 || (cp->version < AUV_RETRY)
922 || (bp->retries >= AU_RETRIES)) {
923 /* reuse the buffer */
924 err ("control read: transmission error %d, can not retry", status);
925 auerbuf_releasebuf (bp);
926 /* Wake up all processes waiting for a buffer */
927 wake_up (&cp->bufferwait);
928 return;
929 }
930 bp->retries++;
931 dbg ("Retry count = %d", bp->retries);
932 /* send a long dummy control-write-message to allow device firmware to react */
933 bp->dr->bRequestType = AUT_WREQ;
934 bp->dr->bRequest = AUV_DUMMY;
935 bp->dr->wValue = bp->dr->wLength; /* temporary storage */
936 // bp->dr->wIndex (channel ID) remains unchanged
937 bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
938 usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
939 (unsigned char*)bp->dr, bp->bufp, 32,
940 auerswald_ctrlread_wretcomplete,bp);
941
942 /* submit the control msg as the next packet */
943 ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
944 if (ret) {
945 dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
946 bp->urbp->status = ret;
947 auerswald_ctrlread_wretcomplete (bp->urbp);
948 }
949 return;
950 }
951
952 /* get the actual byte count (incl. header byte) */
953 bp->len = urb->actual_length;
954 serviceid = bp->bufp[0] & AUH_TYPEMASK;
955 dbg ("Packet with serviceid %d and %d bytes received", serviceid, bp->len);
956
957 /* dispatch the packet */
958 scp = cp->services[serviceid];
959 if (scp) {
960 /* look, Ma, a listener! */
961 scp->dispatch (scp, bp);
962 }
963
964 /* release the packet */
965 auerbuf_releasebuf (bp);
966 /* Wake up all processes waiting for a buffer */
967 wake_up (&cp->bufferwait);
968}
969
970/*-------------------------------------------------------------------*/
971/* Handling of Interrupt Endpoint */
972/* This interrupt endpoint is used to inform the host about waiting
973 messages from the USB device.
974*/
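/* Layout of the interrupt packet, as parsed by auerswald_int_complete():
 *	byte 0:		AU_IRQCMDID (command id)
 *	byte 1:		AU_BLOCKRDY (a data block is ready)
 *	byte 2:		service channel id
 *	bytes 3..4:	byte count of the waiting block, little endian
 */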
975/* int completion handler. */
976static void auerswald_int_complete (struct urb * urb)
977{
978 unsigned long flags;
979 unsigned int channelid;
980 unsigned int bytecount;
981 int ret;
982 int status = urb->status;
983 pauerbuf_t bp = NULL;
984 pauerswald_t cp = urb->context;
985
986 dbg ("%s called", __func__);
987
988 switch (status) {
989 case 0:
990 /* success */
991 break;
992 case -ECONNRESET:
993 case -ENOENT:
994 case -ESHUTDOWN:
995 /* this urb is terminated, clean up */
996 dbg("%s - urb shutting down with status: %d", __func__, status);
997 return;
998 default:
999 dbg("%s - nonzero urb status received: %d", __func__, status);
1000 goto exit;
1001 }
1002
1003 /* check if all needed data was received */
1004 if (urb->actual_length < AU_IRQMINSIZE) {
1005 dbg ("invalid data length received: %d bytes", urb->actual_length);
1006 goto exit;
1007 }
1008
1009 /* check the command code */
1010 if (cp->intbufp[0] != AU_IRQCMDID) {
1011 dbg ("invalid command received: %d", cp->intbufp[0]);
1012 goto exit;
1013 }
1014
1015 /* check the command type */
1016 if (cp->intbufp[1] != AU_BLOCKRDY) {
1017 dbg ("invalid command type received: %d", cp->intbufp[1]);
1018 goto exit;
1019 }
1020
1021 /* now extract the information */
1022 channelid = cp->intbufp[2];
1023 bytecount = (unsigned char)cp->intbufp[3];
1024 bytecount |= (unsigned char)cp->intbufp[4] << 8;
1025
1026 /* check the channel id */
1027 if (channelid >= AUH_TYPESIZE) {
1028 dbg ("invalid channel id received: %d", channelid);
1029 goto exit;
1030 }
1031
1032 /* check the byte count */
1033 if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
1034 dbg ("invalid byte count received: %d", bytecount);
1035 goto exit;
1036 }
1037 dbg ("Service Channel = %d", channelid);
1038 dbg ("Byte Count = %d", bytecount);
1039
1040 /* get a buffer for the next data packet */
1041 spin_lock_irqsave (&cp->bufctl.lock, flags);
1042 if (!list_empty (&cp->bufctl.free_buff_list)) {
1043 /* yes: get the entry */
1044 struct list_head *tmp = cp->bufctl.free_buff_list.next;
1045 list_del (tmp);
1046 bp = list_entry (tmp, auerbuf_t, buff_list);
1047 }
1048 spin_unlock_irqrestore (&cp->bufctl.lock, flags);
1049
1050 /* if no buffer available: skip it */
1051 if (!bp) {
1052 dbg ("auerswald_int_complete: no data buffer available");
1053 /* can we do something more?
1054 This is a big problem: if this int packet is ignored, the
1055 device will wait forever and not signal any more data.
1056 The only real solution is: having enough buffers!
1057 Or perhaps temporarily disabling the int endpoint?
1058 */
1059 goto exit;
1060 }
1061
1062 /* fill the control message */
1063 bp->dr->bRequestType = AUT_RREQ;
1064 bp->dr->bRequest = AUV_RBLOCK;
1065 bp->dr->wValue = cpu_to_le16 (0);
1066 bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
1067 bp->dr->wLength = cpu_to_le16 (bytecount);
1068 usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
1069 (unsigned char*)bp->dr, bp->bufp, bytecount,
1070 auerswald_ctrlread_complete,bp);
1071
1072 /* submit the control msg */
1073 ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
1074 if (ret) {
1075 dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
1076 bp->urbp->status = ret;
1077 auerswald_ctrlread_complete( bp->urbp);
1078 /* here applies the same problem as above: device locking! */
1079 }
1080exit:
1081 ret = usb_submit_urb (urb, GFP_ATOMIC);
1082 if (ret)
1083 err ("%s - usb_submit_urb failed with result %d",
1084 __func__, ret);
1085}
1086
1087/* int memory deallocation
1088 NOTE: no mutex please!
1089*/
1090static void auerswald_int_free (pauerswald_t cp)
1091{
1092 if (cp->inturbp) {
1093 usb_free_urb(cp->inturbp);
1094 cp->inturbp = NULL;
1095 }
1096 kfree(cp->intbufp);
1097 cp->intbufp = NULL;
1098}
1099
1100/* This function is called to activate the interrupt
1101 endpoint. This function returns 0 if successful or an error code.
1102 NOTE: no mutex please!
1103*/
1104static int auerswald_int_open (pauerswald_t cp)
1105{
1106 int ret;
1107 struct usb_host_endpoint *ep;
1108 int irqsize;
1109 dbg ("auerswald_int_open");
1110
1111 ep = cp->usbdev->ep_in[AU_IRQENDP];
1112 if (!ep) {
1113 ret = -EFAULT;
1114 goto intoend;
1115 }
1116 irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
1117 cp->irqsize = irqsize;
1118
1119 /* allocate the urb and data buffer */
1120 if (!cp->inturbp) {
1121 cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
1122 if (!cp->inturbp) {
1123 ret = -ENOMEM;
1124 goto intoend;
1125 }
1126 }
1127 if (!cp->intbufp) {
1128 cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
1129 if (!cp->intbufp) {
1130 ret = -ENOMEM;
1131 goto intoend;
1132 }
1133 }
1134 /* setup urb */
1135 usb_fill_int_urb (cp->inturbp, cp->usbdev,
1136 usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
1137 irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
1138 /* start the urb */
1139 cp->inturbp->status = 0; /* needed! */
1140 ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);
1141
1142intoend:
1143 if (ret < 0) {
1144 /* activation of interrupt endpoint has failed. Now clean up. */
1145 dbg ("auerswald_int_open: activation of int endpoint failed");
1146
1147 /* deallocate memory */
1148 auerswald_int_free (cp);
1149 }
1150 return ret;
1151}
1152
1153/* This function is called to deactivate the interrupt
1154 endpoint. This function returns 0 if successful or an error code.
1155 NOTE: no mutex please!
1156*/
1157static void auerswald_int_release (pauerswald_t cp)
1158{
1159 dbg ("auerswald_int_release");
1160
1161 /* stop the int endpoint */
1162 usb_kill_urb (cp->inturbp);
1163
1164 /* deallocate memory */
1165 auerswald_int_free (cp);
1166}
1167
1168/* --------------------------------------------------------------------- */
1169/* Helper functions */
1170
1171/* wake up waiting readers */
1172static void auerchar_disconnect (pauerscon_t scp)
1173{
1174 pauerchar_t ccp = container_of(scp, auerchar_t, scontext);
1175 dbg ("auerchar_disconnect called");
1176 ccp->removed = 1;
1177 wake_up (&ccp->readwait);
1178}
1179
1180
1181/* dispatch a read packet to a waiting character device */
1182static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
1183{
1184 unsigned long flags;
1185 pauerchar_t ccp;
1186 pauerbuf_t newbp = NULL;
1187 char * charp;
1188 dbg ("auerchar_ctrlread_dispatch called");
1189 ccp = container_of(scp, auerchar_t, scontext);
1190
1191 /* get a read buffer from character device context */
1192 spin_lock_irqsave (&ccp->bufctl.lock, flags);
1193 if (!list_empty (&ccp->bufctl.free_buff_list)) {
1194 /* yes: get the entry */
1195 struct list_head *tmp = ccp->bufctl.free_buff_list.next;
1196 list_del (tmp);
1197 newbp = list_entry (tmp, auerbuf_t, buff_list);
1198 }
1199 spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
1200
1201 if (!newbp) {
1202 dbg ("No read buffer available, discarding packet!");
1203 return; /* no buffer, no dispatch */
1204 }
1205
1206 /* swap the buffer pointers instead of copying the data
1207 (all buffers have the same length) */
1208 charp = newbp->bufp;
1209 newbp->bufp = bp->bufp;
1210 bp->bufp = charp;
1211 newbp->len = bp->len;
1212
1213 /* insert new buffer in read list */
1214 spin_lock_irqsave (&ccp->bufctl.lock, flags);
1215 list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
1216 spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
1217 dbg ("read buffer appended to rec_list");
1218
1219 /* wake up pending synchronous reads */
1220 wake_up (&ccp->readwait);
1221}
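/* Design note: the pointer swap above hands the received data to the reader
 * without copying it; the empty buffer taken from the character device pool
 * goes back to the USB side in exchange.
 */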
1222
1223
1224/* Delete an auerswald driver context */
1225static void auerswald_delete( pauerswald_t cp)
1226{
1227 dbg( "auerswald_delete");
1228 if (cp == NULL)
1229 return;
1230
1231 /* Wake up all processes waiting for a buffer */
1232 wake_up (&cp->bufferwait);
1233
1234 /* Cleaning up */
1235 auerswald_int_release (cp);
1236 auerchain_free (&cp->controlchain);
1237 auerbuf_free_buffers (&cp->bufctl);
1238
1239 /* release the memory */
1240 kfree( cp);
1241}
1242
1243
1244/* Delete an auerswald character context */
1245static void auerchar_delete( pauerchar_t ccp)
1246{
1247 dbg ("auerchar_delete");
1248 if (ccp == NULL)
1249 return;
1250
1251 /* wake up pending synchronous reads */
1252 ccp->removed = 1;
1253 wake_up (&ccp->readwait);
1254
1255 /* remove the read buffer */
1256 if (ccp->readbuf) {
1257 auerbuf_releasebuf (ccp->readbuf);
1258 ccp->readbuf = NULL;
1259 }
1260
1261 /* remove the character buffers */
1262 auerbuf_free_buffers (&ccp->bufctl);
1263
1264 /* release the memory */
1265 kfree( ccp);
1266}
1267
1268
1269/* add a new service to the device
1270 scp->id must be set!
1271 return: 0 if OK, else error code
1272*/
1273static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
1274{
1275 int ret;
1276
1277 /* is the device available? */
1278 if (!cp->usbdev) {
1279 dbg ("usbdev == NULL");
1280 return -EIO; /*no: can not add a service, sorry*/
1281 }
1282
1283 /* is the service available? */
1284 if (cp->services[scp->id]) {
1285 dbg ("service is busy");
1286 return -EBUSY;
1287 }
1288
1289 /* device is available, service is free */
1290 cp->services[scp->id] = scp;
1291
1292 /* register service in device */
1293 ret = auerchain_control_msg(
1294 &cp->controlchain, /* pointer to control chain */
1295 cp->usbdev, /* pointer to device */
1296 usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
1297 AUV_CHANNELCTL, /* USB message request value */
1298 AUT_WREQ, /* USB message request type value */
1299 0x01, /* open USB message value */
1300 scp->id, /* USB message index value */
1301 NULL, /* pointer to the data to send */
1302 0, /* length in bytes of the data to send */
1303 HZ * 2); /* time to wait for the message to complete before timing out */
1304 if (ret < 0) {
1305 dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
1306 /* undo above actions */
1307 cp->services[scp->id] = NULL;
1308 return ret;
1309 }
1310
1311 dbg ("auerswald_addservice: channel open OK");
1312 return 0;
1313}
1314
1315
1316/* remove a service from the device
1317 scp->id must be set! */
1318static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
1319{
1320 dbg ("auerswald_removeservice called");
1321
1322 /* check if we have a service allocated */
1323 if (scp->id == AUH_UNASSIGNED)
1324 return;
1325
1326 /* If there is a device: close the channel */
1327 if (cp->usbdev) {
1328 /* Close the service channel inside the device */
1329 int ret = auerchain_control_msg(
1330 &cp->controlchain, /* pointer to control chain */
1331 cp->usbdev, /* pointer to device */
1332 usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
1333 AUV_CHANNELCTL, /* USB message request value */
1334 AUT_WREQ, /* USB message request type value */
1335 0x00, /* close: USB message value */
1336 scp->id, /* USB message index value */
1337 NULL, /* pointer to the data to send */
1338 0, /* length in bytes of the data to send */
1339 HZ * 2); /* time to wait for the message to complete before timing out */
1340 if (ret < 0) {
1341 dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
1342 }
1343 else {
1344 dbg ("auerswald_removeservice: channel close OK");
1345 }
1346 }
1347
1348 /* remove the service from the device */
1349 cp->services[scp->id] = NULL;
1350 scp->id = AUH_UNASSIGNED;
1351}
1352
1353
1354/* --------------------------------------------------------------------- */
1355/* Char device functions */
1356
1357/* Open a new character device */
1358static int auerchar_open (struct inode *inode, struct file *file)
1359{
1360 int dtindex = iminor(inode);
1361 pauerswald_t cp = NULL;
1362 pauerchar_t ccp = NULL;
1363 struct usb_interface *intf;
1364 int ret;
1365
1366 /* minor number in range? */
1367 if (dtindex < 0) {
1368 return -ENODEV;
1369 }
1370 intf = usb_find_interface(&auerswald_driver, dtindex);
1371 if (!intf) {
1372 return -ENODEV;
1373 }
1374
1375 /* usb device available? */
1376 cp = usb_get_intfdata (intf);
1377 if (cp == NULL) {
1378 return -ENODEV;
1379 }
1380 if (mutex_lock_interruptible(&cp->mutex)) {
1381 return -ERESTARTSYS;
1382 }
1383
1384 /* we have access to the device. Now lets allocate memory */
1385 ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
1386 if (ccp == NULL) {
1387 err ("out of memory");
1388 ret = -ENOMEM;
1389 goto ofail;
1390 }
1391
1392 /* Initialize device descriptor */
1393 mutex_init(&ccp->mutex);
1394 mutex_init(&ccp->readmutex);
1395 auerbuf_init (&ccp->bufctl);
1396 ccp->scontext.id = AUH_UNASSIGNED;
1397 ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
1398 ccp->scontext.disconnect = auerchar_disconnect;
1399 init_waitqueue_head (&ccp->readwait);
1400
1401 ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
1402 if (ret) {
1403 goto ofail;
1404 }
1405
1406 cp->open_count++;
1407 ccp->auerdev = cp;
1408 dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
1409 mutex_unlock(&cp->mutex);
1410
1411 /* file IO stuff */
1412 file->f_pos = 0;
1413 file->private_data = ccp;
1414 return nonseekable_open(inode, file);
1415
1416 /* Error exit */
1417ofail: mutex_unlock(&cp->mutex);
1418 auerchar_delete (ccp);
1419 return ret;
1420}
1421
1422
1423/* IOCTL functions */
1424static long auerchar_ioctl(struct file *file, unsigned int cmd,
1425 unsigned long arg)
1426{
1427 pauerchar_t ccp = (pauerchar_t) file->private_data;
1428 int ret = 0;
1429 audevinfo_t devinfo;
1430 pauerswald_t cp = NULL;
1431 unsigned int u;
1432 unsigned int __user *user_arg = (unsigned int __user *)arg;
1433
1434 dbg ("ioctl");
1435
1436 /* get the mutexes */
1437 if (mutex_lock_interruptible(&ccp->mutex)) {
1438 return -ERESTARTSYS;
1439 }
1440 cp = ccp->auerdev;
1441 if (!cp) {
1442 mutex_unlock(&ccp->mutex);
1443 return -ENODEV;
1444 }
1445 if (mutex_lock_interruptible(&cp->mutex)) {
1446 mutex_unlock(&ccp->mutex);
1447 return -ERESTARTSYS;
1448 }
1449
1450 /* Check for removal */
1451 if (!cp->usbdev) {
1452 mutex_unlock(&cp->mutex);
1453 mutex_unlock(&ccp->mutex);
1454 return -ENODEV;
1455 }
1456 lock_kernel();
1457 switch (cmd) {
1458
1459 /* return != 0 if Transmit channel is ready to send */
1460 case IOCTL_AU_TXREADY:
1461 dbg ("IOCTL_AU_TXREADY");
1462 u = ccp->auerdev
1463 && (ccp->scontext.id != AUH_UNASSIGNED)
1464 && !list_empty (&cp->bufctl.free_buff_list);
1465 ret = put_user (u, user_arg);
1466 break;
1467
1468 /* return != 0 if connected to a service channel */
1469 case IOCTL_AU_CONNECT:
1470 dbg ("IOCTL_AU_CONNECT");
1471 u = (ccp->scontext.id != AUH_UNASSIGNED);
1472 ret = put_user (u, user_arg);
1473 break;
1474
1475 /* return != 0 if Receive Data available */
1476 case IOCTL_AU_RXAVAIL:
1477 dbg ("IOCTL_AU_RXAVAIL");
1478 if (ccp->scontext.id == AUH_UNASSIGNED) {
1479 ret = -EIO;
1480 break;
1481 }
1482 u = 0; /* no data */
1483 if (ccp->readbuf) {
1484 int restlen = ccp->readbuf->len - ccp->readoffset;
1485 if (restlen > 0)
1486 u = 1;
1487 }
1488 if (!u) {
1489 if (!list_empty (&ccp->bufctl.rec_buff_list)) {
1490 u = 1;
1491 }
1492 }
1493 ret = put_user (u, user_arg);
1494 break;
1495
1496 /* return the max. buffer length for the device */
1497 case IOCTL_AU_BUFLEN:
1498 dbg ("IOCTL_AU_BUFLEN");
1499 u = cp->maxControlLength;
1500 ret = put_user (u, user_arg);
1501 break;
1502
1503 /* requesting a service channel */
1504 case IOCTL_AU_SERVREQ:
1505 dbg ("IOCTL_AU_SERVREQ");
1506 /* requesting a service means: release the previous one first */
1507 auerswald_removeservice (cp, &ccp->scontext);
1508 /* get the channel number */
1509 ret = get_user (u, user_arg);
1510 if (ret) {
1511 break;
1512 }
1513 if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
1514 ret = -EIO;
1515 break;
1516 }
1517 dbg ("auerchar service request parameters are ok");
1518 ccp->scontext.id = u;
1519
1520 /* request the service now */
1521 ret = auerswald_addservice (cp, &ccp->scontext);
1522 if (ret) {
1523 /* no: revert service entry */
1524 ccp->scontext.id = AUH_UNASSIGNED;
1525 }
1526 break;
1527
1528 /* get a string descriptor for the device */
1529 case IOCTL_AU_DEVINFO:
1530 dbg ("IOCTL_AU_DEVINFO");
1531 if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
1532 ret = -EFAULT;
1533 break;
1534 }
1535 u = strlen(cp->dev_desc)+1;
1536 if (u > devinfo.bsize) {
1537 u = devinfo.bsize;
1538 }
1539 ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
1540 break;
1541
1542 /* get the max. string descriptor length */
1543 case IOCTL_AU_SLEN:
1544 dbg ("IOCTL_AU_SLEN");
1545 u = AUSI_DLEN;
1546 ret = put_user (u, user_arg);
1547 break;
1548
1549 default:
1550 dbg ("IOCTL_AU_UNKNOWN");
1551 ret = -ENOTTY;
1552 break;
1553 }
1554 unlock_kernel();
1555 /* release the mutexes */
1556 mutex_unlock(&cp->mutex);
1557 mutex_unlock(&ccp->mutex);
1558 return ret;
1559}
1560
1561/* Read data from the device */
1562static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
1563{
1564 unsigned long flags;
1565 pauerchar_t ccp = (pauerchar_t) file->private_data;
1566 pauerbuf_t bp = NULL;
1567 wait_queue_t wait;
1568
1569 dbg ("auerchar_read");
1570
1571 /* Error checking */
1572 if (!ccp)
1573 return -EIO;
1574 if (*ppos)
1575 return -ESPIPE;
1576 if (count == 0)
1577 return 0;
1578
1579 /* get the mutex */
1580 if (mutex_lock_interruptible(&ccp->mutex))
1581 return -ERESTARTSYS;
1582
1583 /* Can we expect to read something? */
1584 if (ccp->scontext.id == AUH_UNASSIGNED) {
1585 mutex_unlock(&ccp->mutex);
1586 return -EIO;
1587 }
1588
1589 /* only one reader per device allowed */
1590 if (mutex_lock_interruptible(&ccp->readmutex)) {
1591 mutex_unlock(&ccp->mutex);
1592 return -ERESTARTSYS;
1593 }
1594
1595 /* read data from readbuf, if available */
1596doreadbuf:
1597 bp = ccp->readbuf;
1598 if (bp) {
1599 /* read the maximum bytes */
1600 int restlen = bp->len - ccp->readoffset;
1601 if (restlen < 0)
1602 restlen = 0;
1603 if (count > restlen)
1604 count = restlen;
1605 if (count) {
1606 if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
1607 dbg ("auerchar_read: copy_to_user failed");
1608 mutex_unlock(&ccp->readmutex);
1609 mutex_unlock(&ccp->mutex);
1610 return -EFAULT;
1611 }
1612 }
1613 /* advance the read offset */
1614 ccp->readoffset += count;
1615 restlen -= count;
1616 // reuse the read buffer
1617 if (restlen <= 0) {
1618 auerbuf_releasebuf (bp);
1619 ccp->readbuf = NULL;
1620 }
1621 /* return with number of bytes read */
1622 if (count) {
1623 mutex_unlock(&ccp->readmutex);
1624 mutex_unlock(&ccp->mutex);
1625 return count;
1626 }
1627 }
1628
1629 /* a read buffer is not available. Try to get the next data block. */
1630doreadlist:
1631 /* Preparing for sleep */
1632 init_waitqueue_entry (&wait, current);
1633 set_current_state (TASK_INTERRUPTIBLE);
1634 add_wait_queue (&ccp->readwait, &wait);
1635
1636 bp = NULL;
1637 spin_lock_irqsave (&ccp->bufctl.lock, flags);
1638 if (!list_empty (&ccp->bufctl.rec_buff_list)) {
1639 /* yes: get the entry */
1640 struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
1641 list_del (tmp);
1642 bp = list_entry (tmp, auerbuf_t, buff_list);
1643 }
1644 spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
1645
1646 /* have we got data? */
1647 if (bp) {
1648 ccp->readbuf = bp;
1649 ccp->readoffset = AUH_SIZE; /* skip the header byte */
1650 set_current_state (TASK_RUNNING);
1651 remove_wait_queue (&ccp->readwait, &wait);
1652 goto doreadbuf; /* now we can read! */
1653 }
1654
1655 /* no data available. Should we wait? */
1656 if (file->f_flags & O_NONBLOCK) {
1657 dbg ("No read buffer available, returning -EAGAIN");
1658 set_current_state (TASK_RUNNING);
1659 remove_wait_queue (&ccp->readwait, &wait);
1660 mutex_unlock(&ccp->readmutex);
1661 mutex_unlock(&ccp->mutex);
1662 return -EAGAIN; /* nonblocking, no data available */
1663 }
1664
1665 /* yes, we should wait! */
1666 mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
1667 schedule();
1668 remove_wait_queue (&ccp->readwait, &wait);
1669 if (signal_pending (current)) {
1670 /* woken up by a signal */
1671 mutex_unlock(&ccp->readmutex);
1672 return -ERESTARTSYS;
1673 }
1674
1675 /* Anything left to read? */
1676 if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
1677 mutex_unlock(&ccp->readmutex);
1678 return -EIO;
1679 }
1680
1681 if (mutex_lock_interruptible(&ccp->mutex)) {
1682 mutex_unlock(&ccp->readmutex);
1683 return -ERESTARTSYS;
1684 }
1685
1686 /* try to read the incoming data again */
1687 goto doreadlist;
1688}
1689
1690
1691/* Write a data block into the right service channel of the device */
1692static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
1693{
1694 pauerchar_t ccp = (pauerchar_t) file->private_data;
1695 pauerswald_t cp = NULL;
1696 pauerbuf_t bp;
1697 unsigned long flags;
1698 int ret;
1699 wait_queue_t wait;
1700
1701 dbg ("auerchar_write %zd bytes", len);
1702
1703 /* Error checking */
1704 if (!ccp)
1705 return -EIO;
1706 if (*ppos)
1707 return -ESPIPE;
1708 if (len == 0)
1709 return 0;
1710
1711write_again:
1712 /* get the mutex */
1713 if (mutex_lock_interruptible(&ccp->mutex))
1714 return -ERESTARTSYS;
1715
1716 /* Can we expect to write something? */
1717 if (ccp->scontext.id == AUH_UNASSIGNED) {
1718 mutex_unlock(&ccp->mutex);
1719 return -EIO;
1720 }
1721
1722 cp = ccp->auerdev;
1723 if (!cp) {
1724 mutex_unlock(&ccp->mutex);
1725 return -ERESTARTSYS;
1726 }
1727 if (mutex_lock_interruptible(&cp->mutex)) {
1728 mutex_unlock(&ccp->mutex);
1729 return -ERESTARTSYS;
1730 }
1731 if (!cp->usbdev) {
1732 mutex_unlock(&cp->mutex);
1733 mutex_unlock(&ccp->mutex);
1734 return -EIO;
1735 }
1736 /* Prepare for sleep */
1737 init_waitqueue_entry (&wait, current);
1738 set_current_state (TASK_INTERRUPTIBLE);
1739 add_wait_queue (&cp->bufferwait, &wait);
1740
1741 /* Try to get a buffer from the device pool.
1742 We can't use a buffer from ccp->bufctl because the write
1743 command will last beyond a release() */
1744 bp = NULL;
1745 spin_lock_irqsave (&cp->bufctl.lock, flags);
1746 if (!list_empty (&cp->bufctl.free_buff_list)) {
1747 /* yes: get the entry */
1748 struct list_head *tmp = cp->bufctl.free_buff_list.next;
1749 list_del (tmp);
1750 bp = list_entry (tmp, auerbuf_t, buff_list);
1751 }
1752 spin_unlock_irqrestore (&cp->bufctl.lock, flags);
1753
1754 /* are there any buffers left? */
1755 if (!bp) {
1756 mutex_unlock(&cp->mutex);
1757 mutex_unlock(&ccp->mutex);
1758
1759 /* NONBLOCK: don't wait */
1760 if (file->f_flags & O_NONBLOCK) {
1761 set_current_state (TASK_RUNNING);
1762 remove_wait_queue (&cp->bufferwait, &wait);
1763 return -EAGAIN;
1764 }
1765
1766 /* BLOCKING: wait */
1767 schedule();
1768 remove_wait_queue (&cp->bufferwait, &wait);
1769 if (signal_pending (current)) {
1770 /* woken up by a signal */
1771 return -ERESTARTSYS;
1772 }
1773 goto write_again;
1774 } else {
1775 set_current_state (TASK_RUNNING);
1776 remove_wait_queue (&cp->bufferwait, &wait);
1777 }
1778
1779 /* protect against too big write requests */
1780 if (len > cp->maxControlLength)
1781 len = cp->maxControlLength;
1782
1783 /* Fill the buffer */
1784 if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
1785 dbg ("copy_from_user failed");
1786 auerbuf_releasebuf (bp);
1787 /* Wake up all processes waiting for a buffer */
1788 wake_up (&cp->bufferwait);
1789 mutex_unlock(&cp->mutex);
1790 mutex_unlock(&ccp->mutex);
1791 return -EFAULT;
1792 }
1793
1794 /* set the header byte */
1795 *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
1796
1797 /* Set the transfer Parameters */
1798 bp->len = len+AUH_SIZE;
1799 bp->dr->bRequestType = AUT_WREQ;
1800 bp->dr->bRequest = AUV_WBLOCK;
1801 bp->dr->wValue = cpu_to_le16 (0);
1802 bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
1803 bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
1804 usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
1805 (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
1806 auerchar_ctrlwrite_complete, bp);
1807 /* up we go */
1808 ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
1809 mutex_unlock(&cp->mutex);
1810 if (ret) {
1811 dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
1812 auerbuf_releasebuf (bp);
1813 /* Wake up all processes waiting for a buffer */
1814 wake_up (&cp->bufferwait);
1815 mutex_unlock(&ccp->mutex);
1816 return -EIO;
1817 }
1818 else {
1819 dbg ("auerchar_write: Write OK");
1820 mutex_unlock(&ccp->mutex);
1821 return len;
1822 }
1823}
1824
1825
1826/* Close a character device */
1827static int auerchar_release (struct inode *inode, struct file *file)
1828{
1829 pauerchar_t ccp = (pauerchar_t) file->private_data;
1830 pauerswald_t cp;
1831 dbg("release");
1832
1833 mutex_lock(&ccp->mutex);
1834 cp = ccp->auerdev;
1835 if (cp) {
1836 mutex_lock(&cp->mutex);
1837 /* remove an open service */
1838 auerswald_removeservice (cp, &ccp->scontext);
1839 /* detach from device */
1840 if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
1841 /* usb device waits for removal */
1842 mutex_unlock(&cp->mutex);
1843 auerswald_delete (cp);
1844 } else {
1845 mutex_unlock(&cp->mutex);
1846 }
1847 cp = NULL;
1848 ccp->auerdev = NULL;
1849 }
1850 mutex_unlock(&ccp->mutex);
1851 auerchar_delete (ccp);
1852
1853 return 0;
1854}
1855
1856
1857/*----------------------------------------------------------------------*/
1858/* File operation structure */
1859static const struct file_operations auerswald_fops =
1860{
1861 .owner = THIS_MODULE,
1862 .llseek = no_llseek,
1863 .read = auerchar_read,
1864 .write = auerchar_write,
1865 .unlocked_ioctl = auerchar_ioctl,
1866 .open = auerchar_open,
1867 .release = auerchar_release,
1868};
1869
1870static struct usb_class_driver auerswald_class = {
1871 .name = "auer%d",
1872 .fops = &auerswald_fops,
1873 .minor_base = AUER_MINOR_BASE,
1874};
1875
1876
1877/* --------------------------------------------------------------------- */
1878/* Special USB driver functions */
1879
1880/* Probe whether this driver wants to serve a USB device
1881
1882 This entry point is called whenever a new device is attached to the bus.
1883 The device driver then has to create a new instance of its internal data
1884 structures for the new device.
1885
1886 The dev argument specifies the device context, which contains pointers
1887 to all USB descriptors. The interface argument specifies the interface
1888 number. If a USB driver wants to bind itself to a particular device and
1889 interface it has to return a pointer. This pointer normally references
1890 the device driver's context structure.
1891
1892 Probing is normally done by checking the vendor and product identifications
1893 or the class and subclass definitions. If they match, the interface number
1894 is compared with the ones supported by the driver. When probing is done
1895 class-based, it might be necessary to parse some more USB descriptors because
1896 the device properties can differ over a wide range.
1897*/
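/* In this driver the match is purely id based: the USB core compares the
 * device's vendor/product ids against the auerswald_ids table near the end
 * of this file before auerswald_probe() is called.
 */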
1898static int auerswald_probe (struct usb_interface *intf,
1899 const struct usb_device_id *id)
1900{
1901 struct usb_device *usbdev = interface_to_usbdev(intf);
1902 pauerswald_t cp = NULL;
1903 unsigned int u = 0;
1904 __le16 *pbuf;
1905 int ret;
1906
1907 dbg ("probe: vendor id 0x%x, device id 0x%x",
1908 le16_to_cpu(usbdev->descriptor.idVendor),
1909 le16_to_cpu(usbdev->descriptor.idProduct));
1910
1911 /* we use only the first (and only) interface */
1912 if (intf->altsetting->desc.bInterfaceNumber != 0)
1913 return -ENODEV;
1914
1915 /* allocate memory for our device and initialize it */
1916 cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
1917 if (cp == NULL) {
1918 err ("out of memory");
1919 goto pfail;
1920 }
1921
1922 /* Initialize device descriptor */
1923 mutex_init(&cp->mutex);
1924 cp->usbdev = usbdev;
1925 auerchain_init (&cp->controlchain);
1926 auerbuf_init (&cp->bufctl);
1927 init_waitqueue_head (&cp->bufferwait);
1928
1929 ret = usb_register_dev(intf, &auerswald_class);
1930 if (ret) {
1931 err ("Not able to get a minor for this device.");
1932 goto pfail;
1933 }
1934
1935 /* Give the device a name */
1936 sprintf (cp->name, "usb/auer%d", intf->minor);
1937
1938 /* Store the index */
1939 cp->dtindex = intf->minor;
1940
1941 /* Get the usb version of the device */
1942 cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
1943 dbg ("Version is %X", cp->version);
1944
1945 /* allow the device some time to settle */
1946 msleep(334);
1947
1948 /* Try to get a suitable textual description of the device */
1949 /* Device name:*/
1950 ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
1951 if (ret >= 0) {
1952 u += ret;
1953 /* Append Serial Number */
1954 memcpy(&cp->dev_desc[u], ",Ser# ", 6);
1955 u += 6;
1956 ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
1957 if (ret >= 0) {
1958 u += ret;
1959 /* Append subscriber number */
1960 memcpy(&cp->dev_desc[u], ", ", 2);
1961 u += 2;
1962 ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
1963 if (ret >= 0) {
1964 u += ret;
1965 }
1966 }
1967 }
1968 cp->dev_desc[u] = '\0';
1969 info("device is a %s", cp->dev_desc);
1970
1971 /* get the maximum allowed control transfer length */
1972 pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
1973 if (!pbuf) {
1974 err( "out of memory");
1975 goto pfail;
1976 }
1977 ret = usb_control_msg(cp->usbdev, /* pointer to device */
1978 usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */
1979 AUV_GETINFO, /* USB message request value */
1980 AUT_RREQ, /* USB message request type value */
1981 0, /* USB message value */
1982 AUDI_MBCTRANS, /* USB message index value */
1983 pbuf, /* pointer to the receive buffer */
1984 2, /* length of the buffer */
1985 2000); /* time to wait for the message to complete before timing out */
1986 if (ret == 2) {
1987 cp->maxControlLength = le16_to_cpup(pbuf);
1988 kfree(pbuf);
1989 dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
1990 } else {
1991 kfree(pbuf);
1992 err("setup: getting max. allowed control transfer length failed with error %d", ret);
1993 goto pfail;
1994 }
1995
1996 /* allocate a chain for the control messages */
1997 if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
1998 err ("out of memory");
1999 goto pfail;
2000 }
2001
2002 /* allocate buffers for control messages */
2003 if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
2004 err ("out of memory");
2005 goto pfail;
2006 }
2007
2008 /* start the interrupt endpoint */
2009 if (auerswald_int_open (cp)) {
2010 err ("int endpoint failed");
2011 goto pfail;
2012 }
2013
2014 /* all OK */
2015 usb_set_intfdata (intf, cp);
2016 return 0;
2017
2018 /* Error exit: clean up the memory */
2019pfail: auerswald_delete (cp);
2020 return -EIO;
2021}
2022
2023
2024/* Disconnect driver from a served device
2025
2026 This function is called whenever a device which was served by this driver
2027 is disconnected.
2028
2029 The argument dev specifies the device context and the driver_context
2030 returns a pointer to the previously registered driver_context of the
2031 probe function. After returning from the disconnect function the USB
2032 framework completely deallocates all data structures associated with
2033 this device. So especially the usb_device structure must not be used
2034 any longer by the usb driver.
2035*/
2036static void auerswald_disconnect (struct usb_interface *intf)
2037{
2038 pauerswald_t cp = usb_get_intfdata (intf);
2039 unsigned int u;
2040
2041 usb_set_intfdata (intf, NULL);
2042 if (!cp)
2043 return;
2044
2045 /* give back our USB minor number */
2046 usb_deregister_dev(intf, &auerswald_class);
2047
2048 mutex_lock(&cp->mutex);
2049 info ("device /dev/%s now disconnecting", cp->name);
2050
2051 /* Stop the interrupt endpoint */
2052 auerswald_int_release (cp);
2053
2054 /* remove the control chain allocated in auerswald_probe
2055 This has the benefit of
2056 a) all pending (a)synchronous urbs are unlinked
2057 b) all buffers dealing with urbs are reclaimed
2058 */
2059 auerchain_free (&cp->controlchain);
2060
2061 if (cp->open_count == 0) {
2062 /* nobody is using this device. So we can clean up now */
2063 mutex_unlock(&cp->mutex);
2064 /* mutex_unlock() is possible here because no other task
2065 can open the device (see above). I don't want
2066 to kfree() a locked mutex. */
2067
2068 auerswald_delete (cp);
2069 } else {
2070 /* device is used. Remove the pointer to the
2071 usb device (it's not valid any more). The last
2072 release() will do the clean up */
2073 cp->usbdev = NULL;
2074 mutex_unlock(&cp->mutex);
2075 /* Terminate waiting writers */
2076 wake_up (&cp->bufferwait);
2077 /* Inform all waiting readers */
2078 for ( u = 0; u < AUH_TYPESIZE; u++) {
2079 pauerscon_t scp = cp->services[u];
2080 if (scp)
2081 scp->disconnect( scp);
2082 }
2083 }
2084}
2085
2086/* Descriptor for the devices which are served by this driver.
2087 NOTE: this struct is parsed by the usbmanager install scripts.
2088 Don't change without caution!
2089*/
2090static struct usb_device_id auerswald_ids [] = {
2091 { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */
2092 { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */
2093 { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */
2094 { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */
2095 { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */
2096 { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */
2097 { } /* Terminating entry */
2098};
2099
2100/* Standard module device table */
2101MODULE_DEVICE_TABLE (usb, auerswald_ids);
2102
2103/* Standard usb driver struct */
2104static struct usb_driver auerswald_driver = {
2105 .name = "auerswald",
2106 .probe = auerswald_probe,
2107 .disconnect = auerswald_disconnect,
2108 .id_table = auerswald_ids,
2109};
2110
2111
2112/* --------------------------------------------------------------------- */
2113/* Module loading/unloading */
2114
2115/* Driver initialisation. Called after module loading.
2116 NOTE: there is no concurrency at _init
2117*/
2118static int __init auerswald_init (void)
2119{
2120 int result;
2121 dbg ("init");
2122
2123 /* register driver at the USB subsystem */
2124 result = usb_register (&auerswald_driver);
2125 if (result < 0) {
2126 err ("driver could not be registered");
2127 return -1;
2128 }
2129 return 0;
2130}
2131
2132/* Driver deinit. Called before module removal.
2133 NOTE: there is no concurrency at _cleanup
2134*/
2135static void __exit auerswald_cleanup (void)
2136{
2137 dbg ("cleanup");
2138 usb_deregister (&auerswald_driver);
2139}
2140
2141/* --------------------------------------------------------------------- */
2142/* Linux device driver module description */
2143
2144MODULE_AUTHOR (DRIVER_AUTHOR);
2145MODULE_DESCRIPTION (DRIVER_DESC);
2146MODULE_LICENSE ("GPL");
2147
2148module_init (auerswald_init);
2149module_exit (auerswald_cleanup);
2150
2151/* --------------------------------------------------------------------- */
2152
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 000000000000..faca4333f27a
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,176 @@
1#
2# USB Dual Role (OTG-ready) Controller Drivers
3# for silicon based on Mentor Graphics INVENTRA designs
4#
5
6comment "Enable Host or Gadget support to see Inventra options"
7 depends on !USB && USB_GADGET=n
8
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK
12 select TWL4030_USB if MACH_OMAP_3430SDP
13 tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
14 help
15 Say Y here if your system has a dual role high speed USB
16 controller based on the Mentor Graphics silicon IP. Then
17 configure options to match your silicon and the board
18 it's being used with, including the USB peripheral role,
19 or the USB host role, or both.
20
21 Texas Instruments parts using this IP include DaVinci 644x,
22 OMAP 243x, OMAP 343x, and TUSB 6010.
23
24 If you do not know what this is, please say N.
25
26 To compile this driver as a module, choose M here; the
27 module will be called "musb_hdrc".
28
29config USB_MUSB_SOC
30 boolean
31 depends on USB_MUSB_HDRC
32 default y if ARCH_DAVINCI
33 default y if ARCH_OMAP2430
34 default y if ARCH_OMAP34XX
35 help
36 Use a static <asm/arch/hdrc_cnf.h> file to describe how the
37 controller is configured (endpoints, mechanisms, etc) on the
38 current iteration of a given system-on-chip.
39
40comment "DaVinci 644x USB support"
41 depends on USB_MUSB_HDRC && ARCH_DAVINCI
42
43comment "OMAP 243x high speed USB support"
44 depends on USB_MUSB_HDRC && ARCH_OMAP2430
45
46comment "OMAP 343x high speed USB support"
47 depends on USB_MUSB_HDRC && ARCH_OMAP34XX
48
49config USB_TUSB6010
50 boolean "TUSB 6010 support"
51 depends on USB_MUSB_HDRC && !USB_MUSB_SOC
52 default y
53 help
54 The TUSB 6010 chip, from Texas Instruments, connects a discrete
55 HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
56 (a high speed serial link). It can use system-specific external
57 DMA controllers.
58
59choice
60 prompt "Driver Mode"
61 depends on USB_MUSB_HDRC
62 help
63 Dual-Role devices can support both host and peripheral roles,
64 as well as the special "OTG Device" role which can switch
65 between both roles as needed.
66
67# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
68# OTG needs both roles, not just USB_MUSB_HOST.
69config USB_MUSB_HOST
70 depends on USB
71 bool "USB Host"
72 help
73 Say Y here if your system supports the USB host role.
74 If it has a USB "A" (rectangular), "Mini-A" (uncommon),
75 or "Mini-AB" connector, it supports the host role.
76 (With a "Mini-AB" connector, you should enable USB OTG.)
77
78# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
79# side support ... OTG needs both roles
80config USB_MUSB_PERIPHERAL
81 depends on USB_GADGET
82 bool "USB Peripheral (gadget stack)"
83 select USB_GADGET_MUSB_HDRC
84 help
85 Say Y here if your system supports the USB peripheral role.
86 If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
87 connector, it supports the peripheral role.
88 (With a "Mini-AB" connector, you should enable USB OTG.)
89
90config USB_MUSB_OTG
91 depends on USB && USB_GADGET && PM && EXPERIMENTAL
92 bool "Both host and peripheral: USB OTG (On The Go) Device"
93 select USB_GADGET_MUSB_HDRC
94 select USB_OTG
95 help
96 The most notable feature of USB OTG is support for a
97 "Dual-Role" device, which can act as either a device
98 or a host. The initial role choice can be changed
99 later, when two dual-role devices talk to each other.
100
101 At this writing, the OTG support in this driver is incomplete,
102 omitting the mandatory HNP or SRP protocols. However, some
103 of the cable-based role switching works. (That is, grounding
104 the ID pin switches the controller to host mode, while leaving
105 it floating leaves it in peripheral mode.)
106
107 Select this if your system has a Mini-AB connector, or
108 to simplify certain kinds of configuration.
109
110 To implement your OTG Targeted Peripherals List (TPL), enable
111 USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
112 to match your requirements.
113
114endchoice
115
116# enable peripheral support (including with OTG)
117config USB_GADGET_MUSB_HDRC
118 bool
119 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
120# default y
121# select USB_GADGET_DUALSPEED
122# select USB_GADGET_SELECTED
123
124# enables host support (including with OTG)
125config USB_MUSB_HDRC_HCD
126 bool
127 depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
128 select USB_OTG if USB_GADGET_MUSB_HDRC
129 default y
130
131
132config MUSB_PIO_ONLY
133 bool 'Disable DMA (always use PIO)'
134 depends on USB_MUSB_HDRC
135 default y if USB_TUSB6010
136 help
137 All data is copied between memory and FIFO by the CPU.
138 DMA controllers are ignored.
139
140 Do not select 'y' here unless DMA support for your SOC or board
141 is unavailable (or unstable). When DMA is enabled at compile time,
142 you can still disable it at run time using the "use_dma=n" module
143 parameter.
144
145config USB_INVENTRA_DMA
146 bool
147 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
148 default ARCH_OMAP2430 || ARCH_OMAP34XX
149 help
150 Enable DMA transfers using Mentor's engine.
151
152config USB_TI_CPPI_DMA
153 bool
154 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
155 default ARCH_DAVINCI
156 help
157 Enable DMA transfers when TI CPPI DMA is available.
158
159config USB_TUSB_OMAP_DMA
160 bool
161 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
162 depends on USB_TUSB6010
163 depends on ARCH_OMAP
164 default y
165 help
166 Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
167
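# Example (illustrative only): a TUSB6010-based OMAP board configured for
# OTG, with DMA enabled, might end up with a .config fragment like
#
#	CONFIG_USB_MUSB_HDRC=y
#	CONFIG_USB_TUSB6010=y
#	CONFIG_USB_MUSB_OTG=y
#	CONFIG_USB_GADGET_MUSB_HDRC=y
#	CONFIG_USB_MUSB_HDRC_HCD=y
#	# CONFIG_MUSB_PIO_ONLY is not set
#	CONFIG_USB_TUSB_OMAP_DMA=y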
168config USB_MUSB_LOGLEVEL
169 depends on USB_MUSB_HDRC
170 int 'Logging Level (0 - none / 3 - annoying / ... )'
171 default 0
172 help
173 Set the logging level. 0 disables the debugging altogether,
174 although when USB_DEBUG is set the value is at least 1.
175 Starting at level 3, per-transfer (urb, usb_request, packet,
176 or dma transfer) tracing may kick in.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 000000000000..88eb67de08ae
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,86 @@
1#
2# for USB OTG silicon based on Mentor Graphics INVENTRA designs
3#
4
5musb_hdrc-objs := musb_core.o
6
7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
8
9ifeq ($(CONFIG_ARCH_DAVINCI),y)
10 musb_hdrc-objs += davinci.o
11endif
12
13ifeq ($(CONFIG_USB_TUSB6010),y)
14 musb_hdrc-objs += tusb6010.o
15endif
16
17ifeq ($(CONFIG_ARCH_OMAP2430),y)
18 musb_hdrc-objs += omap2430.o
19endif
20
21ifeq ($(CONFIG_ARCH_OMAP3430),y)
22 musb_hdrc-objs += omap2430.o
23endif
24
25ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
26 musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
27endif
28
29ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
30 musb_hdrc-objs += musb_virthub.o musb_host.o
31endif
32
33# the kconfig must guarantee that only one of the
34# possible I/O schemes will be enabled at a time ...
35# PIO only, or DMA (several potential schemes).
36# though PIO is always there to back up DMA, and for ep0
37
38ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
39
40 ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
41 musb_hdrc-objs += musbhsdma.o
42
43 else
44 ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
45 musb_hdrc-objs += cppi_dma.o
46
47 else
48 ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
49 musb_hdrc-objs += tusb6010_omap.o
50
51 endif
52 endif
53 endif
54endif
55
56
57################################################################################
58
59# FIXME remove all these extra -DMUSB_* things, stick to CONFIG_*
60
61ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
62 EXTRA_CFLAGS += -DMUSB_AHB_ID
63endif
64
65# Debugging
66
67MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL)
68
69ifeq ("$(strip $(MUSB_DEBUG))","")
70 ifdef CONFIG_USB_DEBUG
71 MUSB_DEBUG:=1
72 else
73 MUSB_DEBUG:=0
74 endif
75endif
76
77ifneq ($(MUSB_DEBUG),0)
78 EXTRA_CFLAGS += -DDEBUG
79
80 ifeq ($(CONFIG_PROC_FS),y)
81 musb_hdrc-objs += musb_procfs.o
82 endif
83
84endif
85
86EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
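
# Worked example (illustrative): with CONFIG_USB_MUSB_LOGLEVEL=2 and
# CONFIG_PROC_FS=y, the logic above adds musb_procfs.o to the objects and
# ends up passing "-DDEBUG -DMUSB_DEBUG=2" to the compiler.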
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 000000000000..5ad6d0893cbe
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * This file implements a DMA interface using TI's CPPI DMA.
5 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
6 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
7 */
8
9#include <linux/usb.h>
10
11#include "musb_core.h"
12#include "cppi_dma.h"
13
14
15/* CPPI DMA status 7-mar-2006:
16 *
17 * - See musb_{host,gadget}.c for more info
18 *
19 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
20 * which can easily saturate the CPU under non-mass-storage loads.
21 *
22 * NOTES 24-aug-2006 (2.6.18-rc4):
23 *
24 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
25 * evidently after the 1 byte packet was received and acked, the queue
26 * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
27 * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
28 * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
29 * of its next (512 byte) packet. IRQ issues?
30 *
31 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
32 * evidently also directly update the RX and TX CSRs ... so audit all
33 * host and peripheral side DMA code to avoid CSR access after DMA has
34 * been started.
35 */
36
37/* REVISIT now we can avoid preallocating these descriptors; or
38 * more simply, switch to a global freelist not per-channel ones.
39 * Note: at full speed, 64 descriptors == 4K bulk data.
40 */
41#define NUM_TXCHAN_BD 64
42#define NUM_RXCHAN_BD 64
43
44static inline void cpu_drain_writebuffer(void)
45{
46 wmb();
47#ifdef CONFIG_CPU_ARM926T
48 /* REVISIT this "should not be needed",
49 * but lack of it sure seemed to hurt ...
50 */
51 asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
52#endif
53}
54
55static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
56{
57 struct cppi_descriptor *bd = c->freelist;
58
59 if (bd)
60 c->freelist = bd->next;
61 return bd;
62}
63
64static inline void
65cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
66{
67 if (!bd)
68 return;
69 bd->next = c->freelist;
70 c->freelist = bd;
71}
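/* The per-channel freelist is a simple LIFO stack of buffer descriptors
 * threaded through bd->next: cppi_bd_alloc() pops the head and
 * cppi_bd_free() pushes a descriptor back; any locking is left to the
 * callers.
 */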
72
73/*
74 * Start DMA controller
75 *
76 * Initialize the DMA controller as necessary.
77 */
78
79/* zero out entire rx state RAM entry for the channel */
80static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
81{
82 musb_writel(&rx->rx_skipbytes, 0, 0);
83 musb_writel(&rx->rx_head, 0, 0);
84 musb_writel(&rx->rx_sop, 0, 0);
85 musb_writel(&rx->rx_current, 0, 0);
86 musb_writel(&rx->rx_buf_current, 0, 0);
87 musb_writel(&rx->rx_len_len, 0, 0);
88 musb_writel(&rx->rx_cnt_cnt, 0, 0);
89}
90
91/* zero out entire tx state RAM entry for the channel */
92static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
93{
94 musb_writel(&tx->tx_head, 0, 0);
95 musb_writel(&tx->tx_buf, 0, 0);
96 musb_writel(&tx->tx_current, 0, 0);
97 musb_writel(&tx->tx_buf_current, 0, 0);
98 musb_writel(&tx->tx_info, 0, 0);
99 musb_writel(&tx->tx_rem_len, 0, 0);
100 /* musb_writel(&tx->tx_dummy, 0, 0); */
101 musb_writel(&tx->tx_complete, 0, ptr);
102}
103
104static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
105{
106 int j;
107
108 /* initialize channel fields */
109 c->head = NULL;
110 c->tail = NULL;
111 c->last_processed = NULL;
112 c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
113 c->controller = cppi;
114 c->is_rndis = 0;
115 c->freelist = NULL;
116
117 /* build the BD Free list for the channel */
118 for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
119 struct cppi_descriptor *bd;
120 dma_addr_t dma;
121
122 bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
123 bd->dma = dma;
124 cppi_bd_free(c, bd);
125 }
126}
127
128static int cppi_channel_abort(struct dma_channel *);
129
130static void cppi_pool_free(struct cppi_channel *c)
131{
132 struct cppi *cppi = c->controller;
133 struct cppi_descriptor *bd;
134
135 (void) cppi_channel_abort(&c->channel);
136 c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
137 c->controller = NULL;
138
139 /* free all its bds */
140 bd = c->last_processed;
141 do {
142 if (bd)
143 dma_pool_free(cppi->pool, bd, bd->dma);
144 bd = cppi_bd_alloc(c);
145 } while (bd);
146 c->last_processed = NULL;
147}
148
149static int __init cppi_controller_start(struct dma_controller *c)
150{
151 struct cppi *controller;
152 void __iomem *tibase;
153 int i;
154
155 controller = container_of(c, struct cppi, controller);
156
157 /* do whatever is necessary to start controller */
158 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
159 controller->tx[i].transmit = true;
160 controller->tx[i].index = i;
161 }
162 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
163 controller->rx[i].transmit = false;
164 controller->rx[i].index = i;
165 }
166
167 /* setup BD list on a per channel basis */
168 for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
169 cppi_pool_init(controller, controller->tx + i);
170 for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
171 cppi_pool_init(controller, controller->rx + i);
172
173 tibase = controller->tibase;
174 INIT_LIST_HEAD(&controller->tx_complete);
175
176 /* initialise tx/rx channel head pointers to zero */
177 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
178 struct cppi_channel *tx_ch = controller->tx + i;
179 struct cppi_tx_stateram __iomem *tx;
180
181 INIT_LIST_HEAD(&tx_ch->tx_complete);
182
183 tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
184 tx_ch->state_ram = tx;
185 cppi_reset_tx(tx, 0);
186 }
187 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
188 struct cppi_channel *rx_ch = controller->rx + i;
189 struct cppi_rx_stateram __iomem *rx;
190
191 INIT_LIST_HEAD(&rx_ch->tx_complete);
192
193 rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
194 rx_ch->state_ram = rx;
195 cppi_reset_rx(rx);
196 }
197
198 /* enable individual cppi channels */
199 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
200 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
201 musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
202 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
203
204 /* enable tx/rx CPPI control */
205 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
206 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
207
208 /* disable RNDIS mode, also host rx RNDIS autorequest */
209 musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
210 musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
211
212 return 0;
213}
214
215/*
216 * Stop DMA controller
217 *
218 * De-initialize the DMA controller as necessary.
219 */
220
221static int cppi_controller_stop(struct dma_controller *c)
222{
223 struct cppi *controller;
224 void __iomem *tibase;
225 int i;
226
227 controller = container_of(c, struct cppi, controller);
228
229 tibase = controller->tibase;
230 /* disable individual channel interrupts */
231 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
232 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
233 musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
234 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
235
236 DBG(1, "Tearing down RX and TX Channels\n");
237 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
238 /* FIXME restructure of txdma to use bds like rxdma */
239 controller->tx[i].last_processed = NULL;
240 cppi_pool_free(controller->tx + i);
241 }
242 for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
243 cppi_pool_free(controller->rx + i);
244
245 /* In the TX case proper teardown is supported: TX CPPI cannot be
246 * disabled until TX teardown completes, so we disable TX/RX CPPI
247 * only after the TX channels have been cleaned up.
248 */
249 /* disable tx/rx cppi */
250 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
251 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
252
253 return 0;
254}
255
256/* While a dma channel is allocated, we only want the core irqs active
257 * for fault reports; otherwise we'd get irqs that we don't care about.
258 * Except for TX irqs, where "dma done" != "fifo empty and reusable" ...
259 *
260 * NOTE: docs don't say either way, but irq masking **enables** irqs.
261 *
262 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
263 */
264static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
265{
266 musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
267}
268
269static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
270{
271 musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
272}
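/* Example: for endpoint 1, both helpers touch bit (1 + 8) = 9 of the
 * respective mask register; the core RX irq mask bit for endpoint N
 * thus lives at bit N + 8.
 */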
273
274
275/*
276 * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
277 * each transfer direction of a non-control endpoint, so allocating
278 * (and deallocating) is mostly a way to notice bad housekeeping on
279 * the software side. We assume the irqs are always active.
280 */
281static struct dma_channel *
282cppi_channel_allocate(struct dma_controller *c,
283 struct musb_hw_ep *ep, u8 transmit)
284{
285 struct cppi *controller;
286 u8 index;
287 struct cppi_channel *cppi_ch;
288 void __iomem *tibase;
289
290 controller = container_of(c, struct cppi, controller);
291 tibase = controller->tibase;
292
293 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
294 index = ep->epnum - 1;
295
296 /* return the corresponding CPPI Channel Handle, and
297 * probably disable the non-CPPI irq until we need it.
298 */
299 if (transmit) {
300 if (index >= ARRAY_SIZE(controller->tx)) {
301 DBG(1, "no %cX%d CPPI channel\n", 'T', index);
302 return NULL;
303 }
304 cppi_ch = controller->tx + index;
305 } else {
306 if (index >= ARRAY_SIZE(controller->rx)) {
307 DBG(1, "no %cX%d CPPI channel\n", 'R', index);
308 return NULL;
309 }
310 cppi_ch = controller->rx + index;
311 core_rxirq_disable(tibase, ep->epnum);
312 }
313
314 /* REVISIT make this an error later once the same driver code works
315 * with the other DMA engine too
316 */
317 if (cppi_ch->hw_ep)
318 DBG(1, "re-allocating DMA%d %cX channel %p\n",
319 index, transmit ? 'T' : 'R', cppi_ch);
320 cppi_ch->hw_ep = ep;
321 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
322
323 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
324 return &cppi_ch->channel;
325}
326
327/* Release a CPPI Channel. */
328static void cppi_channel_release(struct dma_channel *channel)
329{
330 struct cppi_channel *c;
331 void __iomem *tibase;
332
333 /* REVISIT: for paranoia, check state and abort if needed... */
334
335 c = container_of(channel, struct cppi_channel, channel);
336 tibase = c->controller->tibase;
337 if (!c->hw_ep)
338 DBG(1, "releasing idle DMA channel %p\n", c);
339 else if (!c->transmit)
340 core_rxirq_enable(tibase, c->index + 1);
341
342 /* for now, leave its cppi IRQ enabled (we won't trigger it) */
343 c->hw_ep = NULL;
344 channel->status = MUSB_DMA_STATUS_UNKNOWN;
345}
346
347/* Context: controller irqlocked */
348static void
349cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
350{
351 void __iomem *base = c->controller->mregs;
352 struct cppi_rx_stateram __iomem *rx = c->state_ram;
353
354 musb_ep_select(base, c->index + 1);
355
356 DBG(level, "RX DMA%d%s: %d left, csr %04x, "
357 "%08x H%08x S%08x C%08x, "
358 "B%08x L%08x %08x .. %08x"
359 "\n",
360 c->index, tag,
361 musb_readl(c->controller->tibase,
362 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
363 musb_readw(c->hw_ep->regs, MUSB_RXCSR),
364
365 musb_readl(&rx->rx_skipbytes, 0),
366 musb_readl(&rx->rx_head, 0),
367 musb_readl(&rx->rx_sop, 0),
368 musb_readl(&rx->rx_current, 0),
369
370 musb_readl(&rx->rx_buf_current, 0),
371 musb_readl(&rx->rx_len_len, 0),
372 musb_readl(&rx->rx_cnt_cnt, 0),
373 musb_readl(&rx->rx_complete, 0)
374 );
375}
376
377/* Context: controller irqlocked */
378static void
379cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
380{
381 void __iomem *base = c->controller->mregs;
382 struct cppi_tx_stateram __iomem *tx = c->state_ram;
383
384 musb_ep_select(base, c->index + 1);
385
386 DBG(level, "TX DMA%d%s: csr %04x, "
387 "H%08x S%08x C%08x %08x, "
388 "F%08x L%08x .. %08x"
389 "\n",
390 c->index, tag,
391 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
392
393 musb_readl(&tx->tx_head, 0),
394 musb_readl(&tx->tx_buf, 0),
395 musb_readl(&tx->tx_current, 0),
396 musb_readl(&tx->tx_buf_current, 0),
397
398 musb_readl(&tx->tx_info, 0),
399 musb_readl(&tx->tx_rem_len, 0),
400 /* dummy/unused word 6 */
401 musb_readl(&tx->tx_complete, 0)
402 );
403}
404
405/* Context: controller irqlocked */
406static inline void
407cppi_rndis_update(struct cppi_channel *c, int is_rx,
408 void __iomem *tibase, int is_rndis)
409{
410 /* we may need to change the rndis flag for this cppi channel */
411 if (c->is_rndis != is_rndis) {
412 u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
413 u32 temp = 1 << (c->index);
414
415 if (is_rx)
416 temp <<= 16;
417 if (is_rndis)
418 value |= temp;
419 else
420 value &= ~temp;
421 musb_writel(tibase, DAVINCI_RNDIS_REG, value);
422 c->is_rndis = is_rndis;
423 }
424}
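/* Example, per the layout implied above: enabling RNDIS mode for RX
 * channel 2 sets bit (2 + 16) = 18 of DAVINCI_RNDIS_REG, while TX
 * channel 2 would use bit 2; disabling clears the same bit.
 */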
425
426static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
427{
428 pr_debug("RXBD/%s %08x: "
429 "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
430 tag, bd->dma,
431 bd->hw_next, bd->hw_bufp, bd->hw_off_len,
432 bd->hw_options);
433}
434
435static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
436{
437#if MUSB_DEBUG > 0
438 struct cppi_descriptor *bd;
439
440 if (!_dbg_level(level))
441 return;
442 cppi_dump_rx(level, rx, tag);
443 if (rx->last_processed)
444 cppi_dump_rxbd("last", rx->last_processed);
445 for (bd = rx->head; bd; bd = bd->next)
446 cppi_dump_rxbd("active", bd);
447#endif
448}
449
450
451/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
452 * so we won't ever use it (see "CPPI RX Woes" below).
453 */
454static inline int cppi_autoreq_update(struct cppi_channel *rx,
455 void __iomem *tibase, int onepacket, unsigned n_bds)
456{
457 u32 val;
458
459#ifdef RNDIS_RX_IS_USABLE
460 u32 tmp;
461 /* assert(is_host_active(musb)) */
462
463 /* start from "AutoReq never" */
464 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
465 val = tmp & ~((0x3) << (rx->index * 2));
466
467 /* HCD arranged reqpkt for packet #1. we arrange int
468 * for all but the last one, maybe in two segments.
469 */
470 if (!onepacket) {
471#if 0
472 /* use two segments, autoreq "all" then the last "never" */
473 val |= ((0x3) << (rx->index * 2));
474 n_bds--;
475#else
476 /* one segment, autoreq "all-but-last" */
477 val |= ((0x1) << (rx->index * 2));
478#endif
479 }
480
481 if (val != tmp) {
482 int n = 100;
483
484 /* make sure that autoreq is updated before continuing */
485 musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
486 do {
487 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
488 if (tmp == val)
489 break;
490 cpu_relax();
491 } while (n-- > 0);
492 }
493#endif
494
495 /* REQPKT is turned off after each segment */
496 if (n_bds && rx->channel.actual_len) {
497 void __iomem *regs = rx->hw_ep->regs;
498
499 val = musb_readw(regs, MUSB_RXCSR);
500 if (!(val & MUSB_RXCSR_H_REQPKT)) {
501 val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
502 musb_writew(regs, MUSB_RXCSR, val);
503 /* flush write buffer */
504 val = musb_readw(regs, MUSB_RXCSR);
505 }
506 }
507 return n_bds;
508}
509
510
511/* Buffer enqueuing Logic:
512 *
513 * - RX builds new queues each time, to help handle routine "early
514 * termination" cases (faults, including errors and short reads)
515 * more correctly.
516 *
517 * - for now, TX reuses the same queue of BDs every time
518 *
519 * REVISIT long term, we want a normal dynamic model.
520 * ... the goal will be to append to the
521 * existing queue, processing completed "dma buffers" (segments) on the fly.
522 *
523 * Otherwise we force an IRQ latency between requests, which slows us a lot
524 * (especially in "transparent" dma). Unfortunately that model seems to be
525 * inherent in the DMA model from the Mentor code, except in the rare case
526 * of transfers big enough (~128+ KB) that we could append "middle" segments
527 * in the TX paths. (RX can't do this, see below.)
528 *
529 * That's true even in the CPPI-friendly iso case, where most urbs have
530 * several small segments provided in a group and where the "packet at a time"
531 * "transparent" DMA model is always correct, even on the RX side.
532 */
533
534/*
535 * CPPI TX:
536 * ========
537 * TX is a lot more reasonable than RX; it doesn't need to run in
538 * irq-per-packet mode very often. RNDIS mode seems to behave too
539 * (except how it handles the exactly-N-packets case). Building a
540 * txdma queue with multiple requests (urb or usb_request) looks
541 * like it would work ... but fault handling would need much testing.
542 *
543 * The main issue with TX mode RNDIS relates to transfer lengths that
544 * are an exact multiple of the packet length. It appears that there's
545 * a hiccup in that case (maybe the DMA completes before the ZLP gets
546 * written?) boiling down to not being able to rely on CPPI writing any
547 * terminating zero length packet before the next transfer is written.
548 * So that's punted to PIO; better yet, gadget drivers can avoid it.
549 *
550 * Plus, there's allegedly an undocumented constraint that rndis transfer
551 * length be a multiple of 64 bytes ... but the chip doesn't act that
552 * way, and we really don't _want_ that behavior anyway.
553 *
554 * On TX, "transparent" mode works ... although experiments have shown
555 * problems trying to use the SOP/EOP bits in different USB packets.
556 *
557 * REVISIT try to handle terminating zero length packets using CPPI
558 * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
559 * links avoid that issue by forcing them to avoid zlps.)
560 */
561static void
562cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
563{
564 unsigned maxpacket = tx->maxpacket;
565 dma_addr_t addr = tx->buf_dma + tx->offset;
566 size_t length = tx->buf_len - tx->offset;
567 struct cppi_descriptor *bd;
568 unsigned n_bds;
569 unsigned i;
570 struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
571 int rndis;
572
573 /* TX can use the CPPI "rndis" mode, where we can probably fit this
574 * transfer in one BD and one IRQ. The only time we would NOT want
575 * to use it is when hardware constraints prevent it, or if we'd
576 * trigger the "send a ZLP?" confusion.
577 */
578 rndis = (maxpacket & 0x3f) == 0
579 && length < 0xffff
580 && (length % maxpacket) != 0;
581
582 if (rndis) {
583 maxpacket = length;
584 n_bds = 1;
585 } else {
586 n_bds = length / maxpacket;
587 if (!length || (length % maxpacket))
588 n_bds++;
589 n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
590 length = min(n_bds * maxpacket, length);
591 }
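	/* Worked example (hypothetical transfer): maxpacket = 512 and
	 * length = 1000 passes all three RNDIS tests above (512 is a
	 * multiple of 64, 1000 < 0xffff, 1000 % 512 != 0), so one BD
	 * moves the whole transfer; length = 1024 fails the last test
	 * (the exactly-N-packets ZLP hazard), so the transparent path
	 * builds n_bds = 2 full-packet BDs instead.
	 */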
592
593 DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
594 tx->index,
595 maxpacket,
596 rndis ? "rndis" : "transparent",
597 n_bds,
598 addr, length);
599
600 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
601
602 /* assuming here that channel_program is called during
603 * transfer initiation ... current code maintains state
604 * for one outstanding request only (no queues, not even
605 * the implicit ones of an iso urb).
606 */
607
608 bd = tx->freelist;
609 tx->head = bd;
610 tx->last_processed = NULL;
611
612 /* FIXME use BD pool like RX side does, and just queue
613 * the minimum number for this request.
614 */
615
616 /* Prepare queue of BDs first, then hand it to hardware.
617 * All BDs except maybe the last should be of full packet
618 * size; for RNDIS there _is_ only that last packet.
619 */
620 for (i = 0; i < n_bds; ) {
621 if (++i < n_bds && bd->next)
622 bd->hw_next = bd->next->dma;
623 else
624 bd->hw_next = 0;
625
626 bd->hw_bufp = tx->buf_dma + tx->offset;
627
628 /* FIXME set EOP only on the last packet,
629 * SOP only on the first ... avoid IRQs
630 */
631 if ((tx->offset + maxpacket) <= tx->buf_len) {
632 tx->offset += maxpacket;
633 bd->hw_off_len = maxpacket;
634 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
635 | CPPI_OWN_SET | maxpacket;
636 } else {
637 /* only this one may be a partial USB Packet */
638 u32 partial_len;
639
640 partial_len = tx->buf_len - tx->offset;
641 tx->offset = tx->buf_len;
642 bd->hw_off_len = partial_len;
643
644 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
645 | CPPI_OWN_SET | partial_len;
646 if (partial_len == 0)
647 bd->hw_options |= CPPI_ZERO_SET;
648 }
649
650 DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
651 bd, bd->hw_next, bd->hw_bufp,
652 bd->hw_off_len, bd->hw_options);
653
654 /* update the last BD enqueued to the list */
655 tx->tail = bd;
656 bd = bd->next;
657 }
658
659 /* BDs live in DMA-coherent memory, but writes might be pending */
660 cpu_drain_writebuffer();
661
662 /* Write to the HeadPtr in state RAM to trigger */
663 musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
664
665 cppi_dump_tx(5, tx, "/S");
666}
667
668/*
669 * CPPI RX Woes:
670 * =============
671 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
672 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
673 * (Full speed transfers have similar scenarios.)
674 *
675 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
676 * and the next packet goes into a buffer that's queued later; while (b) fills
677 * the buffer with 1024 bytes. How to do that with CPPI?
678 *
679 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
680 * (b) loses **BADLY** because nothing (!) happens when that second packet
681 * fills the buffer, much less when a third one arrives. (Which makes this
682 * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
683 * is optional, and it's fine if peripherals -- not hosts! -- pad messages
684 * out to end-of-buffer. Standard PCI host controller DMA descriptors
685 * implement that mode by default ... which is no accident.)
686 *
687 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
688 * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
689 * ignores SOP/EOP markings and processes both of those BDs; so both packets
690 * are loaded into the buffer (with a 212 byte gap between them), and the next
691 * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
692 * are intended as outputs for RX queues, not inputs...)
693 *
694 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
695 * reliably make both cases work, with software handling both cases correctly
696 * and at the significant penalty of needing an IRQ per packet. (The lack of
697 * I/O overlap can be slightly ameliorated by enabling double buffering.)
698 *
699 * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
700 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
701 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
702 * with guaranteed driver level fault recovery and scrubbing out what's left
703 * of that garbaged datastream.
704 *
705 * But there seems to be no way to identify the cases where CPPI RNDIS mode
706 * is appropriate -- which do NOT include RNDIS host drivers, but do include
707 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
708 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
709 * that applies best on the peripheral side (and which could fail rudely).
710 *
711 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
712 * cases other than mass storage class. Otherwise we're correct but slow,
713 * since CPPI penalizes our need for a "true RNDIS" default mode.
714 */
715
716
717/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
718 *
719 * IFF
720 * (a) peripheral mode ... since rndis peripherals could pad their
721 * writes to hosts, causing i/o failure; or we'd have to cope with
722 * a largely unknowable variety of host side protocol variants
723 * (b) and short reads are NOT errors ... since full reads would
724 * cause those same i/o failures
725 * (c) and read length is
726 * - less than 64KB (max per cppi descriptor)
727 * - not a multiple of 4096 (g_zero default, full reads typical)
728 * - N (>1) packets long, ditto (full reads not EXPECTED)
729 * THEN
730 * try rx rndis mode
731 *
732 * Cost of heuristic failing: RXDMA wedges at the end of transfers that
733 * fill out the whole buffer. Buggy host side usb network drivers could
734 * trigger that, but "in the field" such bugs seem to be all but unknown.
735 *
736 * So this module parameter lets the heuristic be disabled. When using
737 * gadgetfs, the heuristic will probably need to be disabled.
738 */
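/* Worked example of the heuristic (hypothetical peripheral-side read):
 * with maxpacket = 512, a request of length = 1536 is more than one
 * packet, fits in 16 bits, is not a multiple of 4096, and is an exact
 * multiple of maxpacket -- so RX RNDIS mode is attempted, costing one
 * BD and one IRQ instead of three of each.
 */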
739static int cppi_rx_rndis = 1;
740
741module_param(cppi_rx_rndis, bool, 0);
742MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
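/* Usage sketch (the module name is an assumption -- match it to the
 * actual musb build): loading with
 *	modprobe musb_hdrc cppi_rx_rndis=0
 * disables the heuristic, e.g. for gadgetfs-based peripherals.
 */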
743
744
745/**
746 * cppi_next_rx_segment - dma read for the next chunk of a buffer
747 * @musb: the controller
748 * @rx: dma channel
749 * @onepacket: true unless caller treats short reads as errors, and
750 * performs fault recovery above usbcore.
751 * Context: controller irqlocked
752 *
753 * See above notes about why we can't use multi-BD RX queues except in
754 * rare cases (mass storage class), and can never use the hardware "rndis"
755 * mode (since it's not a "true" RNDIS mode) with complete safety.
756 *
757 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
758 * code to recover from corrupted datastreams after each short transfer.
759 */
760static void
761cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
762{
763 unsigned maxpacket = rx->maxpacket;
764 dma_addr_t addr = rx->buf_dma + rx->offset;
765 size_t length = rx->buf_len - rx->offset;
766 struct cppi_descriptor *bd, *tail;
767 unsigned n_bds;
768 unsigned i;
769 void __iomem *tibase = musb->ctrl_base;
770 int is_rndis = 0;
771 struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
772
773 if (onepacket) {
774 /* almost every USB driver, host or peripheral side */
775 n_bds = 1;
776
777 /* maybe apply the heuristic above */
778 if (cppi_rx_rndis
779 && is_peripheral_active(musb)
780 && length > maxpacket
781 && (length & ~0xffff) == 0
782 && (length & 0x0fff) != 0
783 && (length & (maxpacket - 1)) == 0) {
784 maxpacket = length;
785 is_rndis = 1;
786 }
787 } else {
788 /* virtually nothing except mass storage class */
789 if (length > 0xffff) {
790 n_bds = 0xffff / maxpacket;
791 length = n_bds * maxpacket;
792 } else {
793 n_bds = length / maxpacket;
794 if (length % maxpacket)
795 n_bds++;
796 }
797 if (n_bds == 1)
798 onepacket = 1;
799 else
800 n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
801 }
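	/* Worked example (mass-storage style read): maxpacket = 512 and
	 * length = 100000 exceeds 0xffff, so this segment is clamped to
	 * n_bds = 0xffff / 512 = 127 BDs covering 127 * 512 = 65024
	 * bytes; the remainder is queued as a later segment once this
	 * one completes.
	 */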
802
803 /* In host mode, autorequest logic can generate some IN tokens; it's
804 * tricky since we can't leave REQPKT set in RXCSR after the transfer
805 * finishes. So: multipacket transfers involve two or more segments.
806 * And always at least two IRQs ... RNDIS mode is not an option.
807 */
808 if (is_host_active(musb))
809 n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
810
811 cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
812
813 length = min(n_bds * maxpacket, length);
814
815 DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
816 "dma 0x%x len %u %u/%u\n",
817 rx->index, maxpacket,
818 onepacket
819 ? (is_rndis ? "rndis" : "onepacket")
820 : "multipacket",
821 n_bds,
822 musb_readl(tibase,
823 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
824 & 0xffff,
825 addr, length, rx->channel.actual_len, rx->buf_len);
826
827 /* only queue one segment at a time, since the hardware prevents
828 * correct queue shutdown after unexpected short packets
829 */
830 bd = cppi_bd_alloc(rx);
831 rx->head = bd;
832
833 /* Build BDs for all packets in this segment */
834 for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
835 u32 bd_len;
836
837 if (i) {
838 bd = cppi_bd_alloc(rx);
839 if (!bd)
840 break;
841 tail->next = bd;
842 tail->hw_next = bd->dma;
843 }
844 bd->hw_next = 0;
845
846 /* all but the last packet will be maxpacket size */
847 if (maxpacket < length)
848 bd_len = maxpacket;
849 else
850 bd_len = length;
851
852 bd->hw_bufp = addr;
853 addr += bd_len;
854 rx->offset += bd_len;
855
856 bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
857 bd->buflen = bd_len;
858
859 bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
860 length -= bd_len;
861 }
862
863 /* we always expect at least one reusable BD! */
864 if (!tail) {
865 WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
866 return;
867 } else if (i < n_bds)
868 WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
869
870 tail->next = NULL;
871 tail->hw_next = 0;
872
873 bd = rx->head;
874 rx->tail = tail;
875
876 /* short reads and other faults should terminate this entire
877 * dma segment. we want one "dma packet" per dma segment, not
878 * one per USB packet, terminating the whole queue at once...
879 * NOTE that current hardware seems to ignore SOP and EOP.
880 */
881 bd->hw_options |= CPPI_SOP_SET;
882 tail->hw_options |= CPPI_EOP_SET;
883
884 if (debug >= 5) {
885 struct cppi_descriptor *d;
886
887 for (d = rx->head; d; d = d->next)
888 cppi_dump_rxbd("S", d);
889 }
890
891 /* in case the preceding transfer left some state... */
892 tail = rx->last_processed;
893 if (tail) {
894 tail->next = bd;
895 tail->hw_next = bd->dma;
896 }
897
898 core_rxirq_enable(tibase, rx->index + 1);
899
900 /* BDs live in DMA-coherent memory, but writes might be pending */
901 cpu_drain_writebuffer();
902
903 /* REVISIT specs say to write this AFTER the BUFCNT register
904 * below ... but that loses badly.
905 */
906 musb_writel(&rx_ram->rx_head, 0, bd->dma);
907
908 /* bufferCount must be at least 3, and should go to zero on
909 * completion ... unless it underflows, sticks at two, or keeps
910 * growing ... grr.
911 */
911 */
912 i = musb_readl(tibase,
913 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
914 & 0xffff;
915
916 if (!i)
917 musb_writel(tibase,
918 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
919 n_bds + 2);
920 else if (n_bds > (i - 3))
921 musb_writel(tibase,
922 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
923 n_bds - (i - 3));
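	/* Worked example (hypothetical counts; assumes writes to BUFCNT
	 * add to the current count, as the top-up logic above implies):
	 * with n_bds = 8 and a current count of i = 4, the write above
	 * adds 8 - (4 - 3) = 7, restoring the n_bds + 2 headroom that is
	 * checked just below.
	 */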
924
925 i = musb_readl(tibase,
926 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
927 & 0xffff;
928 if (i < (2 + n_bds)) {
929 DBG(2, "bufcnt%d underrun - %d (for %d)\n",
930 rx->index, i, n_bds);
931 musb_writel(tibase,
932 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
933 n_bds + 2);
934 }
935
936 cppi_dump_rx(4, rx, "/S");
937}
938
939/**
940 * cppi_channel_program - program channel for data transfer
941 * @ch: the channel
942 * @maxpacket: max packet size
943 * @mode: For RX, 1 unless the usb protocol driver promised to treat
944 * all short reads as errors and kick in high level fault recovery.
945 * For TX, ignored because of RNDIS mode races/glitches.
946 * @dma_addr: dma address of buffer
947 * @len: length of buffer
948 * Context: controller irqlocked
949 */
950static int cppi_channel_program(struct dma_channel *ch,
951 u16 maxpacket, u8 mode,
952 dma_addr_t dma_addr, u32 len)
953{
954 struct cppi_channel *cppi_ch;
955 struct cppi *controller;
956 struct musb *musb;
957
958 cppi_ch = container_of(ch, struct cppi_channel, channel);
959 controller = cppi_ch->controller;
960 musb = controller->musb;
961
962 switch (ch->status) {
963 case MUSB_DMA_STATUS_BUS_ABORT:
964 case MUSB_DMA_STATUS_CORE_ABORT:
965 /* fault irq handler should have handled cleanup */
966 WARNING("%cX DMA%d not cleaned up after abort!\n",
967 cppi_ch->transmit ? 'T' : 'R',
968 cppi_ch->index);
969 /* WARN_ON(1); */
970 break;
971 case MUSB_DMA_STATUS_BUSY:
972 WARNING("program active channel? %cX DMA%d\n",
973 cppi_ch->transmit ? 'T' : 'R',
974 cppi_ch->index);
975 /* WARN_ON(1); */
976 break;
977 case MUSB_DMA_STATUS_UNKNOWN:
978 DBG(1, "%cX DMA%d not allocated!\n",
979 cppi_ch->transmit ? 'T' : 'R',
980 cppi_ch->index);
981 /* FALLTHROUGH */
982 case MUSB_DMA_STATUS_FREE:
983 break;
984 }
985
986 ch->status = MUSB_DMA_STATUS_BUSY;
987
988 /* set transfer parameters, then queue up its first segment */
989 cppi_ch->buf_dma = dma_addr;
990 cppi_ch->offset = 0;
991 cppi_ch->maxpacket = maxpacket;
992 cppi_ch->buf_len = len;
993
994 /* TX channel? or RX? */
995 if (cppi_ch->transmit)
996 cppi_next_tx_segment(musb, cppi_ch);
997 else
998 cppi_next_rx_segment(musb, cppi_ch, mode);
999
1000 return true;
1001}
1002
1003static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1004{
1005 struct cppi_channel *rx = &cppi->rx[ch];
1006 struct cppi_rx_stateram __iomem *state = rx->state_ram;
1007 struct cppi_descriptor *bd;
1008 struct cppi_descriptor *last = rx->last_processed;
1009 bool completed = false;
1010 bool acked = false;
1011 int i;
1012 dma_addr_t safe2ack;
1013 void __iomem *regs = rx->hw_ep->regs;
1014
1015 cppi_dump_rx(6, rx, "/K");
1016
1017 bd = last ? last->next : rx->head;
1018 if (!bd)
1019 return false;
1020
1021 /* run through all completed BDs */
1022 for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
1023 (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
1024 i++, bd = bd->next) {
1025 u16 len;
1026
1027 /* catch latest BD writes from CPPI */
1028 rmb();
1029 if (!completed && (bd->hw_options & CPPI_OWN_SET))
1030 break;
1031
1032 DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
1033 "off.len %08x opt.len %08x (%d)\n",
1034 bd->dma, bd->hw_next, bd->hw_bufp,
1035 bd->hw_off_len, bd->hw_options,
1036 rx->channel.actual_len);
1037
1038 /* actual packet received length */
1039 if ((bd->hw_options & CPPI_SOP_SET) && !completed)
1040 len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
1041 else
1042 len = 0;
1043
1044 if (bd->hw_options & CPPI_EOQ_MASK)
1045 completed = true;
1046
1047 if (!completed && len < bd->buflen) {
1048 /* NOTE: when we get a short packet, RXCSR_H_REQPKT
1049 * must have been cleared, and no more DMA packets may
1050 * be active in the queue... TI docs didn't say, but
1051 * CPPI ignores those BDs even though OWN is still set.
1052 */
1053 completed = true;
1054 DBG(3, "rx short %d/%d (%d)\n",
1055 len, bd->buflen,
1056 rx->channel.actual_len);
1057 }
1058
1059 /* If we got here, we expect to ack at least one BD; meanwhile
1060 * CPPI may be completing other BDs while we scan this list...
1061 *
1062 * RACE: we can notice OWN cleared before CPPI raises the
1063 * matching irq by writing that BD as the completion pointer.
1064 * In such cases, stop scanning and wait for the irq, avoiding
1065 * lost acks and states where BD ownership is unclear.
1066 */
1067 if (bd->dma == safe2ack) {
1068 musb_writel(&state->rx_complete, 0, safe2ack);
1069 safe2ack = musb_readl(&state->rx_complete, 0);
1070 acked = true;
1071 if (bd->dma == safe2ack)
1072 safe2ack = 0;
1073 }
1074
1075 rx->channel.actual_len += len;
1076
1077 cppi_bd_free(rx, last);
1078 last = bd;
1079
1080 /* stop scanning on end-of-segment */
1081 if (bd->hw_next == 0)
1082 completed = true;
1083 }
1084 rx->last_processed = last;
1085
1086 /* dma abort, lost ack, or ... */
1087 if (!acked && last) {
1088 int csr;
1089
1090 if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
1091 musb_writel(&state->rx_complete, 0, safe2ack);
1092 if (safe2ack == 0) {
1093 cppi_bd_free(rx, last);
1094 rx->last_processed = NULL;
1095
1096 /* if we land here on the host side, H_REQPKT will
1097 * be clear and we need to restart the queue...
1098 */
1099 WARN_ON(rx->head);
1100 }
1101 musb_ep_select(cppi->mregs, rx->index + 1);
1102 csr = musb_readw(regs, MUSB_RXCSR);
1103 if (csr & MUSB_RXCSR_DMAENAB) {
1104 DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
1105 rx->index,
1106 rx->head, rx->tail,
1107 rx->last_processed
1108 ? rx->last_processed->dma
1109 : 0,
1110 completed ? ", completed" : "",
1111 csr);
1112 cppi_dump_rxq(4, "/what?", rx);
1113 }
1114 }
1115 if (!completed) {
1116 int csr;
1117
1118 rx->head = bd;
1119
1120 /* REVISIT seems like "autoreq all but EOP" doesn't...
1121 * setting it here "should" be racy, but seems to work
1122 */
1123 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
1124 if (is_host_active(cppi->musb)
1125 && bd
1126 && !(csr & MUSB_RXCSR_H_REQPKT)) {
1127 csr |= MUSB_RXCSR_H_REQPKT;
1128 musb_writew(regs, MUSB_RXCSR,
1129 MUSB_RXCSR_H_WZC_BITS | csr);
1130 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
1131 }
1132 } else {
1133 rx->head = NULL;
1134 rx->tail = NULL;
1135 }
1136
1137 cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
1138 return completed;
1139}
1140
1141void cppi_completion(struct musb *musb, u32 rx, u32 tx)
1142{
1143 void __iomem *tibase;
1144 int i, index;
1145 struct cppi *cppi;
1146 struct musb_hw_ep *hw_ep = NULL;
1147
1148 cppi = container_of(musb->dma_controller, struct cppi, controller);
1149
1150 tibase = musb->ctrl_base;
1151
1152 /* process TX channels */
1153 for (index = 0; tx; tx = tx >> 1, index++) {
1154 struct cppi_channel *tx_ch;
1155 struct cppi_tx_stateram __iomem *tx_ram;
1156 bool completed = false;
1157 struct cppi_descriptor *bd;
1158
1159 if (!(tx & 1))
1160 continue;
1161
1162 tx_ch = cppi->tx + index;
1163 tx_ram = tx_ch->state_ram;
1164
1165 /* FIXME need a cppi_tx_scan() routine, which
1166 * can also be called from abort code
1167 */
1168
1169 cppi_dump_tx(5, tx_ch, "/E");
1170
1171 bd = tx_ch->head;
1172
1173 if (NULL == bd) {
1174 DBG(1, "null BD\n");
1175 continue;
1176 }
1177
1178 /* run through all completed BDs */
1179 for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
1180 i++, bd = bd->next) {
1181 u16 len;
1182
1183 /* catch latest BD writes from CPPI */
1184 rmb();
1185 if (bd->hw_options & CPPI_OWN_SET)
1186 break;
1187
1188 DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
1189 bd, bd->hw_next, bd->hw_bufp,
1190 bd->hw_off_len, bd->hw_options);
1191
1192 len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
1193 tx_ch->channel.actual_len += len;
1194
1195 tx_ch->last_processed = bd;
1196
1197 /* write completion register to acknowledge
1198 * processing of completed BDs, and possibly
1199 * release the IRQ; EOQ might not be set ...
1200 *
1201 * REVISIT use the same ack strategy as rx
1202 *
1203 * REVISIT have observed bit 18 set; huh??
1204 */
1205 /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
1206 musb_writel(&tx_ram->tx_complete, 0, bd->dma);
1207
1208 /* stop scanning on end-of-segment */
1209 if (bd->hw_next == 0)
1210 completed = true;
1211 }
1212
1213 /* on end of segment, maybe go to next one */
1214 if (completed) {
1215 /* cppi_dump_tx(4, tx_ch, "/complete"); */
1216
1217 /* transfer more, or report completion */
1218 if (tx_ch->offset >= tx_ch->buf_len) {
1219 tx_ch->head = NULL;
1220 tx_ch->tail = NULL;
1221 tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
1222
1223 hw_ep = tx_ch->hw_ep;
1224
1225 /* Peripheral role never repurposes the
1226 * endpoint, so immediate completion is
1227 * safe. Host role waits for the fifo
1228 * to empty (TXPKTRDY irq) before going
1229 * to the next queued bulk transfer.
1230 */
1231 if (is_host_active(cppi->musb)) {
1232#if 0
1233 /* WORKAROUND because we may
1234 * not always get TXPKTRDY ...
1235 */
1236 int csr;
1237
1238 csr = musb_readw(hw_ep->regs,
1239 MUSB_TXCSR);
1240 if (csr & MUSB_TXCSR_TXPKTRDY)
1241#endif
1242 completed = false;
1243 }
1244 if (completed)
1245 musb_dma_completion(musb, index + 1, 1);
1246
1247 } else {
1248 /* Bigger transfer than we could fit in
1249 * that first batch of descriptors...
1250 */
1251 cppi_next_tx_segment(musb, tx_ch);
1252 }
1253 } else
1254 tx_ch->head = bd;
1255 }
1256
1257 /* Start processing the RX block */
1258 for (index = 0; rx; rx = rx >> 1, index++) {
1259
1260 if (rx & 1) {
1261 struct cppi_channel *rx_ch;
1262
1263 rx_ch = cppi->rx + index;
1264
1265 /* let incomplete dma segments finish */
1266 if (!cppi_rx_scan(cppi, index))
1267 continue;
1268
1269 /* start another dma segment if needed */
1270 if (rx_ch->channel.actual_len != rx_ch->buf_len
1271 && rx_ch->channel.actual_len
1272 == rx_ch->offset) {
1273 cppi_next_rx_segment(musb, rx_ch, 1);
1274 continue;
1275 }
1276
1277 /* all segments completed! */
1278 rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
1279
1280 hw_ep = rx_ch->hw_ep;
1281
1282 core_rxirq_disable(tibase, index + 1);
1283 musb_dma_completion(musb, index + 1, 0);
1284 }
1285 }
1286
1287 /* write to CPPI EOI register to re-enable interrupts */
1288 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
1289}
1290
1291/* Instantiate a software object representing a DMA controller. */
1292struct dma_controller *__init
1293dma_controller_create(struct musb *musb, void __iomem *mregs)
1294{
1295 struct cppi *controller;
1296
1297 controller = kzalloc(sizeof *controller, GFP_KERNEL);
1298 if (!controller)
1299 return NULL;
1300
1301 controller->mregs = mregs;
1302 controller->tibase = mregs - DAVINCI_BASE_OFFSET;
1303
1304 controller->musb = musb;
1305 controller->controller.start = cppi_controller_start;
1306 controller->controller.stop = cppi_controller_stop;
1307 controller->controller.channel_alloc = cppi_channel_allocate;
1308 controller->controller.channel_release = cppi_channel_release;
1309 controller->controller.channel_program = cppi_channel_program;
1310 controller->controller.channel_abort = cppi_channel_abort;
1311
1312 /* NOTE: allocating from on-chip SRAM would give the least
1313 * contention for memory access, if that ever matters here.
1314 */
1315
1316 /* setup BufferPool */
1317 controller->pool = dma_pool_create("cppi",
1318 controller->musb->controller,
1319 sizeof(struct cppi_descriptor),
1320 CPPI_DESCRIPTOR_ALIGN, 0);
1321 if (!controller->pool) {
1322 kfree(controller);
1323 return NULL;
1324 }
1325
1326 return &controller->controller;
1327}
1328
1329/*
1330 * Destroy a previously-instantiated DMA controller.
1331 */
1332void dma_controller_destroy(struct dma_controller *c)
1333{
1334 struct cppi *cppi;
1335
1336 cppi = container_of(c, struct cppi, controller);
1337
1338 /* assert: caller stopped the controller first */
1339 dma_pool_destroy(cppi->pool);
1340
1341 kfree(cppi);
1342}
1343
1344/*
1345 * Context: controller irqlocked, endpoint selected
1346 */
1347static int cppi_channel_abort(struct dma_channel *channel)
1348{
1349 struct cppi_channel *cppi_ch;
1350 struct cppi *controller;
1351 void __iomem *mbase;
1352 void __iomem *tibase;
1353 void __iomem *regs;
1354 u32 value;
1355 struct cppi_descriptor *queue;
1356
1357 cppi_ch = container_of(channel, struct cppi_channel, channel);
1358
1359 controller = cppi_ch->controller;
1360
1361 switch (channel->status) {
1362 case MUSB_DMA_STATUS_BUS_ABORT:
1363 case MUSB_DMA_STATUS_CORE_ABORT:
1364 /* from RX or TX fault irq handler */
1365 case MUSB_DMA_STATUS_BUSY:
1366 /* the hardware needs shutting down */
1367 regs = cppi_ch->hw_ep->regs;
1368 break;
1369 case MUSB_DMA_STATUS_UNKNOWN:
1370 case MUSB_DMA_STATUS_FREE:
1371 return 0;
1372 default:
1373 return -EINVAL;
1374 }
1375
1376 if (!cppi_ch->transmit && cppi_ch->head)
1377 cppi_dump_rxq(3, "/abort", cppi_ch);
1378
1379 mbase = controller->mregs;
1380 tibase = controller->tibase;
1381
1382 queue = cppi_ch->head;
1383 cppi_ch->head = NULL;
1384 cppi_ch->tail = NULL;
1385
1386 /* REVISIT should rely on caller having done this,
1387 * and caller should rely on us not changing it.
1388 * peripheral code is safe ... check host too.
1389 */
1390 musb_ep_select(mbase, cppi_ch->index + 1);
1391
1392 if (cppi_ch->transmit) {
1393 struct cppi_tx_stateram __iomem *tx_ram;
1394 int enabled;
1395
1396 /* mask interrupts raised to signal teardown complete. */
1397 enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
1398 & (1 << cppi_ch->index);
1399 if (enabled)
1400 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
1401 (1 << cppi_ch->index));
1402
1403 /* REVISIT put timeouts on these controller handshakes */
1404
1405 cppi_dump_tx(6, cppi_ch, " (teardown)");
1406
1407 /* teardown DMA engine then usb core */
1408 do {
1409 value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
1410 } while (!(value & CPPI_TEAR_READY));
1411 musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
1412
1413 tx_ram = cppi_ch->state_ram;
1414 do {
1415 value = musb_readl(&tx_ram->tx_complete, 0);
1416 } while (0xFFFFFFFC != value);
1417 musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
1418
1419 /* FIXME clean up the transfer state ... here?
1420 * the completion routine should get called with
1421 * an appropriate status code.
1422 */
1423
1424 value = musb_readw(regs, MUSB_TXCSR);
1425 value &= ~MUSB_TXCSR_DMAENAB;
1426 value |= MUSB_TXCSR_FLUSHFIFO;
1427 musb_writew(regs, MUSB_TXCSR, value);
1428 musb_writew(regs, MUSB_TXCSR, value);
1429
1430 /* re-enable interrupt */
1431 if (enabled)
1432 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
1433 (1 << cppi_ch->index));
1434
1435 /* While we scrub the TX state RAM, ensure that we clean
1436 * up any interrupt that's currently asserted:
1437 * 1. Write to completion Ptr value 0x1 (bit 0 set)
1438 *    (write back mode)
1439 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
1440 *    (compare mode)
1441 * Value written is compared (for bits 31:2) and when
1442 * equal, interrupt is deasserted.
1443 */
1444 cppi_reset_tx(tx_ram, 1);
1445 musb_writel(&tx_ram->tx_complete, 0, 0);
1446
1447 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1448
1449 /* REVISIT tx side _should_ clean up the same way
1450 * as the RX side ... this does no cleanup at all!
1451 */
1452
1453 } else /* RX */ {
1454 u16 csr;
1455
1456 /* NOTE: docs don't guarantee any of this works ... we
1457 * expect that if the usb core stops telling the cppi core
1458 * to pull more data from it, then it'll be safe to flush
1459 * current RX DMA state iff any pending fifo transfer is done.
1460 */
1461
1462 core_rxirq_disable(tibase, cppi_ch->index + 1);
1463
1464 /* for host, ensure ReqPkt is never set again */
1465 if (is_host_active(cppi_ch->controller->musb)) {
1466 value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
1467 value &= ~((0x3) << (cppi_ch->index * 2));
1468 musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
1469 }
1470
1471 csr = musb_readw(regs, MUSB_RXCSR);
1472
1473 /* for host, clear (just) ReqPkt at end of current packet(s) */
1474 if (is_host_active(cppi_ch->controller->musb)) {
1475 csr |= MUSB_RXCSR_H_WZC_BITS;
1476 csr &= ~MUSB_RXCSR_H_REQPKT;
1477 } else
1478 csr |= MUSB_RXCSR_P_WZC_BITS;
1479
1480 /* clear dma enable */
1481 csr &= ~(MUSB_RXCSR_DMAENAB);
1482 musb_writew(regs, MUSB_RXCSR, csr);
1483 csr = musb_readw(regs, MUSB_RXCSR);
1484
1485 /* Quiesce: wait for current dma to finish (if not cleanup).
1486 * We can't use bit zero of stateram->rx_sop, since that
1487 * refers to an entire "DMA packet" not just emptying the
1488 * current fifo. Most segments need multiple usb packets.
1489 */
1490 if (channel->status == MUSB_DMA_STATUS_BUSY)
1491 udelay(50);
1492
1493 /* scan the current list, reporting any data that was
1494 * transferred and acking any IRQ
1495 */
1496 cppi_rx_scan(controller, cppi_ch->index);
1497
1498 /* clobber the existing state once it's idle
1499 *
1500 * NOTE: arguably, we should also wait for all the other
1501 * RX channels to quiesce (how??) and then temporarily
1502 * disable RXCPPI_CTRL_REG ... but it seems that we can
1503 * rely on the controller restarting from state ram, with
1504 * only RXCPPI_BUFCNT state being bogus. BUFCNT will
1505 * correct itself after the next DMA transfer though.
1506 *
1507 * REVISIT does using rndis mode change that?
1508 */
1509 cppi_reset_rx(cppi_ch->state_ram);
1510
1511 /* next DMA request _should_ load cppi head ptr */
1512
1513 /* ... we don't "free" that list, only mutate it in place. */
1514 cppi_dump_rx(5, cppi_ch, " (done abort)");
1515
1516 /* clean up previously pending bds */
1517 cppi_bd_free(cppi_ch, cppi_ch->last_processed);
1518 cppi_ch->last_processed = NULL;
1519
1520 while (queue) {
1521 struct cppi_descriptor *tmp = queue->next;
1522
1523 cppi_bd_free(cppi_ch, queue);
1524 queue = tmp;
1525 }
1526 }
1527
1528 channel->status = MUSB_DMA_STATUS_FREE;
1529 cppi_ch->buf_dma = 0;
1530 cppi_ch->offset = 0;
1531 cppi_ch->buf_len = 0;
1532 cppi_ch->maxpacket = 0;
1533 return 0;
1534}
1535
1536/* TBD Queries:
1537 *
1538 * Power Management ... probably turn off cppi during suspend, restart;
1539 * check state ram? Clocking is presumably shared with usb core.
1540 */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 000000000000..fc5216b5d2c5
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
1/* Copyright (C) 2005-2006 by Texas Instruments */
2
3#ifndef _CPPI_DMA_H_
4#define _CPPI_DMA_H_
5
6#include <linux/slab.h>
7#include <linux/list.h>
8#include <linux/smp_lock.h>
9#include <linux/errno.h>
10#include <linux/dmapool.h>
11
12#include "musb_dma.h"
13#include "musb_core.h"
14
15
16/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
17 * would seem to be shared with the TUSB6020 (over VLYNQ).
18 */
19
20#include "davinci.h"
21
22
23/* CPPI RX/TX state RAM */
24
25struct cppi_tx_stateram {
26 u32 tx_head; /* "DMA packet" head descriptor */
27 u32 tx_buf;
28 u32 tx_current; /* current descriptor */
29 u32 tx_buf_current;
30 u32 tx_info; /* flags, remaining buflen */
31 u32 tx_rem_len;
32 u32 tx_dummy; /* unused */
33 u32 tx_complete;
34};
35
36struct cppi_rx_stateram {
37 u32 rx_skipbytes;
38 u32 rx_head;
39 u32 rx_sop; /* "DMA packet" head descriptor */
40 u32 rx_current; /* current descriptor */
41 u32 rx_buf_current;
42 u32 rx_len_len;
43 u32 rx_cnt_cnt;
44 u32 rx_complete;
45};
46
47/* hw_options bits in CPPI buffer descriptors */
48#define CPPI_SOP_SET ((u32)(1 << 31))
49#define CPPI_EOP_SET ((u32)(1 << 30))
50#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
51#define CPPI_EOQ_MASK ((u32)(1 << 28))
52#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
53#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
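/* Example hw_options encoding for one full 512-byte packet handed to
 * CPPI, as built by the TX enqueue path in cppi_dma.c:
 *
 *	bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | CPPI_OWN_SET | 512;
 */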
54
55#define CPPI_RECV_PKTLEN_MASK 0xFFFF
56#define CPPI_BUFFER_LEN_MASK 0xFFFF
57
58#define CPPI_TEAR_READY ((u32)(1 << 31))
59
60/* CPPI data structure definitions */
61
62#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
63
64struct cppi_descriptor {
65 /* hardware overlay */
66 u32 hw_next; /* next buffer descriptor Pointer */
67 u32 hw_bufp; /* i/o buffer pointer */
68 u32 hw_off_len; /* buffer_offset16, buffer_length16 */
69 u32 hw_options; /* flags: SOP, EOP etc*/
70
71 struct cppi_descriptor *next;
72 dma_addr_t dma; /* address of this descriptor */
73 u32 buflen; /* for RX: original buffer length */
74} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
75
76
77struct cppi;
78
79/* CPPI Channel Control structure */
80struct cppi_channel {
81 struct dma_channel channel;
82
83 /* back pointer to the DMA controller structure */
84 struct cppi *controller;
85
86 /* which direction of which endpoint? */
87 struct musb_hw_ep *hw_ep;
88 bool transmit;
89 u8 index;
90
91 /* DMA modes: RNDIS or "transparent" */
92 u8 is_rndis;
93
94 /* book keeping for current transfer request */
95 dma_addr_t buf_dma;
96 u32 buf_len;
97 u32 maxpacket;
98 u32 offset; /* dma requested */
99
100 void __iomem *state_ram; /* CPPI state */
101
102 struct cppi_descriptor *freelist;
103
104 /* BD management fields */
105 struct cppi_descriptor *head;
106 struct cppi_descriptor *tail;
107 struct cppi_descriptor *last_processed;
108
109 /* use tx_complete in host role to track endpoints waiting for
110 * FIFONOTEMPTY to clear.
111 */
112 struct list_head tx_complete;
113};
114
115/* CPPI DMA controller object */
116struct cppi {
117 struct dma_controller controller;
118 struct musb *musb;
119 void __iomem *mregs; /* Mentor regs */
120 void __iomem *tibase; /* TI/CPPI regs */
121
122 struct cppi_channel tx[MUSB_C_NUM_EPT - 1];
123 struct cppi_channel rx[MUSB_C_NUM_EPR - 1];
124
125 struct dma_pool *pool;
126
127 struct list_head tx_complete;
128};
129
130/* irq handling hook */
131extern void cppi_completion(struct musb *, u32 rx, u32 tx);
132
133#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 000000000000..75baf181a8cd
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * This file is part of the Inventra Controller Driver for Linux.
5 *
6 * The Inventra Controller Driver for Linux is free software; you
7 * can redistribute it and/or modify it under the terms of the GNU
8 * General Public License version 2 as published by the Free Software
9 * Foundation.
10 *
11 * The Inventra Controller Driver for Linux is distributed in
12 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
13 * without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 * License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with The Inventra Controller Driver for Linux ; if not,
19 * write to the Free Software Foundation, Inc., 59 Temple Place,
20 * Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/init.h>
29#include <linux/list.h>
30#include <linux/delay.h>
31#include <linux/clk.h>
32#include <linux/io.h>
33
34#include <asm/arch/hardware.h>
35#include <asm/arch/memory.h>
36#include <asm/arch/gpio.h>
37#include <asm/mach-types.h>
38
39#include "musb_core.h"
40
41#ifdef CONFIG_MACH_DAVINCI_EVM
42#include <asm/arch/i2c-client.h>
43#endif
44
45#include "davinci.h"
46#include "cppi_dma.h"
47
48
49/* REVISIT (PM) we should be able to keep the PHY in low power mode most
50 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
51 * and, when in host mode, autosuspending idle root ports... PHYPLLON
52 * (overriding SUSPENDM?) then likely needs to stay off.
53 */
54
55static inline void phy_on(void)
56{
57 /* start the on-chip PHY and its PLL */
58 __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
59 (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
60 while ((__raw_readl((void __force __iomem *)
61 IO_ADDRESS(USBPHY_CTL_PADDR))
62 & USBPHY_PHYCLKGD) == 0)
63 cpu_relax();
64}
65
66static inline void phy_off(void)
67{
68 /* powerdown the on-chip PHY and its oscillator */
69 __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
70 IO_ADDRESS(USBPHY_CTL_PADDR));
71}
72
73static int dma_off = 1;
74
75void musb_platform_enable(struct musb *musb)
76{
77 u32 tmp, old, val;
78
79 /* workaround: set up irqs through both register sets */
80 tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
81 << DAVINCI_USB_TXINT_SHIFT;
82 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
83 old = tmp;
84 tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
85 << DAVINCI_USB_RXINT_SHIFT;
86 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
87 tmp |= old;
88
89 val = ~MUSB_INTR_SOF;
90 tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
91 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
92
93 if (is_dma_capable() && !dma_off)
94 printk(KERN_WARNING "%s %s: dma not reactivated\n",
95 __FILE__, __func__);
96 else
97 dma_off = 0;
98
99 /* force a DRVVBUS irq so we can start polling for ID change */
100 if (is_otg_enabled(musb))
101 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
102 DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
103}
104
105/*
106 * Disable the HDRC and flush interrupts
107 */
108void musb_platform_disable(struct musb *musb)
109{
110 /* because we don't set CTRLR.UINT, "important" to:
111 * - not read/write INTRUSB/INTRUSBE
112 * - (except during initial setup, as workaround)
113 * - use INTSETR/INTCLRR instead
114 */
115 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
116 DAVINCI_USB_USBINT_MASK
117 | DAVINCI_USB_TXINT_MASK
118 | DAVINCI_USB_RXINT_MASK);
119 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
120 musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
121
122 if (is_dma_capable() && !dma_off)
123 WARNING("dma still active\n");
124}
125
126
127/* REVISIT it's not clear whether DaVinci can support full OTG. */
128
129static int vbus_state = -1;
130
131#ifdef CONFIG_USB_MUSB_HDRC_HCD
132#define portstate(stmt) stmt
133#else
134#define portstate(stmt)
135#endif
136
137
138/* VBUS SWITCHING IS BOARD-SPECIFIC */
139
140#ifdef CONFIG_MACH_DAVINCI_EVM
141#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
142
143/* I2C operations are always synchronous, and require a task context.
144 * With unloaded systems, using the shared workqueue seems to suffice
145 * to satisfy the 100msec A_WAIT_VRISE timeout...
146 */
147static void evm_deferred_drvvbus(struct work_struct *ignored)
148{
149 davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
150 vbus_state = !vbus_state;
151}
152static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
153
154#endif /* modified board */
155#endif /* EVM */
156
157static void davinci_source_power(struct musb *musb, int is_on, int immediate)
158{
159 if (is_on)
160 is_on = 1;
161
162 if (vbus_state == is_on)
163 return;
164 vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
165
166#ifdef CONFIG_MACH_DAVINCI_EVM
167 if (machine_is_davinci_evm()) {
168#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
169 /* modified EVM board switching VBUS with GPIO(6) not I2C
170 * NOTE: PINMUX0.RGB888 (bit23) must be clear
171 */
172 if (is_on)
173 gpio_set(GPIO(6));
174 else
175 gpio_clear(GPIO(6));
176 immediate = 1;
177#else
178 if (immediate)
179 davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
180 else
181 schedule_work(&evm_vbus_work);
182#endif
183 }
184#endif
185 if (immediate)
186 vbus_state = is_on;
187}
188
189static void davinci_set_vbus(struct musb *musb, int is_on)
190{
191 WARN_ON(is_on && is_peripheral_active(musb));
192 davinci_source_power(musb, is_on, 0);
193}
194
195
196#define POLL_SECONDS 2
197
198static struct timer_list otg_workaround;
199
200static void otg_timer(unsigned long _musb)
201{
202 struct musb *musb = (void *)_musb;
203 void __iomem *mregs = musb->mregs;
204 u8 devctl;
205 unsigned long flags;
206
207 /* We poll because DaVinci won't expose several OTG-critical
208 * status change events (from the transceiver) otherwise.
209 */
210 devctl = musb_readb(mregs, MUSB_DEVCTL);
211 DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
212
213 spin_lock_irqsave(&musb->lock, flags);
214 switch (musb->xceiv.state) {
215 case OTG_STATE_A_WAIT_VFALL:
216 /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
217 * seems to mis-handle session "start" otherwise (or in our
218 * case "recover"), in routine "VBUS was valid by the time
219 * VBUSERR got reported during enumeration" cases.
220 */
221 if (devctl & MUSB_DEVCTL_VBUS) {
222 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
223 break;
224 }
225 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
226 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
227 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
228 break;
229 case OTG_STATE_B_IDLE:
230 if (!is_peripheral_enabled(musb))
231 break;
232
233 /* There's no ID-changed IRQ, so we have no good way to tell
234 * when to switch to the A-Default state machine (by setting
235 * the DEVCTL.SESSION flag).
236 *
237 * Workaround: whenever we're in B_IDLE, try setting the
238 * session flag every few seconds. If it works, ID was
239 * grounded and we're now in the A-Default state machine.
240 *
241 * NOTE setting the session flag is _supposed_ to trigger
242 * SRP, but clearly it doesn't.
243 */
244 musb_writeb(mregs, MUSB_DEVCTL,
245 devctl | MUSB_DEVCTL_SESSION);
246 devctl = musb_readb(mregs, MUSB_DEVCTL);
247 if (devctl & MUSB_DEVCTL_BDEVICE)
248 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
249 else
250 musb->xceiv.state = OTG_STATE_A_IDLE;
251 break;
252 default:
253 break;
254 }
255 spin_unlock_irqrestore(&musb->lock, flags);
256}
257
258static irqreturn_t davinci_interrupt(int irq, void *__hci)
259{
260 unsigned long flags;
261 irqreturn_t retval = IRQ_NONE;
262 struct musb *musb = __hci;
263 void __iomem *tibase = musb->ctrl_base;
264 u32 tmp;
265
266 spin_lock_irqsave(&musb->lock, flags);
267
268 /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
269 * the Mentor registers (except for setup), use the TI ones and EOI.
270 *
271 * Docs describe irq "vector" registers associated with the CPPI and
272 * USB EOI registers. These hold a bitmask corresponding to the
273 * current IRQ, not an irq handler address. Would using those bits
274 * resolve some of the races observed in this dispatch code??
275 */
276
277 /* CPPI interrupts share the same IRQ line, but have their own
278 * mask, state, "vector", and EOI registers.
279 */
280 if (is_cppi_enabled()) {
281 u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
282 u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
283
284 if (cppi_tx || cppi_rx) {
285 DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
286 cppi_completion(musb, cppi_rx, cppi_tx);
287 retval = IRQ_HANDLED;
288 }
289 }
290
291 /* ack and handle non-CPPI interrupts */
292 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
293 musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
294 DBG(4, "IRQ %08x\n", tmp);
295
296 musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
297 >> DAVINCI_USB_RXINT_SHIFT;
298 musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
299 >> DAVINCI_USB_TXINT_SHIFT;
300 musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
301 >> DAVINCI_USB_USBINT_SHIFT;
302
303 /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
304 * DaVinci's missing ID change IRQ. We need an ID change IRQ to
305 * switch appropriately between halves of the OTG state machine.
306 * Managing DEVCTL.SESSION per Mentor docs requires we know its
307 * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
308 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
309 */
310 if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
311 int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
312 void __iomem *mregs = musb->mregs;
313 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
314		int err;
315
316		err = is_host_enabled(musb)
317				&& (musb->int_usb & MUSB_INTR_VBUSERROR);
318 if (err) {
319 /* The Mentor core doesn't debounce VBUS as needed
320 * to cope with device connect current spikes. This
321 * means it's not uncommon for bus-powered devices
322 * to get VBUS errors during enumeration.
323 *
324 * This is a workaround, but newer RTL from Mentor
325 * seems to allow a better one: "re"starting sessions
326 * without waiting (on EVM, a **long** time) for VBUS
327 * to stop registering in devctl.
328 */
329 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
330 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
331 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
332 WARNING("VBUS error workaround (delay coming)\n");
333 } else if (is_host_enabled(musb) && drvvbus) {
334 musb->is_active = 1;
335 MUSB_HST_MODE(musb);
336 musb->xceiv.default_a = 1;
337 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
338 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
339 del_timer(&otg_workaround);
340 } else {
341 musb->is_active = 0;
342 MUSB_DEV_MODE(musb);
343 musb->xceiv.default_a = 0;
344 musb->xceiv.state = OTG_STATE_B_IDLE;
345 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
346 }
347
348 /* NOTE: this must complete poweron within 100 msec */
349 davinci_source_power(musb, drvvbus, 0);
350 DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
351 drvvbus ? "on" : "off",
352 otg_state_string(musb),
353 err ? " ERROR" : "",
354 devctl);
355 retval = IRQ_HANDLED;
356 }
357
358 if (musb->int_tx || musb->int_rx || musb->int_usb)
359 retval |= musb_interrupt(musb);
360
361 /* irq stays asserted until EOI is written */
362 musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
363
364 /* poll for ID change */
365 if (is_otg_enabled(musb)
366 && musb->xceiv.state == OTG_STATE_B_IDLE)
367 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
368
369 spin_unlock_irqrestore(&musb->lock, flags);
370
371	/* REVISIT we sometimes get unhandled IRQs
372	 * (e.g. ep0); not clear why ...
373	 */
374 if (retval != IRQ_HANDLED)
375 DBG(5, "unhandled? %08x\n", tmp);
376 return IRQ_HANDLED;
377}
378
379int __init musb_platform_init(struct musb *musb)
380{
381 void __iomem *tibase = musb->ctrl_base;
382 u32 revision;
383
384 musb->mregs += DAVINCI_BASE_OFFSET;
385#if 0
386	/* REVISIT there's something odd about clocking; this
387	 * didn't appear to do the job ...
388	 */
389 musb->clock = clk_get(pDevice, "usb");
390 if (IS_ERR(musb->clock))
391 return PTR_ERR(musb->clock);
392
393 status = clk_enable(musb->clock);
394 if (status < 0)
395 return -ENODEV;
396#endif
397
398 /* returns zero if e.g. not clocked */
399 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
400 if (revision == 0)
401 return -ENODEV;
402
403 if (is_host_enabled(musb))
404 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
405
406 musb->board_set_vbus = davinci_set_vbus;
407 davinci_source_power(musb, 0, 1);
408
409 /* reset the controller */
410 musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
411
412 /* start the on-chip PHY and its PLL */
413 phy_on();
414
415 msleep(5);
416
417 /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
418 pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
419 revision, __raw_readl((void __force __iomem *)
420 IO_ADDRESS(USBPHY_CTL_PADDR)),
421 musb_readb(tibase, DAVINCI_USB_CTRL_REG));
422
423 musb->isr = davinci_interrupt;
424 return 0;
425}
426
427int musb_platform_exit(struct musb *musb)
428{
429 if (is_host_enabled(musb))
430 del_timer_sync(&otg_workaround);
431
432 davinci_source_power(musb, 0 /*off*/, 1);
433
434 /* delay, to avoid problems with module reload */
435 if (is_host_enabled(musb) && musb->xceiv.default_a) {
436 int maxdelay = 30;
437 u8 devctl, warn = 0;
438
439 /* if there's no peripheral connected, this can take a
440 * long time to fall, especially on EVM with huge C133.
441 */
442 do {
443 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
444 if (!(devctl & MUSB_DEVCTL_VBUS))
445 break;
446 if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
447 warn = devctl & MUSB_DEVCTL_VBUS;
448 DBG(1, "VBUS %d\n",
449 warn >> MUSB_DEVCTL_VBUS_SHIFT);
450 }
451 msleep(1000);
452 maxdelay--;
453 } while (maxdelay > 0);
454
455 /* in OTG mode, another host might be connected */
456 if (devctl & MUSB_DEVCTL_VBUS)
457 DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
458 }
459
460 phy_off();
461 return 0;
462}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 000000000000..7fb6238e270f
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * The Inventra Controller Driver for Linux is free software; you
5 * can redistribute it and/or modify it under the terms of the GNU
6 * General Public License version 2 as published by the Free Software
7 * Foundation.
8 */
9
10#ifndef __MUSB_HDRDF_H__
11#define __MUSB_HDRDF_H__
12
13/*
14 * DaVinci-specific definitions
15 */
16
17/* Integrated highspeed/otg PHY */
18#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
19#define USBPHY_PHYCLKGD (1 << 8)
20#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
21#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
22#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
23#define USBPHY_CLKO1SEL (1 << 3)
24#define USBPHY_OSCPDWN (1 << 2)
25#define USBPHY_PHYPDWN (1 << 0)
26
27/* For now include usb OTG module registers here */
28#define DAVINCI_USB_VERSION_REG 0x00
29#define DAVINCI_USB_CTRL_REG 0x04
30#define DAVINCI_USB_STAT_REG 0x08
31#define DAVINCI_RNDIS_REG 0x10
32#define DAVINCI_AUTOREQ_REG 0x14
33#define DAVINCI_USB_INT_SOURCE_REG 0x20
34#define DAVINCI_USB_INT_SET_REG 0x24
35#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
36#define DAVINCI_USB_INT_MASK_REG 0x2c
37#define DAVINCI_USB_INT_MASK_SET_REG 0x30
38#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
39#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
40#define DAVINCI_USB_EOI_REG 0x3c
41#define DAVINCI_USB_EOI_INTVEC 0x40
42
43/* BEGIN CPPI-generic (?) */
44
45/* CPPI related registers */
46#define DAVINCI_TXCPPI_CTRL_REG 0x80
47#define DAVINCI_TXCPPI_TEAR_REG 0x84
48#define DAVINCI_CPPI_EOI_REG 0x88
49#define DAVINCI_CPPI_INTVEC_REG 0x8c
50#define DAVINCI_TXCPPI_MASKED_REG 0x90
51#define DAVINCI_TXCPPI_RAW_REG 0x94
52#define DAVINCI_TXCPPI_INTENAB_REG 0x98
53#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
54
55#define DAVINCI_RXCPPI_CTRL_REG 0xC0
56#define DAVINCI_RXCPPI_MASKED_REG 0xD0
57#define DAVINCI_RXCPPI_RAW_REG 0xD4
58#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
59#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
60
61#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
62#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
63#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
64#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
65
66/* CPPI state RAM entries */
67#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
68
69#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
70 (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
71#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
72 (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
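/* Worked example of the layout these macros imply: CPPI channel 0
 * state RAM starts at 0x100 (TX) and 0x120 (RX); channel 1 lands at
 * 0x140 and 0x160.  Each TX/RX pair thus shares one 0x40-byte stride,
 * with RX offset by 0x20 within it.
 */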
73
74/* CPPI masks */
75#define DAVINCI_DMA_CTRL_ENABLE 1
76#define DAVINCI_DMA_CTRL_DISABLE 0
77
78#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
79#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
80
81/* END CPPI-generic (?) */
82
83#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
84#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
85
86#define DAVINCI_USB_USBINT_SHIFT 16
87#define DAVINCI_USB_TXINT_SHIFT 0
88#define DAVINCI_USB_RXINT_SHIFT 8
89
90#define DAVINCI_INTR_DRVVBUS 0x0100
91
92#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
93#define DAVINCI_USB_TXINT_MASK \
94 (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
95#define DAVINCI_USB_RXINT_MASK \
96 (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
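/* For reference, the masks above compose as TXINT = 0x0000001f and
 * RXINT = 0x00001e00, so the TX, RX, and USB fields occupy disjoint
 * bit ranges of the same interrupt source register.
 */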
97
98#define DAVINCI_BASE_OFFSET 0x400
99
100#endif /* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 000000000000..d68ec6daf335
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2261 @@
1/*
2 * MUSB OTG driver core code
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35/*
36 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
37 *
38 * This consists of a Host Controller Driver (HCD) and a peripheral
39 * controller driver implementing the "Gadget" API; OTG support is
40 * in the works. These are normal Linux-USB controller drivers which
41 * use IRQs and have no dedicated thread.
42 *
43 * This version of the driver has only been used with products from
44 * Texas Instruments. Those products integrate the Inventra logic
45 * with other DMA, IRQ, and bus modules, as well as other logic that
46 * needs to be reflected in this driver.
47 *
48 *
49 * NOTE: the original Mentor code here was pretty much a collection
50 * of mechanisms that don't seem to have been fully integrated/working
51 * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
52 * Key open issues include:
53 *
54 * - Lack of host-side transaction scheduling, for all transfer types.
55 * The hardware doesn't do it; instead, software must.
56 *
57 * This is not an issue for OTG devices that don't support external
58 * hubs, but for more "normal" USB hosts it's a user issue that the
59 * "multipoint" support doesn't scale in the expected ways. That
60 *	includes the DaVinci EVM in its common non-OTG mode.
61 *
62 * * Control and bulk use dedicated endpoints, and there's as
63 * yet no mechanism to either (a) reclaim the hardware when
64 * peripherals are NAKing, which gets complicated with bulk
65 * endpoints, or (b) use more than a single bulk endpoint in
66 * each direction.
67 *
68 * RESULT: one device may be perceived as blocking another one.
69 *
70 * * Interrupt and isochronous will dynamically allocate endpoint
71 * hardware, but (a) there's no record keeping for bandwidth;
72 * (b) in the common case that few endpoints are available, there
73 * is no mechanism to reuse endpoints to talk to multiple devices.
74 *
75 * RESULT: At one extreme, bandwidth can be overcommitted in
76 *	some hardware configurations, and no faults will be reported.
77 * At the other extreme, the bandwidth capabilities which do
78 * exist tend to be severely undercommitted. You can't yet hook
79 * up both a keyboard and a mouse to an external USB hub.
80 */
81
82/*
83 * This gets many kinds of configuration information:
84 * - Kconfig for everything user-configurable
85 * - <asm/arch/hdrc_cnf.h> for SOC or family details
86 * - platform_device for addressing, irq, and platform_data
87 *	- platform_data is mostly for board-specific information
88 *
89 * Most of the conditional compilation will (someday) vanish.
90 */
91
92#include <linux/module.h>
93#include <linux/kernel.h>
94#include <linux/sched.h>
95#include <linux/slab.h>
96#include <linux/init.h>
97#include <linux/list.h>
98#include <linux/kobject.h>
99#include <linux/platform_device.h>
100#include <linux/io.h>
101
102#ifdef CONFIG_ARM
103#include <asm/arch/hardware.h>
104#include <asm/arch/memory.h>
105#include <asm/mach-types.h>
106#endif
107
108#include "musb_core.h"
109
110
111#ifdef CONFIG_ARCH_DAVINCI
112#include "davinci.h"
113#endif
114
115
116
117#if MUSB_DEBUG > 0
118unsigned debug = MUSB_DEBUG;
119module_param(debug, uint, 0);
120MODULE_PARM_DESC(debug, "initial debug message level");
121
122#define MUSB_VERSION_SUFFIX "/dbg"
123#endif
124
125#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
126#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
127
128#define MUSB_VERSION_BASE "6.0"
129
130#ifndef MUSB_VERSION_SUFFIX
131#define MUSB_VERSION_SUFFIX ""
132#endif
133#define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX
134
135#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
136
137#define MUSB_DRIVER_NAME "musb_hdrc"
138const char musb_driver_name[] = MUSB_DRIVER_NAME;
139
140MODULE_DESCRIPTION(DRIVER_INFO);
141MODULE_AUTHOR(DRIVER_AUTHOR);
142MODULE_LICENSE("GPL");
143MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
144
145
146/*-------------------------------------------------------------------------*/
147
148static inline struct musb *dev_to_musb(struct device *dev)
149{
150#ifdef CONFIG_USB_MUSB_HDRC_HCD
151 /* usbcore insists dev->driver_data is a "struct hcd *" */
152 return hcd_to_musb(dev_get_drvdata(dev));
153#else
154 return dev_get_drvdata(dev);
155#endif
156}
157
158/*-------------------------------------------------------------------------*/
159
160#ifndef CONFIG_USB_TUSB6010
161/*
162 * Load an endpoint's FIFO
163 */
164void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
165{
166 void __iomem *fifo = hw_ep->fifo;
167
168 prefetch((u8 *)src);
169
170 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
171 'T', hw_ep->epnum, fifo, len, src);
172
173 /* we can't assume unaligned reads work */
174 if (likely((0x01 & (unsigned long) src) == 0)) {
175 u16 index = 0;
176
177 /* best case is 32bit-aligned source address */
178 if ((0x02 & (unsigned long) src) == 0) {
179 if (len >= 4) {
180 writesl(fifo, src + index, len >> 2);
181 index += len & ~0x03;
182 }
183 if (len & 0x02) {
184 musb_writew(fifo, 0, *(u16 *)&src[index]);
185 index += 2;
186 }
187 } else {
188 if (len >= 2) {
189 writesw(fifo, src + index, len >> 1);
190 index += len & ~0x01;
191 }
192 }
193 if (len & 0x01)
194 musb_writeb(fifo, 0, src[index]);
195 } else {
196 /* byte aligned */
197 writesb(fifo, src, len);
198 }
199}
200
201/*
202 * Unload an endpoint's FIFO
203 */
204void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
205{
206 void __iomem *fifo = hw_ep->fifo;
207
208 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
209 'R', hw_ep->epnum, fifo, len, dst);
210
211 /* we can't assume unaligned writes work */
212 if (likely((0x01 & (unsigned long) dst) == 0)) {
213 u16 index = 0;
214
215 /* best case is 32bit-aligned destination address */
216 if ((0x02 & (unsigned long) dst) == 0) {
217 if (len >= 4) {
218 readsl(fifo, dst, len >> 2);
219 index = len & ~0x03;
220 }
221 if (len & 0x02) {
222 *(u16 *)&dst[index] = musb_readw(fifo, 0);
223 index += 2;
224 }
225 } else {
226 if (len >= 2) {
227 readsw(fifo, dst, len >> 1);
228 index = len & ~0x01;
229 }
230 }
231 if (len & 0x01)
232 dst[index] = musb_readb(fifo, 0);
233 } else {
234 /* byte aligned */
235 readsb(fifo, dst, len);
236 }
237}
238
239#endif /* normal PIO */
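/* A worked example of the alignment strategy above: a 7-byte
 * musb_write_fifo() from a 32-bit-aligned buffer issues one 32-bit
 * FIFO write (bytes 0-3), one 16-bit write (bytes 4-5), and one byte
 * write (byte 6); with only 16-bit alignment it would instead issue
 * three 16-bit writes plus the trailing byte, and an odd source
 * address falls back to pure byte writes.
 */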
240
241
242/*-------------------------------------------------------------------------*/
243
244/* for high speed test mode; see USB 2.0 spec 7.1.20 */
245static const u8 musb_test_packet[53] = {
246 /* implicit SYNC then DATA0 to start */
247
248 /* JKJKJKJK x9 */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 /* JJKKJJKK x8 */
251 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
252 /* JJJJKKKK x8 */
253 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
254 /* JJJJJJJKKKKKKK x8 */
255 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
256 /* JJJJJJJK x8 */
257 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
258 /* JKKKKKKK x10, JK */
259 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
260
261 /* implicit CRC16 then EOP to end */
262};
263
264void musb_load_testpacket(struct musb *musb)
265{
266 void __iomem *regs = musb->endpoints[0].regs;
267
268 musb_ep_select(musb->mregs, 0);
269 musb_write_fifo(musb->control_ep,
270 sizeof(musb_test_packet), musb_test_packet);
271 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
272}
273
274/*-------------------------------------------------------------------------*/
275
276const char *otg_state_string(struct musb *musb)
277{
278 switch (musb->xceiv.state) {
279 case OTG_STATE_A_IDLE: return "a_idle";
280 case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
281 case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
282 case OTG_STATE_A_HOST: return "a_host";
283 case OTG_STATE_A_SUSPEND: return "a_suspend";
284 case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
285 case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
286 case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
287 case OTG_STATE_B_IDLE: return "b_idle";
288 case OTG_STATE_B_SRP_INIT: return "b_srp_init";
289 case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
290 case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
291 case OTG_STATE_B_HOST: return "b_host";
292 default: return "UNDEFINED";
293 }
294}
295
296#ifdef CONFIG_USB_MUSB_OTG
297
298/*
299 * See also USB_OTG_1-3.pdf 6.6.5 Timers
300 * REVISIT: Are the other timers done in the hardware?
301 */
302#define TB_ASE0_BRST 100 /* Min 3.125 ms */
303
304/*
305 * Handles OTG hnp timeouts, such as b_ase0_brst
306 */
307void musb_otg_timer_func(unsigned long data)
308{
309 struct musb *musb = (struct musb *)data;
310 unsigned long flags;
311
312 spin_lock_irqsave(&musb->lock, flags);
313 switch (musb->xceiv.state) {
314 case OTG_STATE_B_WAIT_ACON:
315 DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
316 musb_g_disconnect(musb);
317 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
318 musb->is_active = 0;
319 break;
320 case OTG_STATE_A_WAIT_BCON:
321 DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
322 musb_hnp_stop(musb);
323 break;
324 default:
325 DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
326 }
327 musb->ignore_disconnect = 0;
328 spin_unlock_irqrestore(&musb->lock, flags);
329}
330
331static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
332
333/*
334 * Stops the B-device HNP state. Caller must take care of locking.
335 */
336void musb_hnp_stop(struct musb *musb)
337{
338 struct usb_hcd *hcd = musb_to_hcd(musb);
339 void __iomem *mbase = musb->mregs;
340 u8 reg;
341
342 switch (musb->xceiv.state) {
343 case OTG_STATE_A_PERIPHERAL:
344 case OTG_STATE_A_WAIT_VFALL:
345 case OTG_STATE_A_WAIT_BCON:
346 DBG(1, "HNP: Switching back to A-host\n");
347 musb_g_disconnect(musb);
348 musb->xceiv.state = OTG_STATE_A_IDLE;
349 MUSB_HST_MODE(musb);
350 musb->is_active = 0;
351 break;
352 case OTG_STATE_B_HOST:
353 DBG(1, "HNP: Disabling HR\n");
354 hcd->self.is_b_host = 0;
355 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
356 MUSB_DEV_MODE(musb);
357 reg = musb_readb(mbase, MUSB_POWER);
358 reg |= MUSB_POWER_SUSPENDM;
359 musb_writeb(mbase, MUSB_POWER, reg);
360 /* REVISIT: Start SESSION_REQUEST here? */
361 break;
362 default:
363 DBG(1, "HNP: Stopping in unknown state %s\n",
364 otg_state_string(musb));
365 }
366
367 /*
368 * When returning to A state after HNP, avoid hub_port_rebounce(),
369	 * which causes occasional OPT A "Did not receive reset after connect"
370 * errors.
371 */
372 musb->port1_status &=
373 ~(1 << USB_PORT_FEAT_C_CONNECTION);
374}
375
376#endif
377
378/*
379 * Interrupt Service Routine to record USB "global" interrupts.
380 * Since these do not happen often and signify things of
381 * paramount importance, it seems OK to check them individually;
382 * the order of the tests is specified in the manual
383 *
384 * @param musb instance pointer
385 * @param int_usb register contents
386 * @param devctl
387 * @param power
388 */
389
390#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
391 | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
392 | MUSB_INTR_RESET)
393
394static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
395 u8 devctl, u8 power)
396{
397 irqreturn_t handled = IRQ_NONE;
398 void __iomem *mbase = musb->mregs;
399
400 DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
401 int_usb);
402
403 /* in host mode, the peripheral may issue remote wakeup.
404 * in peripheral mode, the host may resume the link.
405 * spurious RESUME irqs happen too, paired with SUSPEND.
406 */
407 if (int_usb & MUSB_INTR_RESUME) {
408 handled = IRQ_HANDLED;
409 DBG(3, "RESUME (%s)\n", otg_state_string(musb));
410
411 if (devctl & MUSB_DEVCTL_HM) {
412#ifdef CONFIG_USB_MUSB_HDRC_HCD
413 switch (musb->xceiv.state) {
414 case OTG_STATE_A_SUSPEND:
415 /* remote wakeup? later, GetPortStatus
416 * will stop RESUME signaling
417 */
418
419 if (power & MUSB_POWER_SUSPENDM) {
420 /* spurious */
421 musb->int_usb &= ~MUSB_INTR_SUSPEND;
422 DBG(2, "Spurious SUSPENDM\n");
423 break;
424 }
425
426 power &= ~MUSB_POWER_SUSPENDM;
427 musb_writeb(mbase, MUSB_POWER,
428 power | MUSB_POWER_RESUME);
429
430 musb->port1_status |=
431 (USB_PORT_STAT_C_SUSPEND << 16)
432 | MUSB_PORT_STAT_RESUME;
433 musb->rh_timer = jiffies
434 + msecs_to_jiffies(20);
435
436 musb->xceiv.state = OTG_STATE_A_HOST;
437 musb->is_active = 1;
438 usb_hcd_resume_root_hub(musb_to_hcd(musb));
439 break;
440 case OTG_STATE_B_WAIT_ACON:
441 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
442 musb->is_active = 1;
443 MUSB_DEV_MODE(musb);
444 break;
445 default:
446 WARNING("bogus %s RESUME (%s)\n",
447 "host",
448 otg_state_string(musb));
449 }
450#endif
451 } else {
452 switch (musb->xceiv.state) {
453#ifdef CONFIG_USB_MUSB_HDRC_HCD
454 case OTG_STATE_A_SUSPEND:
455 /* possibly DISCONNECT is upcoming */
456 musb->xceiv.state = OTG_STATE_A_HOST;
457 usb_hcd_resume_root_hub(musb_to_hcd(musb));
458 break;
459#endif
460#ifdef CONFIG_USB_GADGET_MUSB_HDRC
461 case OTG_STATE_B_WAIT_ACON:
462 case OTG_STATE_B_PERIPHERAL:
463 /* disconnect while suspended? we may
464 * not get a disconnect irq...
465 */
466 if ((devctl & MUSB_DEVCTL_VBUS)
467 != (3 << MUSB_DEVCTL_VBUS_SHIFT)
468 ) {
469 musb->int_usb |= MUSB_INTR_DISCONNECT;
470 musb->int_usb &= ~MUSB_INTR_SUSPEND;
471 break;
472 }
473 musb_g_resume(musb);
474 break;
475 case OTG_STATE_B_IDLE:
476 musb->int_usb &= ~MUSB_INTR_SUSPEND;
477 break;
478#endif
479 default:
480 WARNING("bogus %s RESUME (%s)\n",
481 "peripheral",
482 otg_state_string(musb));
483 }
484 }
485 }
486
487#ifdef CONFIG_USB_MUSB_HDRC_HCD
488 /* see manual for the order of the tests */
489 if (int_usb & MUSB_INTR_SESSREQ) {
490 DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
491
492 /* IRQ arrives from ID pin sense or (later, if VBUS power
493	 * is removed) SRP.  Responses are time critical:
494 * - turn on VBUS (with silicon-specific mechanism)
495 * - go through A_WAIT_VRISE
496 * - ... to A_WAIT_BCON.
497 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
498 */
499 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
500 musb->ep0_stage = MUSB_EP0_START;
501 musb->xceiv.state = OTG_STATE_A_IDLE;
502 MUSB_HST_MODE(musb);
503 musb_set_vbus(musb, 1);
504
505 handled = IRQ_HANDLED;
506 }
507
508 if (int_usb & MUSB_INTR_VBUSERROR) {
509 int ignore = 0;
510
511		/* During connection as an A-Device, we may see short
512		 * current spikes causing voltage drops, because of cable
513 * and peripheral capacitance combined with vbus draw.
514 * (So: less common with truly self-powered devices, where
515 * vbus doesn't act like a power supply.)
516 *
517 * Such spikes are short; usually less than ~500 usec, max
518 * of ~2 msec. That is, they're not sustained overcurrent
519 * errors, though they're reported using VBUSERROR irqs.
520 *
521 * Workarounds: (a) hardware: use self powered devices.
522 * (b) software: ignore non-repeated VBUS errors.
523 *
524 * REVISIT: do delays from lots of DEBUG_KERNEL checks
525 * make trouble here, keeping VBUS < 4.4V ?
526 */
527 switch (musb->xceiv.state) {
528 case OTG_STATE_A_HOST:
529 /* recovery is dicey once we've gotten past the
530 * initial stages of enumeration, but if VBUS
531 * stayed ok at the other end of the link, and
532 * another reset is due (at least for high speed,
533 * to redo the chirp etc), it might work OK...
534 */
535 case OTG_STATE_A_WAIT_BCON:
536 case OTG_STATE_A_WAIT_VRISE:
537 if (musb->vbuserr_retry) {
538 musb->vbuserr_retry--;
539 ignore = 1;
540 devctl |= MUSB_DEVCTL_SESSION;
541 musb_writeb(mbase, MUSB_DEVCTL, devctl);
542 } else {
543 musb->port1_status |=
544 (1 << USB_PORT_FEAT_OVER_CURRENT)
545 | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
546 }
547 break;
548 default:
549 break;
550 }
551
552 DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
553 otg_state_string(musb),
554 devctl,
555 ({ char *s;
556 switch (devctl & MUSB_DEVCTL_VBUS) {
557 case 0 << MUSB_DEVCTL_VBUS_SHIFT:
558 s = "<SessEnd"; break;
559 case 1 << MUSB_DEVCTL_VBUS_SHIFT:
560 s = "<AValid"; break;
561 case 2 << MUSB_DEVCTL_VBUS_SHIFT:
562 s = "<VBusValid"; break;
563 /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
564 default:
565 s = "VALID"; break;
566 }; s; }),
567 VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
568 musb->port1_status);
569
570 /* go through A_WAIT_VFALL then start a new session */
571 if (!ignore)
572 musb_set_vbus(musb, 0);
573 handled = IRQ_HANDLED;
574 }
575
576 if (int_usb & MUSB_INTR_CONNECT) {
577 struct usb_hcd *hcd = musb_to_hcd(musb);
578
579 handled = IRQ_HANDLED;
580 musb->is_active = 1;
581 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
582
583 musb->ep0_stage = MUSB_EP0_START;
584
585#ifdef CONFIG_USB_MUSB_OTG
586 /* flush endpoints when transitioning from Device Mode */
587 if (is_peripheral_active(musb)) {
588 /* REVISIT HNP; just force disconnect */
589 }
590 musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
591 musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
592 musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
593#endif
594 musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
595 |USB_PORT_STAT_HIGH_SPEED
596 |USB_PORT_STAT_ENABLE
597 );
598 musb->port1_status |= USB_PORT_STAT_CONNECTION
599 |(USB_PORT_STAT_C_CONNECTION << 16);
600
601 /* high vs full speed is just a guess until after reset */
602 if (devctl & MUSB_DEVCTL_LSDEV)
603 musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
604
605 if (hcd->status_urb)
606 usb_hcd_poll_rh_status(hcd);
607 else
608 usb_hcd_resume_root_hub(hcd);
609
610 MUSB_HST_MODE(musb);
611
612 /* indicate new connection to OTG machine */
613 switch (musb->xceiv.state) {
614 case OTG_STATE_B_PERIPHERAL:
615 if (int_usb & MUSB_INTR_SUSPEND) {
616 DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
617 musb->xceiv.state = OTG_STATE_B_HOST;
618 hcd->self.is_b_host = 1;
619 int_usb &= ~MUSB_INTR_SUSPEND;
620 } else
621 DBG(1, "CONNECT as b_peripheral???\n");
622 break;
623 case OTG_STATE_B_WAIT_ACON:
624 DBG(1, "HNP: Waiting to switch to b_host state\n");
625 musb->xceiv.state = OTG_STATE_B_HOST;
626 hcd->self.is_b_host = 1;
627 break;
628 default:
629 if ((devctl & MUSB_DEVCTL_VBUS)
630 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
631 musb->xceiv.state = OTG_STATE_A_HOST;
632 hcd->self.is_b_host = 0;
633 }
634 break;
635 }
636 DBG(1, "CONNECT (%s) devctl %02x\n",
637 otg_state_string(musb), devctl);
638 }
639#endif /* CONFIG_USB_MUSB_HDRC_HCD */
640
641	/* Mentor saves a bit: bus reset and babble share the same irq.
642 * only host sees babble; only peripheral sees bus reset.
643 */
644 if (int_usb & MUSB_INTR_RESET) {
645 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
646 /*
647 * Looks like non-HS BABBLE can be ignored, but
648 * HS BABBLE is an error condition. For HS the solution
649 * is to avoid babble in the first place and fix what
650 * caused BABBLE. When HS BABBLE happens we can only
651 * stop the session.
652 */
653 if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
654 DBG(1, "BABBLE devctl: %02x\n", devctl);
655 else {
656 ERR("Stopping host session -- babble\n");
657 musb_writeb(mbase, MUSB_DEVCTL, 0);
658 }
659 } else if (is_peripheral_capable()) {
660 DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
661 switch (musb->xceiv.state) {
662#ifdef CONFIG_USB_OTG
663 case OTG_STATE_A_SUSPEND:
664 /* We need to ignore disconnect on suspend
665 * otherwise tusb 2.0 won't reconnect after a
666 * power cycle, which breaks otg compliance.
667 */
668 musb->ignore_disconnect = 1;
669 musb_g_reset(musb);
670 /* FALLTHROUGH */
671 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
672 DBG(1, "HNP: Setting timer as %s\n",
673 otg_state_string(musb));
674 musb_otg_timer.data = (unsigned long)musb;
675 mod_timer(&musb_otg_timer, jiffies
676 + msecs_to_jiffies(100));
677 break;
678 case OTG_STATE_A_PERIPHERAL:
679 musb_hnp_stop(musb);
680 break;
681 case OTG_STATE_B_WAIT_ACON:
682 DBG(1, "HNP: RESET (%s), to b_peripheral\n",
683 otg_state_string(musb));
684 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
685 musb_g_reset(musb);
686 break;
687#endif
688 case OTG_STATE_B_IDLE:
689 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
690 /* FALLTHROUGH */
691 case OTG_STATE_B_PERIPHERAL:
692 musb_g_reset(musb);
693 break;
694 default:
695 DBG(1, "Unhandled BUS RESET as %s\n",
696 otg_state_string(musb));
697 }
698 }
699
700 handled = IRQ_HANDLED;
701 }
702 schedule_work(&musb->irq_work);
703
704 return handled;
705}
706
707/*
708 * Second stage of the "global" interrupt service routine: handles
709 * the disconnect and suspend events.  Unlike the stage 0 events,
710 * these are processed only after the endpoint FIFO interrupts,
711 * once any pending transfers have been dispatched.
712 *
713 * @param musb instance pointer
714 * @param int_usb register contents
715 * @param devctl
716 * @param power
717 */
718static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
719 u8 devctl, u8 power)
720{
721 irqreturn_t handled = IRQ_NONE;
722
723#if 0
724/* REVISIT ... this would be for multiplexing periodic endpoints, or
725 * supporting transfer phasing to prevent exceeding ISO bandwidth
726 * limits of a given frame or microframe.
727 *
728 * It's not needed for peripheral side, which dedicates endpoints;
729 * though it _might_ use SOF irqs for other purposes.
730 *
731 * And it's not currently needed for host side, which also dedicates
732 * endpoints, relies on TX/RX interval registers, and isn't claimed
733 * to support ISO transfers yet.
734 */
735 if (int_usb & MUSB_INTR_SOF) {
736 void __iomem *mbase = musb->mregs;
737 struct musb_hw_ep *ep;
738 u8 epnum;
739 u16 frame;
740
741 DBG(6, "START_OF_FRAME\n");
742 handled = IRQ_HANDLED;
743
744 /* start any periodic Tx transfers waiting for current frame */
745 frame = musb_readw(mbase, MUSB_FRAME);
746 ep = musb->endpoints;
747 for (epnum = 1; (epnum < musb->nr_endpoints)
748 && (musb->epmask >= (1 << epnum));
749 epnum++, ep++) {
750 /*
751 * FIXME handle framecounter wraps (12 bits)
752 * eliminate duplicated StartUrb logic
753 */
754 if (ep->dwWaitFrame >= frame) {
755 ep->dwWaitFrame = 0;
756 pr_debug("SOF --> periodic TX%s on %d\n",
757 ep->tx_channel ? " DMA" : "",
758 epnum);
759 if (!ep->tx_channel)
760 musb_h_tx_start(musb, epnum);
761 else
762 cppi_hostdma_start(musb, epnum);
763 }
764 } /* end of for loop */
765 }
766#endif
767
768 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
769 DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
770 otg_state_string(musb),
771 MUSB_MODE(musb), devctl);
772 handled = IRQ_HANDLED;
773
774 switch (musb->xceiv.state) {
775#ifdef CONFIG_USB_MUSB_HDRC_HCD
776 case OTG_STATE_A_HOST:
777 case OTG_STATE_A_SUSPEND:
778 musb_root_disconnect(musb);
779 if (musb->a_wait_bcon != 0)
780 musb_platform_try_idle(musb, jiffies
781 + msecs_to_jiffies(musb->a_wait_bcon));
782 break;
783#endif /* HOST */
784#ifdef CONFIG_USB_MUSB_OTG
785 case OTG_STATE_B_HOST:
786 musb_hnp_stop(musb);
787 break;
788 case OTG_STATE_A_PERIPHERAL:
789 musb_hnp_stop(musb);
790 musb_root_disconnect(musb);
791 /* FALLTHROUGH */
792 case OTG_STATE_B_WAIT_ACON:
793 /* FALLTHROUGH */
794#endif /* OTG */
795#ifdef CONFIG_USB_GADGET_MUSB_HDRC
796 case OTG_STATE_B_PERIPHERAL:
797 case OTG_STATE_B_IDLE:
798 musb_g_disconnect(musb);
799 break;
800#endif /* GADGET */
801 default:
802 WARNING("unhandled DISCONNECT transition (%s)\n",
803 otg_state_string(musb));
804 break;
805 }
806
807 schedule_work(&musb->irq_work);
808 }
809
810 if (int_usb & MUSB_INTR_SUSPEND) {
811 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
812 otg_state_string(musb), devctl, power);
813 handled = IRQ_HANDLED;
814
815 switch (musb->xceiv.state) {
816#ifdef CONFIG_USB_MUSB_OTG
817 case OTG_STATE_A_PERIPHERAL:
818 /*
819 * We cannot stop HNP here, devctl BDEVICE might be
820 * still set.
821 */
822 break;
823#endif
824 case OTG_STATE_B_PERIPHERAL:
825 musb_g_suspend(musb);
826 musb->is_active = is_otg_enabled(musb)
827 && musb->xceiv.gadget->b_hnp_enable;
828 if (musb->is_active) {
829#ifdef CONFIG_USB_MUSB_OTG
830 musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
831 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
832 musb_otg_timer.data = (unsigned long)musb;
833 mod_timer(&musb_otg_timer, jiffies
834 + msecs_to_jiffies(TB_ASE0_BRST));
835#endif
836 }
837 break;
838 case OTG_STATE_A_WAIT_BCON:
839 if (musb->a_wait_bcon != 0)
840 musb_platform_try_idle(musb, jiffies
841 + msecs_to_jiffies(musb->a_wait_bcon));
842 break;
843 case OTG_STATE_A_HOST:
844 musb->xceiv.state = OTG_STATE_A_SUSPEND;
845 musb->is_active = is_otg_enabled(musb)
846 && musb->xceiv.host->b_hnp_enable;
847 break;
848 case OTG_STATE_B_HOST:
849 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
850 DBG(1, "REVISIT: SUSPEND as B_HOST\n");
851 break;
852 default:
853 /* "should not happen" */
854 musb->is_active = 0;
855 break;
856 }
857 schedule_work(&musb->irq_work);
858 }
859
860
861 return handled;
862}
863
864/*-------------------------------------------------------------------------*/
865
866/*
867 * Program the HDRC to start (enable interrupts, dma, etc.).
868 */
869void musb_start(struct musb *musb)
870{
871 void __iomem *regs = musb->mregs;
872 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
873
874 DBG(2, "<== devctl %02x\n", devctl);
875
876 /* Set INT enable registers, enable interrupts */
877 musb_writew(regs, MUSB_INTRTXE, musb->epmask);
878 musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
879 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
880
881 musb_writeb(regs, MUSB_TESTMODE, 0);
882
883 /* put into basic highspeed mode and start session */
884 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
885 | MUSB_POWER_SOFTCONN
886 | MUSB_POWER_HSENAB
887 /* ENSUSPEND wedges tusb */
888 /* | MUSB_POWER_ENSUSPEND */
889 );
890
891 musb->is_active = 0;
892 devctl = musb_readb(regs, MUSB_DEVCTL);
893 devctl &= ~MUSB_DEVCTL_SESSION;
894
895 if (is_otg_enabled(musb)) {
896 /* session started after:
897 * (a) ID-grounded irq, host mode;
898 * (b) vbus present/connect IRQ, peripheral mode;
899 * (c) peripheral initiates, using SRP
900 */
901 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
902 musb->is_active = 1;
903 else
904 devctl |= MUSB_DEVCTL_SESSION;
905
906 } else if (is_host_enabled(musb)) {
907 /* assume ID pin is hard-wired to ground */
908 devctl |= MUSB_DEVCTL_SESSION;
909
910 } else /* peripheral is enabled */ {
911 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
912 musb->is_active = 1;
913 }
914 musb_platform_enable(musb);
915 musb_writeb(regs, MUSB_DEVCTL, devctl);
916}
917
918
919static void musb_generic_disable(struct musb *musb)
920{
921 void __iomem *mbase = musb->mregs;
922 u16 temp;
923
924 /* disable interrupts */
925 musb_writeb(mbase, MUSB_INTRUSBE, 0);
926 musb_writew(mbase, MUSB_INTRTXE, 0);
927 musb_writew(mbase, MUSB_INTRRXE, 0);
928
929 /* off */
930 musb_writeb(mbase, MUSB_DEVCTL, 0);
931
932 /* flush pending interrupts */
933 temp = musb_readb(mbase, MUSB_INTRUSB);
934 temp = musb_readw(mbase, MUSB_INTRTX);
935 temp = musb_readw(mbase, MUSB_INTRRX);
936
937}
938
939/*
940 * Make the HDRC stop (disable interrupts, etc.);
941 * reversible by musb_start
942 * called on gadget driver unregister
943 * with controller locked, irqs blocked
944 * acts as a NOP unless some role activated the hardware
945 */
946void musb_stop(struct musb *musb)
947{
948 /* stop IRQs, timers, ... */
949 musb_platform_disable(musb);
950 musb_generic_disable(musb);
951 DBG(3, "HDRC disabled\n");
952
953 /* FIXME
954 * - mark host and/or peripheral drivers unusable/inactive
955 * - disable DMA (and enable it in HdrcStart)
956	 * - make sure we can musb_start() after musb_stop(); OTG-mode
957	 *   gadget driver module rmmod/modprobe cycles exercise that
958 * - ...
959 */
960 musb_platform_try_idle(musb, 0);
961}
962
963static void musb_shutdown(struct platform_device *pdev)
964{
965 struct musb *musb = dev_to_musb(&pdev->dev);
966 unsigned long flags;
967
968 spin_lock_irqsave(&musb->lock, flags);
969 musb_platform_disable(musb);
970 musb_generic_disable(musb);
971 if (musb->clock) {
972 clk_put(musb->clock);
973 musb->clock = NULL;
974 }
975 spin_unlock_irqrestore(&musb->lock, flags);
976
977 /* FIXME power down */
978}
979
980
981/*-------------------------------------------------------------------------*/
982
983/*
984 * The silicon either has hard-wired endpoint configurations, or else
985 * "dynamic fifo" sizing. The driver has support for both, though at this
986 * writing only the dynamic sizing is very well tested. We use normal
987 * idioms to so both modes are compile-tested, but dead code elimination
988 * leaves only the relevant one in the object file.
989 *
990 * We don't currently use dynamic fifo setup capability to do anything
991 * more than selecting one of a bunch of predefined configurations.
992 */
993#if defined(CONFIG_USB_TUSB6010) || \
994 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
995static ushort __initdata fifo_mode = 4;
996#else
997static ushort __initdata fifo_mode = 2;
998#endif
999
1000/* "modprobe ... fifo_mode=1" etc */
1001module_param(fifo_mode, ushort, 0);
1002MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1003
1004
1005enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
1006enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
1007
1008struct fifo_cfg {
1009 u8 hw_ep_num;
1010 enum fifo_style style;
1011 enum buf_mode mode;
1012 u16 maxpacket;
1013};
1014
1015/*
1016 * tables defining fifo_mode values. define more if you like.
1017 * for host side, make sure both halves of ep1 are set up.
1018 */
1019
1020/* mode 0 - fits in 2KB */
1021static struct fifo_cfg __initdata mode_0_cfg[] = {
1022{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1023{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1024{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1025{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1026{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1027};
1028
1029/* mode 1 - fits in 4KB */
1030static struct fifo_cfg __initdata mode_1_cfg[] = {
1031{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1032{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1033{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1034{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1035{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1036};
1037
1038/* mode 2 - fits in 4KB */
1039static struct fifo_cfg __initdata mode_2_cfg[] = {
1040{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1041{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1042{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1043{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1044{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1045{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1046};
1047
1048/* mode 3 - fits in 4KB */
1049static struct fifo_cfg __initdata mode_3_cfg[] = {
1050{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1051{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1052{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1053{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1054{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1055{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1056};
1057
1058/* mode 4 - fits in 16KB */
1059static struct fifo_cfg __initdata mode_4_cfg[] = {
1060{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1061{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1062{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1063{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1064{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1065{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1066{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1067{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1068{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1069{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1070{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
1071{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
1072{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
1073{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
1074{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
1075{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
1076{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
1077{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
1078{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
1079{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
1080{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
1081{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
1082{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
1083{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
1084{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
1085{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
1086{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1087{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1088};
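/* Rough bookkeeping for mode 4, assuming single-buffered FIFOs as
 * configured above: 26 x 512 bytes plus two 1024-byte shared FIFOs
 * plus ep0's 64 bytes is 15424 bytes, within the 16KB budget.
 */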
1089
1090
1091/*
1092 * configure a fifo; for non-shared endpoints, this may be called
1093 * once for a tx fifo and once for an rx fifo.
1094 *
1095 * returns negative errno or offset for next fifo.
1096 */
1097static int __init
1098fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
1099 const struct fifo_cfg *cfg, u16 offset)
1100{
1101 void __iomem *mbase = musb->mregs;
1102 int size = 0;
1103 u16 maxpacket = cfg->maxpacket;
1104 u16 c_off = offset >> 3;
1105 u8 c_size;
1106
1107 /* expect hw_ep has already been zero-initialized */
1108
1109 size = ffs(max(maxpacket, (u16) 8)) - 1;
1110 maxpacket = 1 << size;
1111
1112 c_size = size - 3;
1113 if (cfg->mode == BUF_DOUBLE) {
1114 if ((offset + (maxpacket << 1)) >
1115 (1 << (musb->config->ram_bits + 2)))
1116 return -EMSGSIZE;
1117 c_size |= MUSB_FIFOSZ_DPB;
1118 } else {
1119 if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1120 return -EMSGSIZE;
1121 }
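	/* Worked example: maxpacket 512 gives size = 9 and c_size = 6;
	 * the core decodes the FIFO length as 1 << (c_size + 3) = 512
	 * bytes, or twice that once MUSB_FIFOSZ_DPB marks it double
	 * buffered.
	 */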
1122
1123 /* configure the FIFO */
1124 musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1125
1126#ifdef CONFIG_USB_MUSB_HDRC_HCD
1127 /* EP0 reserved endpoint for control, bidirectional;
1128	 * EP1 reserved for bulk, two unidirectional halves.
1129 */
1130 if (hw_ep->epnum == 1)
1131 musb->bulk_ep = hw_ep;
1132 /* REVISIT error check: be sure ep0 can both rx and tx ... */
1133#endif
1134 switch (cfg->style) {
1135 case FIFO_TX:
1136 musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
1137 musb_writew(mbase, MUSB_TXFIFOADD, c_off);
1138 hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1139 hw_ep->max_packet_sz_tx = maxpacket;
1140 break;
1141 case FIFO_RX:
1142 musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
1143 musb_writew(mbase, MUSB_RXFIFOADD, c_off);
1144 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1145 hw_ep->max_packet_sz_rx = maxpacket;
1146 break;
1147 case FIFO_RXTX:
1148 musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
1149 musb_writew(mbase, MUSB_TXFIFOADD, c_off);
1150 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1151 hw_ep->max_packet_sz_rx = maxpacket;
1152
1153 musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
1154 musb_writew(mbase, MUSB_RXFIFOADD, c_off);
1155 hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1156 hw_ep->max_packet_sz_tx = maxpacket;
1157
1158 hw_ep->is_shared_fifo = true;
1159 break;
1160 }
1161
1162 /* NOTE rx and tx endpoint irqs aren't managed separately,
1163 * which happens to be ok
1164 */
1165 musb->epmask |= (1 << hw_ep->epnum);
1166
1167 return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
1168}
1169
1170static struct fifo_cfg __initdata ep0_cfg = {
1171 .style = FIFO_RXTX, .maxpacket = 64,
1172};
1173
1174static int __init ep_config_from_table(struct musb *musb)
1175{
1176 const struct fifo_cfg *cfg;
1177 unsigned i, n;
1178 int offset;
1179 struct musb_hw_ep *hw_ep = musb->endpoints;
1180
1181 switch (fifo_mode) {
1182 default:
1183 fifo_mode = 0;
1184 /* FALLTHROUGH */
1185 case 0:
1186 cfg = mode_0_cfg;
1187 n = ARRAY_SIZE(mode_0_cfg);
1188 break;
1189 case 1:
1190 cfg = mode_1_cfg;
1191 n = ARRAY_SIZE(mode_1_cfg);
1192 break;
1193 case 2:
1194 cfg = mode_2_cfg;
1195 n = ARRAY_SIZE(mode_2_cfg);
1196 break;
1197 case 3:
1198 cfg = mode_3_cfg;
1199 n = ARRAY_SIZE(mode_3_cfg);
1200 break;
1201 case 4:
1202 cfg = mode_4_cfg;
1203 n = ARRAY_SIZE(mode_4_cfg);
1204 break;
1205 }
1206
1207 printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
1208 musb_driver_name, fifo_mode);
1209
1210
1211 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1212 /* assert(offset > 0) */
1213
1214 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1215 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1216 */
1217
1218 for (i = 0; i < n; i++) {
1219 u8 epn = cfg->hw_ep_num;
1220
1221 if (epn >= musb->config->num_eps) {
1222 pr_debug("%s: invalid ep %d\n",
1223 musb_driver_name, epn);
1224 continue;
1225 }
1226 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1227 if (offset < 0) {
1228 pr_debug("%s: mem overrun, ep %d\n",
1229 musb_driver_name, epn);
1230 return -EINVAL;
1231 }
1232 epn++;
1233 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1234 }
1235
1236 printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
1237 musb_driver_name,
1238 n + 1, musb->config->num_eps * 2 - 1,
1239 offset, (1 << (musb->config->ram_bits + 2)));
1240
1241#ifdef CONFIG_USB_MUSB_HDRC_HCD
1242 if (!musb->bulk_ep) {
1243 pr_debug("%s: missing bulk\n", musb_driver_name);
1244 return -EINVAL;
1245 }
1246#endif
1247
1248 return 0;
1249}
1250
1251
1252/*
1253 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1254 * @param musb the controller
1255 */
1256static int __init ep_config_from_hw(struct musb *musb)
1257{
1258 u8 epnum = 0, reg;
1259 struct musb_hw_ep *hw_ep;
1260 void *mbase = musb->mregs;
1261
1262 DBG(2, "<== static silicon ep config\n");
1263
1264 /* FIXME pick up ep0 maxpacket size */
1265
1266 for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1267 musb_ep_select(mbase, epnum);
1268 hw_ep = musb->endpoints + epnum;
1269
1270 /* read from core using indexed model */
1271 reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
1272 if (!reg) {
1273 /* 0's returned when no more endpoints */
1274 break;
1275 }
1276 musb->nr_endpoints++;
1277 musb->epmask |= (1 << epnum);
1278
1279 hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
1280
1281 /* shared TX/RX FIFO? */
1282 if ((reg & 0xf0) == 0xf0) {
1283 hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
1284 hw_ep->is_shared_fifo = true;
1285 continue;
1286 } else {
1287 hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
1288 hw_ep->is_shared_fifo = false;
1289 }
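		/* Worked example: reg 0x99 decodes as separate 512-byte
		 * TX and RX FIFOs (1 << 9 each), while reg 0xf9 would
		 * mark a single shared 512-byte TX/RX FIFO.
		 */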
1290
1291 /* FIXME set up hw_ep->{rx,tx}_double_buffered */
1292
1293#ifdef CONFIG_USB_MUSB_HDRC_HCD
1294 /* pick an RX/TX endpoint for bulk */
1295 if (hw_ep->max_packet_sz_tx < 512
1296 || hw_ep->max_packet_sz_rx < 512)
1297 continue;
1298
1299 /* REVISIT: this algorithm is lazy, we should at least
1300 * try to pick a double buffered endpoint.
1301 */
1302 if (musb->bulk_ep)
1303 continue;
1304 musb->bulk_ep = hw_ep;
1305#endif
1306 }
1307
1308#ifdef CONFIG_USB_MUSB_HDRC_HCD
1309 if (!musb->bulk_ep) {
1310 pr_debug("%s: missing bulk\n", musb_driver_name);
1311 return -EINVAL;
1312 }
1313#endif
1314
1315 return 0;
1316}
1317
1318enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1319
1320/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1321 * configure endpoints, or take their config from silicon
1322 */
1323static int __init musb_core_init(u16 musb_type, struct musb *musb)
1324{
1325#ifdef MUSB_AHB_ID
1326 u32 data;
1327#endif
1328 u8 reg;
1329 char *type;
1330 u16 hwvers, rev_major, rev_minor;
1331	char aInfo[96], aRevision[32], aDate[12];	/* aInfo must hold the worst-case feature list built below */
1332 void __iomem *mbase = musb->mregs;
1333 int status = 0;
1334 int i;
1335
1336 /* log core options (read using indexed model) */
1337 musb_ep_select(mbase, 0);
1338 reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
1339
1340 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1341 if (reg & MUSB_CONFIGDATA_DYNFIFO)
1342 strcat(aInfo, ", dyn FIFOs");
1343 if (reg & MUSB_CONFIGDATA_MPRXE) {
1344 strcat(aInfo, ", bulk combine");
1345#ifdef C_MP_RX
1346 musb->bulk_combine = true;
1347#else
1348 strcat(aInfo, " (X)"); /* no driver support */
1349#endif
1350 }
1351 if (reg & MUSB_CONFIGDATA_MPTXE) {
1352 strcat(aInfo, ", bulk split");
1353#ifdef C_MP_TX
1354 musb->bulk_split = true;
1355#else
1356 strcat(aInfo, " (X)"); /* no driver support */
1357#endif
1358 }
1359 if (reg & MUSB_CONFIGDATA_HBRXE) {
1360 strcat(aInfo, ", HB-ISO Rx");
1361 strcat(aInfo, " (X)"); /* no driver support */
1362 }
1363 if (reg & MUSB_CONFIGDATA_HBTXE) {
1364 strcat(aInfo, ", HB-ISO Tx");
1365 strcat(aInfo, " (X)"); /* no driver support */
1366 }
1367 if (reg & MUSB_CONFIGDATA_SOFTCONE)
1368 strcat(aInfo, ", SoftConn");
1369
1370 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
1371 musb_driver_name, reg, aInfo);
1372
1373#ifdef MUSB_AHB_ID
1374 data = musb_readl(mbase, 0x404);
1375 sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
1376 (data >> 16) & 0xff, (data >> 24) & 0xff);
1377 /* FIXME ID2 and ID3 are unused */
1378 data = musb_readl(mbase, 0x408);
1379 printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
1380 data = musb_readl(mbase, 0x40c);
1381 printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
1382 reg = musb_readb(mbase, 0x400);
1383 musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
1384#else
1385 aDate[0] = 0;
1386#endif
1387 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1388 musb->is_multipoint = 1;
1389 type = "M";
1390 } else {
1391 musb->is_multipoint = 0;
1392 type = "";
1393#ifdef CONFIG_USB_MUSB_HDRC_HCD
1394#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
1395 printk(KERN_ERR
1396 "%s: kernel must blacklist external hubs\n",
1397 musb_driver_name);
1398#endif
1399#endif
1400 }
1401
1402 /* log release info */
1403 hwvers = musb_readw(mbase, MUSB_HWVERS);
1404 rev_major = (hwvers >> 10) & 0x1f;
1405 rev_minor = hwvers & 0x3ff;
1406 snprintf(aRevision, 32, "%d.%d%s", rev_major,
1407 rev_minor, (hwvers & 0x8000) ? "RC" : "");
1408 printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
1409 musb_driver_name, type, aRevision, aDate);
1410
1411 /* configure ep0 */
1412 musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
1413 musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
1414
1415 /* discover endpoint configuration */
1416 musb->nr_endpoints = 1;
1417 musb->epmask = 1;
1418
1419 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1420 if (musb->config->dyn_fifo)
1421 status = ep_config_from_table(musb);
1422 else {
1423 ERR("reconfigure software for Dynamic FIFOs\n");
1424 status = -ENODEV;
1425 }
1426 } else {
1427 if (!musb->config->dyn_fifo)
1428 status = ep_config_from_hw(musb);
1429 else {
1430 ERR("reconfigure software for static FIFOs\n");
1431 return -ENODEV;
1432 }
1433 }
1434
1435 if (status < 0)
1436 return status;
1437
1438 /* finish init, and print endpoint config */
1439 for (i = 0; i < musb->nr_endpoints; i++) {
1440 struct musb_hw_ep *hw_ep = musb->endpoints + i;
1441
1442 hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
1443#ifdef CONFIG_USB_TUSB6010
1444 hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
1445 hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
1446 hw_ep->fifo_sync_va =
1447 musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
1448
1449 if (i == 0)
1450 hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1451 else
1452 hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
1453#endif
1454
1455 hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
1456#ifdef CONFIG_USB_MUSB_HDRC_HCD
1457 hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
1458 hw_ep->rx_reinit = 1;
1459 hw_ep->tx_reinit = 1;
1460#endif
1461
1462 if (hw_ep->max_packet_sz_tx) {
1463 printk(KERN_DEBUG
1464 "%s: hw_ep %d%s, %smax %d\n",
1465 musb_driver_name, i,
1466 hw_ep->is_shared_fifo ? "shared" : "tx",
1467 hw_ep->tx_double_buffered
1468 ? "doublebuffer, " : "",
1469 hw_ep->max_packet_sz_tx);
1470 }
1471 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1472 printk(KERN_DEBUG
1473 "%s: hw_ep %d%s, %smax %d\n",
1474 musb_driver_name, i,
1475 "rx",
1476 hw_ep->rx_double_buffered
1477 ? "doublebuffer, " : "",
1478 hw_ep->max_packet_sz_rx);
1479 }
1480 if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1481 DBG(1, "hw_ep %d not configured\n", i);
1482 }
1483
1484 return 0;
1485}
1486
1487/*-------------------------------------------------------------------------*/
1488
1489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
1490
1491static irqreturn_t generic_interrupt(int irq, void *__hci)
1492{
1493 unsigned long flags;
1494 irqreturn_t retval = IRQ_NONE;
1495 struct musb *musb = __hci;
1496
1497 spin_lock_irqsave(&musb->lock, flags);
1498
1499 musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
1500 musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
1501 musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
1502
1503 if (musb->int_usb || musb->int_tx || musb->int_rx)
1504 retval = musb_interrupt(musb);
1505
1506 spin_unlock_irqrestore(&musb->lock, flags);
1507
1508 /* REVISIT we sometimes get spurious IRQs on g_ep0
1509 * not clear why...
1510 */
1511 if (retval != IRQ_HANDLED)
1512 DBG(5, "spurious?\n");
1513
1514 return IRQ_HANDLED;
1515}
1516
1517#else
1518#define generic_interrupt NULL
1519#endif
1520
1521/*
1522 * handle all the irqs defined by the HDRC core. for now we expect: other
1523 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1524 * will be assigned, and the irq will already have been acked.
1525 *
1526 * called in irq context with spinlock held, irqs blocked
1527 */
1528irqreturn_t musb_interrupt(struct musb *musb)
1529{
1530 irqreturn_t retval = IRQ_NONE;
1531 u8 devctl, power;
1532 int ep_num;
1533 u32 reg;
1534
1535 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1536 power = musb_readb(musb->mregs, MUSB_POWER);
1537
1538 DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
1539 (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
1540 musb->int_usb, musb->int_tx, musb->int_rx);
1541
1542 /* the core can interrupt us for multiple reasons; docs have
1543 * a generic interrupt flowchart to follow
1544 */
1545 if (musb->int_usb & STAGE0_MASK)
1546 retval |= musb_stage0_irq(musb, musb->int_usb,
1547 devctl, power);
1548
1549 /* "stage 1" is handling endpoint irqs */
1550
1551 /* handle endpoint 0 first */
1552 if (musb->int_tx & 1) {
1553 if (devctl & MUSB_DEVCTL_HM)
1554 retval |= musb_h_ep0_irq(musb);
1555 else
1556 retval |= musb_g_ep0_irq(musb);
1557 }
1558
1559 /* RX on endpoints 1-15 */
1560 reg = musb->int_rx >> 1;
1561 ep_num = 1;
1562 while (reg) {
1563 if (reg & 1) {
1564 /* musb_ep_select(musb->mregs, ep_num); */
1565 /* REVISIT just retval = ep->rx_irq(...) */
1566 retval = IRQ_HANDLED;
1567 if (devctl & MUSB_DEVCTL_HM) {
1568 if (is_host_capable())
1569 musb_host_rx(musb, ep_num);
1570 } else {
1571 if (is_peripheral_capable())
1572 musb_g_rx(musb, ep_num);
1573 }
1574 }
1575
1576 reg >>= 1;
1577 ep_num++;
1578 }
1579
1580 /* TX on endpoints 1-15 */
1581 reg = musb->int_tx >> 1;
1582 ep_num = 1;
1583 while (reg) {
1584 if (reg & 1) {
1585 /* musb_ep_select(musb->mregs, ep_num); */
1586 /* REVISIT just retval |= ep->tx_irq(...) */
1587 retval = IRQ_HANDLED;
1588 if (devctl & MUSB_DEVCTL_HM) {
1589 if (is_host_capable())
1590 musb_host_tx(musb, ep_num);
1591 } else {
1592 if (is_peripheral_capable())
1593 musb_g_tx(musb, ep_num);
1594 }
1595 }
1596 reg >>= 1;
1597 ep_num++;
1598 }
1599
1600 /* finish handling "global" interrupts after handling fifos */
1601 if (musb->int_usb)
1602 retval |= musb_stage2_irq(musb,
1603 musb->int_usb, devctl, power);
1604
1605 return retval;
1606}
1607
1608
1609#ifndef CONFIG_MUSB_PIO_ONLY
1610static int __initdata use_dma = 1;
1611
1612/* "modprobe ... use_dma=0" etc */
1613module_param(use_dma, bool, 0);
1614MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1615
1616void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1617{
1618 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1619
1620 /* called with controller lock already held */
1621
1622 if (!epnum) {
1623#ifndef CONFIG_USB_TUSB_OMAP_DMA
1624 if (!is_cppi_enabled()) {
1625 /* endpoint 0 */
1626 if (devctl & MUSB_DEVCTL_HM)
1627 musb_h_ep0_irq(musb);
1628 else
1629 musb_g_ep0_irq(musb);
1630 }
1631#endif
1632 } else {
1633 /* endpoints 1..15 */
1634 if (transmit) {
1635 if (devctl & MUSB_DEVCTL_HM) {
1636 if (is_host_capable())
1637 musb_host_tx(musb, epnum);
1638 } else {
1639 if (is_peripheral_capable())
1640 musb_g_tx(musb, epnum);
1641 }
1642 } else {
1643 /* receive */
1644 if (devctl & MUSB_DEVCTL_HM) {
1645 if (is_host_capable())
1646 musb_host_rx(musb, epnum);
1647 } else {
1648 if (is_peripheral_capable())
1649 musb_g_rx(musb, epnum);
1650 }
1651 }
1652 }
1653}
1654
1655#else
1656#define use_dma 0
1657#endif
1658
1659/*-------------------------------------------------------------------------*/
1660
1661#ifdef CONFIG_SYSFS
1662
1663static ssize_t
1664musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1665{
1666 struct musb *musb = dev_to_musb(dev);
1667 unsigned long flags;
1668	int			ret;
1669
1670 spin_lock_irqsave(&musb->lock, flags);
1671 ret = sprintf(buf, "%s\n", otg_state_string(musb));
1672 spin_unlock_irqrestore(&musb->lock, flags);
1673
1674 return ret;
1675}
1676
1677static ssize_t
1678musb_mode_store(struct device *dev, struct device_attribute *attr,
1679 const char *buf, size_t n)
1680{
1681 struct musb *musb = dev_to_musb(dev);
1682 unsigned long flags;
1683
1684 spin_lock_irqsave(&musb->lock, flags);
1685 if (!strncmp(buf, "host", 4))
1686 musb_platform_set_mode(musb, MUSB_HOST);
1687 if (!strncmp(buf, "peripheral", 10))
1688 musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1689 if (!strncmp(buf, "otg", 3))
1690 musb_platform_set_mode(musb, MUSB_OTG);
1691 spin_unlock_irqrestore(&musb->lock, flags);
1692
1693 return n;
1694}
1695static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
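/* Usage sketch (illustrative, not from the original source; the sysfs path
 * depends on how the platform device is named -- "musb_hdrc" is assumed):
 *
 *	# cat /sys/bus/platform/devices/musb_hdrc/mode
 *	b_idle
 *	# echo host > /sys/bus/platform/devices/musb_hdrc/mode
 *
 * Reads report the current OTG state string; writes ask the platform glue
 * to switch roles via musb_platform_set_mode().
 */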
1696
1697static ssize_t
1698musb_vbus_store(struct device *dev, struct device_attribute *attr,
1699 const char *buf, size_t n)
1700{
1701 struct musb *musb = dev_to_musb(dev);
1702 unsigned long flags;
1703 unsigned long val;
1704
1705 if (sscanf(buf, "%lu", &val) < 1) {
1706 printk(KERN_ERR "Invalid VBUS timeout ms value\n");
1707 return -EINVAL;
1708 }
1709
1710 spin_lock_irqsave(&musb->lock, flags);
1711 musb->a_wait_bcon = val;
1712 if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
1713 musb->is_active = 0;
1714 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1715 spin_unlock_irqrestore(&musb->lock, flags);
1716
1717 return n;
1718}
1719
1720static ssize_t
1721musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1722{
1723 struct musb *musb = dev_to_musb(dev);
1724 unsigned long flags;
1725 unsigned long val;
1726 int vbus;
1727
1728 spin_lock_irqsave(&musb->lock, flags);
1729 val = musb->a_wait_bcon;
1730 vbus = musb_platform_get_vbus_status(musb);
1731 spin_unlock_irqrestore(&musb->lock, flags);
1732
1733 return sprintf(buf, "Vbus %s, timeout %lu\n",
1734 vbus ? "on" : "off", val);
1735}
1736static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
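/* Usage sketch for "vbus" (illustrative; device name assumed as above):
 *
 *	# echo 1000 > /sys/bus/platform/devices/musb_hdrc/vbus
 *	# cat /sys/bus/platform/devices/musb_hdrc/vbus
 *	Vbus off, timeout 1000
 *
 * The written value becomes a_wait_bcon, the VBUS timeout in msecs used
 * while waiting for a B-device to connect.
 */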
1737
1738#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1739
1740/* Gadget drivers can't know that a host is connected, so they can't tell
1741 * when to start SRP; users can. This attribute lets userspace trigger SRP.
1742 */
1743static ssize_t
1744musb_srp_store(struct device *dev, struct device_attribute *attr,
1745 const char *buf, size_t n)
1746{
1747 struct musb *musb = dev_to_musb(dev);
1748 unsigned short srp;
1749
1750 if (sscanf(buf, "%hu", &srp) != 1
1751 || (srp != 1)) {
1752 printk(KERN_ERR "SRP: Value must be 1\n");
1753 return -EINVAL;
1754 }
1755
1756	/* srp == 1 is guaranteed by the check above */
1757	musb_g_wakeup(musb);
1758
1759 return n;
1760}
1761static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1762
1763#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
1764
1765#endif /* sysfs */
1766
1767/* Only used to provide driver mode change events */
1768static void musb_irq_work(struct work_struct *data)
1769{
1770 struct musb *musb = container_of(data, struct musb, irq_work);
1771	static int old_state;	/* NOTE: static state assumes a single controller */
1772
1773 if (musb->xceiv.state != old_state) {
1774 old_state = musb->xceiv.state;
1775 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1776 }
1777}
1778
1779/* --------------------------------------------------------------------------
1780 * Init support
1781 */
1782
1783static struct musb *__init
1784allocate_instance(struct device *dev,
1785 struct musb_hdrc_config *config, void __iomem *mbase)
1786{
1787 struct musb *musb;
1788 struct musb_hw_ep *ep;
1789 int epnum;
1790#ifdef CONFIG_USB_MUSB_HDRC_HCD
1791 struct usb_hcd *hcd;
1792
1793 hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
1794 if (!hcd)
1795 return NULL;
1796 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
1797
1798 musb = hcd_to_musb(hcd);
1799 INIT_LIST_HEAD(&musb->control);
1800 INIT_LIST_HEAD(&musb->in_bulk);
1801 INIT_LIST_HEAD(&musb->out_bulk);
1802
1803 hcd->uses_new_polling = 1;
1804
1805 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1806#else
1807 musb = kzalloc(sizeof *musb, GFP_KERNEL);
1808 if (!musb)
1809 return NULL;
1810 dev_set_drvdata(dev, musb);
1811
1812#endif
1813
1814 musb->mregs = mbase;
1815 musb->ctrl_base = mbase;
1816 musb->nIrq = -ENODEV;
1817 musb->config = config;
1818 for (epnum = 0, ep = musb->endpoints;
1819 epnum < musb->config->num_eps;
1820 epnum++, ep++) {
1821
1822 ep->musb = musb;
1823 ep->epnum = epnum;
1824 }
1825
1826 musb->controller = dev;
1827 return musb;
1828}
1829
1830static void musb_free(struct musb *musb)
1831{
1832 /* this has multiple entry modes. it handles fault cleanup after
1833 * probe(), where things may be partially set up, as well as rmmod
1834 * cleanup after everything's been de-activated.
1835 */
1836
1837#ifdef CONFIG_SYSFS
1838 device_remove_file(musb->controller, &dev_attr_mode);
1839 device_remove_file(musb->controller, &dev_attr_vbus);
1840#ifdef CONFIG_USB_GADGET_MUSB_HDRC	/* match the create in musb_init_controller() */
1841 device_remove_file(musb->controller, &dev_attr_srp);
1842#endif
1843#endif
1844
1845#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1846 musb_gadget_cleanup(musb);
1847#endif
1848
1849 if (musb->nIrq >= 0) {
1850 disable_irq_wake(musb->nIrq);
1851 free_irq(musb->nIrq, musb);
1852 }
1853 if (is_dma_capable() && musb->dma_controller) {
1854 struct dma_controller *c = musb->dma_controller;
1855
1856 (void) c->stop(c);
1857 dma_controller_destroy(c);
1858 }
1859
1860 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1861 musb_platform_exit(musb);
1862 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1863
1864 if (musb->clock) {
1865 clk_disable(musb->clock);
1866 clk_put(musb->clock);
1867 }
1868
1869#ifdef CONFIG_USB_MUSB_OTG
1870 put_device(musb->xceiv.dev);
1871#endif
1872
1873#ifdef CONFIG_USB_MUSB_HDRC_HCD
1874 usb_put_hcd(musb_to_hcd(musb));
1875#else
1876 kfree(musb);
1877#endif
1878}
1879
1880/*
1881 * Perform generic per-controller initialization.
1882 *
1883 * @dev: the controller (already clocked, etc)
1884 * @nIrq: IRQ number
1885 * @ctrl: virtual address of controller registers,
1886 * not yet corrected for platform-specific offsets
1887 */
1888static int __init
1889musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1890{
1891 int status;
1892 struct musb *musb;
1893 struct musb_hdrc_platform_data *plat = dev->platform_data;
1894
1895 /* The driver might handle more features than the board; OK.
1896 * Fail when the board needs a feature that's not enabled.
1897 */
1898 if (!plat) {
1899 dev_dbg(dev, "no platform_data?\n");
1900 return -ENODEV;
1901 }
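	/* The switch below pairs each board mode with the Kconfig role it
	 * needs; the bad_config label only exists when OTG support is
	 * compiled out, so unsupported modes fall through to -EINVAL.
	 */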
1902 switch (plat->mode) {
1903 case MUSB_HOST:
1904#ifdef CONFIG_USB_MUSB_HDRC_HCD
1905 break;
1906#else
1907 goto bad_config;
1908#endif
1909 case MUSB_PERIPHERAL:
1910#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1911 break;
1912#else
1913 goto bad_config;
1914#endif
1915 case MUSB_OTG:
1916#ifdef CONFIG_USB_MUSB_OTG
1917 break;
1918#else
1919bad_config:
1920#endif
1921 default:
1922 dev_err(dev, "incompatible Kconfig role setting\n");
1923 return -EINVAL;
1924 }
1925
1926 /* allocate */
1927 musb = allocate_instance(dev, plat->config, ctrl);
1928 if (!musb)
1929 return -ENOMEM;
1930
1931 spin_lock_init(&musb->lock);
1932 musb->board_mode = plat->mode;
1933 musb->board_set_power = plat->set_power;
1934 musb->set_clock = plat->set_clock;
1935 musb->min_power = plat->min_power;
1936
1937 /* Clock usage is chip-specific ... functional clock (DaVinci,
1938 * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
1939 * code does is make sure a clock handle is available; platform
1940 * code manages it during start/stop and suspend/resume.
1941 */
1942 if (plat->clock) {
1943 musb->clock = clk_get(dev, plat->clock);
1944 if (IS_ERR(musb->clock)) {
1945 status = PTR_ERR(musb->clock);
1946 musb->clock = NULL;
1947 goto fail;
1948 }
1949 }
1950
1951 /* assume vbus is off */
1952
1953 /* platform adjusts musb->mregs and musb->isr if needed,
1954 * and activates clocks
1955 */
1956 musb->isr = generic_interrupt;
1957 status = musb_platform_init(musb);
1958
1959 if (status < 0)
1960 goto fail;
1961 if (!musb->isr) {
1962 status = -ENODEV;
1963 goto fail2;
1964 }
1965
1966#ifndef CONFIG_MUSB_PIO_ONLY
1967 if (use_dma && dev->dma_mask) {
1968 struct dma_controller *c;
1969
1970 c = dma_controller_create(musb, musb->mregs);
1971 musb->dma_controller = c;
1972 if (c)
1973 (void) c->start(c);
1974 }
1975#endif
1976 /* ideally this would be abstracted in platform setup */
1977 if (!is_dma_capable() || !musb->dma_controller)
1978 dev->dma_mask = NULL;
1979
1980 /* be sure interrupts are disabled before connecting ISR */
1981 musb_platform_disable(musb);
1982 musb_generic_disable(musb);
1983
1984 /* setup musb parts of the core (especially endpoints) */
1985 status = musb_core_init(plat->config->multipoint
1986 ? MUSB_CONTROLLER_MHDRC
1987 : MUSB_CONTROLLER_HDRC, musb);
1988 if (status < 0)
1989 goto fail2;
1990
1991 /* Init IRQ workqueue before request_irq */
1992 INIT_WORK(&musb->irq_work, musb_irq_work);
1993
1994 /* attach to the IRQ */
1995 if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
1996 dev_err(dev, "request_irq %d failed!\n", nIrq);
1997 status = -ENODEV;
1998 goto fail2;
1999 }
2000 musb->nIrq = nIrq;
2001/* FIXME this handles wakeup irqs wrong */
2002 if (enable_irq_wake(nIrq) == 0)
2003 device_init_wakeup(dev, 1);
2004
2005 pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
2006 musb_driver_name,
2007 ({char *s;
2008 switch (musb->board_mode) {
2009 case MUSB_HOST: s = "Host"; break;
2010 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2011 default: s = "OTG"; break;
2012			} s; }),
2013 ctrl,
2014 (is_dma_capable() && musb->dma_controller)
2015 ? "DMA" : "PIO",
2016 musb->nIrq);
2017
2018#ifdef CONFIG_USB_MUSB_HDRC_HCD
2019 /* host side needs more setup, except for no-host modes */
2020 if (musb->board_mode != MUSB_PERIPHERAL) {
2021 struct usb_hcd *hcd = musb_to_hcd(musb);
2022
2023 if (musb->board_mode == MUSB_OTG)
2024 hcd->self.otg_port = 1;
2025 musb->xceiv.host = &hcd->self;
2026 hcd->power_budget = 2 * (plat->power ? : 250);
2027 }
2028#endif /* CONFIG_USB_MUSB_HDRC_HCD */
2029
2030 /* For the host-only role, we can activate right away.
2031 * (We expect the ID pin to be forcibly grounded!!)
2032 * Otherwise, wait till the gadget driver hooks up.
2033 */
2034 if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
2035 MUSB_HST_MODE(musb);
2036 musb->xceiv.default_a = 1;
2037 musb->xceiv.state = OTG_STATE_A_IDLE;
2038
2039 status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
2040
2041 DBG(1, "%s mode, status %d, devctl %02x %c\n",
2042 "HOST", status,
2043 musb_readb(musb->mregs, MUSB_DEVCTL),
2044 (musb_readb(musb->mregs, MUSB_DEVCTL)
2045 & MUSB_DEVCTL_BDEVICE
2046 ? 'B' : 'A'));
2047
2048 } else /* peripheral is enabled */ {
2049 MUSB_DEV_MODE(musb);
2050 musb->xceiv.default_a = 0;
2051 musb->xceiv.state = OTG_STATE_B_IDLE;
2052
2053 status = musb_gadget_setup(musb);
2054
2055 DBG(1, "%s mode, status %d, dev%02x\n",
2056 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
2057 status,
2058 musb_readb(musb->mregs, MUSB_DEVCTL));
2059
2060 }
2061
2062 if (status == 0)
2063 musb_debug_create("driver/musb_hdrc", musb);
2064 else {
2065fail:
2066 if (musb->clock)
2067 clk_put(musb->clock);
2068 device_init_wakeup(dev, 0);
2069 musb_free(musb);
2070 return status;
2071 }
2072
2073#ifdef CONFIG_SYSFS
2074 status = device_create_file(dev, &dev_attr_mode);
2075 status = device_create_file(dev, &dev_attr_vbus);
2076#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2077 status = device_create_file(dev, &dev_attr_srp);
2078#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
2079	status = 0;	/* sysfs attribute failures are not fatal */
2080#endif
2081
2082 return status;
2083
2084fail2:
2085 musb_platform_exit(musb);
2086 goto fail;
2087}
2088
2089/*-------------------------------------------------------------------------*/
2090
2091/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2092 * bridge to a platform device; this driver then suffices.
2093 */
2094
2095#ifndef CONFIG_MUSB_PIO_ONLY
2096static u64 *orig_dma_mask;
2097#endif
2098
2099static int __init musb_probe(struct platform_device *pdev)
2100{
2101 struct device *dev = &pdev->dev;
2102 int irq = platform_get_irq(pdev, 0);
2103 struct resource *iomem;
2104 void __iomem *base;
2105
2106 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2107	if (!iomem || irq <= 0)
2108 return -ENODEV;
2109
2110 base = ioremap(iomem->start, iomem->end - iomem->start + 1);
2111 if (!base) {
2112 dev_err(dev, "ioremap failed\n");
2113 return -ENOMEM;
2114 }
2115
2116#ifndef CONFIG_MUSB_PIO_ONLY
2117 /* clobbered by use_dma=n */
2118 orig_dma_mask = dev->dma_mask;
2119#endif
2120 return musb_init_controller(dev, irq, base);
2121}
2122
2123static int __devexit musb_remove(struct platform_device *pdev)
2124{
2125 struct musb *musb = dev_to_musb(&pdev->dev);
2126 void __iomem *ctrl_base = musb->ctrl_base;
2127
2128 /* this gets called on rmmod.
2129 * - Host mode: host may still be active
2130 * - Peripheral mode: peripheral is deactivated (or never-activated)
2131 * - OTG mode: both roles are deactivated (or never-activated)
2132 */
2133 musb_shutdown(pdev);
2134 musb_debug_delete("driver/musb_hdrc", musb);
2135#ifdef CONFIG_USB_MUSB_HDRC_HCD
2136 if (musb->board_mode == MUSB_HOST)
2137 usb_remove_hcd(musb_to_hcd(musb));
2138#endif
2139 musb_free(musb);
2140 iounmap(ctrl_base);
2141 device_init_wakeup(&pdev->dev, 0);
2142#ifndef CONFIG_MUSB_PIO_ONLY
2143 pdev->dev.dma_mask = orig_dma_mask;
2144#endif
2145 return 0;
2146}
2147
2148#ifdef CONFIG_PM
2149
2150static int musb_suspend(struct platform_device *pdev, pm_message_t message)
2151{
2152 unsigned long flags;
2153 struct musb *musb = dev_to_musb(&pdev->dev);
2154
2155 if (!musb->clock)
2156 return 0;
2157
2158 spin_lock_irqsave(&musb->lock, flags);
2159
2160 if (is_peripheral_active(musb)) {
2161 /* FIXME force disconnect unless we know USB will wake
2162 * the system up quickly enough to respond ...
2163 */
2164 } else if (is_host_active(musb)) {
2165 /* we know all the children are suspended; sometimes
2166 * they will even be wakeup-enabled.
2167 */
2168 }
2169
2170 if (musb->set_clock)
2171 musb->set_clock(musb->clock, 0);
2172 else
2173 clk_disable(musb->clock);
2174 spin_unlock_irqrestore(&musb->lock, flags);
2175 return 0;
2176}
2177
2178static int musb_resume(struct platform_device *pdev)
2179{
2180 unsigned long flags;
2181 struct musb *musb = dev_to_musb(&pdev->dev);
2182
2183 if (!musb->clock)
2184 return 0;
2185
2186 spin_lock_irqsave(&musb->lock, flags);
2187
2188 if (musb->set_clock)
2189 musb->set_clock(musb->clock, 1);
2190 else
2191 clk_enable(musb->clock);
2192
2193 /* for static cmos like DaVinci, register values were preserved
2194 * unless for some reason the whole soc powered down and we're
2195 * not treating that as a whole-system restart (e.g. swsusp)
2196 */
2197 spin_unlock_irqrestore(&musb->lock, flags);
2198 return 0;
2199}
2200
2201#else
2202#define musb_suspend NULL
2203#define musb_resume NULL
2204#endif
2205
2206static struct platform_driver musb_driver = {
2207 .driver = {
2208 .name = (char *)musb_driver_name,
2209 .bus = &platform_bus_type,
2210 .owner = THIS_MODULE,
2211 },
2212 .remove = __devexit_p(musb_remove),
2213 .shutdown = musb_shutdown,
2214 .suspend = musb_suspend,
2215 .resume = musb_resume,
2216};
2217
2218/*-------------------------------------------------------------------------*/
2219
2220static int __init musb_init(void)
2221{
2222#ifdef CONFIG_USB_MUSB_HDRC_HCD
2223 if (usb_disabled())
2224 return 0;
2225#endif
2226
2227 pr_info("%s: version " MUSB_VERSION ", "
2228#ifdef CONFIG_MUSB_PIO_ONLY
2229 "pio"
2230#elif defined(CONFIG_USB_TI_CPPI_DMA)
2231 "cppi-dma"
2232#elif defined(CONFIG_USB_INVENTRA_DMA)
2233 "musb-dma"
2234#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
2235 "tusb-omap-dma"
2236#else
2237 "?dma?"
2238#endif
2239 ", "
2240#ifdef CONFIG_USB_MUSB_OTG
2241 "otg (peripheral+host)"
2242#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
2243 "peripheral"
2244#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
2245 "host"
2246#endif
2247 ", debug=%d\n",
2248 musb_driver_name, debug);
2249 return platform_driver_probe(&musb_driver, musb_probe);
2250}
2251
2252/* make us init after usbcore and before usb
2253 * gadget and host-side drivers start to register
2254 */
2255subsys_initcall(musb_init);
2256
2257static void __exit musb_cleanup(void)
2258{
2259 platform_driver_unregister(&musb_driver);
2260}
2261module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 000000000000..eade46d81708
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,507 @@
1/*
2 * MUSB OTG driver defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_CORE_H__
36#define __MUSB_CORE_H__
37
38#include <linux/slab.h>
39#include <linux/list.h>
40#include <linux/interrupt.h>
41#include <linux/smp_lock.h>
42#include <linux/errno.h>
43#include <linux/clk.h>
44#include <linux/device.h>
45#include <linux/usb/ch9.h>
46#include <linux/usb/gadget.h>
47#include <linux/usb.h>
48#include <linux/usb/otg.h>
49#include <linux/usb/musb.h>
50
51struct musb;
52struct musb_hw_ep;
53struct musb_ep;
54
55
56#include "musb_debug.h"
57#include "musb_dma.h"
58
59#include "musb_io.h"
60#include "musb_regs.h"
61
62#include "musb_gadget.h"
63#include "../core/hcd.h"
64#include "musb_host.h"
65
66
67
68#ifdef CONFIG_USB_MUSB_OTG
69
70#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
71#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
72#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
73
74/* NOTE: otg and peripheral-only state machines start at B_IDLE.
75 * OTG or host-only go to A_IDLE when ID is sensed.
76 */
77#define is_peripheral_active(m) (!(m)->is_host)
78#define is_host_active(m) ((m)->is_host)
79
80#else
81#define is_peripheral_enabled(musb) is_peripheral_capable()
82#define is_host_enabled(musb) is_host_capable()
83#define is_otg_enabled(musb) 0
84
85#define is_peripheral_active(musb) is_peripheral_capable()
86#define is_host_active(musb) is_host_capable()
87#endif
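/* Summary, derived from the macros above: with OTG support the *_enabled()
 * tests key off the board mode and the *_active() tests off the current
 * is_host role; without OTG both collapse to the compile-time *_capable()
 * constants, so role-specific dead code drops out at build time.
 */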
88
89#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
90/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
91 * override that choice selection (often USB_GADGET_DUMMY_HCD).
92 */
93#ifndef CONFIG_USB_GADGET_MUSB_HDRC
94#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
95#endif
96#endif /* need MUSB gadget selection */
97
98
99#ifdef CONFIG_PROC_FS
100#include <linux/fs.h>
101#define MUSB_CONFIG_PROC_FS
102#endif
103
104/****************************** PERIPHERAL ROLE *****************************/
105
106#ifdef CONFIG_USB_GADGET_MUSB_HDRC
107
108#define is_peripheral_capable() (1)
109
110extern irqreturn_t musb_g_ep0_irq(struct musb *);
111extern void musb_g_tx(struct musb *, u8);
112extern void musb_g_rx(struct musb *, u8);
113extern void musb_g_reset(struct musb *);
114extern void musb_g_suspend(struct musb *);
115extern void musb_g_resume(struct musb *);
116extern void musb_g_wakeup(struct musb *);
117extern void musb_g_disconnect(struct musb *);
118
119#else
120
121#define is_peripheral_capable() (0)
122
123static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
124static inline void musb_g_reset(struct musb *m) {}
125static inline void musb_g_suspend(struct musb *m) {}
126static inline void musb_g_resume(struct musb *m) {}
127static inline void musb_g_wakeup(struct musb *m) {}
128static inline void musb_g_disconnect(struct musb *m) {}
static inline void musb_g_tx(struct musb *m, u8 e) {}	/* stubs added to mirror the host-role block below */
static inline void musb_g_rx(struct musb *m, u8 e) {}
129
130#endif
131
132/****************************** HOST ROLE ***********************************/
133
134#ifdef CONFIG_USB_MUSB_HDRC_HCD
135
136#define is_host_capable() (1)
137
138extern irqreturn_t musb_h_ep0_irq(struct musb *);
139extern void musb_host_tx(struct musb *, u8);
140extern void musb_host_rx(struct musb *, u8);
141
142#else
143
144#define is_host_capable() (0)
145
146static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
147static inline void musb_host_tx(struct musb *m, u8 e) {}
148static inline void musb_host_rx(struct musb *m, u8 e) {}
149
150#endif
151
152
153/****************************** CONSTANTS ********************************/
154
155#ifndef MUSB_C_NUM_EPS
156#define MUSB_C_NUM_EPS ((u8)16)
157#endif
158
159#ifndef MUSB_MAX_END0_PACKET
160#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
161#endif
162
163/* host side ep0 states */
164enum musb_h_ep0_state {
165 MUSB_EP0_IDLE,
166 MUSB_EP0_START, /* expect ack of setup */
167 MUSB_EP0_IN, /* expect IN DATA */
168 MUSB_EP0_OUT, /* expect ack of OUT DATA */
169 MUSB_EP0_STATUS, /* expect ack of STATUS */
170} __attribute__ ((packed));
171
172/* peripheral side ep0 states */
173enum musb_g_ep0_state {
174 MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
175 MUSB_EP0_STAGE_TX, /* IN data */
176 MUSB_EP0_STAGE_RX, /* OUT data */
177 MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
178 MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
179 MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
180} __attribute__ ((packed));
181
182/* OTG protocol constants */
183#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
184#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
185#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
186
187/*************************** REGISTER ACCESS ********************************/
188
189/* Endpoint registers (other than dynfifo setup) can be accessed either
190 * directly with the "flat" model, or after setting up an index register.
191 */
192
193#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
194 || defined(CONFIG_ARCH_OMAP3430)
195/* REVISIT indexed access seemed to
196 * misbehave (on DaVinci) for at least peripheral IN ...
197 */
198#define MUSB_FLAT_REG
199#endif
200
201/* TUSB mapping: "flat" plus ep0 special cases */
202#if defined(CONFIG_USB_TUSB6010)
203#define musb_ep_select(_mbase, _epnum) \
204 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
205#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
206
207/* "flat" mapping: each endpoint has its own i/o address */
208#elif defined(MUSB_FLAT_REG)
209#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
210#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
211
212/* "indexed" mapping: INDEX register controls register bank select */
213#else
214#define musb_ep_select(_mbase, _epnum) \
215 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
216#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
217#endif
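/* A hedged illustration of how callers use these (MUSB_EP_OFFSET(i, 0) also
 * appears in musb_core.c; MUSB_TXCSR as used in musb_gadget.c):
 *
 *	musb_ep_select(mbase, 3);	// indexed model: switch register bank
 *	csr = musb_readw(mbase, MUSB_EP_OFFSET(3, MUSB_TXCSR));
 *
 * With MUSB_FLAT_REG the select compiles to nothing and MUSB_FLAT_OFFSET()
 * yields a distinct per-endpoint address; callers stay identical either way.
 */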
218
219/****************************** FUNCTIONS ********************************/
220
221#define MUSB_HST_MODE(_musb) \
222	do { (_musb)->is_host = true; } while (0)
223#define MUSB_DEV_MODE(_musb) \
224	do { (_musb)->is_host = false; } while (0)
225
226#define test_devctl_hst_mode(_x) \
227 (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
228
229#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
230
231/******************************** TYPES *************************************/
232
233/*
234 * struct musb_hw_ep - endpoint hardware (bidirectional)
235 *
236 * Ordered slightly for better cacheline locality.
237 */
238struct musb_hw_ep {
239 struct musb *musb;
240 void __iomem *fifo;
241 void __iomem *regs;
242
243#ifdef CONFIG_USB_TUSB6010
244 void __iomem *conf;
245#endif
246
247 /* index in musb->endpoints[] */
248 u8 epnum;
249
250 /* hardware configuration, possibly dynamic */
251 bool is_shared_fifo;
252 bool tx_double_buffered;
253 bool rx_double_buffered;
254 u16 max_packet_sz_tx;
255 u16 max_packet_sz_rx;
256
257 struct dma_channel *tx_channel;
258 struct dma_channel *rx_channel;
259
260#ifdef CONFIG_USB_TUSB6010
261 /* TUSB has "asynchronous" and "synchronous" dma modes */
262 dma_addr_t fifo_async;
263 dma_addr_t fifo_sync;
264 void __iomem *fifo_sync_va;
265#endif
266
267#ifdef CONFIG_USB_MUSB_HDRC_HCD
268 void __iomem *target_regs;
269
270 /* currently scheduled peripheral endpoint */
271 struct musb_qh *in_qh;
272 struct musb_qh *out_qh;
273
274 u8 rx_reinit;
275 u8 tx_reinit;
276#endif
277
278#ifdef CONFIG_USB_GADGET_MUSB_HDRC
279 /* peripheral side */
280 struct musb_ep ep_in; /* TX */
281 struct musb_ep ep_out; /* RX */
282#endif
283};
284
285static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
286{
287#ifdef CONFIG_USB_GADGET_MUSB_HDRC
288 return next_request(&hw_ep->ep_in);
289#else
290 return NULL;
291#endif
292}
293
294static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
295{
296#ifdef CONFIG_USB_GADGET_MUSB_HDRC
297 return next_request(&hw_ep->ep_out);
298#else
299 return NULL;
300#endif
301}
302
303/*
304 * struct musb - Driver instance data.
305 */
306struct musb {
307 /* device lock */
308 spinlock_t lock;
309 struct clk *clock;
310 irqreturn_t (*isr)(int, void *);
311 struct work_struct irq_work;
312
313/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
314#define MUSB_PORT_STAT_RESUME (1 << 31)
315
316 u32 port1_status;
317
318#ifdef CONFIG_USB_MUSB_HDRC_HCD
319 unsigned long rh_timer;
320
321 enum musb_h_ep0_state ep0_stage;
322
323 /* bulk traffic normally dedicates endpoint hardware, and each
324 * direction has its own ring of host side endpoints.
325 * we try to progress the transfer at the head of each endpoint's
326 * queue until it completes or NAKs too much; then we try the next
327 * endpoint.
328 */
329 struct musb_hw_ep *bulk_ep;
330
331 struct list_head control; /* of musb_qh */
332 struct list_head in_bulk; /* of musb_qh */
333 struct list_head out_bulk; /* of musb_qh */
334 struct musb_qh *periodic[32]; /* tree of interrupt+iso */
335#endif
336
337 /* called with IRQs blocked; ON/nonzero implies starting a session,
338 * and waiting at least a_wait_vrise_tmout.
339 */
340 void (*board_set_vbus)(struct musb *, int is_on);
341
342 struct dma_controller *dma_controller;
343
344 struct device *controller;
345 void __iomem *ctrl_base;
346 void __iomem *mregs;
347
348#ifdef CONFIG_USB_TUSB6010
349 dma_addr_t async;
350 dma_addr_t sync;
351 void __iomem *sync_va;
352#endif
353
354 /* passed down from chip/board specific irq handlers */
355 u8 int_usb;
356 u16 int_rx;
357 u16 int_tx;
358
359 struct otg_transceiver xceiv;
360
361 int nIrq;
362
363 struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
364#define control_ep endpoints
365
366#define VBUSERR_RETRY_COUNT 3
367 u16 vbuserr_retry;
368 u16 epmask;
369 u8 nr_endpoints;
370
371 u8 board_mode; /* enum musb_mode */
372 int (*board_set_power)(int state);
373
374 int (*set_clock)(struct clk *clk, int is_active);
375
376 u8 min_power; /* vbus for periph, in mA/2 */
377
378 bool is_host;
379
380 int a_wait_bcon; /* VBUS timeout in msecs */
381 unsigned long idle_timeout; /* Next timeout in jiffies */
382
383 /* active means connected and not suspended */
384 unsigned is_active:1;
385
386 unsigned is_multipoint:1;
387 unsigned ignore_disconnect:1; /* during bus resets */
388
389#ifdef C_MP_TX
390 unsigned bulk_split:1;
391#define can_bulk_split(musb,type) \
392 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
393#else
394#define can_bulk_split(musb, type) 0
395#endif
396
397#ifdef C_MP_RX
398 unsigned bulk_combine:1;
399#define can_bulk_combine(musb,type) \
400 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
401#else
402#define can_bulk_combine(musb, type) 0
403#endif
404
405#ifdef CONFIG_USB_GADGET_MUSB_HDRC
406 /* is_suspended means USB B_PERIPHERAL suspend */
407 unsigned is_suspended:1;
408
409 /* may_wakeup means remote wakeup is enabled */
410 unsigned may_wakeup:1;
411
412 /* is_self_powered is reported in device status and the
413 * config descriptor. is_bus_powered means B_PERIPHERAL
414 * draws some VBUS current; both can be true.
415 */
416 unsigned is_self_powered:1;
417 unsigned is_bus_powered:1;
418
419 unsigned set_address:1;
420 unsigned test_mode:1;
421 unsigned softconnect:1;
422
423 u8 address;
424 u8 test_mode_nr;
425 u16 ackpend; /* ep0 */
426 enum musb_g_ep0_state ep0_state;
427 struct usb_gadget g; /* the gadget */
428 struct usb_gadget_driver *gadget_driver; /* its driver */
429#endif
430
431 struct musb_hdrc_config *config;
432
433#ifdef MUSB_CONFIG_PROC_FS
434 struct proc_dir_entry *proc_entry;
435#endif
436};
437
438static inline void musb_set_vbus(struct musb *musb, int is_on)
439{
440 musb->board_set_vbus(musb, is_on);
441}
442
443#ifdef CONFIG_USB_GADGET_MUSB_HDRC
444static inline struct musb *gadget_to_musb(struct usb_gadget *g)
445{
446 return container_of(g, struct musb, g);
447}
448#endif
449
450
451/***************************** Glue it together *****************************/
452
453extern const char musb_driver_name[];
454
455extern void musb_start(struct musb *musb);
456extern void musb_stop(struct musb *musb);
457
458extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
459extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
460
461extern void musb_load_testpacket(struct musb *);
462
463extern irqreturn_t musb_interrupt(struct musb *);
464
465extern void musb_platform_enable(struct musb *musb);
466extern void musb_platform_disable(struct musb *musb);
467
468extern void musb_hnp_stop(struct musb *musb);
469
470extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
471
472#if defined(CONFIG_USB_TUSB6010) || \
473 defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
474extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
475#else
476#define musb_platform_try_idle(x, y) do {} while (0)
477#endif
478
479#ifdef CONFIG_USB_TUSB6010
480extern int musb_platform_get_vbus_status(struct musb *musb);
481#else
482#define musb_platform_get_vbus_status(x) 0
483#endif
484
485extern int __init musb_platform_init(struct musb *musb);
486extern int musb_platform_exit(struct musb *musb);
487
488/*-------------------------- ProcFS definitions ---------------------*/
489
490struct proc_dir_entry;
491
492#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
493extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
494extern void musb_debug_delete(char *name, struct musb *data);
495
496#else
497static inline struct proc_dir_entry *
498musb_debug_create(char *name, struct musb *data)
499{
500 return NULL;
501}
502static inline void musb_debug_delete(char *name, struct musb *data)
503{
504}
505#endif
506
507#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 000000000000..3bdb311e820d
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,66 @@
1/*
2 * MUSB OTG driver debug defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_LINUX_DEBUG_H__
36#define __MUSB_LINUX_DEBUG_H__
37
38#define yprintk(facility, format, args...) \
39 do { printk(facility "%s %d: " format , \
40 __func__, __LINE__ , ## args); } while (0)
41#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
42#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
43#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
44
45#define xprintk(level, facility, format, args...) do { \
46 if (_dbg_level(level)) { \
47 printk(facility "%s %d: " format , \
48 __func__, __LINE__ , ## args); \
49 } } while (0)
50
51#if MUSB_DEBUG > 0
52extern unsigned debug;
53#else
54#define debug 0
55#endif
56
57static inline int _dbg_level(unsigned l)
58{
59 return debug >= l;
60}
61
62#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
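/* Illustrative use: DBG(2, "vbus error\n") prints only when the driver-wide
 * debug level is at least 2; when MUSB_DEBUG is disabled, "debug" is the
 * constant 0 and every DBG() call compiles away.
 */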
63
64extern const char *otg_state_string(struct musb *);
65
66#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 000000000000..0a2c4e3602c1
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
1/*
2 * MUSB OTG driver DMA controller abstraction
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_DMA_H__
36#define __MUSB_DMA_H__
37
38struct musb_hw_ep;
39
40/*
41 * DMA Controller Abstraction
42 *
43 * DMA Controllers are abstracted to allow use of a variety of different
44 * implementations of DMA, as allowed by the Inventra USB cores. On the
45 * host side, usbcore sets up the DMA mappings and flushes caches; on the
46 * peripheral side, the gadget controller driver does. Responsibilities
47 * of a DMA controller driver include:
48 *
49 * - Handling the details of moving multiple USB packets
50 * in cooperation with the Inventra USB core, including especially
51 * the correct RX side treatment of short packets and buffer-full
52 * states (both of which terminate transfers).
53 *
54 * - Knowing the correlation between dma channels and the
55 * Inventra core's local endpoint resources and data direction.
56 *
57 * - Maintaining a list of allocated/available channels.
58 *
59 * - Updating channel status on interrupts,
60 * whether shared with the Inventra core or separate.
61 */
62
63#define DMA_ADDR_INVALID (~(dma_addr_t)0)
64
65#ifndef CONFIG_MUSB_PIO_ONLY
66#define is_dma_capable() (1)
67#else
68#define is_dma_capable() (0)
69#endif
70
71#ifdef CONFIG_USB_TI_CPPI_DMA
72#define is_cppi_enabled() 1
73#else
74#define is_cppi_enabled() 0
75#endif
76
77#ifdef CONFIG_USB_TUSB_OMAP_DMA
78#define tusb_dma_omap() 1
79#else
80#define tusb_dma_omap() 0
81#endif
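/* These constant predicates let shared code avoid #ifdef clutter; a sketch
 * ("cppi_setup" is a made-up name; see musb_dma_completion() in musb_core.c
 * for a real caller):
 *
 *	if (is_cppi_enabled())
 *		cppi_setup();	// branch is compiled out when the macro is 0
 */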
82
83/*
84 * DMA channel status ... updated by the dma controller driver whenever that
85 * status changes, and protected by the overall controller spinlock.
86 */
87enum dma_channel_status {
88 /* unallocated */
89 MUSB_DMA_STATUS_UNKNOWN,
90 /* allocated ... but not busy, no errors */
91 MUSB_DMA_STATUS_FREE,
92 /* busy ... transactions are active */
93 MUSB_DMA_STATUS_BUSY,
94 /* transaction(s) aborted due to ... dma or memory bus error */
95 MUSB_DMA_STATUS_BUS_ABORT,
96 /* transaction(s) aborted due to ... core error or USB fault */
97 MUSB_DMA_STATUS_CORE_ABORT
98};
99
100struct dma_controller;
101
102/**
103 * struct dma_channel - A DMA channel.
104 * @private_data: channel-private data
105 * @max_len: the maximum number of bytes the channel can move in one
106 * transaction (typically representing many USB maximum-sized packets)
107 * @actual_len: how many bytes have been transferred
108 * @status: current channel status (updated e.g. on interrupt)
109 * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
110 *
111 * channels are associated with an endpoint for the duration of at least
112 * one usb transfer.
113 */
114struct dma_channel {
115 void *private_data;
116 /* FIXME not void* private_data, but a dma_controller * */
117 size_t max_len;
118 size_t actual_len;
119 enum dma_channel_status status;
120 bool desired_mode;
121};
122
123/*
124 * dma_channel_status - return status of dma channel
125 * @c: the channel
126 *
127 * Returns the software's view of the channel status. If that status is BUSY
128 * then it's possible that the hardware has completed (or aborted) a transfer,
129 * so the driver needs to update that status.
130 */
131static inline enum dma_channel_status
132dma_channel_status(struct dma_channel *c)
133{
134 return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
135}
136
137/**
138 * struct dma_controller - A DMA Controller.
139 * @start: call this to start a DMA controller;
140 * return 0 on success, else negative errno
141 * @stop: call this to stop a DMA controller
142 * return 0 on success, else negative errno
143 * @channel_alloc: call this to allocate a DMA channel
144 * @channel_release: call this to release a DMA channel
145 * @channel_abort: call this to abort a pending DMA transaction,
146 * returning it to FREE (but allocated) state
147 *
148 * Controllers manage dma channels.
149 */
150struct dma_controller {
151 int (*start)(struct dma_controller *);
152 int (*stop)(struct dma_controller *);
153 struct dma_channel *(*channel_alloc)(struct dma_controller *,
154 struct musb_hw_ep *, u8 is_tx);
155 void (*channel_release)(struct dma_channel *);
156 int (*channel_program)(struct dma_channel *channel,
157 u16 maxpacket, u8 mode,
158 dma_addr_t dma_addr,
159 u32 length);
160 int (*channel_abort)(struct dma_channel *);
161};
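/* A hedged sketch of the expected call sequence (locking and error handling
 * elided; "hw_ep", "is_tx" and the transfer parameters come from the caller,
 * cf. txstate() in musb_gadget.c):
 *
 *	struct dma_channel *ch = c->channel_alloc(c, hw_ep, is_tx);
 *
 *	if (ch && c->channel_program(ch, maxpacket, mode, dma_addr, len)) {
 *		;	// hardware runs; musb_dma_completion() reports back
 *	} else if (ch) {
 *		c->channel_release(ch);	// fall back to PIO
 *	}
 */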
162
163/* called after channel_program(), may indicate a fault */
164extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
165
166
167extern struct dma_controller *__init
168dma_controller_create(struct musb *, void __iomem *);
169
170extern void dma_controller_destroy(struct dma_controller *);
171
172#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 000000000000..d6a802c224fa
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
1/*
2 * MUSB OTG driver peripheral support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/timer.h>
38#include <linux/module.h>
39#include <linux/smp.h>
40#include <linux/spinlock.h>
41#include <linux/delay.h>
42#include <linux/moduleparam.h>
43#include <linux/stat.h>
44#include <linux/dma-mapping.h>
45
46#include "musb_core.h"
47
48
49/* MUSB PERIPHERAL status 3-mar-2006:
50 *
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
52 * Minor glitches:
53 *
54 * + remote wakeup to Linux hosts work, but saw USBCV failures;
55 * in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly
58 * clearing SENDSTALL?
59 *
60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not
63 * required.
64 *
65 * - TX/IN
66 *    + both pio and dma behave well with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost?
71 *
72 * - RX/OUT
73 *    + both pio and dma behave well with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs
78 *
79 * - ISO not tested ... might work, but only weakly isochronous
80 *
81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.)
84 *
85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works
89 */
90
91/* ----------------------------------------------------------------------- */
92
93/*
94 * Immediately complete a request.
95 *
96 * @ep: the endpoint being completed; @request: the request to complete
97 * @status: the status to complete the request with
98 * Context: controller locked, IRQs blocked.
99 */
100void musb_g_giveback(
101 struct musb_ep *ep,
102 struct usb_request *request,
103 int status)
104__releases(ep->musb->lock)
105__acquires(ep->musb->lock)
106{
107 struct musb_request *req;
108 struct musb *musb;
109 int busy = ep->busy;
110
111 req = to_musb_request(request);
112
113 list_del(&request->list);
114 if (req->request.status == -EINPROGRESS)
115 req->request.status = status;
116 musb = req->musb;
117
118 ep->busy = 1;
119 spin_unlock(&musb->lock);
120 if (is_dma_capable()) {
121 if (req->mapped) {
122 dma_unmap_single(musb->controller,
123 req->request.dma,
124 req->request.length,
125 req->tx
126 ? DMA_TO_DEVICE
127 : DMA_FROM_DEVICE);
128 req->request.dma = DMA_ADDR_INVALID;
129 req->mapped = 0;
130 } else if (req->request.dma != DMA_ADDR_INVALID)
131 dma_sync_single_for_cpu(musb->controller,
132 req->request.dma,
133 req->request.length,
134 req->tx
135 ? DMA_TO_DEVICE
136 : DMA_FROM_DEVICE);
137 }
138 if (request->status == 0)
139 DBG(5, "%s done request %p, %d/%d\n",
140 ep->end_point.name, request,
141 req->request.actual, req->request.length);
142 else
143 DBG(2, "%s request %p, %d/%d fault %d\n",
144 ep->end_point.name, request,
145 req->request.actual, req->request.length,
146 request->status);
147 req->request.complete(&req->ep->end_point, &req->request);
148 spin_lock(&musb->lock);
149 ep->busy = busy;
150}
151
152/* ----------------------------------------------------------------------- */
153
154/*
155 * Abort requests queued to an endpoint, using the given status. Synchronous.
156 * The caller has locked the controller, blocked irqs, and selected this ep.
157 */
158static void nuke(struct musb_ep *ep, const int status)
159{
160 struct musb_request *req = NULL;
161 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
162
163 ep->busy = 1;
164
165 if (is_dma_capable() && ep->dma) {
166 struct dma_controller *c = ep->musb->dma_controller;
167 int value;
168 if (ep->is_in) {
169 musb_writew(epio, MUSB_TXCSR,
170 0 | MUSB_TXCSR_FLUSHFIFO);
171 musb_writew(epio, MUSB_TXCSR,
172 0 | MUSB_TXCSR_FLUSHFIFO);
173 } else {
174 musb_writew(epio, MUSB_RXCSR,
175 0 | MUSB_RXCSR_FLUSHFIFO);
176 musb_writew(epio, MUSB_RXCSR,
177 0 | MUSB_RXCSR_FLUSHFIFO);
178 }
179
180 value = c->channel_abort(ep->dma);
181 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
182 c->channel_release(ep->dma);
183 ep->dma = NULL;
184 }
185
186 while (!list_empty(&(ep->req_list))) {
187 req = container_of(ep->req_list.next, struct musb_request,
188 request.list);
189 musb_g_giveback(ep, &req->request, status);
190 }
191}
192
193/* ----------------------------------------------------------------------- */
194
195/* Data transfers - pure PIO, pure DMA, or mixed mode */
196
197/*
198 * This assumes the separate CPPI engine is responding to DMA requests
199 * from the usb core ... sequenced a bit differently from mentor dma.
200 */
201
202static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
203{
204 if (can_bulk_split(musb, ep->type))
205 return ep->hw_ep->max_packet_sz_tx;
206 else
207 return ep->packet_sz;
208}
209
210
211#ifdef CONFIG_USB_INVENTRA_DMA
212
213/* Peripheral tx (IN) using Mentor DMA works as follows:
214 Only mode 0 is used for transfers <= wPktSize,
215 mode 1 is used for larger transfers,
216
217 One of the following happens:
218 - Host sends IN token which causes an endpoint interrupt
219 -> TxAvail
220 -> if DMA is currently busy, exit.
221 -> if queue is non-empty, txstate().
222
223 - Request is queued by the gadget driver.
224 -> if queue was previously empty, txstate()
225
226 txstate()
227 -> start
228 /\ -> setup DMA
229 | (data is transferred to the FIFO, then sent out when
230	|	IN token(s) are recd from Host.)
231 | -> DMA interrupt on completion
232 | calls TxAvail.
233	|		-> stop DMA, ~DmaEnab,
234 | -> set TxPktRdy for last short pkt or zlp
235 | -> Complete Request
236 | -> Continue next request (call txstate)
237 |___________________________________|
238
239 * Non-Mentor DMA engines can of course work differently, such as by
240 * upleveling from irq-per-packet to irq-per-buffer.
241 */
242
243#endif
244
245/*
246 * An endpoint is transmitting data. This can be called either from
247 * the IRQ routine or from ep.queue() to kickstart a request on an
248 * endpoint.
249 *
250 * Context: controller locked, IRQs blocked, endpoint selected
251 */
252static void txstate(struct musb *musb, struct musb_request *req)
253{
254 u8 epnum = req->epnum;
255 struct musb_ep *musb_ep;
256 void __iomem *epio = musb->endpoints[epnum].regs;
257 struct usb_request *request;
258 u16 fifo_count = 0, csr;
259 int use_dma = 0;
260
261 musb_ep = req->ep;
262
263 /* we shouldn't get here while DMA is active ... but we do ... */
264 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
265 DBG(4, "dma pending...\n");
266 return;
267 }
268
269 /* read TXCSR before */
270 csr = musb_readw(epio, MUSB_TXCSR);
271
272 request = &req->request;
273 fifo_count = min(max_ep_writesize(musb, musb_ep),
274 (int)(request->length - request->actual));
275
276 if (csr & MUSB_TXCSR_TXPKTRDY) {
277		DBG(5, "%s old packet still ready, txcsr %03x\n",
278 musb_ep->end_point.name, csr);
279 return;
280 }
281
282 if (csr & MUSB_TXCSR_P_SENDSTALL) {
283 DBG(5, "%s stalling, txcsr %03x\n",
284 musb_ep->end_point.name, csr);
285 return;
286 }
287
288 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
289 epnum, musb_ep->packet_sz, fifo_count,
290 csr);
291
292#ifndef CONFIG_MUSB_PIO_ONLY
293 if (is_dma_capable() && musb_ep->dma) {
294 struct dma_controller *c = musb->dma_controller;
295
296 use_dma = (request->dma != DMA_ADDR_INVALID);
297
298 /* MUSB_TXCSR_P_ISO is still set correctly */
299
300#ifdef CONFIG_USB_INVENTRA_DMA
301 {
302 size_t request_size;
303
304 /* setup DMA, then program endpoint CSR */
305 request_size = min(request->length,
306 musb_ep->dma->max_len);
307 if (request_size <= musb_ep->packet_sz)
308 musb_ep->dma->desired_mode = 0;
309 else
310 musb_ep->dma->desired_mode = 1;
311
312 use_dma = use_dma && c->channel_program(
313 musb_ep->dma, musb_ep->packet_sz,
314 musb_ep->dma->desired_mode,
315 request->dma, request_size);
316 if (use_dma) {
317 if (musb_ep->dma->desired_mode == 0) {
318 /* ASSERT: DMAENAB is clear */
319 csr &= ~(MUSB_TXCSR_AUTOSET |
320 MUSB_TXCSR_DMAMODE);
321 csr |= (MUSB_TXCSR_DMAENAB |
322 MUSB_TXCSR_MODE);
323 /* against programming guide */
324 } else
325 csr |= (MUSB_TXCSR_AUTOSET
326 | MUSB_TXCSR_DMAENAB
327 | MUSB_TXCSR_DMAMODE
328 | MUSB_TXCSR_MODE);
329
330 csr &= ~MUSB_TXCSR_P_UNDERRUN;
331 musb_writew(epio, MUSB_TXCSR, csr);
332 }
333 }
334
335#elif defined(CONFIG_USB_TI_CPPI_DMA)
336 /* program endpoint CSR first, then setup DMA */
337 csr &= ~(MUSB_TXCSR_AUTOSET
338 | MUSB_TXCSR_DMAMODE
339 | MUSB_TXCSR_P_UNDERRUN
340 | MUSB_TXCSR_TXPKTRDY);
341 csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
342 musb_writew(epio, MUSB_TXCSR,
343 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
344 | csr);
345
346 /* ensure writebuffer is empty */
347 csr = musb_readw(epio, MUSB_TXCSR);
348
349 /* NOTE host side sets DMAENAB later than this; both are
350 * OK since the transfer dma glue (between CPPI and Mentor
351 * fifos) just tells CPPI it could start. Data only moves
352 * to the USB TX fifo when both fifos are ready.
353 */
354
355 /* "mode" is irrelevant here; handle terminating ZLPs like
356 * PIO does, since the hardware RNDIS mode seems unreliable
357 * except for the last-packet-is-already-short case.
358 */
359 use_dma = use_dma && c->channel_program(
360 musb_ep->dma, musb_ep->packet_sz,
361 0,
362 request->dma,
363 request->length);
364 if (!use_dma) {
365 c->channel_release(musb_ep->dma);
366 musb_ep->dma = NULL;
367 /* ASSERT: DMAENAB clear */
368 csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
369			/* invariant: request->buf is non-null */
370 }
371#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
372 use_dma = use_dma && c->channel_program(
373 musb_ep->dma, musb_ep->packet_sz,
374 request->zero,
375 request->dma,
376 request->length);
377#endif
378 }
379#endif
380
381 if (!use_dma) {
382 musb_write_fifo(musb_ep->hw_ep, fifo_count,
383 (u8 *) (request->buf + request->actual));
384 request->actual += fifo_count;
385 csr |= MUSB_TXCSR_TXPKTRDY;
386 csr &= ~MUSB_TXCSR_P_UNDERRUN;
387 musb_writew(epio, MUSB_TXCSR, csr);
388 }
389
390 /* host may already have the data when this message shows... */
391 DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
392 musb_ep->end_point.name, use_dma ? "dma" : "pio",
393 request->actual, request->length,
394 musb_readw(epio, MUSB_TXCSR),
395 fifo_count,
396 musb_readw(epio, MUSB_TXMAXP));
397}
398
399/*
400 * FIFO state update (e.g. data ready).
401 * Called from IRQ, with controller locked.
402 */
403void musb_g_tx(struct musb *musb, u8 epnum)
404{
405 u16 csr;
406 struct usb_request *request;
407 u8 __iomem *mbase = musb->mregs;
408 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
409 void __iomem *epio = musb->endpoints[epnum].regs;
410 struct dma_channel *dma;
411
412 musb_ep_select(mbase, epnum);
413 request = next_request(musb_ep);
414
415 csr = musb_readw(epio, MUSB_TXCSR);
416 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
417
418 dma = is_dma_capable() ? musb_ep->dma : NULL;
419 do {
420 /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
421 * probably rates reporting as a host error
422 */
423 if (csr & MUSB_TXCSR_P_SENTSTALL) {
424 csr |= MUSB_TXCSR_P_WZC_BITS;
425 csr &= ~MUSB_TXCSR_P_SENTSTALL;
426 musb_writew(epio, MUSB_TXCSR, csr);
427 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
428 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
429 musb->dma_controller->channel_abort(dma);
430 }
431
432 if (request)
433 musb_g_giveback(musb_ep, request, -EPIPE);
434
435 break;
436 }
437
438 if (csr & MUSB_TXCSR_P_UNDERRUN) {
439 /* we NAKed, no big deal ... little reason to care */
440 csr |= MUSB_TXCSR_P_WZC_BITS;
441 csr &= ~(MUSB_TXCSR_P_UNDERRUN
442 | MUSB_TXCSR_TXPKTRDY);
443 musb_writew(epio, MUSB_TXCSR, csr);
444 DBG(20, "underrun on ep%d, req %p\n", epnum, request);
445 }
446
447 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
448 /* SHOULD NOT HAPPEN ... has with cppi though, after
449 * changing SENDSTALL (and other cases); harmless?
450 */
451 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
452 break;
453 }
454
455 if (request) {
456 u8 is_dma = 0;
457
458 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
459 is_dma = 1;
460 csr |= MUSB_TXCSR_P_WZC_BITS;
461 csr &= ~(MUSB_TXCSR_DMAENAB
462 | MUSB_TXCSR_P_UNDERRUN
463 | MUSB_TXCSR_TXPKTRDY);
464 musb_writew(epio, MUSB_TXCSR, csr);
465 /* ensure writebuffer is empty */
466 csr = musb_readw(epio, MUSB_TXCSR);
467 request->actual += musb_ep->dma->actual_len;
468 DBG(4, "TXCSR%d %04x, dma off, "
469 "len %zu, req %p\n",
470 epnum, csr,
471 musb_ep->dma->actual_len,
472 request);
473 }
474
475 if (is_dma || request->actual == request->length) {
476
477 /* First, maybe a terminating short packet.
478 * Some DMA engines might handle this by
479 * themselves.
480 */
481 if ((request->zero
482 && request->length
483 && (request->length
484 % musb_ep->packet_sz)
485 == 0)
486#ifdef CONFIG_USB_INVENTRA_DMA
487 || (is_dma &&
488 ((!dma->desired_mode) ||
489 (request->actual &
490 (musb_ep->packet_sz - 1))))
491#endif
492 ) {
493 /* on dma completion, fifo may not
494 * be available yet ...
495 */
496 if (csr & MUSB_TXCSR_TXPKTRDY)
497 break;
498
499 DBG(4, "sending zero pkt\n");
500 musb_writew(epio, MUSB_TXCSR,
501 MUSB_TXCSR_MODE
502 | MUSB_TXCSR_TXPKTRDY);
503 request->zero = 0;
504 }
505
506 /* ... or if not, then complete it */
507 musb_g_giveback(musb_ep, request, 0);
508
509 /* kickstart next transfer if appropriate;
510 * the packet that just completed might not
511 * be transmitted for hours or days.
512 * REVISIT for double buffering...
513 * FIXME revisit for stalls too...
514 */
515 musb_ep_select(mbase, epnum);
516 csr = musb_readw(epio, MUSB_TXCSR);
517 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
518 break;
519 request = musb_ep->desc
520 ? next_request(musb_ep)
521 : NULL;
522 if (!request) {
523 DBG(4, "%s idle now\n",
524 musb_ep->end_point.name);
525 break;
526 }
527 }
528
529 txstate(musb, to_musb_request(request));
530 }
531
532 } while (0);
533}
534
535/* ------------------------------------------------------------ */
536
537#ifdef CONFIG_USB_INVENTRA_DMA
538
539/* Peripheral rx (OUT) using Mentor DMA works as follows:
540 - Only mode 0 is used.
541
542 - Request is queued by the gadget class driver.
543 -> if queue was previously empty, rxstate()
544
545 - Host sends OUT token which causes an endpoint interrupt
546 /\ -> RxReady
547 | -> if request queued, call rxstate
548 | /\ -> setup DMA
549 | | -> DMA interrupt on completion
550 | | -> RxReady
551 | | -> stop DMA
552 | | -> ack the read
553 | | -> if data recd = max expected
554 | | by the request, or host
555 | | sent a short packet,
556 | | complete the request,
557 | | and start the next one.
558 | |_____________________________________|
559 | else just wait for the host
560 | to send the next OUT token.
561 |__________________________________________________|
562
563 * Non-Mentor DMA engines can of course work differently.
564 */
565
566#endif
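
/* Illustrative sketch, not part of this driver: seen from a gadget
 * function driver (via <linux/usb/gadget.h>), the loop diagrammed above
 * surfaces as one completion callback per request.  A minimal OUT
 * completion handler that consumes the data and immediately re-queues
 * the same request could look like this; my_consume_data() and
 * my_handle_error() are hypothetical helpers.
 */
static void my_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->actual may be less than req->length on a short packet */
	if (req->status == 0)
		my_consume_data(req->buf, req->actual);

	/* restart I/O; rxstate() runs when the next OUT token arrives */
	if (usb_ep_queue(ep, req, GFP_ATOMIC) < 0)
		my_handle_error(ep);
}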
567
568/*
569 * Context: controller locked, IRQs blocked, endpoint selected
570 */
571static void rxstate(struct musb *musb, struct musb_request *req)
572{
573 u16 csr = 0;
574 const u8 epnum = req->epnum;
575 struct usb_request *request = &req->request;
576 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
577 void __iomem *epio = musb->endpoints[epnum].regs;
578 u16 fifo_count = 0;
579 u16 len = musb_ep->packet_sz;
580
581 csr = musb_readw(epio, MUSB_RXCSR);
582
583 if (is_cppi_enabled() && musb_ep->dma) {
584 struct dma_controller *c = musb->dma_controller;
585 struct dma_channel *channel = musb_ep->dma;
586
587 /* NOTE: CPPI won't actually stop advancing the DMA
 588		 * queue after short packet transfers, so this almost
 589		 * always runs as IRQ-per-packet DMA, so that faults
 590		 * are handled correctly.
591 */
592 if (c->channel_program(channel,
593 musb_ep->packet_sz,
594 !request->short_not_ok,
595 request->dma + request->actual,
596 request->length - request->actual)) {
597
598 /* make sure that if an rxpkt arrived after the irq,
599 * the cppi engine will be ready to take it as soon
600 * as DMA is enabled
601 */
602 csr &= ~(MUSB_RXCSR_AUTOCLEAR
603 | MUSB_RXCSR_DMAMODE);
604 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
605 musb_writew(epio, MUSB_RXCSR, csr);
606 return;
607 }
608 }
609
610 if (csr & MUSB_RXCSR_RXPKTRDY) {
611 len = musb_readw(epio, MUSB_RXCOUNT);
612 if (request->actual < request->length) {
613#ifdef CONFIG_USB_INVENTRA_DMA
614 if (is_dma_capable() && musb_ep->dma) {
615 struct dma_controller *c;
616 struct dma_channel *channel;
617 int use_dma = 0;
618
619 c = musb->dma_controller;
620 channel = musb_ep->dma;
621
622 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
623 * mode 0 only. So we do not get endpoint interrupts due to DMA
624 * completion. We only get interrupts from DMA controller.
625 *
 626			 * We could operate in DMA mode 1 if we knew the size of the transfer
 627			 * in advance. For mass storage class, request->length = what the host
 628			 * sends, so that'd work. But for pretty much everything else,
 629			 * request->length is routinely more than what the host sends. For
 630			 * most of these gadgets, end of transfer is signified either by a short
 631			 * packet, or by filling the last byte of the buffer. (Sending extra
 632			 * data in that last packet should trigger an overflow fault.) But in
 633			 * mode 1, we don't get a DMA completion interrupt for short packets.
634 *
635 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
636 * to get endpoint interrupt on every DMA req, but that didn't seem
637 * to work reliably.
638 *
639 * REVISIT an updated g_file_storage can set req->short_not_ok, which
640 * then becomes usable as a runtime "use mode 1" hint...
641 */
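			/* Worked example (hypothetical numbers): with
			 * packet_sz == 512, the non-USE_MODE1 path below sets
			 * transfer_size = RXCOUNT <= 512, so desired_mode
			 * stays 0 and we take one DMA completion interrupt
			 * per packet; only the USE_MODE1 path, where
			 * transfer_size may span many packets, selects mode 1.
			 */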
642
643 csr |= MUSB_RXCSR_DMAENAB;
644#ifdef USE_MODE1
645 csr |= MUSB_RXCSR_AUTOCLEAR;
646 /* csr |= MUSB_RXCSR_DMAMODE; */
647
648 /* this special sequence (enabling and then
649 * disabling MUSB_RXCSR_DMAMODE) is required
650 * to get DMAReq to activate
651 */
652 musb_writew(epio, MUSB_RXCSR,
653 csr | MUSB_RXCSR_DMAMODE);
654#endif
655 musb_writew(epio, MUSB_RXCSR, csr);
656
657 if (request->actual < request->length) {
658 int transfer_size = 0;
659#ifdef USE_MODE1
660 transfer_size = min(request->length,
661 channel->max_len);
662#else
663 transfer_size = len;
664#endif
665 if (transfer_size <= musb_ep->packet_sz)
666 musb_ep->dma->desired_mode = 0;
667 else
668 musb_ep->dma->desired_mode = 1;
669
670 use_dma = c->channel_program(
671 channel,
672 musb_ep->packet_sz,
673 channel->desired_mode,
674 request->dma
675 + request->actual,
676 transfer_size);
677 }
678
679 if (use_dma)
680 return;
681 }
682#endif /* Mentor's DMA */
683
684 fifo_count = request->length - request->actual;
685 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
686 musb_ep->end_point.name,
687 len, fifo_count,
688 musb_ep->packet_sz);
689
690 fifo_count = min(len, fifo_count);
691
692#ifdef CONFIG_USB_TUSB_OMAP_DMA
693 if (tusb_dma_omap() && musb_ep->dma) {
694 struct dma_controller *c = musb->dma_controller;
695 struct dma_channel *channel = musb_ep->dma;
696 u32 dma_addr = request->dma + request->actual;
697 int ret;
698
699 ret = c->channel_program(channel,
700 musb_ep->packet_sz,
701 channel->desired_mode,
702 dma_addr,
703 fifo_count);
704 if (ret)
705 return;
706 }
707#endif
708
709 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
710 (request->buf + request->actual));
711 request->actual += fifo_count;
712
713 /* REVISIT if we left anything in the fifo, flush
714 * it and report -EOVERFLOW
715 */
716
717 /* ack the read! */
718 csr |= MUSB_RXCSR_P_WZC_BITS;
719 csr &= ~MUSB_RXCSR_RXPKTRDY;
720 musb_writew(epio, MUSB_RXCSR, csr);
721 }
722 }
723
 724	/* reached the end, or a short packet was detected */
725 if (request->actual == request->length || len < musb_ep->packet_sz)
726 musb_g_giveback(musb_ep, request, 0);
727}
728
729/*
730 * Data ready for a request; called from IRQ
731 */
732void musb_g_rx(struct musb *musb, u8 epnum)
733{
734 u16 csr;
735 struct usb_request *request;
736 void __iomem *mbase = musb->mregs;
737 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
738 void __iomem *epio = musb->endpoints[epnum].regs;
739 struct dma_channel *dma;
740
741 musb_ep_select(mbase, epnum);
742
743 request = next_request(musb_ep);
744
745 csr = musb_readw(epio, MUSB_RXCSR);
746 dma = is_dma_capable() ? musb_ep->dma : NULL;
747
748 DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
749 csr, dma ? " (dma)" : "", request);
750
751 if (csr & MUSB_RXCSR_P_SENTSTALL) {
752 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
753 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
754 (void) musb->dma_controller->channel_abort(dma);
755 request->actual += musb_ep->dma->actual_len;
756 }
757
758 csr |= MUSB_RXCSR_P_WZC_BITS;
759 csr &= ~MUSB_RXCSR_P_SENTSTALL;
760 musb_writew(epio, MUSB_RXCSR, csr);
761
762 if (request)
763 musb_g_giveback(musb_ep, request, -EPIPE);
764 goto done;
765 }
766
767 if (csr & MUSB_RXCSR_P_OVERRUN) {
768 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
769 csr &= ~MUSB_RXCSR_P_OVERRUN;
770 musb_writew(epio, MUSB_RXCSR, csr);
771
772 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
773 if (request && request->status == -EINPROGRESS)
774 request->status = -EOVERFLOW;
775 }
776 if (csr & MUSB_RXCSR_INCOMPRX) {
777 /* REVISIT not necessarily an error */
778 DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
779 }
780
781 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
782 /* "should not happen"; likely RXPKTRDY pending for DMA */
783 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
784 "%s busy, csr %04x\n",
785 musb_ep->end_point.name, csr);
786 goto done;
787 }
788
789 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
790 csr &= ~(MUSB_RXCSR_AUTOCLEAR
791 | MUSB_RXCSR_DMAENAB
792 | MUSB_RXCSR_DMAMODE);
793 musb_writew(epio, MUSB_RXCSR,
794 MUSB_RXCSR_P_WZC_BITS | csr);
795
796 request->actual += musb_ep->dma->actual_len;
797
798 DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
799 epnum, csr,
800 musb_readw(epio, MUSB_RXCSR),
801 musb_ep->dma->actual_len, request);
802
803#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
804 /* Autoclear doesn't clear RxPktRdy for short packets */
805 if ((dma->desired_mode == 0)
806 || (dma->actual_len
807 & (musb_ep->packet_sz - 1))) {
808 /* ack the read! */
809 csr &= ~MUSB_RXCSR_RXPKTRDY;
810 musb_writew(epio, MUSB_RXCSR, csr);
811 }
812
813 /* incomplete, and not short? wait for next IN packet */
814 if ((request->actual < request->length)
815 && (musb_ep->dma->actual_len
816 == musb_ep->packet_sz))
817 goto done;
818#endif
819 musb_g_giveback(musb_ep, request, 0);
820
821 request = next_request(musb_ep);
822 if (!request)
823 goto done;
824
825 /* don't start more i/o till the stall clears */
826 musb_ep_select(mbase, epnum);
827 csr = musb_readw(epio, MUSB_RXCSR);
828 if (csr & MUSB_RXCSR_P_SENDSTALL)
829 goto done;
830 }
831
832
833 /* analyze request if the ep is hot */
834 if (request)
835 rxstate(musb, to_musb_request(request));
836 else
837 DBG(3, "packet waiting for %s%s request\n",
838 musb_ep->desc ? "" : "inactive ",
839 musb_ep->end_point.name);
840
841done:
842 return;
843}
844
845/* ------------------------------------------------------------ */
846
847static int musb_gadget_enable(struct usb_ep *ep,
848 const struct usb_endpoint_descriptor *desc)
849{
850 unsigned long flags;
851 struct musb_ep *musb_ep;
852 struct musb_hw_ep *hw_ep;
853 void __iomem *regs;
854 struct musb *musb;
855 void __iomem *mbase;
856 u8 epnum;
857 u16 csr;
858 unsigned tmp;
859 int status = -EINVAL;
860
861 if (!ep || !desc)
862 return -EINVAL;
863
864 musb_ep = to_musb_ep(ep);
865 hw_ep = musb_ep->hw_ep;
866 regs = hw_ep->regs;
867 musb = musb_ep->musb;
868 mbase = musb->mregs;
869 epnum = musb_ep->current_epnum;
870
871 spin_lock_irqsave(&musb->lock, flags);
872
873 if (musb_ep->desc) {
874 status = -EBUSY;
875 goto fail;
876 }
877 musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
878
879 /* check direction and (later) maxpacket size against endpoint */
880 if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
881 goto fail;
882
883 /* REVISIT this rules out high bandwidth periodic transfers */
884 tmp = le16_to_cpu(desc->wMaxPacketSize);
885 if (tmp & ~0x07ff)
886 goto fail;
887 musb_ep->packet_sz = tmp;
888
889 /* enable the interrupts for the endpoint, set the endpoint
890 * packet size (or fail), set the mode, clear the fifo
891 */
892 musb_ep_select(mbase, epnum);
893 if (desc->bEndpointAddress & USB_DIR_IN) {
894 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
895
896 if (hw_ep->is_shared_fifo)
897 musb_ep->is_in = 1;
898 if (!musb_ep->is_in)
899 goto fail;
900 if (tmp > hw_ep->max_packet_sz_tx)
901 goto fail;
902
903 int_txe |= (1 << epnum);
904 musb_writew(mbase, MUSB_INTRTXE, int_txe);
905
906 /* REVISIT if can_bulk_split(), use by updating "tmp";
907 * likewise high bandwidth periodic tx
908 */
909 musb_writew(regs, MUSB_TXMAXP, tmp);
910
911 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
912 if (musb_readw(regs, MUSB_TXCSR)
913 & MUSB_TXCSR_FIFONOTEMPTY)
914 csr |= MUSB_TXCSR_FLUSHFIFO;
915 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
916 csr |= MUSB_TXCSR_P_ISO;
917
918 /* set twice in case of double buffering */
919 musb_writew(regs, MUSB_TXCSR, csr);
920 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
921 musb_writew(regs, MUSB_TXCSR, csr);
922
923 } else {
924 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
925
926 if (hw_ep->is_shared_fifo)
927 musb_ep->is_in = 0;
928 if (musb_ep->is_in)
929 goto fail;
930 if (tmp > hw_ep->max_packet_sz_rx)
931 goto fail;
932
933 int_rxe |= (1 << epnum);
934 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
935
936 /* REVISIT if can_bulk_combine() use by updating "tmp"
937 * likewise high bandwidth periodic rx
938 */
939 musb_writew(regs, MUSB_RXMAXP, tmp);
940
941 /* force shared fifo to OUT-only mode */
942 if (hw_ep->is_shared_fifo) {
943 csr = musb_readw(regs, MUSB_TXCSR);
944 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
945 musb_writew(regs, MUSB_TXCSR, csr);
946 }
947
948 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
949 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
950 csr |= MUSB_RXCSR_P_ISO;
951 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
952 csr |= MUSB_RXCSR_DISNYET;
953
954 /* set twice in case of double buffering */
955 musb_writew(regs, MUSB_RXCSR, csr);
956 musb_writew(regs, MUSB_RXCSR, csr);
957 }
958
959 /* NOTE: all the I/O code _should_ work fine without DMA, in case
960 * for some reason you run out of channels here.
961 */
962 if (is_dma_capable() && musb->dma_controller) {
963 struct dma_controller *c = musb->dma_controller;
964
965 musb_ep->dma = c->channel_alloc(c, hw_ep,
966 (desc->bEndpointAddress & USB_DIR_IN));
967 } else
968 musb_ep->dma = NULL;
969
970 musb_ep->desc = desc;
971 musb_ep->busy = 0;
972 status = 0;
973
974 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
975 musb_driver_name, musb_ep->end_point.name,
976 ({ char *s; switch (musb_ep->type) {
977 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
978 case USB_ENDPOINT_XFER_INT: s = "int"; break;
979 default: s = "iso"; break;
980 }; s; }),
981 musb_ep->is_in ? "IN" : "OUT",
982 musb_ep->dma ? "dma, " : "",
983 musb_ep->packet_sz);
984
985 schedule_work(&musb->irq_work);
986
987fail:
988 spin_unlock_irqrestore(&musb->lock, flags);
989 return status;
990}
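
/* Sketch, with hypothetical my_*() names (not code from this file): a
 * function driver reaches musb_gadget_enable() above through the generic
 * usb_ep_enable() wrapper, passing a descriptor whose endpoint address
 * and wMaxPacketSize must satisfy the checks made there.
 */
static struct usb_endpoint_descriptor my_bulk_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT | 1,	/* must match epnum */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static int my_enable_bulk_out(struct usb_ep *ep)
{
	return usb_ep_enable(ep, &my_bulk_out_desc);
}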
991
992/*
993 * Disable an endpoint flushing all requests queued.
994 */
995static int musb_gadget_disable(struct usb_ep *ep)
996{
997 unsigned long flags;
998 struct musb *musb;
999 u8 epnum;
1000 struct musb_ep *musb_ep;
1001 void __iomem *epio;
1002 int status = 0;
1003
1004 musb_ep = to_musb_ep(ep);
1005 musb = musb_ep->musb;
1006 epnum = musb_ep->current_epnum;
1007 epio = musb->endpoints[epnum].regs;
1008
1009 spin_lock_irqsave(&musb->lock, flags);
1010 musb_ep_select(musb->mregs, epnum);
1011
1012 /* zero the endpoint sizes */
1013 if (musb_ep->is_in) {
1014 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1015 int_txe &= ~(1 << epnum);
1016 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1017 musb_writew(epio, MUSB_TXMAXP, 0);
1018 } else {
1019 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1020 int_rxe &= ~(1 << epnum);
1021 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1022 musb_writew(epio, MUSB_RXMAXP, 0);
1023 }
1024
1025 musb_ep->desc = NULL;
1026
1027 /* abort all pending DMA and requests */
1028 nuke(musb_ep, -ESHUTDOWN);
1029
1030 schedule_work(&musb->irq_work);
1031
1032 spin_unlock_irqrestore(&(musb->lock), flags);
1033
1034 DBG(2, "%s\n", musb_ep->end_point.name);
1035
1036 return status;
1037}
1038
1039/*
1040 * Allocate a request for an endpoint.
1041 * Reused by ep0 code.
1042 */
1043struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1044{
1045 struct musb_ep *musb_ep = to_musb_ep(ep);
1046 struct musb_request *request = NULL;
1047
1048 request = kzalloc(sizeof *request, gfp_flags);
1049 if (request) {
1050 INIT_LIST_HEAD(&request->request.list);
1051 request->request.dma = DMA_ADDR_INVALID;
1052 request->epnum = musb_ep->current_epnum;
1053 request->ep = musb_ep;
1054 }
1055
1056 return &request->request;
1057}
1058
1059/*
1060 * Free a request
1061 * Reused by ep0 code.
1062 */
1063void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1064{
1065 kfree(to_musb_request(req));
1066}
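
/* Sketch, hypothetical my_*() names: the usual request lifecycle around
 * the two helpers above, reached through the usb_ep_alloc_request() and
 * usb_ep_queue() wrappers; my_complete() would eventually call
 * usb_ep_free_request() or re-queue the request.
 */
static int my_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}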
1067
1068static LIST_HEAD(buffers);
1069
1070struct free_record {
1071 struct list_head list;
1072 struct device *dev;
1073 unsigned bytes;
1074 dma_addr_t dma;
1075};
1076
1077/*
1078 * Context: controller locked, IRQs blocked.
1079 */
1080static void musb_ep_restart(struct musb *musb, struct musb_request *req)
1081{
1082 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1083 req->tx ? "TX/IN" : "RX/OUT",
1084 &req->request, req->request.length, req->epnum);
1085
1086 musb_ep_select(musb->mregs, req->epnum);
1087 if (req->tx)
1088 txstate(musb, req);
1089 else
1090 rxstate(musb, req);
1091}
1092
1093static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1094 gfp_t gfp_flags)
1095{
1096 struct musb_ep *musb_ep;
1097 struct musb_request *request;
1098 struct musb *musb;
1099 int status = 0;
1100 unsigned long lockflags;
1101
1102 if (!ep || !req)
1103 return -EINVAL;
1104 if (!req->buf)
1105 return -ENODATA;
1106
1107 musb_ep = to_musb_ep(ep);
1108 musb = musb_ep->musb;
1109
1110 request = to_musb_request(req);
1111 request->musb = musb;
1112
1113 if (request->ep != musb_ep)
1114 return -EINVAL;
1115
1116 DBG(4, "<== to %s request=%p\n", ep->name, req);
1117
1118 /* request is mine now... */
1119 request->request.actual = 0;
1120 request->request.status = -EINPROGRESS;
1121 request->epnum = musb_ep->current_epnum;
1122 request->tx = musb_ep->is_in;
1123
1124 if (is_dma_capable() && musb_ep->dma) {
1125 if (request->request.dma == DMA_ADDR_INVALID) {
1126 request->request.dma = dma_map_single(
1127 musb->controller,
1128 request->request.buf,
1129 request->request.length,
1130 request->tx
1131 ? DMA_TO_DEVICE
1132 : DMA_FROM_DEVICE);
1133 request->mapped = 1;
1134 } else {
1135 dma_sync_single_for_device(musb->controller,
1136 request->request.dma,
1137 request->request.length,
1138 request->tx
1139 ? DMA_TO_DEVICE
1140 : DMA_FROM_DEVICE);
1141 request->mapped = 0;
1142 }
1143 } else if (!req->buf) {
1144 return -ENODATA;
1145 } else
1146 request->mapped = 0;
1147
1148 spin_lock_irqsave(&musb->lock, lockflags);
1149
1150 /* don't queue if the ep is down */
1151 if (!musb_ep->desc) {
1152 DBG(4, "req %p queued to %s while ep %s\n",
1153 req, ep->name, "disabled");
1154 status = -ESHUTDOWN;
1155 goto cleanup;
1156 }
1157
1158 /* add request to the list */
1159 list_add_tail(&(request->request.list), &(musb_ep->req_list));
1160
1161	/* if this is the head of the queue, start i/o ... */
1162 if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
1163 musb_ep_restart(musb, request);
1164
1165cleanup:
1166 spin_unlock_irqrestore(&musb->lock, lockflags);
1167 return status;
1168}
1169
1170static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1171{
1172 struct musb_ep *musb_ep = to_musb_ep(ep);
1173 struct usb_request *r;
1174 unsigned long flags;
1175 int status = 0;
1176 struct musb *musb = musb_ep->musb;
1177
1178 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1179 return -EINVAL;
1180
1181 spin_lock_irqsave(&musb->lock, flags);
1182
1183 list_for_each_entry(r, &musb_ep->req_list, list) {
1184 if (r == request)
1185 break;
1186 }
1187 if (r != request) {
1188 DBG(3, "request %p not queued to %s\n", request, ep->name);
1189 status = -EINVAL;
1190 goto done;
1191 }
1192
1193 /* if the hardware doesn't have the request, easy ... */
1194 if (musb_ep->req_list.next != &request->list || musb_ep->busy)
1195 musb_g_giveback(musb_ep, request, -ECONNRESET);
1196
1197 /* ... else abort the dma transfer ... */
1198 else if (is_dma_capable() && musb_ep->dma) {
1199 struct dma_controller *c = musb->dma_controller;
1200
1201 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1202 if (c->channel_abort)
1203 status = c->channel_abort(musb_ep->dma);
1204 else
1205 status = -EBUSY;
1206 if (status == 0)
1207 musb_g_giveback(musb_ep, request, -ECONNRESET);
1208 } else {
1209 /* NOTE: by sticking to easily tested hardware/driver states,
1210 * we leave counting of in-flight packets imprecise.
1211 */
1212 musb_g_giveback(musb_ep, request, -ECONNRESET);
1213 }
1214
1215done:
1216 spin_unlock_irqrestore(&musb->lock, flags);
1217 return status;
1218}
1219
1220/*
1221 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1222 * data but will queue requests.
1223 *
1224 * exported to ep0 code
1225 */
1226int musb_gadget_set_halt(struct usb_ep *ep, int value)
1227{
1228 struct musb_ep *musb_ep = to_musb_ep(ep);
1229 u8 epnum = musb_ep->current_epnum;
1230 struct musb *musb = musb_ep->musb;
1231 void __iomem *epio = musb->endpoints[epnum].regs;
1232 void __iomem *mbase;
1233 unsigned long flags;
1234 u16 csr;
1235 struct musb_request *request = NULL;
1236 int status = 0;
1237
1238 if (!ep)
1239 return -EINVAL;
1240 mbase = musb->mregs;
1241
1242 spin_lock_irqsave(&musb->lock, flags);
1243
1244	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1245 status = -EINVAL;
1246 goto done;
1247 }
1248
1249 musb_ep_select(mbase, epnum);
1250
1251 /* cannot portably stall with non-empty FIFO */
1252 request = to_musb_request(next_request(musb_ep));
1253 if (value && musb_ep->is_in) {
1254 csr = musb_readw(epio, MUSB_TXCSR);
1255 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1256 DBG(3, "%s fifo busy, cannot halt\n", ep->name);
1257 spin_unlock_irqrestore(&musb->lock, flags);
1258 return -EAGAIN;
1259 }
1260
1261 }
1262
1263 /* set/clear the stall and toggle bits */
1264 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1265 if (musb_ep->is_in) {
1266 csr = musb_readw(epio, MUSB_TXCSR);
1267 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
1268 csr |= MUSB_TXCSR_FLUSHFIFO;
1269 csr |= MUSB_TXCSR_P_WZC_BITS
1270 | MUSB_TXCSR_CLRDATATOG;
1271 if (value)
1272 csr |= MUSB_TXCSR_P_SENDSTALL;
1273 else
1274 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1275 | MUSB_TXCSR_P_SENTSTALL);
1276 csr &= ~MUSB_TXCSR_TXPKTRDY;
1277 musb_writew(epio, MUSB_TXCSR, csr);
1278 } else {
1279 csr = musb_readw(epio, MUSB_RXCSR);
1280 csr |= MUSB_RXCSR_P_WZC_BITS
1281 | MUSB_RXCSR_FLUSHFIFO
1282 | MUSB_RXCSR_CLRDATATOG;
1283 if (value)
1284 csr |= MUSB_RXCSR_P_SENDSTALL;
1285 else
1286 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1287 | MUSB_RXCSR_P_SENTSTALL);
1288 musb_writew(epio, MUSB_RXCSR, csr);
1289 }
1290
1291done:
1292
1293 /* maybe start the first request in the queue */
1294 if (!musb_ep->busy && !value && request) {
1295 DBG(3, "restarting the request\n");
1296 musb_ep_restart(musb, request);
1297 }
1298
1299 spin_unlock_irqrestore(&musb->lock, flags);
1300 return status;
1301}
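
/* Sketch, not from this file: function drivers normally reach the halt
 * code above through the generic wrappers:
 *
 *	status = usb_ep_set_halt(ep);	(may fail -EAGAIN: FIFO not empty)
 *	...
 *	status = usb_ep_clear_halt(ep);	(also restarts a queued request)
 */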
1302
1303static int musb_gadget_fifo_status(struct usb_ep *ep)
1304{
1305 struct musb_ep *musb_ep = to_musb_ep(ep);
1306 void __iomem *epio = musb_ep->hw_ep->regs;
1307 int retval = -EINVAL;
1308
1309 if (musb_ep->desc && !musb_ep->is_in) {
1310 struct musb *musb = musb_ep->musb;
1311 int epnum = musb_ep->current_epnum;
1312 void __iomem *mbase = musb->mregs;
1313 unsigned long flags;
1314
1315 spin_lock_irqsave(&musb->lock, flags);
1316
1317 musb_ep_select(mbase, epnum);
1318 /* FIXME return zero unless RXPKTRDY is set */
1319 retval = musb_readw(epio, MUSB_RXCOUNT);
1320
1321 spin_unlock_irqrestore(&musb->lock, flags);
1322 }
1323 return retval;
1324}
1325
1326static void musb_gadget_fifo_flush(struct usb_ep *ep)
1327{
1328 struct musb_ep *musb_ep = to_musb_ep(ep);
1329 struct musb *musb = musb_ep->musb;
1330 u8 epnum = musb_ep->current_epnum;
1331 void __iomem *epio = musb->endpoints[epnum].regs;
1332 void __iomem *mbase;
1333 unsigned long flags;
1334 u16 csr, int_txe;
1335
1336 mbase = musb->mregs;
1337
1338 spin_lock_irqsave(&musb->lock, flags);
1339 musb_ep_select(mbase, (u8) epnum);
1340
1341 /* disable interrupts */
1342 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1343 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1344
1345 if (musb_ep->is_in) {
1346 csr = musb_readw(epio, MUSB_TXCSR);
1347 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1348 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1349 musb_writew(epio, MUSB_TXCSR, csr);
1350 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1351 musb_writew(epio, MUSB_TXCSR, csr);
1352 }
1353 } else {
1354 csr = musb_readw(epio, MUSB_RXCSR);
1355 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1356 musb_writew(epio, MUSB_RXCSR, csr);
1357 musb_writew(epio, MUSB_RXCSR, csr);
1358 }
1359
1360 /* re-enable interrupt */
1361 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1362 spin_unlock_irqrestore(&musb->lock, flags);
1363}
1364
1365static const struct usb_ep_ops musb_ep_ops = {
1366 .enable = musb_gadget_enable,
1367 .disable = musb_gadget_disable,
1368 .alloc_request = musb_alloc_request,
1369 .free_request = musb_free_request,
1370 .queue = musb_gadget_queue,
1371 .dequeue = musb_gadget_dequeue,
1372 .set_halt = musb_gadget_set_halt,
1373 .fifo_status = musb_gadget_fifo_status,
1374 .fifo_flush = musb_gadget_fifo_flush
1375};
1376
1377/* ----------------------------------------------------------------------- */
1378
1379static int musb_gadget_get_frame(struct usb_gadget *gadget)
1380{
1381 struct musb *musb = gadget_to_musb(gadget);
1382
1383 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1384}
1385
1386static int musb_gadget_wakeup(struct usb_gadget *gadget)
1387{
1388 struct musb *musb = gadget_to_musb(gadget);
1389 void __iomem *mregs = musb->mregs;
1390 unsigned long flags;
1391 int status = -EINVAL;
1392 u8 power, devctl;
1393 int retries;
1394
1395 spin_lock_irqsave(&musb->lock, flags);
1396
1397 switch (musb->xceiv.state) {
1398 case OTG_STATE_B_PERIPHERAL:
1399 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1400 * that's part of the standard usb 1.1 state machine, and
1401 * doesn't affect OTG transitions.
1402 */
1403 if (musb->may_wakeup && musb->is_suspended)
1404 break;
1405 goto done;
1406 case OTG_STATE_B_IDLE:
1407 /* Start SRP ... OTG not required. */
1408 devctl = musb_readb(mregs, MUSB_DEVCTL);
1409 DBG(2, "Sending SRP: devctl: %02x\n", devctl);
1410 devctl |= MUSB_DEVCTL_SESSION;
1411 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1412 devctl = musb_readb(mregs, MUSB_DEVCTL);
1413 retries = 100;
1414 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1415 devctl = musb_readb(mregs, MUSB_DEVCTL);
1416 if (retries-- < 1)
1417 break;
1418 }
1419 retries = 10000;
1420 while (devctl & MUSB_DEVCTL_SESSION) {
1421 devctl = musb_readb(mregs, MUSB_DEVCTL);
1422 if (retries-- < 1)
1423 break;
1424 }
1425
1426 /* Block idling for at least 1s */
1427 musb_platform_try_idle(musb,
1428				jiffies + msecs_to_jiffies(1000));
1429
1430 status = 0;
1431 goto done;
1432 default:
1433 DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
1434 goto done;
1435 }
1436
1437 status = 0;
1438
1439 power = musb_readb(mregs, MUSB_POWER);
1440 power |= MUSB_POWER_RESUME;
1441 musb_writeb(mregs, MUSB_POWER, power);
1442 DBG(2, "issue wakeup\n");
1443
1444 /* FIXME do this next chunk in a timer callback, no udelay */
1445 mdelay(2);
1446
1447 power = musb_readb(mregs, MUSB_POWER);
1448 power &= ~MUSB_POWER_RESUME;
1449 musb_writeb(mregs, MUSB_POWER, power);
1450done:
1451 spin_unlock_irqrestore(&musb->lock, flags);
1452 return status;
1453}
1454
1455static int
1456musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1457{
1458 struct musb *musb = gadget_to_musb(gadget);
1459
1460 musb->is_self_powered = !!is_selfpowered;
1461 return 0;
1462}
1463
1464static void musb_pullup(struct musb *musb, int is_on)
1465{
1466 u8 power;
1467
1468 power = musb_readb(musb->mregs, MUSB_POWER);
1469 if (is_on)
1470 power |= MUSB_POWER_SOFTCONN;
1471 else
1472 power &= ~MUSB_POWER_SOFTCONN;
1473
1474 /* FIXME if on, HdrcStart; if off, HdrcStop */
1475
1476 DBG(3, "gadget %s D+ pullup %s\n",
1477 musb->gadget_driver->function, is_on ? "on" : "off");
1478 musb_writeb(musb->mregs, MUSB_POWER, power);
1479}
1480
1481#if 0
1482static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1483{
1484 DBG(2, "<= %s =>\n", __func__);
1485
1486 /*
1487 * FIXME iff driver's softconnect flag is set (as it is during probe,
1488 * though that can clear it), just musb_pullup().
1489 */
1490
1491 return -EINVAL;
1492}
1493#endif
1494
1495static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1496{
1497 struct musb *musb = gadget_to_musb(gadget);
1498
1499 if (!musb->xceiv.set_power)
1500 return -EOPNOTSUPP;
1501 return otg_set_power(&musb->xceiv, mA);
1502}
1503
1504static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1505{
1506 struct musb *musb = gadget_to_musb(gadget);
1507 unsigned long flags;
1508
1509 is_on = !!is_on;
1510
1511 /* NOTE: this assumes we are sensing vbus; we'd rather
1512 * not pullup unless the B-session is active.
1513 */
1514 spin_lock_irqsave(&musb->lock, flags);
1515 if (is_on != musb->softconnect) {
1516 musb->softconnect = is_on;
1517 musb_pullup(musb, is_on);
1518 }
1519 spin_unlock_irqrestore(&musb->lock, flags);
1520 return 0;
1521}
1522
1523static const struct usb_gadget_ops musb_gadget_operations = {
1524 .get_frame = musb_gadget_get_frame,
1525 .wakeup = musb_gadget_wakeup,
1526 .set_selfpowered = musb_gadget_set_self_powered,
1527 /* .vbus_session = musb_gadget_vbus_session, */
1528 .vbus_draw = musb_gadget_vbus_draw,
1529 .pullup = musb_gadget_pullup,
1530};
1531
1532/* ----------------------------------------------------------------------- */
1533
1534/* Registration */
1535
1536/* Only this registration code "knows" the rule (from USB standards)
1537 * about there being only one external upstream port. It assumes
1538 * all peripheral ports are external...
1539 */
1540static struct musb *the_gadget;
1541
1542static void musb_gadget_release(struct device *dev)
1543{
1544 /* kref_put(WHAT) */
1545 dev_dbg(dev, "%s\n", __func__);
1546}
1547
1548
1549static void __init
1550init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1551{
1552 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1553
1554 memset(ep, 0, sizeof *ep);
1555
1556 ep->current_epnum = epnum;
1557 ep->musb = musb;
1558 ep->hw_ep = hw_ep;
1559 ep->is_in = is_in;
1560
1561 INIT_LIST_HEAD(&ep->req_list);
1562
1563 sprintf(ep->name, "ep%d%s", epnum,
1564 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1565 is_in ? "in" : "out"));
1566 ep->end_point.name = ep->name;
1567 INIT_LIST_HEAD(&ep->end_point.ep_list);
1568 if (!epnum) {
1569 ep->end_point.maxpacket = 64;
1570 ep->end_point.ops = &musb_g_ep0_ops;
1571 musb->g.ep0 = &ep->end_point;
1572 } else {
1573 if (is_in)
1574 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1575 else
1576 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1577 ep->end_point.ops = &musb_ep_ops;
1578 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1579 }
1580}
1581
1582/*
1583 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1584 * to the rest of the driver state.
1585 */
1586static inline void __init musb_g_init_endpoints(struct musb *musb)
1587{
1588 u8 epnum;
1589 struct musb_hw_ep *hw_ep;
1590 unsigned count = 0;
1591
1592	/* initialize endpoint list just once */
1593 INIT_LIST_HEAD(&(musb->g.ep_list));
1594
1595 for (epnum = 0, hw_ep = musb->endpoints;
1596 epnum < musb->nr_endpoints;
1597 epnum++, hw_ep++) {
1598 if (hw_ep->is_shared_fifo /* || !epnum */) {
1599 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1600 count++;
1601 } else {
1602 if (hw_ep->max_packet_sz_tx) {
1603 init_peripheral_ep(musb, &hw_ep->ep_in,
1604 epnum, 1);
1605 count++;
1606 }
1607 if (hw_ep->max_packet_sz_rx) {
1608 init_peripheral_ep(musb, &hw_ep->ep_out,
1609 epnum, 0);
1610 count++;
1611 }
1612 }
1613 }
1614}
1615
1616/* called once during driver setup to initialize and link into
1617 * the driver model; memory is zeroed.
1618 */
1619int __init musb_gadget_setup(struct musb *musb)
1620{
1621 int status;
1622
1623 /* REVISIT minor race: if (erroneously) setting up two
1624 * musb peripherals at the same time, only the bus lock
1625 * is probably held.
1626 */
1627 if (the_gadget)
1628 return -EBUSY;
1629 the_gadget = musb;
1630
1631 musb->g.ops = &musb_gadget_operations;
1632 musb->g.is_dualspeed = 1;
1633 musb->g.speed = USB_SPEED_UNKNOWN;
1634
1635 /* this "gadget" abstracts/virtualizes the controller */
1636 strcpy(musb->g.dev.bus_id, "gadget");
1637 musb->g.dev.parent = musb->controller;
1638 musb->g.dev.dma_mask = musb->controller->dma_mask;
1639 musb->g.dev.release = musb_gadget_release;
1640 musb->g.name = musb_driver_name;
1641
1642 if (is_otg_enabled(musb))
1643 musb->g.is_otg = 1;
1644
1645 musb_g_init_endpoints(musb);
1646
1647 musb->is_active = 0;
1648 musb_platform_try_idle(musb, 0);
1649
1650 status = device_register(&musb->g.dev);
1651 if (status != 0)
1652 the_gadget = NULL;
1653 return status;
1654}
1655
1656void musb_gadget_cleanup(struct musb *musb)
1657{
1658 if (musb != the_gadget)
1659 return;
1660
1661 device_unregister(&musb->g.dev);
1662 the_gadget = NULL;
1663}
1664
1665/*
1666 * Register the gadget driver. Used by gadget drivers when
1667 * registering themselves with the controller.
1668 *
1669 * -EINVAL something went wrong (not driver)
1670 * -EBUSY another gadget is already using the controller
1671 * -ENOMEM no memory to perform the operation
1672 *
1673 * @param driver the gadget driver
1674 * @return <0 if error, 0 if everything is fine
1675 */
1676int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1677{
1678 int retval;
1679 unsigned long flags;
1680 struct musb *musb = the_gadget;
1681
1682 if (!driver
1683 || driver->speed != USB_SPEED_HIGH
1684 || !driver->bind
1685 || !driver->setup)
1686 return -EINVAL;
1687
1688 /* driver must be initialized to support peripheral mode */
1689	if (!musb) {
1691 DBG(1, "%s, no dev??\n", __func__);
1692 return -ENODEV;
1693 }
1694
1695 DBG(3, "registering driver %s\n", driver->function);
1696 spin_lock_irqsave(&musb->lock, flags);
1697
1698 if (musb->gadget_driver) {
1699 DBG(1, "%s is already bound to %s\n",
1700 musb_driver_name,
1701 musb->gadget_driver->driver.name);
1702 retval = -EBUSY;
1703 } else {
1704 musb->gadget_driver = driver;
1705 musb->g.dev.driver = &driver->driver;
1706 driver->driver.bus = NULL;
1707 musb->softconnect = 1;
1708 retval = 0;
1709 }
1710
1711 spin_unlock_irqrestore(&musb->lock, flags);
1712
1713 if (retval == 0) {
1714 retval = driver->bind(&musb->g);
1715 if (retval != 0) {
1716 DBG(3, "bind to driver %s failed --> %d\n",
1717 driver->driver.name, retval);
1718 musb->gadget_driver = NULL;
1719 musb->g.dev.driver = NULL;
1720 }
1721
1722 spin_lock_irqsave(&musb->lock, flags);
1723
1724 /* REVISIT always use otg_set_peripheral(), handling
1725 * issues including the root hub one below ...
1726 */
1727 musb->xceiv.gadget = &musb->g;
1728 musb->xceiv.state = OTG_STATE_B_IDLE;
1729 musb->is_active = 1;
1730
1731 /* FIXME this ignores the softconnect flag. Drivers are
1732		 * allowed to hold the peripheral inactive until for example
1733 * userspace hooks up printer hardware or DSP codecs, so
1734 * hosts only see fully functional devices.
1735 */
1736
1737 if (!is_otg_enabled(musb))
1738 musb_start(musb);
1739
1740 spin_unlock_irqrestore(&musb->lock, flags);
1741
1742 if (is_otg_enabled(musb)) {
1743 DBG(3, "OTG startup...\n");
1744
1745 /* REVISIT: funcall to other code, which also
1746 * handles power budgeting ... this way also
1747 * ensures HdrcStart is indirectly called.
1748 */
1749 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1750 if (retval < 0) {
1751 DBG(1, "add_hcd failed, %d\n", retval);
1752 spin_lock_irqsave(&musb->lock, flags);
1753 musb->xceiv.gadget = NULL;
1754 musb->xceiv.state = OTG_STATE_UNDEFINED;
1755 musb->gadget_driver = NULL;
1756 musb->g.dev.driver = NULL;
1757 spin_unlock_irqrestore(&musb->lock, flags);
1758 }
1759 }
1760 }
1761
1762 return retval;
1763}
1764EXPORT_SYMBOL(usb_gadget_register_driver);
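
/* Sketch, with hypothetical my_*() callbacks: a minimal gadget driver as
 * usb_gadget_register_driver() expects it, i.e. high speed with bind and
 * setup both set; registration normally happens from the driver's
 * module_init().
 */
static struct usb_gadget_driver my_gadget_driver = {
	.function	= "my_gadget",
	.speed		= USB_SPEED_HIGH,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= {
		.name	= "my_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init my_init(void)
{
	return usb_gadget_register_driver(&my_gadget_driver);
}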
1765
1766static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1767{
1768 int i;
1769 struct musb_hw_ep *hw_ep;
1770
1771 /* don't disconnect if it's not connected */
1772 if (musb->g.speed == USB_SPEED_UNKNOWN)
1773 driver = NULL;
1774 else
1775 musb->g.speed = USB_SPEED_UNKNOWN;
1776
1777 /* deactivate the hardware */
1778 if (musb->softconnect) {
1779 musb->softconnect = 0;
1780 musb_pullup(musb, 0);
1781 }
1782 musb_stop(musb);
1783
1784 /* killing any outstanding requests will quiesce the driver;
1785 * then report disconnect
1786 */
1787 if (driver) {
1788 for (i = 0, hw_ep = musb->endpoints;
1789 i < musb->nr_endpoints;
1790 i++, hw_ep++) {
1791 musb_ep_select(musb->mregs, i);
1792 if (hw_ep->is_shared_fifo /* || !epnum */) {
1793 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1794 } else {
1795 if (hw_ep->max_packet_sz_tx)
1796 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1797 if (hw_ep->max_packet_sz_rx)
1798 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1799 }
1800 }
1801
1802 spin_unlock(&musb->lock);
1803 driver->disconnect(&musb->g);
1804 spin_lock(&musb->lock);
1805 }
1806}
1807
1808/*
1809 * Unregister the gadget driver. Used by gadget drivers when
1810 * unregistering themselves from the controller.
1811 *
1812 * @param driver the gadget driver to unregister
1813 */
1814int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1815{
1816 unsigned long flags;
1817 int retval = 0;
1818 struct musb *musb = the_gadget;
1819
1820 if (!driver || !driver->unbind || !musb)
1821 return -EINVAL;
1822
1823 /* REVISIT always use otg_set_peripheral() here too;
1824 * this needs to shut down the OTG engine.
1825 */
1826
1827 spin_lock_irqsave(&musb->lock, flags);
1828
1829#ifdef CONFIG_USB_MUSB_OTG
1830 musb_hnp_stop(musb);
1831#endif
1832
1833 if (musb->gadget_driver == driver) {
1834
1835 (void) musb_gadget_vbus_draw(&musb->g, 0);
1836
1837 musb->xceiv.state = OTG_STATE_UNDEFINED;
1838 stop_activity(musb, driver);
1839
1840 DBG(3, "unregistering driver %s\n", driver->function);
1841 spin_unlock_irqrestore(&musb->lock, flags);
1842 driver->unbind(&musb->g);
1843 spin_lock_irqsave(&musb->lock, flags);
1844
1845 musb->gadget_driver = NULL;
1846 musb->g.dev.driver = NULL;
1847
1848 musb->is_active = 0;
1849 musb_platform_try_idle(musb, 0);
1850 } else
1851 retval = -EINVAL;
1852 spin_unlock_irqrestore(&musb->lock, flags);
1853
1854 if (is_otg_enabled(musb) && retval == 0) {
1855 usb_remove_hcd(musb_to_hcd(musb));
1856 /* FIXME we need to be able to register another
1857 * gadget driver here and have everything work;
1858 * that currently misbehaves.
1859 */
1860 }
1861
1862 return retval;
1863}
1864EXPORT_SYMBOL(usb_gadget_unregister_driver);
1865
1866
1867/* ----------------------------------------------------------------------- */
1868
1869/* lifecycle operations called through plat_uds.c */
1870
1871void musb_g_resume(struct musb *musb)
1872{
1873 musb->is_suspended = 0;
1874 switch (musb->xceiv.state) {
1875 case OTG_STATE_B_IDLE:
1876 break;
1877 case OTG_STATE_B_WAIT_ACON:
1878 case OTG_STATE_B_PERIPHERAL:
1879 musb->is_active = 1;
1880 if (musb->gadget_driver && musb->gadget_driver->resume) {
1881 spin_unlock(&musb->lock);
1882 musb->gadget_driver->resume(&musb->g);
1883 spin_lock(&musb->lock);
1884 }
1885 break;
1886 default:
1887 WARNING("unhandled RESUME transition (%s)\n",
1888 otg_state_string(musb));
1889 }
1890}
1891
1892/* called when SOF packets stop for 3+ msec */
1893void musb_g_suspend(struct musb *musb)
1894{
1895 u8 devctl;
1896
1897 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1898 DBG(3, "devctl %02x\n", devctl);
1899
1900 switch (musb->xceiv.state) {
1901 case OTG_STATE_B_IDLE:
1902 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1903 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
1904 break;
1905 case OTG_STATE_B_PERIPHERAL:
1906 musb->is_suspended = 1;
1907 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1908 spin_unlock(&musb->lock);
1909 musb->gadget_driver->suspend(&musb->g);
1910 spin_lock(&musb->lock);
1911 }
1912 break;
1913 default:
1914 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1915 * A_PERIPHERAL may need care too
1916 */
1917 WARNING("unhandled SUSPEND transition (%s)\n",
1918 otg_state_string(musb));
1919 }
1920}
1921
1922/* Called during SRP */
1923void musb_g_wakeup(struct musb *musb)
1924{
1925 musb_gadget_wakeup(&musb->g);
1926}
1927
1928/* called when VBUS drops below session threshold, and in other cases */
1929void musb_g_disconnect(struct musb *musb)
1930{
1931 void __iomem *mregs = musb->mregs;
1932 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
1933
1934 DBG(3, "devctl %02x\n", devctl);
1935
1936 /* clear HR */
1937 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1938
1939 /* don't draw vbus until new b-default session */
1940 (void) musb_gadget_vbus_draw(&musb->g, 0);
1941
1942 musb->g.speed = USB_SPEED_UNKNOWN;
1943 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1944 spin_unlock(&musb->lock);
1945 musb->gadget_driver->disconnect(&musb->g);
1946 spin_lock(&musb->lock);
1947 }
1948
1949 switch (musb->xceiv.state) {
1950 default:
1951#ifdef CONFIG_USB_MUSB_OTG
1952 DBG(2, "Unhandled disconnect %s, setting a_idle\n",
1953 otg_state_string(musb));
1954 musb->xceiv.state = OTG_STATE_A_IDLE;
1955 break;
1956 case OTG_STATE_A_PERIPHERAL:
1957 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
1958 break;
1959 case OTG_STATE_B_WAIT_ACON:
1960 case OTG_STATE_B_HOST:
1961#endif
1962 case OTG_STATE_B_PERIPHERAL:
1963 case OTG_STATE_B_IDLE:
1964 musb->xceiv.state = OTG_STATE_B_IDLE;
1965 break;
1966 case OTG_STATE_B_SRP_INIT:
1967 break;
1968 }
1969
1970 musb->is_active = 0;
1971}
1972
1973void musb_g_reset(struct musb *musb)
1974__releases(musb->lock)
1975__acquires(musb->lock)
1976{
1977 void __iomem *mbase = musb->mregs;
1978 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
1979 u8 power;
1980
1981 DBG(3, "<== %s addr=%x driver '%s'\n",
1982 (devctl & MUSB_DEVCTL_BDEVICE)
1983 ? "B-Device" : "A-Device",
1984 musb_readb(mbase, MUSB_FADDR),
1985 musb->gadget_driver
1986 ? musb->gadget_driver->driver.name
1987 : NULL
1988 );
1989
1990 /* report disconnect, if we didn't already (flushing EP state) */
1991 if (musb->g.speed != USB_SPEED_UNKNOWN)
1992 musb_g_disconnect(musb);
1993
1994 /* clear HR */
1995 else if (devctl & MUSB_DEVCTL_HR)
1996 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
1997
1998
1999 /* what speed did we negotiate? */
2000 power = musb_readb(mbase, MUSB_POWER);
2001 musb->g.speed = (power & MUSB_POWER_HSMODE)
2002 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2003
2004 /* start in USB_STATE_DEFAULT */
2005 musb->is_active = 1;
2006 musb->is_suspended = 0;
2007 MUSB_DEV_MODE(musb);
2008 musb->address = 0;
2009 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2010
2011 musb->may_wakeup = 0;
2012 musb->g.b_hnp_enable = 0;
2013 musb->g.a_alt_hnp_support = 0;
2014 musb->g.a_hnp_support = 0;
2015
2016 /* Normal reset, as B-Device;
2017 * or else after HNP, as A-Device
2018 */
2019 if (devctl & MUSB_DEVCTL_BDEVICE) {
2020 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
2021 musb->g.is_a_peripheral = 0;
2022 } else if (is_otg_enabled(musb)) {
2023 musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
2024 musb->g.is_a_peripheral = 1;
2025 } else
2026 WARN_ON(1);
2027
2028 /* start with default limits on VBUS power draw */
2029 (void) musb_gadget_vbus_draw(&musb->g,
2030 is_otg_enabled(musb) ? 8 : 100);
2031}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 000000000000..59502da9f739
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
1/*
2 * MUSB OTG driver peripheral defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_GADGET_H
36#define __MUSB_GADGET_H
37
38struct musb_request {
39 struct usb_request request;
40 struct musb_ep *ep;
41 struct musb *musb;
42 u8 tx; /* endpoint direction */
43 u8 epnum;
44 u8 mapped;
45};
46
47static inline struct musb_request *to_musb_request(struct usb_request *req)
48{
49 return req ? container_of(req, struct musb_request, request) : NULL;
50}
51
52extern struct usb_request *
53musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
54extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
55
56
57/*
58 * struct musb_ep - peripheral side view of endpoint rx or tx side
59 */
60struct musb_ep {
61 /* stuff towards the head is basically write-once. */
62 struct usb_ep end_point;
63 char name[12];
64 struct musb_hw_ep *hw_ep;
65 struct musb *musb;
66 u8 current_epnum;
67
68 /* ... when enabled/disabled ... */
69 u8 type;
70 u8 is_in;
71 u16 packet_sz;
72 const struct usb_endpoint_descriptor *desc;
73 struct dma_channel *dma;
74
75 /* later things are modified based on usage */
76 struct list_head req_list;
77
78 /* true if lock must be dropped but req_list may not be advanced */
79 u8 busy;
80};
81
82static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
83{
84 return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
85}
86
87static inline struct usb_request *next_request(struct musb_ep *ep)
88{
89 struct list_head *queue = &ep->req_list;
90
91 if (list_empty(queue))
92 return NULL;
93 return container_of(queue->next, struct usb_request, list);
94}
95
96extern void musb_g_tx(struct musb *musb, u8 epnum);
97extern void musb_g_rx(struct musb *musb, u8 epnum);
98
99extern const struct usb_ep_ops musb_g_ep0_ops;
100
101extern int musb_gadget_setup(struct musb *);
102extern void musb_gadget_cleanup(struct musb *);
103
104extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
105
106extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
107
108#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 000000000000..48d7d3ccb243
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,981 @@
1/*
2 * MUSB OTG peripheral driver ep0 handling
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/timer.h>
38#include <linux/spinlock.h>
39#include <linux/init.h>
40#include <linux/device.h>
41#include <linux/interrupt.h>
42
43#include "musb_core.h"
44
45/* ep0 is always musb->endpoints[0].ep_in */
46#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
47
48/*
49 * locking note: we use only the controller lock, for simpler correctness.
50 * It's always held with IRQs blocked.
51 *
52 * It protects the ep0 request queue as well as ep0_state, not just the
53 * controller and indexed registers. And that lock stays held unless it
54 * needs to be dropped to allow reentering this driver ... like upcalls to
55 * the gadget driver, or adjusting endpoint halt status.
56 */
57
58static char *decode_ep0stage(u8 stage)
59{
60 switch (stage) {
61 case MUSB_EP0_STAGE_SETUP: return "idle";
62 case MUSB_EP0_STAGE_TX: return "in";
63 case MUSB_EP0_STAGE_RX: return "out";
64 case MUSB_EP0_STAGE_ACKWAIT: return "wait";
65 case MUSB_EP0_STAGE_STATUSIN: return "in/status";
66 case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
67 default: return "?";
68 }
69}
70
71/* handle a standard GET_STATUS request
72 * Context: caller holds controller lock
73 */
74static int service_tx_status_request(
75 struct musb *musb,
76 const struct usb_ctrlrequest *ctrlrequest)
77{
78 void __iomem *mbase = musb->mregs;
79 int handled = 1;
80 u8 result[2], epnum = 0;
81 const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
82
83 result[1] = 0;
84
85 switch (recip) {
86 case USB_RECIP_DEVICE:
87 result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
88 result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
89#ifdef CONFIG_USB_MUSB_OTG
90 if (musb->g.is_otg) {
91 result[0] |= musb->g.b_hnp_enable
92 << USB_DEVICE_B_HNP_ENABLE;
93 result[0] |= musb->g.a_alt_hnp_support
94 << USB_DEVICE_A_ALT_HNP_SUPPORT;
95 result[0] |= musb->g.a_hnp_support
96 << USB_DEVICE_A_HNP_SUPPORT;
97 }
98#endif
99 break;
100
101 case USB_RECIP_INTERFACE:
102 result[0] = 0;
103 break;
104
105 case USB_RECIP_ENDPOINT: {
106 int is_in;
107 struct musb_ep *ep;
108 u16 tmp;
109 void __iomem *regs;
110
111 epnum = (u8) ctrlrequest->wIndex;
112 if (!epnum) {
113 result[0] = 0;
114 break;
115 }
116
117 is_in = epnum & USB_DIR_IN;
118 if (is_in) {
119 epnum &= 0x0f;
120 ep = &musb->endpoints[epnum].ep_in;
121 } else {
122 ep = &musb->endpoints[epnum].ep_out;
123 }
124 regs = musb->endpoints[epnum].regs;
125
126 if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
127 handled = -EINVAL;
128 break;
129 }
130
131 musb_ep_select(mbase, epnum);
132 if (is_in)
133 tmp = musb_readw(regs, MUSB_TXCSR)
134 & MUSB_TXCSR_P_SENDSTALL;
135 else
136 tmp = musb_readw(regs, MUSB_RXCSR)
137 & MUSB_RXCSR_P_SENDSTALL;
138 musb_ep_select(mbase, 0);
139
140 result[0] = tmp ? 1 : 0;
141 } break;
142
143 default:
144 /* class, vendor, etc ... delegate */
145 handled = 0;
146 break;
147 }
148
149 /* fill up the fifo; caller updates csr0 */
150 if (handled > 0) {
151 u16 len = le16_to_cpu(ctrlrequest->wLength);
152
153 if (len > 2)
154 len = 2;
155 musb_write_fifo(&musb->endpoints[0], len, result);
156 }
157
158 return handled;
159}
160
161/*
162 * handle a control-IN request, the ep0 buffer contains the current request
163 * that is supposed to be a standard control request. Assumes the fifo to
164 * be at least 2 bytes long.
165 *
166 * @return 0 if the request was NOT HANDLED,
167 * < 0 when error
168 * > 0 when the request is processed
169 *
170 * Context: caller holds controller lock
171 */
172static int
173service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
174{
175 int handled = 0; /* not handled */
176
177 if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
178 == USB_TYPE_STANDARD) {
179 switch (ctrlrequest->bRequest) {
180 case USB_REQ_GET_STATUS:
181 handled = service_tx_status_request(musb,
182 ctrlrequest);
183 break;
184
185 /* case USB_REQ_SYNC_FRAME: */
186
187 default:
188 break;
189 }
190 }
191 return handled;
192}
193
194/*
195 * Context: caller holds controller lock
196 */
197static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
198{
199 musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
200 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
201}
202
203/*
204 * Tries to start B-device HNP negotiation if enabled via sysfs
205 */
206static inline void musb_try_b_hnp_enable(struct musb *musb)
207{
208 void __iomem *mbase = musb->mregs;
209 u8 devctl;
210
211 DBG(1, "HNP: Setting HR\n");
212 devctl = musb_readb(mbase, MUSB_DEVCTL);
213 musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
214}
215
216/*
217 * Handle all control requests with no DATA stage, including standard
218 * requests such as:
219 * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
220 * always delegated to the gadget driver
221 * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
222 * always handled here, except for class/vendor/... features
223 *
224 * Context: caller holds controller lock
225 */
226static int
227service_zero_data_request(struct musb *musb,
228 struct usb_ctrlrequest *ctrlrequest)
229__releases(musb->lock)
230__acquires(musb->lock)
231{
232 int handled = -EINVAL;
233 void __iomem *mbase = musb->mregs;
234 const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
235
236 /* the gadget driver handles everything except what we MUST handle */
237 if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
238 == USB_TYPE_STANDARD) {
239 switch (ctrlrequest->bRequest) {
240 case USB_REQ_SET_ADDRESS:
241 /* change it after the status stage */
242 musb->set_address = true;
243 musb->address = (u8) (ctrlrequest->wValue & 0x7f);
244 handled = 1;
245 break;
246
247 case USB_REQ_CLEAR_FEATURE:
248 switch (recip) {
249 case USB_RECIP_DEVICE:
250 if (ctrlrequest->wValue
251 != USB_DEVICE_REMOTE_WAKEUP)
252 break;
253 musb->may_wakeup = 0;
254 handled = 1;
255 break;
256 case USB_RECIP_INTERFACE:
257 break;
258 case USB_RECIP_ENDPOINT:{
259 const u8 num = ctrlrequest->wIndex & 0x0f;
260 struct musb_ep *musb_ep;
261
262 if (num == 0
263 || num >= MUSB_C_NUM_EPS
264 || ctrlrequest->wValue
265 != USB_ENDPOINT_HALT)
266 break;
267
268 if (ctrlrequest->wIndex & USB_DIR_IN)
269 musb_ep = &musb->endpoints[num].ep_in;
270 else
271 musb_ep = &musb->endpoints[num].ep_out;
272 if (!musb_ep->desc)
273 break;
274
275 /* REVISIT do it directly, no locking games */
276 spin_unlock(&musb->lock);
277 musb_gadget_set_halt(&musb_ep->end_point, 0);
278 spin_lock(&musb->lock);
279
280 /* select ep0 again */
281 musb_ep_select(mbase, 0);
282 handled = 1;
283 } break;
284 default:
285 /* class, vendor, etc ... delegate */
286 handled = 0;
287 break;
288 }
289 break;
290
291 case USB_REQ_SET_FEATURE:
292 switch (recip) {
293 case USB_RECIP_DEVICE:
294 handled = 1;
295 switch (ctrlrequest->wValue) {
296 case USB_DEVICE_REMOTE_WAKEUP:
297 musb->may_wakeup = 1;
298 break;
299 case USB_DEVICE_TEST_MODE:
300 if (musb->g.speed != USB_SPEED_HIGH)
301 goto stall;
302 if (ctrlrequest->wIndex & 0xff)
303 goto stall;
304
305 switch (ctrlrequest->wIndex >> 8) {
306 case 1:
307 pr_debug("TEST_J\n");
308 /* TEST_J */
309 musb->test_mode_nr =
310 MUSB_TEST_J;
311 break;
312 case 2:
313 /* TEST_K */
314 pr_debug("TEST_K\n");
315 musb->test_mode_nr =
316 MUSB_TEST_K;
317 break;
318 case 3:
319 /* TEST_SE0_NAK */
320 pr_debug("TEST_SE0_NAK\n");
321 musb->test_mode_nr =
322 MUSB_TEST_SE0_NAK;
323 break;
324 case 4:
325 /* TEST_PACKET */
326 pr_debug("TEST_PACKET\n");
327 musb->test_mode_nr =
328 MUSB_TEST_PACKET;
329 break;
330 default:
331 goto stall;
332 }
333
334 /* enter test mode after irq */
335 if (handled > 0)
336 musb->test_mode = true;
337 break;
338#ifdef CONFIG_USB_MUSB_OTG
339 case USB_DEVICE_B_HNP_ENABLE:
340 if (!musb->g.is_otg)
341 goto stall;
342 musb->g.b_hnp_enable = 1;
343 musb_try_b_hnp_enable(musb);
344 break;
345 case USB_DEVICE_A_HNP_SUPPORT:
346 if (!musb->g.is_otg)
347 goto stall;
348 musb->g.a_hnp_support = 1;
349 break;
350 case USB_DEVICE_A_ALT_HNP_SUPPORT:
351 if (!musb->g.is_otg)
352 goto stall;
353 musb->g.a_alt_hnp_support = 1;
354 break;
355#endif
356stall:
357 default:
358 handled = -EINVAL;
359 break;
360 }
361 break;
362
363 case USB_RECIP_INTERFACE:
364 break;
365
366 case USB_RECIP_ENDPOINT:{
367 const u8 epnum =
368 ctrlrequest->wIndex & 0x0f;
369 struct musb_ep *musb_ep;
370 struct musb_hw_ep *ep;
371 void __iomem *regs;
372 int is_in;
373 u16 csr;
374
375 if (epnum == 0
376 || epnum >= MUSB_C_NUM_EPS
377 || ctrlrequest->wValue
378 != USB_ENDPOINT_HALT)
379 break;
380
381 ep = musb->endpoints + epnum;
382 regs = ep->regs;
383 is_in = ctrlrequest->wIndex & USB_DIR_IN;
384 if (is_in)
385 musb_ep = &ep->ep_in;
386 else
387 musb_ep = &ep->ep_out;
388 if (!musb_ep->desc)
389 break;
390
391 musb_ep_select(mbase, epnum);
392 if (is_in) {
393 csr = musb_readw(regs,
394 MUSB_TXCSR);
395 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
396 csr |= MUSB_TXCSR_FLUSHFIFO;
397 csr |= MUSB_TXCSR_P_SENDSTALL
398 | MUSB_TXCSR_CLRDATATOG
399 | MUSB_TXCSR_P_WZC_BITS;
400 musb_writew(regs, MUSB_TXCSR,
401 csr);
402 } else {
403 csr = musb_readw(regs,
404 MUSB_RXCSR);
405 csr |= MUSB_RXCSR_P_SENDSTALL
406 | MUSB_RXCSR_FLUSHFIFO
407 | MUSB_RXCSR_CLRDATATOG
408					| MUSB_RXCSR_P_WZC_BITS;
409 musb_writew(regs, MUSB_RXCSR,
410 csr);
411 }
412
413 /* select ep0 again */
414 musb_ep_select(mbase, 0);
415 handled = 1;
416 } break;
417
418 default:
419 /* class, vendor, etc ... delegate */
420 handled = 0;
421 break;
422 }
423 break;
424 default:
425 /* delegate SET_CONFIGURATION, etc */
426 handled = 0;
427 }
428 } else
429 handled = 0;
430 return handled;
431}
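
/* The SET_FEATURE(TEST_MODE) decoding above can be read as a lookup on
 * the high byte of wIndex (test selectors per USB 2.0 table 9-7); this
 * helper is a sketch restating that switch, not used by the driver:
 */
static inline int musb_test_mode_nr(u16 wIndex)
{
	switch (wIndex >> 8) {
	case 1:	return MUSB_TEST_J;
	case 2:	return MUSB_TEST_K;
	case 3:	return MUSB_TEST_SE0_NAK;
	case 4:	return MUSB_TEST_PACKET;
	default:
		return -EINVAL;		/* reserved selector: stall */
	}
}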
432
433/* we have an ep0out data packet
434 * Context: caller holds controller lock
435 */
436static void ep0_rxstate(struct musb *musb)
437{
438 void __iomem *regs = musb->control_ep->regs;
439 struct usb_request *req;
440 u16 tmp;
441
442 req = next_ep0_request(musb);
443
444 /* read packet and ack; or stall because of gadget driver bug:
445 * should have provided the rx buffer before setup() returned.
446 */
447 if (req) {
448 void *buf = req->buf + req->actual;
449 unsigned len = req->length - req->actual;
450
451 /* read the buffer */
452 tmp = musb_readb(regs, MUSB_COUNT0);
453 if (tmp > len) {
454 req->status = -EOVERFLOW;
455 tmp = len;
456 }
457 musb_read_fifo(&musb->endpoints[0], tmp, buf);
458 req->actual += tmp;
459		if (tmp < 64 || req->actual == req->length) {
460			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
461			tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_DATAEND;
462		} else {
463			tmp = MUSB_CSR0_P_SVDRXPKTRDY; req = NULL;
464		}
465 } else
466 tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
467
468
469 /* Completion handler may choose to stall, e.g. because the
470 * message just received holds invalid data.
471 */
472 if (req) {
473 musb->ackpend = tmp;
474 musb_g_ep0_giveback(musb, req);
475 if (!musb->ackpend)
476 return;
477 musb->ackpend = 0;
478 }
479 musb_writew(regs, MUSB_CSR0, tmp);
480}
481
482/*
483 * transmitting to the host (IN), this code might be called from IRQ
484 * and from kernel thread.
485 *
486 * Context: caller holds controller lock
487 */
488static void ep0_txstate(struct musb *musb)
489{
490 void __iomem *regs = musb->control_ep->regs;
491 struct usb_request *request = next_ep0_request(musb);
492 u16 csr = MUSB_CSR0_TXPKTRDY;
493 u8 *fifo_src;
494 u8 fifo_count;
495
496 if (!request) {
497 /* WARN_ON(1); */
498 DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
499 return;
500 }
501
502 /* load the data */
503 fifo_src = (u8 *) request->buf + request->actual;
504 fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
505 request->length - request->actual);
506 musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
507 request->actual += fifo_count;
508
509 /* update the flags */
510 if (fifo_count < MUSB_MAX_END0_PACKET
511 || request->actual == request->length) {
512 musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
513 csr |= MUSB_CSR0_P_DATAEND;
514 } else
515 request = NULL;
516
517 /* report completions as soon as the fifo's loaded; there's no
518 * win in waiting till this last packet gets acked. (other than
519 * very precise fault reporting, needed by USB TMC; possible with
520 * this hardware, but not usable from portable gadget drivers.)
521 */
522 if (request) {
523 musb->ackpend = csr;
524 musb_g_ep0_giveback(musb, request);
525 if (!musb->ackpend)
526 return;
527 musb->ackpend = 0;
528 }
529
530 /* send it out, triggering a "txpktrdy cleared" irq */
531 musb_writew(regs, MUSB_CSR0, csr);
532}
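
/* The ackpend handshake used by both ep0_rxstate() and ep0_txstate():
 * the IRQ path stashes the CSR0 value it intends to write, gives the
 * request back, and writes CSR0 itself only if the completion path did
 * not already consume it -- musb_g_ep0_queue() does exactly that with
 * "musb_writew(regs, MUSB_CSR0, musb->ackpend); musb->ackpend = 0;".
 */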
533
534/*
535 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
536 * Fields are left in USB byte-order.
537 *
538 * Context: caller holds controller lock.
539 */
540static void
541musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
542{
543 struct usb_request *r;
544 void __iomem *regs = musb->control_ep->regs;
545
546 musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
547
548 /* NOTE: earlier 2.6 versions changed setup packets to host
549 * order, but now USB packets always stay in USB byte order.
550 */
551 DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
552 req->bRequestType,
553 req->bRequest,
554 le16_to_cpu(req->wValue),
555 le16_to_cpu(req->wIndex),
556 le16_to_cpu(req->wLength));
557
558 /* clean up any leftover transfers */
559 r = next_ep0_request(musb);
560 if (r)
561 musb_g_ep0_giveback(musb, r);
562
563 /* For zero-data requests we want to delay the STATUS stage to
564 * avoid SETUPEND errors. If we read data (OUT), delay accepting
565 * packets until there's a buffer to store them in.
566 *
567 * If we write data, the controller acts happier if we enable
568 * the TX FIFO right away, and give the controller a moment
569 * to switch modes...
570 */
571 musb->set_address = false;
572 musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
573 if (req->wLength == 0) {
574 if (req->bRequestType & USB_DIR_IN)
575 musb->ackpend |= MUSB_CSR0_TXPKTRDY;
576 musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
577 } else if (req->bRequestType & USB_DIR_IN) {
578 musb->ep0_state = MUSB_EP0_STAGE_TX;
579 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
580 while ((musb_readw(regs, MUSB_CSR0)
581 & MUSB_CSR0_RXPKTRDY) != 0)
582 cpu_relax();
583 musb->ackpend = 0;
584 } else
585 musb->ep0_state = MUSB_EP0_STAGE_RX;
586}
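
/* Since the fields stay in USB (little-endian) byte order, consumers
 * must convert on access; a sketch:
 *
 *	u16 w_value  = le16_to_cpu(req->wValue);
 *	u16 w_length = le16_to_cpu(req->wLength);
 *
 *	if (w_length && (req->bRequestType & USB_DIR_IN))
 *		;	// control-IN data stage of w_length bytes follows
 */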
587
588static int
589forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
590__releases(musb->lock)
591__acquires(musb->lock)
592{
593 int retval;
594 if (!musb->gadget_driver)
595 return -EOPNOTSUPP;
596 spin_unlock(&musb->lock);
597 retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
598 spin_lock(&musb->lock);
599 return retval;
600}
601
602/*
603 * Handle peripheral ep0 interrupt
604 *
605 * Context: irq handler; we won't re-enter the driver that way.
606 */
607irqreturn_t musb_g_ep0_irq(struct musb *musb)
608{
609 u16 csr;
610 u16 len;
611 void __iomem *mbase = musb->mregs;
612 void __iomem *regs = musb->endpoints[0].regs;
613 irqreturn_t retval = IRQ_NONE;
614
615 musb_ep_select(mbase, 0); /* select ep0 */
616 csr = musb_readw(regs, MUSB_CSR0);
617 len = musb_readb(regs, MUSB_COUNT0);
618
619 DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
620 csr, len,
621 musb_readb(mbase, MUSB_FADDR),
622 decode_ep0stage(musb->ep0_state));
623
624 /* I sent a stall.. need to acknowledge it now.. */
625 if (csr & MUSB_CSR0_P_SENTSTALL) {
626 musb_writew(regs, MUSB_CSR0,
627 csr & ~MUSB_CSR0_P_SENTSTALL);
628 retval = IRQ_HANDLED;
629 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
630 csr = musb_readw(regs, MUSB_CSR0);
631 }
632
633 /* request ended "early" */
634 if (csr & MUSB_CSR0_P_SETUPEND) {
635 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
636 retval = IRQ_HANDLED;
637 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
638 csr = musb_readw(regs, MUSB_CSR0);
639 /* NOTE: request may need completion */
640 }
641
642 /* docs from Mentor only describe tx, rx, and idle/setup states.
643 * we need to handle nuances around status stages, and also the
644 * case where status and setup stages come back-to-back ...
645 */
646 switch (musb->ep0_state) {
647
648 case MUSB_EP0_STAGE_TX:
649 /* irq on clearing txpktrdy */
650 if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
651 ep0_txstate(musb);
652 retval = IRQ_HANDLED;
653 }
654 break;
655
656 case MUSB_EP0_STAGE_RX:
657 /* irq on set rxpktrdy */
658 if (csr & MUSB_CSR0_RXPKTRDY) {
659 ep0_rxstate(musb);
660 retval = IRQ_HANDLED;
661 }
662 break;
663
664 case MUSB_EP0_STAGE_STATUSIN:
665 /* end of sequence #2 (OUT/RX state) or #3 (no data) */
666
667 /* update address (if needed) only @ the end of the
668 * status phase per usb spec, which also guarantees
669 * we get 10 msec to receive this irq... until this
670 * is done we won't see the next packet.
671 */
672 if (musb->set_address) {
673 musb->set_address = false;
674 musb_writeb(mbase, MUSB_FADDR, musb->address);
675 }
676
677 /* enter test mode if needed (exit by reset) */
678 else if (musb->test_mode) {
679 DBG(1, "entering TESTMODE\n");
680
681 if (MUSB_TEST_PACKET == musb->test_mode_nr)
682 musb_load_testpacket(musb);
683
684 musb_writeb(mbase, MUSB_TESTMODE,
685 musb->test_mode_nr);
686 }
687 /* FALLTHROUGH */
688
689 case MUSB_EP0_STAGE_STATUSOUT:
690 /* end of sequence #1: write to host (TX state) */
691 {
692 struct usb_request *req;
693
694 req = next_ep0_request(musb);
695 if (req)
696 musb_g_ep0_giveback(musb, req);
697 }
698 retval = IRQ_HANDLED;
699 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
700 /* FALLTHROUGH */
701
702 case MUSB_EP0_STAGE_SETUP:
703 if (csr & MUSB_CSR0_RXPKTRDY) {
704 struct usb_ctrlrequest setup;
705 int handled = 0;
706
707 if (len != 8) {
708 ERR("SETUP packet len %d != 8 ?\n", len);
709 break;
710 }
711 musb_read_setup(musb, &setup);
712 retval = IRQ_HANDLED;
713
714 /* sometimes the RESET won't be reported */
715 if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
716 u8 power;
717
718 printk(KERN_NOTICE "%s: peripheral reset "
719 "irq lost!\n",
720 musb_driver_name);
721 power = musb_readb(mbase, MUSB_POWER);
722 musb->g.speed = (power & MUSB_POWER_HSMODE)
723 ? USB_SPEED_HIGH : USB_SPEED_FULL;
724
725 }
726
727 switch (musb->ep0_state) {
728
729 /* sequence #3 (no data stage), includes requests
730 * we can't forward (notably SET_ADDRESS and the
731 * device/endpoint feature set/clear operations)
732 * plus SET_CONFIGURATION and others we must
733 */
734 case MUSB_EP0_STAGE_ACKWAIT:
735 handled = service_zero_data_request(
736 musb, &setup);
737
738 /* status stage might be immediate */
739 if (handled > 0) {
740 musb->ackpend |= MUSB_CSR0_P_DATAEND;
741 musb->ep0_state =
742 MUSB_EP0_STAGE_STATUSIN;
743 }
744 break;
745
746 /* sequence #1 (IN to host), includes GET_STATUS
747 * requests that we can't forward, GET_DESCRIPTOR
748 * and others that we must
749 */
750 case MUSB_EP0_STAGE_TX:
751 handled = service_in_request(musb, &setup);
752 if (handled > 0) {
753 musb->ackpend = MUSB_CSR0_TXPKTRDY
754 | MUSB_CSR0_P_DATAEND;
755 musb->ep0_state =
756 MUSB_EP0_STAGE_STATUSOUT;
757 }
758 break;
759
760 /* sequence #2 (OUT from host), always forward */
761 default: /* MUSB_EP0_STAGE_RX */
762 break;
763 }
764
765 DBG(3, "handled %d, csr %04x, ep0stage %s\n",
766 handled, csr,
767 decode_ep0stage(musb->ep0_state));
768
769 /* unless we need to delegate this to the gadget
770 * driver, we know how to wrap this up: csr0 has
771 * not yet been written.
772 */
773 if (handled < 0)
774 goto stall;
775 else if (handled > 0)
776 goto finish;
777
778 handled = forward_to_driver(musb, &setup);
779 if (handled < 0) {
780 musb_ep_select(mbase, 0);
781stall:
782 DBG(3, "stall (%d)\n", handled);
783 musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
784 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
785finish:
786 musb_writew(regs, MUSB_CSR0,
787 musb->ackpend);
788 musb->ackpend = 0;
789 }
790 }
791 break;
792
793 case MUSB_EP0_STAGE_ACKWAIT:
794 /* This should not happen. But happens with tusb6010 with
795 * g_file_storage and high speed. Do nothing.
796 */
797 retval = IRQ_HANDLED;
798 break;
799
800 default:
801 /* "can't happen" */
802 WARN_ON(1);
803 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
804 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
805 break;
806 }
807
808 return retval;
809}
810
811
812static int
813musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
814{
815 /* always enabled */
816 return -EINVAL;
817}
818
819static int musb_g_ep0_disable(struct usb_ep *e)
820{
821 /* always enabled */
822 return -EINVAL;
823}
824
825static int
826musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
827{
828 struct musb_ep *ep;
829 struct musb_request *req;
830 struct musb *musb;
831 int status;
832 unsigned long lockflags;
833 void __iomem *regs;
834
835 if (!e || !r)
836 return -EINVAL;
837
838 ep = to_musb_ep(e);
839 musb = ep->musb;
840 regs = musb->control_ep->regs;
841
842 req = to_musb_request(r);
843 req->musb = musb;
844 req->request.actual = 0;
845 req->request.status = -EINPROGRESS;
846 req->tx = ep->is_in;
847
848 spin_lock_irqsave(&musb->lock, lockflags);
849
850 if (!list_empty(&ep->req_list)) {
851 status = -EBUSY;
852 goto cleanup;
853 }
854
855 switch (musb->ep0_state) {
856 case MUSB_EP0_STAGE_RX: /* control-OUT data */
857 case MUSB_EP0_STAGE_TX: /* control-IN data */
858 case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
859 status = 0;
860 break;
861 default:
862 DBG(1, "ep0 request queued in state %d\n",
863 musb->ep0_state);
864 status = -EINVAL;
865 goto cleanup;
866 }
867
868 /* add request to the list */
869 list_add_tail(&(req->request.list), &(ep->req_list));
870
871 DBG(3, "queue to %s (%s), length=%d\n",
872 ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
873 req->request.length);
874
875 musb_ep_select(musb->mregs, 0);
876
877 /* sequence #1, IN ... start writing the data */
878 if (musb->ep0_state == MUSB_EP0_STAGE_TX)
879 ep0_txstate(musb);
880
881 /* sequence #3, no-data ... issue IN status */
882 else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
883 if (req->request.length)
884 status = -EINVAL;
885 else {
886 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
887 musb_writew(regs, MUSB_CSR0,
888 musb->ackpend | MUSB_CSR0_P_DATAEND);
889 musb->ackpend = 0;
890 musb_g_ep0_giveback(ep->musb, r);
891 }
892
893 /* else for sequence #2 (OUT), caller provides a buffer
894 * before the next packet arrives. deferred responses
895 * (after SETUP is acked) are racey.
896 */
897 } else if (musb->ackpend) {
898 musb_writew(regs, MUSB_CSR0, musb->ackpend);
899 musb->ackpend = 0;
900 }
901
902cleanup:
903 spin_unlock_irqrestore(&musb->lock, lockflags);
904 return status;
905}
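
/* Sketch of the calling convention: a gadget driver's setup() callback
 * usually answers a control-IN request by queuing a reply on ep0; the
 * names reply/req below are hypothetical:
 *
 *	req->buf = reply;
 *	req->length = min_t(u16, w_length, sizeof(reply));
 *	status = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
 */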
906
907static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
908{
909 /* we just won't support this */
910 return -EINVAL;
911}
912
913static int musb_g_ep0_halt(struct usb_ep *e, int value)
914{
915 struct musb_ep *ep;
916 struct musb *musb;
917 void __iomem *base, *regs;
918 unsigned long flags;
919 int status;
920 u16 csr;
921
922 if (!e || !value)
923 return -EINVAL;
924
925 ep = to_musb_ep(e);
926 musb = ep->musb;
927 base = musb->mregs;
928 regs = musb->control_ep->regs;
929 status = 0;
930
931 spin_lock_irqsave(&musb->lock, flags);
932
933 if (!list_empty(&ep->req_list)) {
934 status = -EBUSY;
935 goto cleanup;
936 }
937
938 musb_ep_select(base, 0);
939 csr = musb->ackpend;
940
941 switch (musb->ep0_state) {
942
943 /* Stalls are usually issued after parsing SETUP packet, either
944 * directly in irq context from setup() or else later.
945 */
946 case MUSB_EP0_STAGE_TX: /* control-IN data */
947 case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
948 case MUSB_EP0_STAGE_RX: /* control-OUT data */
949 csr = musb_readw(regs, MUSB_CSR0);
950 /* FALLTHROUGH */
951
952 /* It's also OK to issue stalls during callbacks when a non-empty
953 * DATA stage buffer has been read (or even written).
954 */
955 case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
956 case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */
957
958 csr |= MUSB_CSR0_P_SENDSTALL;
959 musb_writew(regs, MUSB_CSR0, csr);
960 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
961 musb->ackpend = 0;
962 break;
963 default:
964 DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
965 status = -EINVAL;
966 }
967
968cleanup:
969 spin_unlock_irqrestore(&musb->lock, flags);
970 return status;
971}
972
973const struct usb_ep_ops musb_g_ep0_ops = {
974 .enable = musb_g_ep0_enable,
975 .disable = musb_g_ep0_disable,
976 .alloc_request = musb_alloc_request,
977 .free_request = musb_free_request,
978 .queue = musb_g_ep0_queue,
979 .dequeue = musb_g_ep0_dequeue,
980 .set_halt = musb_g_ep0_halt,
981};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000000..8b4be012669a
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
1/*
2 * MUSB OTG driver host support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/slab.h>
40#include <linux/errno.h>
41#include <linux/init.h>
42#include <linux/list.h>
43
44#include "musb_core.h"
45#include "musb_host.h"
46
47
48/* MUSB HOST status 22-mar-2006
49 *
50 * - There's still lots of partial code duplication for fault paths, so
51 * they aren't handled as consistently as they need to be.
52 *
53 * - PIO mostly behaved when last tested.
54 * + including ep0, with all usbtest cases 9, 10
55 * + usbtest 14 (ep0out) doesn't seem to run at all
56 * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
57 * configurations, but otherwise double buffering passes basic tests.
58 * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
59 *
60 * - DMA (CPPI) ... partially behaves, not currently recommended
61 * + about 1/15 the speed of typical EHCI implementations (PCI)
62 * + RX, all too often reqpkt seems to misbehave after tx
63 * + TX, no known issues (other than evident silicon issue)
64 *
65 * - DMA (Mentor/OMAP) ...has at least toggle update problems
66 *
 67 * - Still no traffic scheduling code to keep NAKing bulk or control
 68 *   transfers from starving other requests; or to make efficient use
69 * of hardware with periodic transfers. (Note that network drivers
70 * commonly post bulk reads that stay pending for a long time; these
71 * would make very visible trouble.)
72 *
73 * - Not tested with HNP, but some SRP paths seem to behave.
74 *
75 * NOTE 24-August-2006:
76 *
77 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
78 * extra endpoint for periodic use enabling hub + keybd + mouse. That
79 * mostly works, except that with "usbnet" it's easy to trigger cases
80 * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
81 * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
82 * although ARP RX wins. (That test was done with a full speed link.)
83 */
84
85
86/*
87 * NOTE on endpoint usage:
88 *
89 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
90 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
91 *
92 * (Yes, bulk _could_ use more of the endpoints than that, and would even
93 * benefit from it ... one remote device may easily be NAKing while others
94 * need to perform transfers in that same direction. The same thing could
95 * be done in software though, assuming dma cooperates.)
96 *
 97 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
98 * So far that scheduling is both dumb and optimistic: the endpoint will be
99 * "claimed" until its software queue is no longer refilled. No multiplexing
100 * of transfers between endpoints, or anything clever.
101 */
102
103
104static void musb_ep_program(struct musb *musb, u8 epnum,
105 struct urb *urb, unsigned int nOut,
106 u8 *buf, u32 len);
107
108/*
109 * Clear TX fifo. Needed to avoid BABBLE errors.
110 */
111static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112{
113 void __iomem *epio = ep->regs;
114 u16 csr;
115 int retries = 1000;
116
117 csr = musb_readw(epio, MUSB_TXCSR);
118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
119 DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
120 csr |= MUSB_TXCSR_FLUSHFIFO;
121 musb_writew(epio, MUSB_TXCSR, csr);
122 csr = musb_readw(epio, MUSB_TXCSR);
123 if (retries-- < 1) {
124 ERR("Could not flush host TX fifo: csr: %04x\n", csr);
125 return;
126 }
127 mdelay(1);
128 }
129}
130
131/*
132 * Start transmit. Caller is responsible for locking shared resources.
133 * musb must be locked.
134 */
135static inline void musb_h_tx_start(struct musb_hw_ep *ep)
136{
137 u16 txcsr;
138
139 /* NOTE: no locks here; caller should lock and select EP */
140 if (ep->epnum) {
141 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
142 txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
143 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
144 } else {
145 txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
146 musb_writew(ep->regs, MUSB_CSR0, txcsr);
147 }
148
149}
150
151static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
152{
153 u16 txcsr;
154
155 /* NOTE: no locks here; caller should lock and select EP */
156 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
157 txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
158 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
159}
160
161/*
162 * Start the URB at the front of an endpoint's queue
163 * end must be claimed from the caller.
164 *
165 * Context: controller locked, irqs blocked
166 */
167static void
168musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
169{
170 u16 frame;
171 u32 len;
172 void *buf;
173 void __iomem *mbase = musb->mregs;
174 struct urb *urb = next_urb(qh);
175 struct musb_hw_ep *hw_ep = qh->hw_ep;
176 unsigned pipe = urb->pipe;
177 u8 address = usb_pipedevice(pipe);
178 int epnum = hw_ep->epnum;
179
180 /* initialize software qh state */
181 qh->offset = 0;
182 qh->segsize = 0;
183
184 /* gather right source of data */
185 switch (qh->type) {
186 case USB_ENDPOINT_XFER_CONTROL:
187 /* control transfers always start with SETUP */
188 is_in = 0;
189 hw_ep->out_qh = qh;
190 musb->ep0_stage = MUSB_EP0_START;
191 buf = urb->setup_packet;
192 len = 8;
193 break;
194 case USB_ENDPOINT_XFER_ISOC:
195 qh->iso_idx = 0;
196 qh->frame = 0;
197 buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
198 len = urb->iso_frame_desc[0].length;
199 break;
200 default: /* bulk, interrupt */
201 buf = urb->transfer_buffer;
202 len = urb->transfer_buffer_length;
203 }
204
205 DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
206 qh, urb, address, qh->epnum,
207 is_in ? "in" : "out",
208 ({char *s; switch (qh->type) {
209 case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
210 case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
211 case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
212 default: s = "-intr"; break;
213 }; s; }),
214 epnum, buf, len);
215
216 /* Configure endpoint */
217 if (is_in || hw_ep->is_shared_fifo)
218 hw_ep->in_qh = qh;
219 else
220 hw_ep->out_qh = qh;
221 musb_ep_program(musb, epnum, urb, !is_in, buf, len);
222
223 /* transmit may have more work: start it when it is time */
224 if (is_in)
225 return;
226
227 /* determine if the time is right for a periodic transfer */
228 switch (qh->type) {
229 case USB_ENDPOINT_XFER_ISOC:
230 case USB_ENDPOINT_XFER_INT:
231 DBG(3, "check whether there's still time for periodic Tx\n");
232 qh->iso_idx = 0;
233 frame = musb_readw(mbase, MUSB_FRAME);
234 /* FIXME this doesn't implement that scheduling policy ...
235 * or handle framecounter wrapping
236 */
237 if ((urb->transfer_flags & URB_ISO_ASAP)
238 || (frame >= urb->start_frame)) {
239 /* REVISIT the SOF irq handler shouldn't duplicate
240 * this code; and we don't init urb->start_frame...
241 */
242 qh->frame = 0;
243 goto start;
244 } else {
245 qh->frame = urb->start_frame;
246 /* enable SOF interrupt so we can count down */
247 DBG(1, "SOF for %d\n", epnum);
248#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
249 musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
250#endif
251 }
252 break;
253 default:
254start:
255 DBG(4, "Start TX%d %s\n", epnum,
256 hw_ep->tx_channel ? "dma" : "pio");
257
258 if (!hw_ep->tx_channel)
259 musb_h_tx_start(hw_ep);
260 else if (is_cppi_enabled() || tusb_dma_omap())
261 cppi_host_txdma_start(hw_ep);
262 }
263}
264
265/* caller owns controller lock, irqs are blocked */
266static void
267__musb_giveback(struct musb *musb, struct urb *urb, int status)
268__releases(musb->lock)
269__acquires(musb->lock)
270{
271 DBG(({ int level; switch (urb->status) {
272 case 0:
273 level = 4;
274 break;
275 /* common/boring faults */
276 case -EREMOTEIO:
277 case -ESHUTDOWN:
278 case -ECONNRESET:
279 case -EPIPE:
280 level = 3;
281 break;
282 default:
283 level = 2;
284 break;
285 }; level; }),
286 "complete %p (%d), dev%d ep%d%s, %d/%d\n",
287 urb, urb->status,
288 usb_pipedevice(urb->pipe),
289 usb_pipeendpoint(urb->pipe),
290 usb_pipein(urb->pipe) ? "in" : "out",
291 urb->actual_length, urb->transfer_buffer_length
292 );
293
294 spin_unlock(&musb->lock);
295 usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
296 spin_lock(&musb->lock);
297}
298
299/* for bulk/interrupt endpoints only */
300static inline void
301musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
302{
303 struct usb_device *udev = urb->dev;
304 u16 csr;
305 void __iomem *epio = ep->regs;
306 struct musb_qh *qh;
307
308 /* FIXME: the current Mentor DMA code seems to have
309 * problems getting toggle correct.
310 */
311
312 if (is_in || ep->is_shared_fifo)
313 qh = ep->in_qh;
314 else
315 qh = ep->out_qh;
316
317 if (!is_in) {
318 csr = musb_readw(epio, MUSB_TXCSR);
319 usb_settoggle(udev, qh->epnum, 1,
320 (csr & MUSB_TXCSR_H_DATATOGGLE)
321 ? 1 : 0);
322 } else {
323 csr = musb_readw(epio, MUSB_RXCSR);
324 usb_settoggle(udev, qh->epnum, 0,
325 (csr & MUSB_RXCSR_H_DATATOGGLE)
326 ? 1 : 0);
327 }
328}
329
330/* caller owns controller lock, irqs are blocked */
331static struct musb_qh *
332musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
333{
334 int is_in;
335 struct musb_hw_ep *ep = qh->hw_ep;
336 struct musb *musb = ep->musb;
337 int ready = qh->is_ready;
338
339 if (ep->is_shared_fifo)
340 is_in = 1;
341 else
342 is_in = usb_pipein(urb->pipe);
343
344 /* save toggle eagerly, for paranoia */
345 switch (qh->type) {
346 case USB_ENDPOINT_XFER_BULK:
347 case USB_ENDPOINT_XFER_INT:
348 musb_save_toggle(ep, is_in, urb);
349 break;
350 case USB_ENDPOINT_XFER_ISOC:
351 if (status == 0 && urb->error_count)
352 status = -EXDEV;
353 break;
354 }
355
356 usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
357
358 qh->is_ready = 0;
359 __musb_giveback(musb, urb, status);
360 qh->is_ready = ready;
361
362 /* reclaim resources (and bandwidth) ASAP; deschedule it, and
363 * invalidate qh as soon as list_empty(&hep->urb_list)
364 */
365 if (list_empty(&qh->hep->urb_list)) {
366 struct list_head *head;
367
368 if (is_in)
369 ep->rx_reinit = 1;
370 else
371 ep->tx_reinit = 1;
372
373 /* clobber old pointers to this qh */
374 if (is_in || ep->is_shared_fifo)
375 ep->in_qh = NULL;
376 else
377 ep->out_qh = NULL;
378 qh->hep->hcpriv = NULL;
379
380 switch (qh->type) {
381
382 case USB_ENDPOINT_XFER_ISOC:
383 case USB_ENDPOINT_XFER_INT:
384 /* this is where periodic bandwidth should be
385 * de-allocated if it's tracked and allocated;
386 * and where we'd update the schedule tree...
387 */
388 musb->periodic[ep->epnum] = NULL;
389 kfree(qh);
390 qh = NULL;
391 break;
392
393 case USB_ENDPOINT_XFER_CONTROL:
394 case USB_ENDPOINT_XFER_BULK:
395 /* fifo policy for these lists, except that NAKing
396 * should rotate a qh to the end (for fairness).
397 */
398 head = qh->ring.prev;
399 list_del(&qh->ring);
400 kfree(qh);
401 qh = first_qh(head);
402 break;
403 }
404 }
405 return qh;
406}
407
408/*
409 * Advance this hardware endpoint's queue, completing the specified urb and
410 * advancing to either the next urb queued to that qh, or else invalidating
411 * that qh and advancing to the next qh scheduled after the current one.
412 *
413 * Context: caller owns controller lock, irqs are blocked
414 */
415static void
416musb_advance_schedule(struct musb *musb, struct urb *urb,
417 struct musb_hw_ep *hw_ep, int is_in)
418{
419 struct musb_qh *qh;
420
421 if (is_in || hw_ep->is_shared_fifo)
422 qh = hw_ep->in_qh;
423 else
424 qh = hw_ep->out_qh;
425
426 if (urb->status == -EINPROGRESS)
427 qh = musb_giveback(qh, urb, 0);
428 else
429 qh = musb_giveback(qh, urb, urb->status);
430
431 if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
432 DBG(4, "... next ep%d %cX urb %p\n",
433 hw_ep->epnum, is_in ? 'R' : 'T',
434 next_urb(qh));
435 musb_start_urb(musb, is_in, qh);
436 }
437}
438
439static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
440{
441 /* we don't want fifo to fill itself again;
442 * ignore dma (various models),
443 * leave toggle alone (may not have been saved yet)
444 */
445 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
446 csr &= ~(MUSB_RXCSR_H_REQPKT
447 | MUSB_RXCSR_H_AUTOREQ
448 | MUSB_RXCSR_AUTOCLEAR);
449
450 /* write 2x to allow double buffering */
451 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
452 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
453
454 /* flush writebuffer */
455 return musb_readw(hw_ep->regs, MUSB_RXCSR);
456}
457
458/*
459 * PIO RX for a packet (or part of it).
460 */
461static bool
462musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
463{
464 u16 rx_count;
465 u8 *buf;
466 u16 csr;
467 bool done = false;
468 u32 length;
469 int do_flush = 0;
470 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
471 void __iomem *epio = hw_ep->regs;
472 struct musb_qh *qh = hw_ep->in_qh;
473 int pipe = urb->pipe;
474 void *buffer = urb->transfer_buffer;
475
476 /* musb_ep_select(mbase, epnum); */
477 rx_count = musb_readw(epio, MUSB_RXCOUNT);
478 DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
479 urb->transfer_buffer, qh->offset,
480 urb->transfer_buffer_length);
481
482 /* unload FIFO */
483 if (usb_pipeisoc(pipe)) {
484 int status = 0;
485 struct usb_iso_packet_descriptor *d;
486
487 if (iso_err) {
488 status = -EILSEQ;
489 urb->error_count++;
490 }
491
492 d = urb->iso_frame_desc + qh->iso_idx;
493 buf = buffer + d->offset;
494 length = d->length;
495 if (rx_count > length) {
496 if (status == 0) {
497 status = -EOVERFLOW;
498 urb->error_count++;
499 }
500 DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
501 do_flush = 1;
502 } else
503 length = rx_count;
504 urb->actual_length += length;
505 d->actual_length = length;
506
507 d->status = status;
508
509 /* see if we are done */
510 done = (++qh->iso_idx >= urb->number_of_packets);
511 } else {
512 /* non-isoch */
513 buf = buffer + qh->offset;
514 length = urb->transfer_buffer_length - qh->offset;
515 if (rx_count > length) {
516 if (urb->status == -EINPROGRESS)
517 urb->status = -EOVERFLOW;
518 DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
519 do_flush = 1;
520 } else
521 length = rx_count;
522 urb->actual_length += length;
523 qh->offset += length;
524
525 /* see if we are done */
526 done = (urb->actual_length == urb->transfer_buffer_length)
527 || (rx_count < qh->maxpacket)
528 || (urb->status != -EINPROGRESS);
529 if (done
530 && (urb->status == -EINPROGRESS)
531 && (urb->transfer_flags & URB_SHORT_NOT_OK)
532 && (urb->actual_length
533 < urb->transfer_buffer_length))
534 urb->status = -EREMOTEIO;
535 }
536
537 musb_read_fifo(hw_ep, length, buf);
538
539 csr = musb_readw(epio, MUSB_RXCSR);
540 csr |= MUSB_RXCSR_H_WZC_BITS;
541 if (unlikely(do_flush))
542 musb_h_flush_rxfifo(hw_ep, csr);
543 else {
544 /* REVISIT this assumes AUTOCLEAR is never set */
545 csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
546 if (!done)
547 csr |= MUSB_RXCSR_H_REQPKT;
548 musb_writew(epio, MUSB_RXCSR, csr);
549 }
550
551 return done;
552}
553
554/* we don't always need to reinit a given side of an endpoint...
555 * when we do, use tx/rx reinit routine and then construct a new CSR
556 * to address data toggle, NYET, and DMA or PIO.
557 *
558 * it's possible that driver bugs (especially for DMA) or aborting a
559 * transfer might have left the endpoint busier than it should be.
560 * the busy/not-empty tests are basically paranoia.
561 */
562static void
563musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
564{
565 u16 csr;
566
567 /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
568 * That always uses tx_reinit since ep0 repurposes TX register
569 * offsets; the initial SETUP packet is also a kind of OUT.
570 */
571
572 /* if programmed for Tx, put it in RX mode */
573 if (ep->is_shared_fifo) {
574 csr = musb_readw(ep->regs, MUSB_TXCSR);
575 if (csr & MUSB_TXCSR_MODE) {
576 musb_h_tx_flush_fifo(ep);
577 musb_writew(ep->regs, MUSB_TXCSR,
578 MUSB_TXCSR_FRCDATATOG);
579 }
580 /* clear mode (and everything else) to enable Rx */
581 musb_writew(ep->regs, MUSB_TXCSR, 0);
582
583 /* scrub all previous state, clearing toggle */
584 } else {
585 csr = musb_readw(ep->regs, MUSB_RXCSR);
586 if (csr & MUSB_RXCSR_RXPKTRDY)
587 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
588 musb_readw(ep->regs, MUSB_RXCOUNT));
589
590 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
591 }
592
593 /* target addr and (for multipoint) hub addr/port */
594 if (musb->is_multipoint) {
595 musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
596 qh->addr_reg);
597 musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
598 qh->h_addr_reg);
599 musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
600 qh->h_port_reg);
601 } else
602 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
603
604 /* protocol/endpoint, interval/NAKlimit, i/o size */
605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
607 /* NOTE: bulk combining rewrites high bits of maxpacket */
608 musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
609
610 ep->rx_reinit = 0;
611}
612
613
614/*
615 * Program an HDRC endpoint as per the given URB
616 * Context: irqs blocked, controller lock held
617 */
618static void musb_ep_program(struct musb *musb, u8 epnum,
619 struct urb *urb, unsigned int is_out,
620 u8 *buf, u32 len)
621{
622 struct dma_controller *dma_controller;
623 struct dma_channel *dma_channel;
624 u8 dma_ok;
625 void __iomem *mbase = musb->mregs;
626 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
627 void __iomem *epio = hw_ep->regs;
628 struct musb_qh *qh;
629 u16 packet_sz;
630
631 if (!is_out || hw_ep->is_shared_fifo)
632 qh = hw_ep->in_qh;
633 else
634 qh = hw_ep->out_qh;
635
636 packet_sz = qh->maxpacket;
637
638 DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
639 "h_addr%02x h_port%02x bytes %d\n",
640 is_out ? "-->" : "<--",
641 epnum, urb, urb->dev->speed,
642 qh->addr_reg, qh->epnum, is_out ? "out" : "in",
643 qh->h_addr_reg, qh->h_port_reg,
644 len);
645
646 musb_ep_select(mbase, epnum);
647
648 /* candidate for DMA? */
649 dma_controller = musb->dma_controller;
650 if (is_dma_capable() && epnum && dma_controller) {
651 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
652 if (!dma_channel) {
653 dma_channel = dma_controller->channel_alloc(
654 dma_controller, hw_ep, is_out);
655 if (is_out)
656 hw_ep->tx_channel = dma_channel;
657 else
658 hw_ep->rx_channel = dma_channel;
659 }
660 } else
661 dma_channel = NULL;
662
663 /* make sure we clear DMAEnab, autoSet bits from previous run */
664
665 /* OUT/transmit/EP0 or IN/receive? */
666 if (is_out) {
667 u16 csr;
668 u16 int_txe;
669 u16 load_count;
670
671 csr = musb_readw(epio, MUSB_TXCSR);
672
673 /* disable interrupt in case we flush */
674 int_txe = musb_readw(mbase, MUSB_INTRTXE);
675 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
676
677 /* general endpoint setup */
678 if (epnum) {
679 /* ASSERT: TXCSR_DMAENAB was already cleared */
680
681 /* flush all old state, set default */
682 musb_h_tx_flush_fifo(hw_ep);
683 csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
684 | MUSB_TXCSR_DMAMODE
685 | MUSB_TXCSR_FRCDATATOG
686 | MUSB_TXCSR_H_RXSTALL
687 | MUSB_TXCSR_H_ERROR
688 | MUSB_TXCSR_TXPKTRDY
689 );
690 csr |= MUSB_TXCSR_MODE;
691
692 if (usb_gettoggle(urb->dev,
693 qh->epnum, 1))
694 csr |= MUSB_TXCSR_H_WR_DATATOGGLE
695 | MUSB_TXCSR_H_DATATOGGLE;
696 else
697 csr |= MUSB_TXCSR_CLRDATATOG;
698
699 /* twice in case of double packet buffering */
700 musb_writew(epio, MUSB_TXCSR, csr);
701 /* REVISIT may need to clear FLUSHFIFO ... */
702 musb_writew(epio, MUSB_TXCSR, csr);
703 csr = musb_readw(epio, MUSB_TXCSR);
704 } else {
705 /* endpoint 0: just flush */
706 musb_writew(epio, MUSB_CSR0,
707 csr | MUSB_CSR0_FLUSHFIFO);
708 musb_writew(epio, MUSB_CSR0,
709 csr | MUSB_CSR0_FLUSHFIFO);
710 }
711
712 /* target addr and (for multipoint) hub addr/port */
713 if (musb->is_multipoint) {
714 musb_writeb(mbase,
715 MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
716 qh->addr_reg);
717 musb_writeb(mbase,
718 MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
719 qh->h_addr_reg);
720 musb_writeb(mbase,
721 MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
722 qh->h_port_reg);
723/* FIXME if !epnum, do the same for RX ... */
724 } else
725 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
726
727 /* protocol/endpoint/interval/NAKlimit */
728 if (epnum) {
729 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
730 if (can_bulk_split(musb, qh->type))
731 musb_writew(epio, MUSB_TXMAXP,
732 packet_sz
733 | ((hw_ep->max_packet_sz_tx /
734 packet_sz) - 1) << 11);
735 else
736 musb_writew(epio, MUSB_TXMAXP,
737 packet_sz);
738 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
739 } else {
740 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
741 if (musb->is_multipoint)
742 musb_writeb(epio, MUSB_TYPE0,
743 qh->type_reg);
744 }
745
746 if (can_bulk_split(musb, qh->type))
747 load_count = min((u32) hw_ep->max_packet_sz_tx,
748 len);
749 else
750 load_count = min((u32) packet_sz, len);
751
752#ifdef CONFIG_USB_INVENTRA_DMA
753 if (dma_channel) {
754
755 /* clear previous state */
756 csr = musb_readw(epio, MUSB_TXCSR);
757 csr &= ~(MUSB_TXCSR_AUTOSET
758 | MUSB_TXCSR_DMAMODE
759 | MUSB_TXCSR_DMAENAB);
760 csr |= MUSB_TXCSR_MODE;
761 musb_writew(epio, MUSB_TXCSR,
762 csr | MUSB_TXCSR_MODE);
763
764 qh->segsize = min(len, dma_channel->max_len);
765
766 if (qh->segsize <= packet_sz)
767 dma_channel->desired_mode = 0;
768 else
769 dma_channel->desired_mode = 1;
770
771
772 if (dma_channel->desired_mode == 0) {
773 csr &= ~(MUSB_TXCSR_AUTOSET
774 | MUSB_TXCSR_DMAMODE);
775 csr |= (MUSB_TXCSR_DMAENAB);
776 /* against programming guide */
777 } else
778 csr |= (MUSB_TXCSR_AUTOSET
779 | MUSB_TXCSR_DMAENAB
780 | MUSB_TXCSR_DMAMODE);
781
782 musb_writew(epio, MUSB_TXCSR, csr);
783
784 dma_ok = dma_controller->channel_program(
785 dma_channel, packet_sz,
786 dma_channel->desired_mode,
787 urb->transfer_dma,
788 qh->segsize);
789 if (dma_ok) {
790 load_count = 0;
791 } else {
792 dma_controller->channel_release(dma_channel);
793 if (is_out)
794 hw_ep->tx_channel = NULL;
795 else
796 hw_ep->rx_channel = NULL;
797 dma_channel = NULL;
798 }
799 }
800#endif
801
802 /* candidate for DMA */
803 if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
804
805 /* program endpoint CSRs first, then setup DMA.
806 * assume CPPI setup succeeds.
807 * defer enabling dma.
808 */
809 csr = musb_readw(epio, MUSB_TXCSR);
810 csr &= ~(MUSB_TXCSR_AUTOSET
811 | MUSB_TXCSR_DMAMODE
812 | MUSB_TXCSR_DMAENAB);
813 csr |= MUSB_TXCSR_MODE;
814 musb_writew(epio, MUSB_TXCSR,
815 csr | MUSB_TXCSR_MODE);
816
817 dma_channel->actual_len = 0L;
818 qh->segsize = len;
819
820 /* TX uses "rndis" mode automatically, but needs help
821 * to identify the zero-length-final-packet case.
822 */
823 dma_ok = dma_controller->channel_program(
824 dma_channel, packet_sz,
825 (urb->transfer_flags
826 & URB_ZERO_PACKET)
827 == URB_ZERO_PACKET,
828 urb->transfer_dma,
829 qh->segsize);
830 if (dma_ok) {
831 load_count = 0;
832 } else {
833 dma_controller->channel_release(dma_channel);
834 hw_ep->tx_channel = NULL;
835 dma_channel = NULL;
836
837 /* REVISIT there's an error path here that
838 * needs handling: can't do dma, but
839 * there's no pio buffer address...
840 */
841 }
842 }
843
844 if (load_count) {
845 /* ASSERT: TXCSR_DMAENAB was already cleared */
846
847 /* PIO to load FIFO */
848 qh->segsize = load_count;
849 musb_write_fifo(hw_ep, load_count, buf);
850 csr = musb_readw(epio, MUSB_TXCSR);
851 csr &= ~(MUSB_TXCSR_DMAENAB
852 | MUSB_TXCSR_DMAMODE
853 | MUSB_TXCSR_AUTOSET);
854 /* write CSR */
855 csr |= MUSB_TXCSR_MODE;
856
857 if (epnum)
858 musb_writew(epio, MUSB_TXCSR, csr);
859 }
860
861 /* re-enable interrupt */
862 musb_writew(mbase, MUSB_INTRTXE, int_txe);
863
864 /* IN/receive */
865 } else {
866 u16 csr;
867
868 if (hw_ep->rx_reinit) {
869 musb_rx_reinit(musb, qh, hw_ep);
870
871 /* init new state: toggle and NYET, maybe DMA later */
872 if (usb_gettoggle(urb->dev, qh->epnum, 0))
873 csr = MUSB_RXCSR_H_WR_DATATOGGLE
874 | MUSB_RXCSR_H_DATATOGGLE;
875 else
876 csr = 0;
877 if (qh->type == USB_ENDPOINT_XFER_INT)
878 csr |= MUSB_RXCSR_DISNYET;
879
880 } else {
881 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
882
883 if (csr & (MUSB_RXCSR_RXPKTRDY
884 | MUSB_RXCSR_DMAENAB
885 | MUSB_RXCSR_H_REQPKT))
886 ERR("broken !rx_reinit, ep%d csr %04x\n",
887 hw_ep->epnum, csr);
888
889 /* scrub any stale state, leaving toggle alone */
890 csr &= MUSB_RXCSR_DISNYET;
891 }
892
893 /* kick things off */
894
895 if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
896 /* candidate for DMA */
897 if (dma_channel) {
898 dma_channel->actual_len = 0L;
899 qh->segsize = len;
900
901 /* AUTOREQ is in a DMA register */
902 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
903 csr = musb_readw(hw_ep->regs,
904 MUSB_RXCSR);
905
906 /* unless caller treats short rx transfers as
907 * errors, we dare not queue multiple transfers.
908 */
909 dma_ok = dma_controller->channel_program(
910 dma_channel, packet_sz,
911 !(urb->transfer_flags
912 & URB_SHORT_NOT_OK),
913 urb->transfer_dma,
914 qh->segsize);
915 if (!dma_ok) {
916 dma_controller->channel_release(
917 dma_channel);
918 hw_ep->rx_channel = NULL;
919 dma_channel = NULL;
920 } else
921 csr |= MUSB_RXCSR_DMAENAB;
922 }
923 }
924
925 csr |= MUSB_RXCSR_H_REQPKT;
926 DBG(7, "RXCSR%d := %04x\n", epnum, csr);
927 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
928 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
929 }
930}
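
/* Pattern used in both TX branches above: attempt DMA first, and when
 * channel_program() fails fall back to PIO by releasing the channel and
 * leaving load_count nonzero so the fifo gets loaded by hand:
 *
 *	if (!dma_controller->channel_program(dma_channel, ...)) {
 *		dma_controller->channel_release(dma_channel);
 *		hw_ep->tx_channel = NULL;
 *		dma_channel = NULL;	// PIO path below takes over
 *	}
 */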
931
932
933/*
934 * Service the default endpoint (ep0) as host.
935 * Return true until it's time to start the status stage.
936 */
937static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
938{
939 bool more = false;
940 u8 *fifo_dest = NULL;
941 u16 fifo_count = 0;
942 struct musb_hw_ep *hw_ep = musb->control_ep;
943 struct musb_qh *qh = hw_ep->in_qh;
944 struct usb_ctrlrequest *request;
945
946 switch (musb->ep0_stage) {
947 case MUSB_EP0_IN:
948 fifo_dest = urb->transfer_buffer + urb->actual_length;
949 fifo_count = min(len, ((u16) (urb->transfer_buffer_length
950 - urb->actual_length)));
951 if (fifo_count < len)
952 urb->status = -EOVERFLOW;
953
954 musb_read_fifo(hw_ep, fifo_count, fifo_dest);
955
956 urb->actual_length += fifo_count;
957 if (len < qh->maxpacket) {
958 /* always terminate on short read; it's
959 * rarely reported as an error.
960 */
961 } else if (urb->actual_length <
962 urb->transfer_buffer_length)
963 more = true;
964 break;
965 case MUSB_EP0_START:
966 request = (struct usb_ctrlrequest *) urb->setup_packet;
967
968 if (!request->wLength) {
969 DBG(4, "start no-DATA\n");
970 break;
971 } else if (request->bRequestType & USB_DIR_IN) {
972 DBG(4, "start IN-DATA\n");
973 musb->ep0_stage = MUSB_EP0_IN;
974 more = true;
975 break;
976 } else {
977 DBG(4, "start OUT-DATA\n");
978 musb->ep0_stage = MUSB_EP0_OUT;
979 more = true;
980 }
981 /* FALLTHROUGH */
982 case MUSB_EP0_OUT:
983 fifo_count = min(qh->maxpacket, ((u16)
984 (urb->transfer_buffer_length
985 - urb->actual_length)));
986
987 if (fifo_count) {
988 fifo_dest = (u8 *) (urb->transfer_buffer
989 + urb->actual_length);
990 DBG(3, "Sending %d bytes to %p\n",
991 fifo_count, fifo_dest);
992 musb_write_fifo(hw_ep, fifo_count, fifo_dest);
993
994 urb->actual_length += fifo_count;
995 more = true;
996 }
997 break;
998 default:
999 ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1000 break;
1001 }
1002
1003 return more;
1004}
1005
1006/*
1007 * Handle the default endpoint interrupt as host. Called only at IRQ time,
1008 * from the LinuxIsr() interrupt service routine.
1009 *
1010 * called with controller irqlocked
1011 */
1012irqreturn_t musb_h_ep0_irq(struct musb *musb)
1013{
1014 struct urb *urb;
1015 u16 csr, len;
1016 int status = 0;
1017 void __iomem *mbase = musb->mregs;
1018 struct musb_hw_ep *hw_ep = musb->control_ep;
1019 void __iomem *epio = hw_ep->regs;
1020 struct musb_qh *qh = hw_ep->in_qh;
1021 bool complete = false;
1022 irqreturn_t retval = IRQ_NONE;
1023
1024 /* ep0 only has one queue, "in" */
1025 urb = next_urb(qh);
1026
1027 musb_ep_select(mbase, 0);
1028 csr = musb_readw(epio, MUSB_CSR0);
1029 len = (csr & MUSB_CSR0_RXPKTRDY)
1030 ? musb_readb(epio, MUSB_COUNT0)
1031 : 0;
1032
1033 DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1034 csr, qh, len, urb, musb->ep0_stage);
1035
1036 /* if we just did status stage, we are done */
1037 if (MUSB_EP0_STATUS == musb->ep0_stage) {
1038 retval = IRQ_HANDLED;
1039 complete = true;
1040 }
1041
1042 /* prepare status */
1043 if (csr & MUSB_CSR0_H_RXSTALL) {
1044 DBG(6, "STALLING ENDPOINT\n");
1045 status = -EPIPE;
1046
1047 } else if (csr & MUSB_CSR0_H_ERROR) {
1048 DBG(2, "no response, csr0 %04x\n", csr);
1049 status = -EPROTO;
1050
1051 } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1052 DBG(2, "control NAK timeout\n");
1053
1054 /* NOTE: this code path would be a good place to PAUSE a
1055 * control transfer, if another one is queued, so that
1056 * ep0 is more likely to stay busy.
1057 *
1058 * if (qh->ring.next != &musb->control), then
1059 * we have a candidate... NAKing is *NOT* an error
1060 */
1061 musb_writew(epio, MUSB_CSR0, 0);
1062 retval = IRQ_HANDLED;
1063 }
1064
1065 if (status) {
1066 DBG(6, "aborting\n");
1067 retval = IRQ_HANDLED;
1068 if (urb)
1069 urb->status = status;
1070 complete = true;
1071
1072 /* use the proper sequence to abort the transfer */
1073 if (csr & MUSB_CSR0_H_REQPKT) {
1074 csr &= ~MUSB_CSR0_H_REQPKT;
1075 musb_writew(epio, MUSB_CSR0, csr);
1076 csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1077 musb_writew(epio, MUSB_CSR0, csr);
1078 } else {
1079 csr |= MUSB_CSR0_FLUSHFIFO;
1080 musb_writew(epio, MUSB_CSR0, csr);
1081 musb_writew(epio, MUSB_CSR0, csr);
1082 csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1083 musb_writew(epio, MUSB_CSR0, csr);
1084 }
1085
1086 musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1087
1088 /* clear it */
1089 musb_writew(epio, MUSB_CSR0, 0);
1090 }
1091
1092 if (unlikely(!urb)) {
1093 /* stop endpoint since we have no place for its data, this
1094 * SHOULD NEVER HAPPEN! */
1095 ERR("no URB for end 0\n");
1096
1097 musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1098 musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1099 musb_writew(epio, MUSB_CSR0, 0);
1100
1101 goto done;
1102 }
1103
1104 if (!complete) {
1105 /* call common logic and prepare response */
1106 if (musb_h_ep0_continue(musb, len, urb)) {
1107 /* more packets required */
1108 csr = (MUSB_EP0_IN == musb->ep0_stage)
1109 ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1110 } else {
1111 /* data transfer complete; perform status phase */
1112 if (usb_pipeout(urb->pipe)
1113 || !urb->transfer_buffer_length)
1114 csr = MUSB_CSR0_H_STATUSPKT
1115 | MUSB_CSR0_H_REQPKT;
1116 else
1117 csr = MUSB_CSR0_H_STATUSPKT
1118 | MUSB_CSR0_TXPKTRDY;
1119
1120 /* flag status stage */
1121 musb->ep0_stage = MUSB_EP0_STATUS;
1122
1123 DBG(5, "ep0 STATUS, csr %04x\n", csr);
1124
1125 }
1126 musb_writew(epio, MUSB_CSR0, csr);
1127 retval = IRQ_HANDLED;
1128 } else
1129 musb->ep0_stage = MUSB_EP0_IDLE;
1130
1131 /* call completion handler if done */
1132 if (complete)
1133 musb_advance_schedule(musb, urb, hw_ep, 1);
1134done:
1135 return retval;
1136}
1137
1138
1139#ifdef CONFIG_USB_INVENTRA_DMA
1140
1141/* Host side TX (OUT) using Mentor DMA works as follows:
1142 submit_urb ->
1143 - if queue was empty, Program Endpoint
1144 - ... which starts DMA to fifo in mode 1 or 0
1145
1146 DMA Isr (transfer complete) -> TxAvail()
1147 - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
1148 only in musb_cleanup_urb)
1149 - TxPktRdy has to be set in mode 0 or for
1150 short packets in mode 1.
1151*/
1152
1153#endif
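
/* Sketch: the mode decision the note above refers to, as made in
 * musb_ep_program() for Mentor DMA TX:
 *
 *	qh->segsize = min(len, dma_channel->max_len);
 *	dma_channel->desired_mode = (qh->segsize > packet_sz) ? 1 : 0;
 *	// mode 0: single-packet requests, TxPktRdy set by software
 *	// mode 1: multi-packet transfers, AUTOSET + DMAMODE take over
 */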
1154
1155/* Service a Tx-Available or dma completion irq for the endpoint */
1156void musb_host_tx(struct musb *musb, u8 epnum)
1157{
1158 int pipe;
1159 bool done = false;
1160 u16 tx_csr;
1161 size_t wLength = 0;
1162 u8 *buf = NULL;
1163 struct urb *urb;
1164 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1165 void __iomem *epio = hw_ep->regs;
1166 struct musb_qh *qh = hw_ep->out_qh;
1167 u32 status = 0;
1168 void __iomem *mbase = musb->mregs;
1169 struct dma_channel *dma;
1170
1171 urb = next_urb(qh);
1172
1173 musb_ep_select(mbase, epnum);
1174 tx_csr = musb_readw(epio, MUSB_TXCSR);
1175
1176 /* with CPPI, DMA sometimes triggers "extra" irqs */
1177 if (!urb) {
1178 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1179 goto finish;
1180 }
1181
1182 pipe = urb->pipe;
1183 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1184 DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1185 dma ? ", dma" : "");
1186
1187 /* check for errors */
1188 if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1189 /* dma was disabled, fifo flushed */
1190 DBG(3, "TX end %d stall\n", epnum);
1191
1192 /* stall; record URB status */
1193 status = -EPIPE;
1194
1195 } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1196 /* (NON-ISO) dma was disabled, fifo flushed */
1197 DBG(3, "TX 3strikes on ep=%d\n", epnum);
1198
1199 status = -ETIMEDOUT;
1200
1201 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1202 DBG(6, "TX end=%d device not responding\n", epnum);
1203
1204 /* NOTE: this code path would be a good place to PAUSE a
1205 * transfer, if there's some other (nonperiodic) tx urb
1206 * that could use this fifo. (dma complicates it...)
1207 *
1208 * if (bulk && qh->ring.next != &musb->out_bulk), then
1209 * we have a candidate... NAKing is *NOT* an error
1210 */
1211 musb_ep_select(mbase, epnum);
1212 musb_writew(epio, MUSB_TXCSR,
1213 MUSB_TXCSR_H_WZC_BITS
1214 | MUSB_TXCSR_TXPKTRDY);
1215 goto finish;
1216 }
1217
1218 if (status) {
1219 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1220 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1221 (void) musb->dma_controller->channel_abort(dma);
1222 }
1223
1224 /* do the proper sequence to abort the transfer in the
1225 * usb core; the dma engine should already be stopped.
1226 */
1227 musb_h_tx_flush_fifo(hw_ep);
1228 tx_csr &= ~(MUSB_TXCSR_AUTOSET
1229 | MUSB_TXCSR_DMAENAB
1230 | MUSB_TXCSR_H_ERROR
1231 | MUSB_TXCSR_H_RXSTALL
1232 | MUSB_TXCSR_H_NAKTIMEOUT
1233 );
1234
1235 musb_ep_select(mbase, epnum);
1236 musb_writew(epio, MUSB_TXCSR, tx_csr);
1237 /* REVISIT may need to clear FLUSHFIFO ... */
1238 musb_writew(epio, MUSB_TXCSR, tx_csr);
1239 musb_writeb(epio, MUSB_TXINTERVAL, 0);
1240
1241 done = true;
1242 }
1243
1244 /* second cppi case */
1245 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1246 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1247 goto finish;
1248
1249 }
1250
1251 /* REVISIT this looks wrong... */
1252 if (!status || dma || usb_pipeisoc(pipe)) {
1253 if (dma)
1254 wLength = dma->actual_len;
1255 else
1256 wLength = qh->segsize;
1257 qh->offset += wLength;
1258
1259 if (usb_pipeisoc(pipe)) {
1260 struct usb_iso_packet_descriptor *d;
1261
1262 d = urb->iso_frame_desc + qh->iso_idx;
1263 d->actual_length = qh->segsize;
1264 if (++qh->iso_idx >= urb->number_of_packets) {
1265 done = true;
1266 } else {
1267 d++;
1268 buf = urb->transfer_buffer + d->offset;
1269 wLength = d->length;
1270 }
1271 } else if (dma) {
1272 done = true;
1273 } else {
1274 /* see if we need to send more data, or ZLP */
1275 if (qh->segsize < qh->maxpacket)
1276 done = true;
1277 else if (qh->offset == urb->transfer_buffer_length
1278 && !(urb->transfer_flags
1279 & URB_ZERO_PACKET))
1280 done = true;
1281 if (!done) {
1282 buf = urb->transfer_buffer
1283 + qh->offset;
1284 wLength = urb->transfer_buffer_length
1285 - qh->offset;
1286 }
1287 }
1288 }
1289
1290 /* urb->status != -EINPROGRESS means request has been faulted,
1291 * so we must abort this transfer after cleanup
1292 */
1293 if (urb->status != -EINPROGRESS) {
1294 done = true;
1295 if (status == 0)
1296 status = urb->status;
1297 }
1298
1299 if (done) {
1300 /* set status */
1301 urb->status = status;
1302 urb->actual_length = qh->offset;
1303 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1304
1305 } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
1306 /* WARN_ON(!buf); */
1307
1308 /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
1309 * (and presumably, fifo is not half-full) we should write TWO
1310 * packets before updating TXCSR ... other docs disagree ...
1311 */
1312 /* PIO: start next packet in this URB */
1313 wLength = min(qh->maxpacket, (u16) wLength);
1314 musb_write_fifo(hw_ep, wLength, buf);
1315 qh->segsize = wLength;
1316
1317 musb_ep_select(mbase, epnum);
1318 musb_writew(epio, MUSB_TXCSR,
1319 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1320 } else
1321 DBG(1, "not complete, but dma enabled?\n");
1322
1323finish:
1324 return;
1325}
1326
1327
1328#ifdef CONFIG_USB_INVENTRA_DMA
1329
1330/* Host side RX (IN) using Mentor DMA works as follows:
1331 submit_urb ->
1332 - if queue was empty, ProgramEndpoint
1333 - first IN token is sent out (by setting ReqPkt)
1334 LinuxIsr -> RxReady()
1335 /\ => first packet is received
1336 | - Set in mode 0 (DmaEnab, ~ReqPkt)
1337 | -> DMA Isr (transfer complete) -> RxReady()
1338 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1339 | - if urb not complete, send next IN token (ReqPkt)
1340 | | else complete urb.
1341 | |
1342 ---------------------------
1343 *
1344 * Nuances of mode 1:
1345 * For short packets, no ack (+RxPktRdy) is sent automatically
1346 * (even if AutoClear is ON)
1347 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) are sent
1348 * automatically => major problem, as collecting the next packet becomes
1349 * difficult. Hence mode 1 is not used.
1350 *
1351 * REVISIT
1352 * All we care about at this driver level is that
1353 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1354 * (b) termination conditions are: short RX, or buffer full;
1355 * (c) fault modes include
1356 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1357 * (and that endpoint's dma queue stops immediately)
1358 * - overflow (full, PLUS more bytes in the terminal packet)
1359 *
1360 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1361 * thus be a great candidate for using mode 1 ... for all but the
1362 * last packet of one URB's transfer.
1363 */
1364
1365#endif
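
/* Illustration only (names hypothetical): the termination rules from the
 * note above reduce to a single predicate for mode-0 DMA RX, mirroring
 * the "done" test later in musb_host_rx():
 */
static inline bool musb_rx_xfer_done(size_t received, size_t buf_len,
		size_t last_packet, u16 maxpacket)
{
	/* done when the urb buffer is full, or on a short (terminal) packet */
	return received >= buf_len || last_packet < maxpacket;
}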
1366
1367/*
1368 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1369 * and high-bandwidth IN transfer cases.
1370 */
1371void musb_host_rx(struct musb *musb, u8 epnum)
1372{
1373 struct urb *urb;
1374 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1375 void __iomem *epio = hw_ep->regs;
1376 struct musb_qh *qh = hw_ep->in_qh;
1377 size_t xfer_len;
1378 void __iomem *mbase = musb->mregs;
1379 int pipe;
1380 u16 rx_csr, val;
1381 bool iso_err = false;
1382 bool done = false;
1383 u32 status;
1384 struct dma_channel *dma;
1385
1386 musb_ep_select(mbase, epnum);
1387
1388 urb = next_urb(qh);
1389 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1390 status = 0;
1391 xfer_len = 0;
1392
1393 rx_csr = musb_readw(epio, MUSB_RXCSR);
1394 val = rx_csr;
1395
1396 if (unlikely(!urb)) {
1397 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1398 * usbtest #11 (unlinks) triggers it regularly, sometimes
1399 * with fifo full. (Only with DMA??)
1400 */
1401 DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1402 musb_readw(epio, MUSB_RXCOUNT));
1403 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1404 return;
1405 }
1406
1407 pipe = urb->pipe;
1408
1409 DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1410 epnum, rx_csr, urb->actual_length,
1411 dma ? dma->actual_len : 0);
1412
1413 /* check for errors, concurrent stall & unlink is not really
1414 * handled yet! */
1415 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1416 DBG(3, "RX end %d STALL\n", epnum);
1417
1418 /* stall; record URB status */
1419 status = -EPIPE;
1420
1421 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1422 DBG(3, "end %d RX proto error\n", epnum);
1423
1424 status = -EPROTO;
1425 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1426
1427 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1428
1429 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1430 /* NOTE this code path would be a good place to PAUSE a
1431 * transfer, if there's some other (nonperiodic) rx urb
1432 * that could use this fifo. (dma complicates it...)
1433 *
1434 * if (bulk && qh->ring.next != &musb->in_bulk), then
1435 * we have a candidate... NAKing is *NOT* an error
1436 */
1437 DBG(6, "RX end %d NAK timeout\n", epnum);
1438 musb_ep_select(mbase, epnum);
1439 musb_writew(epio, MUSB_RXCSR,
1440 MUSB_RXCSR_H_WZC_BITS
1441 | MUSB_RXCSR_H_REQPKT);
1442
1443 goto finish;
1444 } else {
1445 DBG(4, "RX end %d ISO data error\n", epnum);
1446 /* packet error reported later */
1447 iso_err = true;
1448 }
1449 }
1450
1451 /* faults abort the transfer */
1452 if (status) {
1453 /* clean up dma and collect transfer count */
1454 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1455 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1456 (void) musb->dma_controller->channel_abort(dma);
1457 xfer_len = dma->actual_len;
1458 }
1459 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1460 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1461 done = true;
1462 goto finish;
1463 }
1464
1465 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1466 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1467 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1468 goto finish;
1469 }
1470
1471 /* thorough shutdown for now ... given more precise fault handling
1472 * and better queueing support, we might keep a DMA pipeline going
1473 * while processing this irq for earlier completions.
1474 */
1475
1476 /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1477
1478#ifndef CONFIG_USB_INVENTRA_DMA
1479 if (rx_csr & MUSB_RXCSR_H_REQPKT) {
1480 /* REVISIT this happened for a while on some short reads...
1481 * the cleanup still needs investigation... looks bad...
1482 * and also duplicates dma cleanup code above ... plus,
1483 * shouldn't this be the "half full" double buffer case?
1484 */
1485 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1486 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1487 (void) musb->dma_controller->channel_abort(dma);
1488 xfer_len = dma->actual_len;
1489 done = true;
1490 }
1491
1492 DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1493 xfer_len, dma ? ", dma" : "");
1494 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1495
1496 musb_ep_select(mbase, epnum);
1497 musb_writew(epio, MUSB_RXCSR,
1498 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1499 }
1500#endif
1501 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1502 xfer_len = dma->actual_len;
1503
1504 val &= ~(MUSB_RXCSR_DMAENAB
1505 | MUSB_RXCSR_H_AUTOREQ
1506 | MUSB_RXCSR_AUTOCLEAR
1507 | MUSB_RXCSR_RXPKTRDY);
1508 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1509
1510#ifdef CONFIG_USB_INVENTRA_DMA
1511		/* done if urb buffer is full or a short packet was received */
1512 done = (urb->actual_length + xfer_len >=
1513 urb->transfer_buffer_length
1514 || dma->actual_len < qh->maxpacket);
1515
1516 /* send IN token for next packet, without AUTOREQ */
1517 if (!done) {
1518 val |= MUSB_RXCSR_H_REQPKT;
1519 musb_writew(epio, MUSB_RXCSR,
1520 MUSB_RXCSR_H_WZC_BITS | val);
1521 }
1522
1523 DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1524 done ? "off" : "reset",
1525 musb_readw(epio, MUSB_RXCSR),
1526 musb_readw(epio, MUSB_RXCOUNT));
1527#else
1528 done = true;
1529#endif
1530 } else if (urb->status == -EINPROGRESS) {
1531 /* if no errors, be sure a packet is ready for unloading */
1532 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1533 status = -EPROTO;
1534 ERR("Rx interrupt with no errors or packet!\n");
1535
1536 /* FIXME this is another "SHOULD NEVER HAPPEN" */
1537
1538/* SCRUB (RX) */
1539 /* do the proper sequence to abort the transfer */
1540 musb_ep_select(mbase, epnum);
1541 val &= ~MUSB_RXCSR_H_REQPKT;
1542 musb_writew(epio, MUSB_RXCSR, val);
1543 goto finish;
1544 }
1545
1546 /* we are expecting IN packets */
1547#ifdef CONFIG_USB_INVENTRA_DMA
1548 if (dma) {
1549 struct dma_controller *c;
1550 u16 rx_count;
1551 int ret;
1552
1553 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1554
1555 DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
1556 epnum, rx_count,
1557 urb->transfer_dma
1558 + urb->actual_length,
1559 qh->offset,
1560 urb->transfer_buffer_length);
1561
1562 c = musb->dma_controller;
1563
1564 dma->desired_mode = 0;
1565#ifdef USE_MODE1
1566 /* because of the issue below, mode 1 will
1567 * only rarely behave with correct semantics.
1568 */
1569 if ((urb->transfer_flags &
1570 URB_SHORT_NOT_OK)
1571 && (urb->transfer_buffer_length -
1572 urb->actual_length)
1573 > qh->maxpacket)
1574 dma->desired_mode = 1;
1575#endif
1576
1577/* Disadvantage of using mode 1:
1578 * It's basically usable only for mass storage class; essentially all
1579 * other protocols also terminate transfers on short packets.
1580 *
1581 * Details:
1582 * An extra IN token is sent at the end of the transfer (due to AUTOREQ).
1583 * If you try to use mode 1 for (transfer_buffer_length - 512), and try
1584 * to use the extra IN token to grab the last packet using mode 0, the
1585 * problem is that you cannot be sure when the device will send the last
1586 * packet and set RxPktRdy. Sometimes the packet is received too soon,
1587 * so that it gets lost when RxCSR is re-set at the end of the mode 1
1588 * transfer, while sometimes it is received just a little late, so that
1589 * if you try to configure for mode 0 soon after the mode 1 transfer
1590 * completes, you will find rxcount 0. You might think: why not just
1591 * wait for an interrupt when the packet is received? You won't get any!
1592 */
1593
1594 val = musb_readw(epio, MUSB_RXCSR);
1595 val &= ~MUSB_RXCSR_H_REQPKT;
1596
1597 if (dma->desired_mode == 0)
1598 val &= ~MUSB_RXCSR_H_AUTOREQ;
1599 else
1600 val |= MUSB_RXCSR_H_AUTOREQ;
1601 val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
1602
1603 musb_writew(epio, MUSB_RXCSR,
1604 MUSB_RXCSR_H_WZC_BITS | val);
1605
1606			/* REVISIT: if actual_length != 0,
1607 * transfer_buffer_length needs to be
1608 * adjusted first...
1609 */
1610 ret = c->channel_program(
1611 dma, qh->maxpacket,
1612 dma->desired_mode,
1613 urb->transfer_dma
1614 + urb->actual_length,
1615 (dma->desired_mode == 0)
1616 ? rx_count
1617 : urb->transfer_buffer_length);
1618
1619 if (!ret) {
1620 c->channel_release(dma);
1621 hw_ep->rx_channel = NULL;
1622 dma = NULL;
1623 /* REVISIT reset CSR */
1624 }
1625 }
1626#endif /* Mentor DMA */
1627
1628 if (!dma) {
1629 done = musb_host_packet_rx(musb, urb,
1630 epnum, iso_err);
1631 DBG(6, "read %spacket\n", done ? "last " : "");
1632 }
1633 }
1634
1635 if (dma && usb_pipeisoc(pipe)) {
1636 struct usb_iso_packet_descriptor *d;
1637 int iso_stat = status;
1638
1639 d = urb->iso_frame_desc + qh->iso_idx;
1640 d->actual_length += xfer_len;
1641 if (iso_err) {
1642 iso_stat = -EILSEQ;
1643 urb->error_count++;
1644 }
1645 d->status = iso_stat;
1646 }
1647
1648finish:
1649 urb->actual_length += xfer_len;
1650 qh->offset += xfer_len;
1651 if (done) {
1652 if (urb->status == -EINPROGRESS)
1653 urb->status = status;
1654 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1655 }
1656}
1657
1658/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1659 * the software schedule associates multiple such nodes with a given
1660 * host side hardware endpoint + direction; scheduling may activate
1661 * that hardware endpoint.
1662 */
1663static int musb_schedule(
1664 struct musb *musb,
1665 struct musb_qh *qh,
1666 int is_in)
1667{
1668 int idle;
1669 int best_diff;
1670 int best_end, epnum;
1671 struct musb_hw_ep *hw_ep = NULL;
1672 struct list_head *head = NULL;
1673
1674 /* use fixed hardware for control and bulk */
1675 switch (qh->type) {
1676 case USB_ENDPOINT_XFER_CONTROL:
1677 head = &musb->control;
1678 hw_ep = musb->control_ep;
1679 break;
1680 case USB_ENDPOINT_XFER_BULK:
1681 hw_ep = musb->bulk_ep;
1682 if (is_in)
1683 head = &musb->in_bulk;
1684 else
1685 head = &musb->out_bulk;
1686 break;
1687 }
1688 if (head) {
1689 idle = list_empty(head);
1690 list_add_tail(&qh->ring, head);
1691 goto success;
1692 }
1693
1694 /* else, periodic transfers get muxed to other endpoints */
1695
1696 /* FIXME this doesn't consider direction, so it can only
1697 * work for one half of the endpoint hardware, and assumes
1698 * the previous cases handled all non-shared endpoints...
1699 */
1700
1701 /* we know this qh hasn't been scheduled, so all we need to do
1702 * is choose which hardware endpoint to put it on ...
1703 *
1704 * REVISIT what we really want here is a regular schedule tree
1705 * like e.g. OHCI uses, but for now musb->periodic is just an
1706 * array of the _single_ logical endpoint associated with a
1707 * given physical one (identity mapping logical->physical).
1708 *
1709 * that simplistic approach makes TT scheduling a lot simpler;
1710 * there is none, and thus none of its complexity...
1711 */
1712 best_diff = 4096;
1713 best_end = -1;
1714
1715 for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
1716 int diff;
1717
1718 if (musb->periodic[epnum])
1719 continue;
1720 hw_ep = &musb->endpoints[epnum];
1721 if (hw_ep == musb->bulk_ep)
1722 continue;
1723
1724 if (is_in)
1725 diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
1726 else
1727 diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
1728
1729 if (diff > 0 && best_diff > diff) {
1730 best_diff = diff;
1731 best_end = epnum;
1732 }
1733 }
1734 if (best_end < 0)
1735 return -ENOSPC;
1736
1737 idle = 1;
1738 hw_ep = musb->endpoints + best_end;
1739 musb->periodic[best_end] = qh;
1740 DBG(4, "qh %p periodic slot %d\n", qh, best_end);
1741success:
1742 qh->hw_ep = hw_ep;
1743 qh->hep->hcpriv = qh;
1744 if (idle)
1745 musb_start_urb(musb, is_in, qh);
1746 return 0;
1747}
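
/* The best-fit pass above, isolated as a sketch (all names hypothetical;
 * fifo_size[] stands in for max_packet_sz_rx/tx): the qh lands on the
 * free endpoint whose FIFO exceeds its maxpacket by the least.
 */
static int pick_periodic_endpoint(const u16 *fifo_size, int nr_endpoints,
		u16 maxpacket)
{
	int epnum, best_end = -1, best_diff = 4096;

	for (epnum = 1; epnum < nr_endpoints; epnum++) {
		int diff = fifo_size[epnum] - maxpacket;

		/* must fit (strictly), and prefer the tightest fit */
		if (diff > 0 && diff < best_diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	return best_end;	/* -1 maps to -ENOSPC in the caller */
}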
1748
1749static int musb_urb_enqueue(
1750 struct usb_hcd *hcd,
1751 struct urb *urb,
1752 gfp_t mem_flags)
1753{
1754 unsigned long flags;
1755 struct musb *musb = hcd_to_musb(hcd);
1756 struct usb_host_endpoint *hep = urb->ep;
1757 struct musb_qh *qh = hep->hcpriv;
1758 struct usb_endpoint_descriptor *epd = &hep->desc;
1759 int ret;
1760 unsigned type_reg;
1761 unsigned interval;
1762
1763 /* host role must be active */
1764 if (!is_host_active(musb) || !musb->is_active)
1765 return -ENODEV;
1766
1767 spin_lock_irqsave(&musb->lock, flags);
1768 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1769 spin_unlock_irqrestore(&musb->lock, flags);
1770 if (ret)
1771 return ret;
1772
1773 /* DMA mapping was already done, if needed, and this urb is on
1774 * hep->urb_list ... so there's little to do unless hep wasn't
1775 * yet scheduled onto a live qh.
1776 *
1777 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1778 * disabled, testing for empty qh->ring and avoiding qh setup costs
1779 * except for the first urb queued after a config change.
1780 */
1781 if (qh) {
1782 urb->hcpriv = qh;
1783 return 0;
1784 }
1785
1786 /* Allocate and initialize qh, minimizing the work done each time
1787 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
1788 *
1789 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1790 * for bugs in other kernel code to break this driver...
1791 */
1792 qh = kzalloc(sizeof *qh, mem_flags);
1793 if (!qh) {
1794 usb_hcd_unlink_urb_from_ep(hcd, urb);
1795 return -ENOMEM;
1796 }
1797
1798 qh->hep = hep;
1799 qh->dev = urb->dev;
1800 INIT_LIST_HEAD(&qh->ring);
1801 qh->is_ready = 1;
1802
1803 qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1804
1805 /* no high bandwidth support yet */
1806 if (qh->maxpacket & ~0x7ff) {
1807 ret = -EMSGSIZE;
1808 goto done;
1809 }
1810
1811 qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1812 qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1813
1814 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1815 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1816
1817 /* precompute rxtype/txtype/type0 register */
1818 type_reg = (qh->type << 4) | qh->epnum;
1819 switch (urb->dev->speed) {
1820 case USB_SPEED_LOW:
1821 type_reg |= 0xc0;
1822 break;
1823 case USB_SPEED_FULL:
1824 type_reg |= 0x80;
1825 break;
1826 default:
1827 type_reg |= 0x40;
1828 }
1829 qh->type_reg = type_reg;
1830
1831 /* precompute rxinterval/txinterval register */
1832 interval = min((u8)16, epd->bInterval); /* log encoding */
1833 switch (qh->type) {
1834 case USB_ENDPOINT_XFER_INT:
1835 /* fullspeed uses linear encoding */
1836 if (USB_SPEED_FULL == urb->dev->speed) {
1837 interval = epd->bInterval;
1838 if (!interval)
1839 interval = 1;
1840 }
1841 /* FALLTHROUGH */
1842 case USB_ENDPOINT_XFER_ISOC:
1843 /* iso always uses log encoding */
1844 break;
1845 default:
1846 /* REVISIT we actually want to use NAK limits, hinting to the
1847 * transfer scheduling logic to try some other qh, e.g. try
1848 * for 2 msec first:
1849 *
1850 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
1851 *
1852 * The downside of disabling this is that transfer scheduling
1853 * gets VERY unfair for nonperiodic transfers; a misbehaving
1854 * peripheral could make that hurt. Or for reads, one that's
1855 * perfectly normal: network and other drivers keep reads
1856 * posted at all times, having one pending for a week should
1857 * be perfectly safe.
1858 *
1859		 * The upside of disabling it is avoiding the need for transfer
1860		 * scheduling code to put this transfer aside for a while.
1861 */
1862 interval = 0;
1863 }
1864 qh->intv_reg = interval;
1865
1866 /* precompute addressing for external hub/tt ports */
1867 if (musb->is_multipoint) {
1868 struct usb_device *parent = urb->dev->parent;
1869
1870 if (parent != hcd->self.root_hub) {
1871 qh->h_addr_reg = (u8) parent->devnum;
1872
1873 /* set up tt info if needed */
1874 if (urb->dev->tt) {
1875 qh->h_port_reg = (u8) urb->dev->ttport;
1876 qh->h_addr_reg |= 0x80;
1877 }
1878 }
1879 }
1880
1881 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
1882 * until we get real dma queues (with an entry for each urb/buffer),
1883 * we only have work to do in the former case.
1884 */
1885 spin_lock_irqsave(&musb->lock, flags);
1886 if (hep->hcpriv) {
1887 /* some concurrent activity submitted another urb to hep...
1888 * odd, rare, error prone, but legal.
1889 */
1890 kfree(qh);
1891 ret = 0;
1892 } else
1893 ret = musb_schedule(musb, qh,
1894 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1895
1896 if (ret == 0) {
1897 urb->hcpriv = qh;
1898 /* FIXME set urb->start_frame for iso/intr, it's tested in
1899 * musb_start_urb(), but otherwise only konicawc cares ...
1900 */
1901 }
1902 spin_unlock_irqrestore(&musb->lock, flags);
1903
1904done:
1905 if (ret != 0) {
1906 usb_hcd_unlink_urb_from_ep(hcd, urb);
1907 kfree(qh);
1908 }
1909 return ret;
1910}
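
/* For the log-encoded intervals computed above (everything except
 * full-speed interrupt, which stays linear), a register value N means
 * one poll every 2^(N-1) (micro)frames.  A hypothetical helper making
 * that mapping explicit:
 */
static inline unsigned musb_log_interval_to_frames(u8 intv_reg)
{
	/* intv_reg == 0 leaves the NAK timeout / polling disabled */
	return intv_reg ? 1U << (intv_reg - 1) : 0;
}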
1911
1912
1913/*
1914 * Abort a transfer that's at the head of a hardware queue.
1915 * Called with controller locked, irqs blocked.
1916 * That hardware queue advances to the next transfer, unless prevented.
1917 */
1918static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1919{
1920 struct musb_hw_ep *ep = qh->hw_ep;
1921 void __iomem *epio = ep->regs;
1922 unsigned hw_end = ep->epnum;
1923 void __iomem *regs = ep->musb->mregs;
1924 u16 csr;
1925 int status = 0;
1926
1927 musb_ep_select(regs, hw_end);
1928
1929 if (is_dma_capable()) {
1930 struct dma_channel *dma;
1931
1932 dma = is_in ? ep->rx_channel : ep->tx_channel;
1933 if (dma) {
1934 status = ep->musb->dma_controller->channel_abort(dma);
1935 DBG(status ? 1 : 3,
1936 "abort %cX%d DMA for urb %p --> %d\n",
1937 is_in ? 'R' : 'T', ep->epnum,
1938 urb, status);
1939 urb->actual_length += dma->actual_len;
1940 }
1941 }
1942
1943 /* turn off DMA requests, discard state, stop polling ... */
1944 if (is_in) {
1945 /* giveback saves bulk toggle */
1946 csr = musb_h_flush_rxfifo(ep, 0);
1947
1948 /* REVISIT we still get an irq; should likely clear the
1949 * endpoint's irq status here to avoid bogus irqs.
1950 * clearing that status is platform-specific...
1951 */
1952 } else {
1953 musb_h_tx_flush_fifo(ep);
1954 csr = musb_readw(epio, MUSB_TXCSR);
1955 csr &= ~(MUSB_TXCSR_AUTOSET
1956 | MUSB_TXCSR_DMAENAB
1957 | MUSB_TXCSR_H_RXSTALL
1958 | MUSB_TXCSR_H_NAKTIMEOUT
1959 | MUSB_TXCSR_H_ERROR
1960 | MUSB_TXCSR_TXPKTRDY);
1961 musb_writew(epio, MUSB_TXCSR, csr);
1962 /* REVISIT may need to clear FLUSHFIFO ... */
1963 musb_writew(epio, MUSB_TXCSR, csr);
1964 /* flush cpu writebuffer */
1965 csr = musb_readw(epio, MUSB_TXCSR);
1966 }
1967 if (status == 0)
1968 musb_advance_schedule(ep->musb, urb, ep, is_in);
1969 return status;
1970}
1971
1972static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1973{
1974 struct musb *musb = hcd_to_musb(hcd);
1975 struct musb_qh *qh;
1976 struct list_head *sched;
1977 unsigned long flags;
1978 int ret;
1979
1980 DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
1981 usb_pipedevice(urb->pipe),
1982 usb_pipeendpoint(urb->pipe),
1983 usb_pipein(urb->pipe) ? "in" : "out");
1984
1985 spin_lock_irqsave(&musb->lock, flags);
1986 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1987 if (ret)
1988 goto done;
1989
1990 qh = urb->hcpriv;
1991 if (!qh)
1992 goto done;
1993
1994 /* Any URB not actively programmed into endpoint hardware can be
1995 * immediately given back. Such an URB must be at the head of its
1996 * endpoint queue, unless someday we get real DMA queues. And even
1997 * then, it might not be known to the hardware...
1998 *
1999 * Otherwise abort current transfer, pending dma, etc.; urb->status
2000 * has already been updated. This is a synchronous abort; it'd be
2001 * OK to hold off until after some IRQ, though.
2002 */
2003 if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2004 ret = -EINPROGRESS;
2005 else {
2006 switch (qh->type) {
2007 case USB_ENDPOINT_XFER_CONTROL:
2008 sched = &musb->control;
2009 break;
2010 case USB_ENDPOINT_XFER_BULK:
2011 if (usb_pipein(urb->pipe))
2012 sched = &musb->in_bulk;
2013 else
2014 sched = &musb->out_bulk;
2015 break;
2016 default:
2017 /* REVISIT when we get a schedule tree, periodic
2018 * transfers won't always be at the head of a
2019 * singleton queue...
2020 */
2021 sched = NULL;
2022 break;
2023 }
2024 }
2025
2026 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2027 if (ret < 0 || (sched && qh != first_qh(sched))) {
2028 int ready = qh->is_ready;
2029
2030 ret = 0;
2031 qh->is_ready = 0;
2032 __musb_giveback(musb, urb, 0);
2033 qh->is_ready = ready;
2034 } else
2035 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2036done:
2037 spin_unlock_irqrestore(&musb->lock, flags);
2038 return ret;
2039}
2040
2041/* disable an endpoint */
2042static void
2043musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2044{
2045 u8 epnum = hep->desc.bEndpointAddress;
2046 unsigned long flags;
2047 struct musb *musb = hcd_to_musb(hcd);
2048 u8 is_in = epnum & USB_DIR_IN;
2049 struct musb_qh *qh = hep->hcpriv;
2050 struct urb *urb, *tmp;
2051 struct list_head *sched;
2052
2053 if (!qh)
2054 return;
2055
2056 spin_lock_irqsave(&musb->lock, flags);
2057
2058 switch (qh->type) {
2059 case USB_ENDPOINT_XFER_CONTROL:
2060 sched = &musb->control;
2061 break;
2062 case USB_ENDPOINT_XFER_BULK:
2063 if (is_in)
2064 sched = &musb->in_bulk;
2065 else
2066 sched = &musb->out_bulk;
2067 break;
2068 default:
2069 /* REVISIT when we get a schedule tree, periodic transfers
2070 * won't always be at the head of a singleton queue...
2071 */
2072 sched = NULL;
2073 break;
2074 }
2075
2076 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2077
2078 /* kick first urb off the hardware, if needed */
2079 qh->is_ready = 0;
2080 if (!sched || qh == first_qh(sched)) {
2081 urb = next_urb(qh);
2082
2083 /* make software (then hardware) stop ASAP */
2084 if (!urb->unlinked)
2085 urb->status = -ESHUTDOWN;
2086
2087 /* cleanup */
2088 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2089 } else
2090 urb = NULL;
2091
2092 /* then just nuke all the others */
2093 list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
2094 musb_giveback(qh, urb, -ESHUTDOWN);
2095
2096 spin_unlock_irqrestore(&musb->lock, flags);
2097}
2098
2099static int musb_h_get_frame_number(struct usb_hcd *hcd)
2100{
2101 struct musb *musb = hcd_to_musb(hcd);
2102
2103 return musb_readw(musb->mregs, MUSB_FRAME);
2104}
2105
2106static int musb_h_start(struct usb_hcd *hcd)
2107{
2108 struct musb *musb = hcd_to_musb(hcd);
2109
2110 /* NOTE: musb_start() is called when the hub driver turns
2111 * on port power, or when (OTG) peripheral starts.
2112 */
2113 hcd->state = HC_STATE_RUNNING;
2114 musb->port1_status = 0;
2115 return 0;
2116}
2117
2118static void musb_h_stop(struct usb_hcd *hcd)
2119{
2120 musb_stop(hcd_to_musb(hcd));
2121 hcd->state = HC_STATE_HALT;
2122}
2123
2124static int musb_bus_suspend(struct usb_hcd *hcd)
2125{
2126 struct musb *musb = hcd_to_musb(hcd);
2127
2128 if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
2129 return 0;
2130
2131 if (is_host_active(musb) && musb->is_active) {
2132 WARNING("trying to suspend as %s is_active=%i\n",
2133 otg_state_string(musb), musb->is_active);
2134 return -EBUSY;
2135 } else
2136 return 0;
2137}
2138
2139static int musb_bus_resume(struct usb_hcd *hcd)
2140{
2141 /* resuming child port does the work */
2142 return 0;
2143}
2144
2145const struct hc_driver musb_hc_driver = {
2146 .description = "musb-hcd",
2147 .product_desc = "MUSB HDRC host driver",
2148 .hcd_priv_size = sizeof(struct musb),
2149 .flags = HCD_USB2 | HCD_MEMORY,
2150
2151 /* not using irq handler or reset hooks from usbcore, since
2152 * those must be shared with peripheral code for OTG configs
2153 */
2154
2155 .start = musb_h_start,
2156 .stop = musb_h_stop,
2157
2158 .get_frame_number = musb_h_get_frame_number,
2159
2160 .urb_enqueue = musb_urb_enqueue,
2161 .urb_dequeue = musb_urb_dequeue,
2162 .endpoint_disable = musb_h_disable,
2163
2164 .hub_status_data = musb_hub_status_data,
2165 .hub_control = musb_hub_control,
2166 .bus_suspend = musb_bus_suspend,
2167 .bus_resume = musb_bus_resume,
2168 /* .start_port_reset = NULL, */
2169 /* .hub_irq_enable = NULL, */
2170};
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 000000000000..77bcdb9d5b32
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
1/*
2 * MUSB OTG driver host defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef _MUSB_HOST_H
36#define _MUSB_HOST_H
37
38static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
39{
40 return container_of((void *) musb, struct usb_hcd, hcd_priv);
41}
42
43static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
44{
45 return (struct musb *) (hcd->hcd_priv);
46}
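
/* These two accessors invert each other: usbcore allocates hcd_priv_size
 * (== sizeof(struct musb)) of private space inside the usb_hcd, so for
 * any hcd created this way, musb_to_hcd(hcd_to_musb(hcd)) == hcd.
 * (Statement of the invariant only; the driver adds no runtime check.)
 */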
47
48/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
49struct musb_qh {
50 struct usb_host_endpoint *hep; /* usbcore info */
51 struct usb_device *dev;
52 struct musb_hw_ep *hw_ep; /* current binding */
53
54 struct list_head ring; /* of musb_qh */
55 /* struct musb_qh *next; */ /* for periodic tree */
56
57 unsigned offset; /* in urb->transfer_buffer */
58 unsigned segsize; /* current xfer fragment */
59
60 u8 type_reg; /* {rx,tx} type register */
61 u8 intv_reg; /* {rx,tx} interval register */
62 u8 addr_reg; /* device address register */
63 u8 h_addr_reg; /* hub address register */
64 u8 h_port_reg; /* hub port register */
65
66 u8 is_ready; /* safe to modify hw_ep */
67 u8 type; /* XFERTYPE_* */
68 u8 epnum;
69 u16 maxpacket;
70 u16 frame; /* for periodic schedule */
71 unsigned iso_idx; /* in urb->iso_frame_desc[] */
72};
73
74/* map from control or bulk queue head to the first qh on that ring */
75static inline struct musb_qh *first_qh(struct list_head *q)
76{
77 if (list_empty(q))
78 return NULL;
79 return list_entry(q->next, struct musb_qh, ring);
80}
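
/* Usage sketch (hypothetical): first_qh() and next_urb() below compose
 * safely, since next_urb(NULL) just returns NULL:
 *
 *	struct urb *urb = next_urb(first_qh(&musb->in_bulk));
 */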
81
82
83extern void musb_root_disconnect(struct musb *musb);
84
85struct usb_hcd;
86
87extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
88extern int musb_hub_control(struct usb_hcd *hcd,
89 u16 typeReq, u16 wValue, u16 wIndex,
90 char *buf, u16 wLength);
91
92extern const struct hc_driver musb_hc_driver;
93
94static inline struct urb *next_urb(struct musb_qh *qh)
95{
96#ifdef CONFIG_USB_MUSB_HDRC_HCD
97 struct list_head *queue;
98
99 if (!qh)
100 return NULL;
101 queue = &qh->hep->urb_list;
102 if (list_empty(queue))
103 return NULL;
104 return list_entry(queue->next, struct urb, urb_list);
105#else
106 return NULL;
107#endif
108}
109
110#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 000000000000..6bbedae83af8
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
1/*
2 * MUSB OTG driver register I/O
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
36#define __MUSB_LINUX_PLATFORM_ARCH_H__
37
38#include <linux/io.h>
39
40#ifndef CONFIG_ARM
41static inline void readsl(const void __iomem *addr, void *buf, int len)
42 { insl((unsigned long)addr, buf, len); }
43static inline void readsw(const void __iomem *addr, void *buf, int len)
44 { insw((unsigned long)addr, buf, len); }
45static inline void readsb(const void __iomem *addr, void *buf, int len)
46 { insb((unsigned long)addr, buf, len); }
47
48static inline void writesl(const void __iomem *addr, const void *buf, int len)
49 { outsl((unsigned long)addr, buf, len); }
50static inline void writesw(const void __iomem *addr, const void *buf, int len)
51 { outsw((unsigned long)addr, buf, len); }
52static inline void writesb(const void __iomem *addr, const void *buf, int len)
53 { outsb((unsigned long)addr, buf, len); }
54
55#endif
56
57/* NOTE: these offsets are all in bytes */
58
59static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
60 { return __raw_readw(addr + offset); }
61
62static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
63 { return __raw_readl(addr + offset); }
64
65
66static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
67 { __raw_writew(data, addr + offset); }
68
69static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
70 { __raw_writel(data, addr + offset); }
71
72
73#ifdef CONFIG_USB_TUSB6010
74
75/*
76 * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
77 */
78static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
79{
80 u16 tmp;
81 u8 val;
82
83 tmp = __raw_readw(addr + (offset & ~1));
84 if (offset & 1)
85 val = (tmp >> 8);
86 else
87 val = tmp & 0xff;
88
89 return val;
90}
91
92static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
93{
94 u16 tmp;
95
96 tmp = __raw_readw(addr + (offset & ~1));
97 if (offset & 1)
98 tmp = (data << 8) | (tmp & 0xff);
99 else
100 tmp = (tmp & 0xff00) | data;
101
102 __raw_writew(tmp, addr + (offset & ~1));
103}
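
/* Byte lanes for the read-modify-write above, written out (illustration
 * only): offsets 2n and 2n+1 share the 16-bit word at 2n, even offsets
 * in the low byte, odd in the high byte.  So writing 0xAB to offset 5
 * turns the word at offset 4 from 0x1234 into 0xAB34.
 */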
104
105#else
106
107static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
108 { return __raw_readb(addr + offset); }
109
110static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
111 { __raw_writeb(data, addr + offset); }
112
113#endif /* CONFIG_USB_TUSB6010 */
114
115#endif
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
new file mode 100644
index 000000000000..55e6b78bdccc
--- /dev/null
+++ b/drivers/usb/musb/musb_procfs.c
@@ -0,0 +1,830 @@
1/*
2 * MUSB OTG driver debug support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/uaccess.h> /* FIXME remove procfs writes */
39#include <asm/arch/hardware.h>
40
41#include "musb_core.h"
42
43#include "davinci.h"
44
45#ifdef CONFIG_USB_MUSB_HDRC_HCD
46
47static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
48{
49 int count;
50 int tmp;
51 struct usb_host_endpoint *hep = qh->hep;
52 struct urb *urb;
53
54 count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n",
55 qh, qh->dev->devnum, qh->epnum,
56 ({ char *s; switch (qh->type) {
57 case USB_ENDPOINT_XFER_BULK:
58 s = "-bulk"; break;
59 case USB_ENDPOINT_XFER_INT:
60 s = "-int"; break;
61 case USB_ENDPOINT_XFER_CONTROL:
62 s = ""; break;
63 default:
64					s = "-iso"; break;
65 }; s; }),
66 qh->maxpacket);
67 if (count <= 0)
68 return 0;
69 buf += count;
70 max -= count;
71
72 list_for_each_entry(urb, &hep->urb_list, urb_list) {
73 tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
74 usb_pipein(urb->pipe) ? "in" : "out",
75 urb, urb->actual_length,
76 urb->transfer_buffer_length);
77 if (tmp <= 0)
78 break;
79 tmp = min(tmp, (int)max);
80 count += tmp;
81 buf += tmp;
82 max -= tmp;
83 }
84 return count;
85}
86
87static int
88dump_queue(struct list_head *q, char *buf, unsigned max)
89{
90 int count = 0;
91 struct musb_qh *qh;
92
93 list_for_each_entry(qh, q, ring) {
94 int tmp;
95
96 tmp = dump_qh(qh, buf, max);
97 if (tmp <= 0)
98 break;
99 tmp = min(tmp, (int)max);
100 count += tmp;
101 buf += tmp;
102 max -= tmp;
103 }
104 return count;
105}
106
107#endif /* HCD */
108
109#ifdef CONFIG_USB_GADGET_MUSB_HDRC
110static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
111{
112 char *buf = buffer;
113 int code = 0;
114 void __iomem *regs = ep->hw_ep->regs;
115 char *mode = "1buf";
116
117 if (ep->is_in) {
118 if (ep->hw_ep->tx_double_buffered)
119 mode = "2buf";
120 } else {
121 if (ep->hw_ep->rx_double_buffered)
122 mode = "2buf";
123 }
124
125 do {
126 struct usb_request *req;
127
128 code = snprintf(buf, max,
129 "\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
130 ep->name, ep->current_epnum,
131 mode, ep->dma ? " dma" : "",
132 musb_readw(regs,
133 (ep->is_in || !ep->current_epnum)
134 ? MUSB_TXCSR
135 : MUSB_RXCSR),
136 musb_readw(regs, ep->is_in
137 ? MUSB_TXMAXP
138 : MUSB_RXMAXP)
139 );
140 if (code <= 0)
141 break;
142 code = min(code, (int) max);
143 buf += code;
144 max -= code;
145
146 if (is_cppi_enabled() && ep->current_epnum) {
147 unsigned cppi = ep->current_epnum - 1;
148 void __iomem *base = ep->musb->ctrl_base;
149 unsigned off1 = cppi << 2;
150 void __iomem *ram = base;
151 char tmp[16];
152
153 if (ep->is_in) {
154 ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
155 tmp[0] = 0;
156 } else {
157 ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
158 snprintf(tmp, sizeof tmp, "%d left, ",
159 musb_readl(base,
160 DAVINCI_RXCPPI_BUFCNT0_REG + off1));
161 }
162
163 code = snprintf(buf, max, "%cX DMA%d: %s"
164 "%08x %08x, %08x %08x; "
165 "%08x %08x %08x .. %08x\n",
166 ep->is_in ? 'T' : 'R',
167 ep->current_epnum - 1, tmp,
168 musb_readl(ram, 0 * 4),
169 musb_readl(ram, 1 * 4),
170 musb_readl(ram, 2 * 4),
171 musb_readl(ram, 3 * 4),
172 musb_readl(ram, 4 * 4),
173 musb_readl(ram, 5 * 4),
174 musb_readl(ram, 6 * 4),
175 musb_readl(ram, 7 * 4));
176 if (code <= 0)
177 break;
178 code = min(code, (int) max);
179 buf += code;
180 max -= code;
181 }
182
183 if (list_empty(&ep->req_list)) {
184 code = snprintf(buf, max, "\t(queue empty)\n");
185 if (code <= 0)
186 break;
187 code = min(code, (int) max);
188 buf += code;
189 max -= code;
190 break;
191 }
192 list_for_each_entry(req, &ep->req_list, list) {
193 code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
194 req,
195 req->zero ? "zero, " : "",
196 req->short_not_ok ? "!short, " : "",
197 req->actual, req->length);
198 if (code <= 0)
199 break;
200 code = min(code, (int) max);
201 buf += code;
202 max -= code;
203 }
204 } while (0);
205 return buf - buffer;
206}
207#endif
208
209static int
210dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
211{
212 int code = 0;
213 char *buf = aBuffer;
214 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
215
216 do {
217 musb_ep_select(musb->mregs, epnum);
218#ifdef CONFIG_USB_MUSB_HDRC_HCD
219 if (is_host_active(musb)) {
220 int dump_rx, dump_tx;
221 void __iomem *regs = hw_ep->regs;
222
223 /* TEMPORARY (!) until we have a real periodic
224 * schedule tree ...
225 */
226 if (!epnum) {
227 /* control is shared, uses RX queue
228 * but (mostly) shadowed tx registers
229 */
230 dump_tx = !list_empty(&musb->control);
231 dump_rx = 0;
232 } else if (hw_ep == musb->bulk_ep) {
233 dump_tx = !list_empty(&musb->out_bulk);
234 dump_rx = !list_empty(&musb->in_bulk);
235 } else if (musb->periodic[epnum]) {
236 struct usb_host_endpoint *hep;
237
238 hep = musb->periodic[epnum]->hep;
239 dump_rx = hep->desc.bEndpointAddress
240 & USB_ENDPOINT_DIR_MASK;
241 dump_tx = !dump_rx;
242 } else
243 break;
244 /* END TEMPORARY */
245
246
247 if (dump_rx) {
248 code = snprintf(buf, max,
249 "\nRX%d: %s rxcsr %04x interval %02x "
250 "max %04x type %02x; "
251 "dev %d hub %d port %d"
252 "\n",
253 epnum,
254 hw_ep->rx_double_buffered
255 ? "2buf" : "1buf",
256 musb_readw(regs, MUSB_RXCSR),
257 musb_readb(regs, MUSB_RXINTERVAL),
258 musb_readw(regs, MUSB_RXMAXP),
259 musb_readb(regs, MUSB_RXTYPE),
260 /* FIXME: assumes multipoint */
261 musb_readb(musb->mregs,
262 MUSB_BUSCTL_OFFSET(epnum,
263 MUSB_RXFUNCADDR)),
264 musb_readb(musb->mregs,
265 MUSB_BUSCTL_OFFSET(epnum,
266 MUSB_RXHUBADDR)),
267 musb_readb(musb->mregs,
268 MUSB_BUSCTL_OFFSET(epnum,
269 MUSB_RXHUBPORT))
270 );
271 if (code <= 0)
272 break;
273 code = min(code, (int) max);
274 buf += code;
275 max -= code;
276
277 if (is_cppi_enabled()
278 && epnum
279 && hw_ep->rx_channel) {
280 unsigned cppi = epnum - 1;
281 unsigned off1 = cppi << 2;
282 void __iomem *base;
283 void __iomem *ram;
284 char tmp[16];
285
286 base = musb->ctrl_base;
287 ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
288 cppi) + base;
289 snprintf(tmp, sizeof tmp, "%d left, ",
290 musb_readl(base,
291 DAVINCI_RXCPPI_BUFCNT0_REG
292 + off1));
293
294 code = snprintf(buf, max,
295 " rx dma%d: %s"
296 "%08x %08x, %08x %08x; "
297 "%08x %08x %08x .. %08x\n",
298 cppi, tmp,
299 musb_readl(ram, 0 * 4),
300 musb_readl(ram, 1 * 4),
301 musb_readl(ram, 2 * 4),
302 musb_readl(ram, 3 * 4),
303 musb_readl(ram, 4 * 4),
304 musb_readl(ram, 5 * 4),
305 musb_readl(ram, 6 * 4),
306 musb_readl(ram, 7 * 4));
307 if (code <= 0)
308 break;
309 code = min(code, (int) max);
310 buf += code;
311 max -= code;
312 }
313
314 if (hw_ep == musb->bulk_ep
315 && !list_empty(
316 &musb->in_bulk)) {
317 code = dump_queue(&musb->in_bulk,
318 buf, max);
319 if (code <= 0)
320 break;
321 code = min(code, (int) max);
322 buf += code;
323 max -= code;
324 } else if (musb->periodic[epnum]) {
325 code = dump_qh(musb->periodic[epnum],
326 buf, max);
327 if (code <= 0)
328 break;
329 code = min(code, (int) max);
330 buf += code;
331 max -= code;
332 }
333 }
334
335 if (dump_tx) {
336 code = snprintf(buf, max,
337 "\nTX%d: %s txcsr %04x interval %02x "
338 "max %04x type %02x; "
339 "dev %d hub %d port %d"
340 "\n",
341 epnum,
342 hw_ep->tx_double_buffered
343 ? "2buf" : "1buf",
344 musb_readw(regs, MUSB_TXCSR),
345 musb_readb(regs, MUSB_TXINTERVAL),
346 musb_readw(regs, MUSB_TXMAXP),
347 musb_readb(regs, MUSB_TXTYPE),
348 /* FIXME: assumes multipoint */
349 musb_readb(musb->mregs,
350 MUSB_BUSCTL_OFFSET(epnum,
351 MUSB_TXFUNCADDR)),
352 musb_readb(musb->mregs,
353 MUSB_BUSCTL_OFFSET(epnum,
354 MUSB_TXHUBADDR)),
355 musb_readb(musb->mregs,
356 MUSB_BUSCTL_OFFSET(epnum,
357 MUSB_TXHUBPORT))
358 );
359 if (code <= 0)
360 break;
361 code = min(code, (int) max);
362 buf += code;
363 max -= code;
364
365 if (is_cppi_enabled()
366 && epnum
367 && hw_ep->tx_channel) {
368 unsigned cppi = epnum - 1;
369 void __iomem *base;
370 void __iomem *ram;
371
372 base = musb->ctrl_base;
373				ram = DAVINCI_TXCPPI_STATERAM_OFFSET(
374 cppi) + base;
375 code = snprintf(buf, max,
376 " tx dma%d: "
377 "%08x %08x, %08x %08x; "
378 "%08x %08x %08x .. %08x\n",
379 cppi,
380 musb_readl(ram, 0 * 4),
381 musb_readl(ram, 1 * 4),
382 musb_readl(ram, 2 * 4),
383 musb_readl(ram, 3 * 4),
384 musb_readl(ram, 4 * 4),
385 musb_readl(ram, 5 * 4),
386 musb_readl(ram, 6 * 4),
387 musb_readl(ram, 7 * 4));
388 if (code <= 0)
389 break;
390 code = min(code, (int) max);
391 buf += code;
392 max -= code;
393 }
394
395 if (hw_ep == musb->control_ep
396 && !list_empty(
397 &musb->control)) {
398 code = dump_queue(&musb->control,
399 buf, max);
400 if (code <= 0)
401 break;
402 code = min(code, (int) max);
403 buf += code;
404 max -= code;
405 } else if (hw_ep == musb->bulk_ep
406 && !list_empty(
407 &musb->out_bulk)) {
408 code = dump_queue(&musb->out_bulk,
409 buf, max);
410 if (code <= 0)
411 break;
412 code = min(code, (int) max);
413 buf += code;
414 max -= code;
415 } else if (musb->periodic[epnum]) {
416 code = dump_qh(musb->periodic[epnum],
417 buf, max);
418 if (code <= 0)
419 break;
420 code = min(code, (int) max);
421 buf += code;
422 max -= code;
423 }
424 }
425 }
426#endif
427#ifdef CONFIG_USB_GADGET_MUSB_HDRC
428 if (is_peripheral_active(musb)) {
429 code = 0;
430
431 if (hw_ep->ep_in.desc || !epnum) {
432 code = dump_ep(&hw_ep->ep_in, buf, max);
433 if (code <= 0)
434 break;
435 code = min(code, (int) max);
436 buf += code;
437 max -= code;
438 }
439 if (hw_ep->ep_out.desc) {
440 code = dump_ep(&hw_ep->ep_out, buf, max);
441 if (code <= 0)
442 break;
443 code = min(code, (int) max);
444 buf += code;
445 max -= code;
446 }
447 }
448#endif
449 } while (0);
450
451 return buf - aBuffer;
452}
453
454/* Dump the current status and compile options.
455 * @param musb the device driver instance
456 * @param buffer where to dump the status; it must be big enough to hold the
457 * result otherwise "BAD THINGS HAPPENS(TM)".
458 */
459static int dump_header_stats(struct musb *musb, char *buffer)
460{
461 int code, count = 0;
462 const void __iomem *mbase = musb->mregs;
463
464 *buffer = 0;
465 count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
466 "(Power=%02x, DevCtl=%02x)\n",
467 (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
468 musb_readb(mbase, MUSB_POWER),
469 musb_readb(mbase, MUSB_DEVCTL));
470 if (count <= 0)
471 return 0;
472 buffer += count;
473
474 code = sprintf(buffer, "OTG state: %s; %sactive\n",
475 otg_state_string(musb),
476 musb->is_active ? "" : "in");
477 if (code <= 0)
478 goto done;
479 buffer += code;
480 count += code;
481
482 code = sprintf(buffer,
483 "Options: "
484#ifdef CONFIG_MUSB_PIO_ONLY
485 "pio"
486#elif defined(CONFIG_USB_TI_CPPI_DMA)
487 "cppi-dma"
488#elif defined(CONFIG_USB_INVENTRA_DMA)
489 "musb-dma"
490#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
491 "tusb-omap-dma"
492#else
493 "?dma?"
494#endif
495 ", "
496#ifdef CONFIG_USB_MUSB_OTG
497 "otg (peripheral+host)"
498#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
499 "peripheral"
500#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
501 "host"
502#endif
503 ", debug=%d [eps=%d]\n",
504 debug,
505 musb->nr_endpoints);
506 if (code <= 0)
507 goto done;
508 count += code;
509 buffer += code;
510
511#ifdef CONFIG_USB_GADGET_MUSB_HDRC
512 code = sprintf(buffer, "Peripheral address: %02x\n",
513 musb_readb(musb->ctrl_base, MUSB_FADDR));
514 if (code <= 0)
515 goto done;
516 buffer += code;
517 count += code;
518#endif
519
520#ifdef CONFIG_USB_MUSB_HDRC_HCD
521 code = sprintf(buffer, "Root port status: %08x\n",
522 musb->port1_status);
523 if (code <= 0)
524 goto done;
525 buffer += code;
526 count += code;
527#endif
528
529#ifdef CONFIG_ARCH_DAVINCI
530 code = sprintf(buffer,
531 "DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
532 "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
533 "\n",
534 musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
535 musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
536 __raw_readl((void __force __iomem *)
537 IO_ADDRESS(USBPHY_CTL_PADDR)),
538 musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
539 musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
540 musb_readl(musb->ctrl_base,
541 DAVINCI_USB_INT_SOURCE_REG),
542 musb_readl(musb->ctrl_base,
543 DAVINCI_USB_INT_MASK_REG));
544 if (code <= 0)
545 goto done;
546 count += code;
547 buffer += code;
548#endif /* DAVINCI */
549
550#ifdef CONFIG_USB_TUSB6010
551 code = sprintf(buffer,
552 "TUSB6010: devconf %08x, phy enable %08x drive %08x"
553 "\n\totg %03x timer %08x"
554 "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
555 "\n",
556 musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
557 musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
558 musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
559 musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
560 musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
561 musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
562 musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
563 musb_readl(musb->ctrl_base, TUSB_INT_SRC),
564 musb_readl(musb->ctrl_base, TUSB_INT_MASK));
565 if (code <= 0)
566 goto done;
567 count += code;
568 buffer += code;
569#endif /* TUSB6010 */
570
571 if (is_cppi_enabled() && musb->dma_controller) {
572 code = sprintf(buffer,
573 "CPPI: txcr=%d txsrc=%01x txena=%01x; "
574 "rxcr=%d rxsrc=%01x rxena=%01x "
575 "\n",
576 musb_readl(musb->ctrl_base,
577 DAVINCI_TXCPPI_CTRL_REG),
578 musb_readl(musb->ctrl_base,
579 DAVINCI_TXCPPI_RAW_REG),
580 musb_readl(musb->ctrl_base,
581 DAVINCI_TXCPPI_INTENAB_REG),
582 musb_readl(musb->ctrl_base,
583 DAVINCI_RXCPPI_CTRL_REG),
584 musb_readl(musb->ctrl_base,
585 DAVINCI_RXCPPI_RAW_REG),
586 musb_readl(musb->ctrl_base,
587 DAVINCI_RXCPPI_INTENAB_REG));
588 if (code <= 0)
589 goto done;
590 count += code;
591 buffer += code;
592 }
593
594#ifdef CONFIG_USB_GADGET_MUSB_HDRC
595 if (is_peripheral_enabled(musb)) {
596 code = sprintf(buffer, "Gadget driver: %s\n",
597 musb->gadget_driver
598 ? musb->gadget_driver->driver.name
599 : "(none)");
600 if (code <= 0)
601 goto done;
602 count += code;
603 buffer += code;
604 }
605#endif
606
607done:
608 return count;
609}
610
611/* Write to ProcFS
612 *
613 * C soft-connect
614 * c soft-disconnect
615 * I enable HS
616 * i disable HS
617 * s stop session
618 * F force session (OTG-unfriendly)
619 * E rElinquish bus (OTG)
620 * H request host mode
621 * h cancel host request
622 * T start sending TEST_PACKET
623 * D<num> set/query the debug level
624 */
625static int musb_proc_write(struct file *file, const char __user *buffer,
626 unsigned long count, void *data)
627{
628 char cmd;
629 u8 reg;
630 struct musb *musb = (struct musb *)data;
631 void __iomem *mbase = musb->mregs;
632
633 /* MOD_INC_USE_COUNT; */
634
635 if (unlikely(copy_from_user(&cmd, buffer, 1)))
636 return -EFAULT;
637
638 switch (cmd) {
639 case 'C':
640 if (mbase) {
641 reg = musb_readb(mbase, MUSB_POWER)
642 | MUSB_POWER_SOFTCONN;
643 musb_writeb(mbase, MUSB_POWER, reg);
644 }
645 break;
646
647 case 'c':
648 if (mbase) {
649 reg = musb_readb(mbase, MUSB_POWER)
650 & ~MUSB_POWER_SOFTCONN;
651 musb_writeb(mbase, MUSB_POWER, reg);
652 }
653 break;
654
655 case 'I':
656 if (mbase) {
657 reg = musb_readb(mbase, MUSB_POWER)
658 | MUSB_POWER_HSENAB;
659 musb_writeb(mbase, MUSB_POWER, reg);
660 }
661 break;
662
663 case 'i':
664 if (mbase) {
665 reg = musb_readb(mbase, MUSB_POWER)
666 & ~MUSB_POWER_HSENAB;
667 musb_writeb(mbase, MUSB_POWER, reg);
668 }
669 break;
670
671 case 'F':
672 reg = musb_readb(mbase, MUSB_DEVCTL);
673 reg |= MUSB_DEVCTL_SESSION;
674 musb_writeb(mbase, MUSB_DEVCTL, reg);
675 break;
676
677 case 'H':
678 if (mbase) {
679 reg = musb_readb(mbase, MUSB_DEVCTL);
680 reg |= MUSB_DEVCTL_HR;
681 musb_writeb(mbase, MUSB_DEVCTL, reg);
682 /* MUSB_HST_MODE( ((struct musb*)data) ); */
683 /* WARNING("Host Mode\n"); */
684 }
685 break;
686
687 case 'h':
688 if (mbase) {
689 reg = musb_readb(mbase, MUSB_DEVCTL);
690 reg &= ~MUSB_DEVCTL_HR;
691 musb_writeb(mbase, MUSB_DEVCTL, reg);
692 }
693 break;
694
695 case 'T':
696 if (mbase) {
697 musb_load_testpacket(musb);
698 musb_writeb(mbase, MUSB_TESTMODE,
699 MUSB_TEST_PACKET);
700 }
701 break;
702
703#if (MUSB_DEBUG > 0)
704 /* set/read debug level */
705 case 'D':{
706 if (count > 1) {
707 char digits[8], *p = digits;
708 int i = 0, level = 0, sign = 1;
709 int len = min(count - 1, (unsigned long)8);
710
711 if (copy_from_user(&digits, &buffer[1], len))
712 return -EFAULT;
713
714 /* optional sign */
715 if (*p == '-') {
716 len -= 1;
717 sign = -sign;
718 p++;
719 }
720
721 /* read it */
722			while (i++ < len && *p >= '0' && *p <= '9') {
723 level = level * 10 + (*p - '0');
724 p++;
725 }
726
727 level *= sign;
728 DBG(1, "debug level %d\n", level);
729 debug = level;
730 }
731 }
732 break;
733
734
735 case '?':
736 INFO("?: you are seeing it\n");
737 INFO("C/c: soft connect enable/disable\n");
738 INFO("I/i: hispeed enable/disable\n");
739 INFO("F: force session start\n");
740 INFO("H: host mode\n");
741 INFO("T: start sending TEST_PACKET\n");
742		INFO("D: set/read debug level\n");
743 break;
744#endif
745
746 default:
747 ERR("Command %c not implemented\n", cmd);
748 break;
749 }
750
751 musb_platform_try_idle(musb, 0);
752
753 return count;
754}
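
/* Usage sketch from userspace (the proc path is hypothetical here; the
 * real name is whatever the platform passed to musb_debug_create()):
 *
 *	int fd = open("/proc/driver/musb_hdrc", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "F", 1);	   force a session, per the table above
 *		close(fd);
 *	}
 */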
755
756static int musb_proc_read(char *page, char **start,
757 off_t off, int count, int *eof, void *data)
758{
759 char *buffer = page;
760 int code = 0;
761 unsigned long flags;
762 struct musb *musb = data;
763 unsigned epnum;
764
765 count -= off;
766 count -= 1; /* for NUL at end */
767 if (count <= 0)
768 return -EINVAL;
769
770 spin_lock_irqsave(&musb->lock, flags);
771
772 code = dump_header_stats(musb, buffer);
773 if (code > 0) {
774 buffer += code;
775 count -= code;
776 }
777
778 /* generate the report for the end points */
779 /* REVISIT ... not unless something's connected! */
780 for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints;
781 epnum++) {
782 code = dump_end_info(musb, epnum, buffer, count);
783 if (code > 0) {
784 buffer += code;
785 count -= code;
786 }
787 }
788
789 musb_platform_try_idle(musb, 0);
790
791 spin_unlock_irqrestore(&musb->lock, flags);
792 *eof = 1;
793
794 return buffer - page;
795}
796
797void __devexit musb_debug_delete(char *name, struct musb *musb)
798{
799 if (musb->proc_entry)
800 remove_proc_entry(name, NULL);
801}
802
803struct proc_dir_entry *__init
804musb_debug_create(char *name, struct musb *data)
805{
806 struct proc_dir_entry *pde;
807
808 /* FIXME convert everything to seq_file; then later, debugfs */
809
810 if (!name)
811 return NULL;
812
813 pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
814 data->proc_entry = pde;
815 if (pde) {
816 pde->data = data;
817 /* pde->owner = THIS_MODULE; */
818
819 pde->read_proc = musb_proc_read;
820 pde->write_proc = musb_proc_write;
821
822 pde->size = 0;
823
824 pr_debug("Registered /proc/%s\n", name);
825 } else {
826		pr_debug("Cannot create a valid proc file entry\n");
827 }
828
829 return pde;
830}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 000000000000..9c228661aa5a
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
1/*
2 * MUSB OTG driver register defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_REGS_H__
36#define __MUSB_REGS_H__
37
38#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
39
40/*
41 * Common USB registers
42 */
43
44#define MUSB_FADDR 0x00 /* 8-bit */
45#define MUSB_POWER 0x01 /* 8-bit */
46
47#define MUSB_INTRTX 0x02 /* 16-bit */
48#define MUSB_INTRRX 0x04
49#define MUSB_INTRTXE 0x06
50#define MUSB_INTRRXE 0x08
51#define MUSB_INTRUSB 0x0A /* 8 bit */
52#define MUSB_INTRUSBE 0x0B /* 8 bit */
53#define MUSB_FRAME 0x0C
54#define MUSB_INDEX 0x0E /* 8 bit */
55#define MUSB_TESTMODE 0x0F /* 8 bit */
56
57/* Get offset for a given FIFO from musb->mregs */
58#ifdef CONFIG_USB_TUSB6010
59#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
60#else
61#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
62#endif
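
As a quick check of the two FIFO layouts, a standalone userspace snippet (constants copied from the macro above, under demo-local names to avoid the #ifdef) shows the addresses that result:

#include <stdio.h>

/* copied from above: TUSB6010 packs FIFOs at 0x200 in 0x20 strides,
 * everything else uses 4-byte strides starting at 0x20 */
#define TUSB_FIFO_OFFSET(epnum)		(0x200 + ((epnum) * 0x20))
#define GENERIC_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))

int main(void)
{
	int ep;

	for (ep = 0; ep < 3; ep++)
		printf("ep%d fifo: tusb 0x%03x, generic 0x%02x\n",
			ep, TUSB_FIFO_OFFSET(ep), GENERIC_FIFO_OFFSET(ep));
	return 0;	/* ep0: 0x200/0x20, ep1: 0x220/0x24, ep2: 0x240/0x28 */
}
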
63
64/*
65 * Additional Control Registers
66 */
67
68#define MUSB_DEVCTL 0x60 /* 8 bit */
69
70/* These are always controlled through the INDEX register */
71#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
72#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
73#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
74#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
75
76/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
77#define MUSB_HWVERS 0x6C /* 8 bit */
78
79#define MUSB_EPINFO 0x78 /* 8 bit */
80#define MUSB_RAMINFO 0x79 /* 8 bit */
81#define MUSB_LINKINFO 0x7a /* 8 bit */
82#define MUSB_VPLEN 0x7b /* 8 bit */
83#define MUSB_HS_EOF1 0x7c /* 8 bit */
84#define MUSB_FS_EOF1 0x7d /* 8 bit */
85#define MUSB_LS_EOF1 0x7e /* 8 bit */
86
87/* Offsets to endpoint registers */
88#define MUSB_TXMAXP 0x00
89#define MUSB_TXCSR 0x02
90#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
91#define MUSB_RXMAXP 0x04
92#define MUSB_RXCSR 0x06
93#define MUSB_RXCOUNT 0x08
94#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
95#define MUSB_TXTYPE 0x0A
96#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
97#define MUSB_TXINTERVAL 0x0B
98#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
99#define MUSB_RXTYPE 0x0C
100#define MUSB_RXINTERVAL 0x0D
101#define MUSB_FIFOSIZE 0x0F
102#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
103
104/* Offsets to endpoint registers in indexed model (using INDEX register) */
105#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
106 (0x10 + (_offset))
107
108/* Offsets to endpoint registers in flat models */
109#define MUSB_FLAT_OFFSET(_epnum, _offset) \
110 (0x100 + (0x10*(_epnum)) + (_offset))
111
112#ifdef CONFIG_USB_TUSB6010
113/* TUSB6010 EP0 configuration register is special */
114#define MUSB_TUSB_OFFSET(_epnum, _offset) \
115	(0x10 + (_offset))
116#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
117#endif
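
The difference between the two addressing models: the flat model gives every endpoint its own 16-byte register bank, while the indexed model first writes the endpoint number into MUSB_INDEX and then uses one shared window at 0x10. A standalone demo of the resulting offsets (macros copied from above):

#include <stdio.h>

#define MUSB_TXCSR	0x02
#define MUSB_INDEXED_OFFSET(_epnum, _offset)	(0x10 + (_offset))
#define MUSB_FLAT_OFFSET(_epnum, _offset) \
	(0x100 + (0x10*(_epnum)) + (_offset))

int main(void)
{
	int ep = 2;

	/* flat: ep2 TXCSR lives at 0x122, no INDEX write needed */
	printf("flat    0x%03x\n", MUSB_FLAT_OFFSET(ep, MUSB_TXCSR));
	/* indexed: 0x12 for every endpoint, once MUSB_INDEX holds 2 */
	printf("indexed 0x%03x\n", MUSB_INDEXED_OFFSET(ep, MUSB_TXCSR));
	return 0;
}
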
118
119/* "bus control"/target registers, for host side multipoint (external hubs) */
120#define MUSB_TXFUNCADDR 0x00
121#define MUSB_TXHUBADDR 0x02
122#define MUSB_TXHUBPORT 0x03
123
124#define MUSB_RXFUNCADDR 0x04
125#define MUSB_RXHUBADDR 0x06
126#define MUSB_RXHUBPORT 0x07
127
128#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
129 (0x80 + (8*(_epnum)) + (_offset))
130
131/*
132 * MUSB Register bits
133 */
134
135/* POWER */
136#define MUSB_POWER_ISOUPDATE 0x80
137#define MUSB_POWER_SOFTCONN 0x40
138#define MUSB_POWER_HSENAB 0x20
139#define MUSB_POWER_HSMODE 0x10
140#define MUSB_POWER_RESET 0x08
141#define MUSB_POWER_RESUME 0x04
142#define MUSB_POWER_SUSPENDM 0x02
143#define MUSB_POWER_ENSUSPEND 0x01
144
145/* INTRUSB */
146#define MUSB_INTR_SUSPEND 0x01
147#define MUSB_INTR_RESUME 0x02
148#define MUSB_INTR_RESET			0x04	/* Peripheral mode only */
149#define MUSB_INTR_BABBLE		0x04	/* Host mode only; same bit as RESET */
150#define MUSB_INTR_SOF 0x08
151#define MUSB_INTR_CONNECT 0x10
152#define MUSB_INTR_DISCONNECT 0x20
153#define MUSB_INTR_SESSREQ 0x40
154#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
155
156/* DEVCTL */
157#define MUSB_DEVCTL_BDEVICE 0x80
158#define MUSB_DEVCTL_FSDEV 0x40
159#define MUSB_DEVCTL_LSDEV 0x20
160#define MUSB_DEVCTL_VBUS 0x18
161#define MUSB_DEVCTL_VBUS_SHIFT 3
162#define MUSB_DEVCTL_HM 0x04
163#define MUSB_DEVCTL_HR 0x02
164#define MUSB_DEVCTL_SESSION 0x01
165
166/* TESTMODE */
167#define MUSB_TEST_FORCE_HOST 0x80
168#define MUSB_TEST_FIFO_ACCESS 0x40
169#define MUSB_TEST_FORCE_FS 0x20
170#define MUSB_TEST_FORCE_HS 0x10
171#define MUSB_TEST_PACKET 0x08
172#define MUSB_TEST_K 0x04
173#define MUSB_TEST_J 0x02
174#define MUSB_TEST_SE0_NAK 0x01
175
176/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
177#define MUSB_FIFOSZ_DPB 0x10
178/* Allocation size (8, 16, 32, ... 4096) */
179#define MUSB_FIFOSZ_SIZE 0x0f
180
181/* CSR0 */
182#define MUSB_CSR0_FLUSHFIFO 0x0100
183#define MUSB_CSR0_TXPKTRDY 0x0002
184#define MUSB_CSR0_RXPKTRDY 0x0001
185
186/* CSR0 in Peripheral mode */
187#define MUSB_CSR0_P_SVDSETUPEND 0x0080
188#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
189#define MUSB_CSR0_P_SENDSTALL 0x0020
190#define MUSB_CSR0_P_SETUPEND 0x0010
191#define MUSB_CSR0_P_DATAEND 0x0008
192#define MUSB_CSR0_P_SENTSTALL 0x0004
193
194/* CSR0 in Host mode */
195#define MUSB_CSR0_H_DIS_PING 0x0800
196#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
197#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
198#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
199#define MUSB_CSR0_H_STATUSPKT 0x0040
200#define MUSB_CSR0_H_REQPKT 0x0020
201#define MUSB_CSR0_H_ERROR 0x0010
202#define MUSB_CSR0_H_SETUPPKT 0x0008
203#define MUSB_CSR0_H_RXSTALL 0x0004
204
205/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
206#define MUSB_CSR0_P_WZC_BITS \
207 (MUSB_CSR0_P_SENTSTALL)
208#define MUSB_CSR0_H_WZC_BITS \
209 (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
210 | MUSB_CSR0_RXPKTRDY)
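
These WZC masks exist because some CSR bits are cleared by writing zero, so a plain read-modify-write can wipe status the driver has not handled yet. The idiom, sketched here for clearing SENTSTALL in peripheral mode (illustrative fragment only; 'regs' stands for the usual ep0 register base):

	u16 csr0 = musb_readw(regs, MUSB_CSR0);	/* 'regs': ep0 base, assumed */

	csr0 |= MUSB_CSR0_P_WZC_BITS;	/* write 1: ignored, clears nothing */
	csr0 &= ~MUSB_CSR0_P_SENTSTALL;	/* write 0: clears just this bit */
	musb_writew(regs, MUSB_CSR0, csr0);
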
211
212/* TxType/RxType */
213#define MUSB_TYPE_SPEED 0xc0
214#define MUSB_TYPE_SPEED_SHIFT 6
215#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
216#define MUSB_TYPE_PROTO_SHIFT 4
217#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
218
219/* CONFIGDATA */
220#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
221#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
222#define MUSB_CONFIGDATA_BIGENDIAN 0x20
223#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
224#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
225#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
226#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
227#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
228
229/* TXCSR in Peripheral and Host mode */
230#define MUSB_TXCSR_AUTOSET 0x8000
231#define MUSB_TXCSR_MODE 0x2000
232#define MUSB_TXCSR_DMAENAB 0x1000
233#define MUSB_TXCSR_FRCDATATOG 0x0800
234#define MUSB_TXCSR_DMAMODE 0x0400
235#define MUSB_TXCSR_CLRDATATOG 0x0040
236#define MUSB_TXCSR_FLUSHFIFO 0x0008
237#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
238#define MUSB_TXCSR_TXPKTRDY 0x0001
239
240/* TXCSR in Peripheral mode */
241#define MUSB_TXCSR_P_ISO 0x4000
242#define MUSB_TXCSR_P_INCOMPTX 0x0080
243#define MUSB_TXCSR_P_SENTSTALL 0x0020
244#define MUSB_TXCSR_P_SENDSTALL 0x0010
245#define MUSB_TXCSR_P_UNDERRUN 0x0004
246
247/* TXCSR in Host mode */
248#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
249#define MUSB_TXCSR_H_DATATOGGLE 0x0100
250#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
251#define MUSB_TXCSR_H_RXSTALL 0x0020
252#define MUSB_TXCSR_H_ERROR 0x0004
253
254/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
255#define MUSB_TXCSR_P_WZC_BITS \
256 (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
257 | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
258#define MUSB_TXCSR_H_WZC_BITS \
259 (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
260 | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
261
262/* RXCSR in Peripheral and Host mode */
263#define MUSB_RXCSR_AUTOCLEAR 0x8000
264#define MUSB_RXCSR_DMAENAB 0x2000
265#define MUSB_RXCSR_DISNYET 0x1000
266#define MUSB_RXCSR_PID_ERR 0x1000
267#define MUSB_RXCSR_DMAMODE 0x0800
268#define MUSB_RXCSR_INCOMPRX 0x0100
269#define MUSB_RXCSR_CLRDATATOG 0x0080
270#define MUSB_RXCSR_FLUSHFIFO 0x0010
271#define MUSB_RXCSR_DATAERROR 0x0008
272#define MUSB_RXCSR_FIFOFULL 0x0002
273#define MUSB_RXCSR_RXPKTRDY 0x0001
274
275/* RXCSR in Peripheral mode */
276#define MUSB_RXCSR_P_ISO 0x4000
277#define MUSB_RXCSR_P_SENTSTALL 0x0040
278#define MUSB_RXCSR_P_SENDSTALL 0x0020
279#define MUSB_RXCSR_P_OVERRUN 0x0004
280
281/* RXCSR in Host mode */
282#define MUSB_RXCSR_H_AUTOREQ 0x4000
283#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
284#define MUSB_RXCSR_H_DATATOGGLE 0x0200
285#define MUSB_RXCSR_H_RXSTALL 0x0040
286#define MUSB_RXCSR_H_REQPKT 0x0020
287#define MUSB_RXCSR_H_ERROR 0x0004
288
289/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
290#define MUSB_RXCSR_P_WZC_BITS \
291 (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
292 | MUSB_RXCSR_RXPKTRDY)
293#define MUSB_RXCSR_H_WZC_BITS \
294 (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
295 | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
296
297/* HUBADDR */
298#define MUSB_HUBADDR_MULTI_TT 0x80
299
300#endif /* __MUSB_REGS_H__ */
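
As a usage example for the DEVCTL masks, a standalone snippet decoding the two-bit VBUS level (values copied from above); level 3 corresponds to VBus valid, the only level at which the core treats VBUS as good:

#include <stdio.h>

#define MUSB_DEVCTL_VBUS	0x18
#define MUSB_DEVCTL_VBUS_SHIFT	3
#define MUSB_DEVCTL_HM		0x04
#define MUSB_DEVCTL_SESSION	0x01

int main(void)
{
	unsigned devctl = 0x1d;		/* sample raw register value */

	printf("vbus level %u, host %d, session %d\n",
		(devctl & MUSB_DEVCTL_VBUS) >> MUSB_DEVCTL_VBUS_SHIFT,
		(devctl & MUSB_DEVCTL_HM) != 0,
		(devctl & MUSB_DEVCTL_SESSION) != 0);
	return 0;	/* 0x1d -> vbus level 3, host 1, session 1 */
}
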
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 000000000000..e0e9ce584175
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
1/*
2 * MUSB OTG driver virtual root hub support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/time.h>
42#include <linux/timer.h>
43
44#include <asm/unaligned.h>
45
46#include "musb_core.h"
47
48
49static void musb_port_suspend(struct musb *musb, bool do_suspend)
50{
51 u8 power;
52 void __iomem *mbase = musb->mregs;
53
54 if (!is_host_active(musb))
55 return;
56
57 /* NOTE: this doesn't necessarily put PHY into low power mode,
58 * turning off its clock; that's a function of PHY integration and
59 * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
60 * SE0 changing to connect (J) or wakeup (K) states.
61 */
62 power = musb_readb(mbase, MUSB_POWER);
63 if (do_suspend) {
64 int retries = 10000;
65
66 power &= ~MUSB_POWER_RESUME;
67 power |= MUSB_POWER_SUSPENDM;
68 musb_writeb(mbase, MUSB_POWER, power);
69
70 /* Needed for OPT A tests */
71 power = musb_readb(mbase, MUSB_POWER);
72 while (power & MUSB_POWER_SUSPENDM) {
73 power = musb_readb(mbase, MUSB_POWER);
74 if (retries-- < 1)
75 break;
76 }
77
78 DBG(3, "Root port suspended, power %02x\n", power);
79
80 musb->port1_status |= USB_PORT_STAT_SUSPEND;
81 switch (musb->xceiv.state) {
82 case OTG_STATE_A_HOST:
83 musb->xceiv.state = OTG_STATE_A_SUSPEND;
84 musb->is_active = is_otg_enabled(musb)
85 && musb->xceiv.host->b_hnp_enable;
86 musb_platform_try_idle(musb, 0);
87 break;
88#ifdef CONFIG_USB_MUSB_OTG
89 case OTG_STATE_B_HOST:
90 musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
91 musb->is_active = is_otg_enabled(musb)
92 && musb->xceiv.host->b_hnp_enable;
93 musb_platform_try_idle(musb, 0);
94 break;
95#endif
96 default:
97 DBG(1, "bogus rh suspend? %s\n",
98 otg_state_string(musb));
99 }
100 } else if (power & MUSB_POWER_SUSPENDM) {
101 power &= ~MUSB_POWER_SUSPENDM;
102 power |= MUSB_POWER_RESUME;
103 musb_writeb(mbase, MUSB_POWER, power);
104
105 DBG(3, "Root port resuming, power %02x\n", power);
106
107 /* later, GetPortStatus will stop RESUME signaling */
108 musb->port1_status |= MUSB_PORT_STAT_RESUME;
109 musb->rh_timer = jiffies + msecs_to_jiffies(20);
110 }
111}
112
113static void musb_port_reset(struct musb *musb, bool do_reset)
114{
115 u8 power;
116 void __iomem *mbase = musb->mregs;
117
118#ifdef CONFIG_USB_MUSB_OTG
119 if (musb->xceiv.state == OTG_STATE_B_IDLE) {
120 DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
121 musb->port1_status &= ~USB_PORT_STAT_RESET;
122 return;
123 }
124#endif
125
126 if (!is_host_active(musb))
127 return;
128
129 /* NOTE: caller guarantees it will turn off the reset when
130 * the appropriate amount of time has passed
131 */
132 power = musb_readb(mbase, MUSB_POWER);
133 if (do_reset) {
134
135 /*
136 * If RESUME is set, we must make sure it stays minimum 20 ms.
137 * Then we must clear RESUME and wait a bit to let musb start
138 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
139 * fail with "Error! Did not receive an SOF before suspend
140 * detected".
141 */
142 if (power & MUSB_POWER_RESUME) {
143 while (time_before(jiffies, musb->rh_timer))
144 msleep(1);
145 musb_writeb(mbase, MUSB_POWER,
146 power & ~MUSB_POWER_RESUME);
147 msleep(1);
148 }
149
150 musb->ignore_disconnect = true;
151 power &= 0xf0;
152 musb_writeb(mbase, MUSB_POWER,
153 power | MUSB_POWER_RESET);
154
155 musb->port1_status |= USB_PORT_STAT_RESET;
156 musb->port1_status &= ~USB_PORT_STAT_ENABLE;
157 musb->rh_timer = jiffies + msecs_to_jiffies(50);
158 } else {
159 DBG(4, "root port reset stopped\n");
160 musb_writeb(mbase, MUSB_POWER,
161 power & ~MUSB_POWER_RESET);
162
163 musb->ignore_disconnect = false;
164
165 power = musb_readb(mbase, MUSB_POWER);
166 if (power & MUSB_POWER_HSMODE) {
167 DBG(4, "high-speed device connected\n");
168 musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
169 }
170
171 musb->port1_status &= ~USB_PORT_STAT_RESET;
172 musb->port1_status |= USB_PORT_STAT_ENABLE
173 | (USB_PORT_STAT_C_RESET << 16)
174 | (USB_PORT_STAT_C_ENABLE << 16);
175 usb_hcd_poll_rh_status(musb_to_hcd(musb));
176
177 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
178 }
179}
180
181void musb_root_disconnect(struct musb *musb)
182{
183 musb->port1_status = (1 << USB_PORT_FEAT_POWER)
184 | (1 << USB_PORT_FEAT_C_CONNECTION);
185
186 usb_hcd_poll_rh_status(musb_to_hcd(musb));
187 musb->is_active = 0;
188
189 switch (musb->xceiv.state) {
190 case OTG_STATE_A_HOST:
191 case OTG_STATE_A_SUSPEND:
192 musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
193 musb->is_active = 0;
194 break;
195 case OTG_STATE_A_WAIT_VFALL:
196 musb->xceiv.state = OTG_STATE_B_IDLE;
197 break;
198 default:
199 DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
200 }
201}
202
203
204/*---------------------------------------------------------------------*/
205
206/* Caller may or may not hold musb->lock */
207int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
208{
209 struct musb *musb = hcd_to_musb(hcd);
210 int retval = 0;
211
212 /* called in_irq() via usb_hcd_poll_rh_status() */
213 if (musb->port1_status & 0xffff0000) {
214 *buf = 0x02;
215 retval = 1;
216 }
217 return retval;
218}
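
The 0x02 above is the standard hub status-change bitmap: bit 0 reports the hub itself and bit N reports port N, so a one-port root hub flags its only port with bit 1. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned port = 1;		/* MUSB exposes a single root port */
	unsigned map = 1u << port;	/* 0x02: change pending on port 1 */

	printf("status-change map %#04x\n", map);
	return 0;
}
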
219
220int musb_hub_control(
221 struct usb_hcd *hcd,
222 u16 typeReq,
223 u16 wValue,
224 u16 wIndex,
225 char *buf,
226 u16 wLength)
227{
228 struct musb *musb = hcd_to_musb(hcd);
229 u32 temp;
230 int retval = 0;
231 unsigned long flags;
232
233 spin_lock_irqsave(&musb->lock, flags);
234
235 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
236 spin_unlock_irqrestore(&musb->lock, flags);
237 return -ESHUTDOWN;
238 }
239
240 /* hub features: always zero, setting is a NOP
241 * port features: reported, sometimes updated when host is active
242 * no indicators
243 */
244 switch (typeReq) {
245 case ClearHubFeature:
246 case SetHubFeature:
247 switch (wValue) {
248 case C_HUB_OVER_CURRENT:
249 case C_HUB_LOCAL_POWER:
250 break;
251 default:
252 goto error;
253 }
254 break;
255 case ClearPortFeature:
256 if ((wIndex & 0xff) != 1)
257 goto error;
258
259 switch (wValue) {
260 case USB_PORT_FEAT_ENABLE:
261 break;
262 case USB_PORT_FEAT_SUSPEND:
263 musb_port_suspend(musb, false);
264 break;
265 case USB_PORT_FEAT_POWER:
266 if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
267 musb_set_vbus(musb, 0);
268 break;
269 case USB_PORT_FEAT_C_CONNECTION:
270 case USB_PORT_FEAT_C_ENABLE:
271 case USB_PORT_FEAT_C_OVER_CURRENT:
272 case USB_PORT_FEAT_C_RESET:
273 case USB_PORT_FEAT_C_SUSPEND:
274 break;
275 default:
276 goto error;
277 }
278 DBG(5, "clear feature %d\n", wValue);
279 musb->port1_status &= ~(1 << wValue);
280 break;
281 case GetHubDescriptor:
282 {
283 struct usb_hub_descriptor *desc = (void *)buf;
284
285 desc->bDescLength = 9;
286 desc->bDescriptorType = 0x29;
287 desc->bNbrPorts = 1;
288 desc->wHubCharacteristics = __constant_cpu_to_le16(
289 0x0001 /* per-port power switching */
290 | 0x0010 /* no overcurrent reporting */
291 );
292 desc->bPwrOn2PwrGood = 5; /* msec/2 */
293 desc->bHubContrCurrent = 0;
294
295 /* workaround bogus struct definition */
296 desc->DeviceRemovable[0] = 0x02; /* port 1 */
297 desc->DeviceRemovable[1] = 0xff;
298 }
299 break;
300 case GetHubStatus:
301 temp = 0;
302 *(__le32 *) buf = cpu_to_le32(temp);
303 break;
304 case GetPortStatus:
305 if (wIndex != 1)
306 goto error;
307
308 /* finish RESET signaling? */
309 if ((musb->port1_status & USB_PORT_STAT_RESET)
310 && time_after_eq(jiffies, musb->rh_timer))
311 musb_port_reset(musb, false);
312
313 /* finish RESUME signaling? */
314 if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
315 && time_after_eq(jiffies, musb->rh_timer)) {
316 u8 power;
317
318 power = musb_readb(musb->mregs, MUSB_POWER);
319 power &= ~MUSB_POWER_RESUME;
320 DBG(4, "root port resume stopped, power %02x\n",
321 power);
322 musb_writeb(musb->mregs, MUSB_POWER, power);
323
324 /* ISSUE: DaVinci (RTL 1.300) disconnects after
325 * resume of high speed peripherals (but not full
326 * speed ones).
327 */
328
329 musb->is_active = 1;
330 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
331 | MUSB_PORT_STAT_RESUME);
332 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
333 usb_hcd_poll_rh_status(musb_to_hcd(musb));
334 /* NOTE: it might really be A_WAIT_BCON ... */
335 musb->xceiv.state = OTG_STATE_A_HOST;
336 }
337
338 put_unaligned(cpu_to_le32(musb->port1_status
339 & ~MUSB_PORT_STAT_RESUME),
340 (__le32 *) buf);
341
342 /* port change status is more interesting */
343 DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
344 musb->port1_status);
345 break;
346 case SetPortFeature:
347 if ((wIndex & 0xff) != 1)
348 goto error;
349
350 switch (wValue) {
351 case USB_PORT_FEAT_POWER:
352 /* NOTE: this controller has a strange state machine
353 * that involves "requesting sessions" according to
354 * magic side effects from incompletely-described
355 * rules about startup...
356 *
357 * This call is what really starts the host mode; be
358 * very careful about side effects if you reorder any
359 * initialization logic, e.g. for OTG, or change any
360 * logic relating to VBUS power-up.
361 */
362 if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
363 musb_start(musb);
364 break;
365 case USB_PORT_FEAT_RESET:
366 musb_port_reset(musb, true);
367 break;
368 case USB_PORT_FEAT_SUSPEND:
369 musb_port_suspend(musb, true);
370 break;
371 case USB_PORT_FEAT_TEST:
372 if (unlikely(is_host_active(musb)))
373 goto error;
374
375 wIndex >>= 8;
376 switch (wIndex) {
377 case 1:
378 pr_debug("TEST_J\n");
379 temp = MUSB_TEST_J;
380 break;
381 case 2:
382 pr_debug("TEST_K\n");
383 temp = MUSB_TEST_K;
384 break;
385 case 3:
386 pr_debug("TEST_SE0_NAK\n");
387 temp = MUSB_TEST_SE0_NAK;
388 break;
389 case 4:
390 pr_debug("TEST_PACKET\n");
391 temp = MUSB_TEST_PACKET;
392 musb_load_testpacket(musb);
393 break;
394 case 5:
395 pr_debug("TEST_FORCE_ENABLE\n");
396 temp = MUSB_TEST_FORCE_HOST
397 | MUSB_TEST_FORCE_HS;
398
399 musb_writeb(musb->mregs, MUSB_DEVCTL,
400 MUSB_DEVCTL_SESSION);
401 break;
402 case 6:
403 pr_debug("TEST_FIFO_ACCESS\n");
404 temp = MUSB_TEST_FIFO_ACCESS;
405 break;
406 default:
407 goto error;
408 }
409 musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
410 break;
411 default:
412 goto error;
413 }
414 DBG(5, "set feature %d\n", wValue);
415 musb->port1_status |= 1 << wValue;
416 break;
417
418 default:
419error:
420 /* "protocol stall" on error */
421 retval = -EPIPE;
422 }
423 spin_unlock_irqrestore(&musb->lock, flags);
424 return retval;
425}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 000000000000..9ba8fb7fcd24
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
1/*
2 * MUSB OTG driver - support for Mentor's DMA controller
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2007 by Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
24 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
27 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33#include <linux/device.h>
34#include <linux/interrupt.h>
35#include <linux/platform_device.h>
36#include "musb_core.h"
37
38#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
39#include "omap2430.h"
40#endif
41
42#define MUSB_HSDMA_BASE 0x200
43#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
44#define MUSB_HSDMA_CONTROL 0x4
45#define MUSB_HSDMA_ADDRESS 0x8
46#define MUSB_HSDMA_COUNT 0xc
47
48#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset)		\
49		(MUSB_HSDMA_BASE + ((_bChannel) << 4) + (_offset))
50
51/* control register (16-bit): */
52#define MUSB_HSDMA_ENABLE_SHIFT 0
53#define MUSB_HSDMA_TRANSMIT_SHIFT 1
54#define MUSB_HSDMA_MODE1_SHIFT 2
55#define MUSB_HSDMA_IRQENABLE_SHIFT 3
56#define MUSB_HSDMA_ENDPOINT_SHIFT 4
57#define MUSB_HSDMA_BUSERROR_SHIFT 8
58#define MUSB_HSDMA_BURSTMODE_SHIFT 9
59#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
60#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
61#define MUSB_HSDMA_BURSTMODE_INCR4 1
62#define MUSB_HSDMA_BURSTMODE_INCR8 2
63#define MUSB_HSDMA_BURSTMODE_INCR16 3
64
65#define MUSB_HSDMA_CHANNELS 8
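
Each DMA channel owns a 16-byte register bank above MUSB_HSDMA_BASE; a standalone check of the addresses the offset macro produces (macros copied from above):

#include <stdio.h>

#define MUSB_HSDMA_BASE		0x200
#define MUSB_HSDMA_CONTROL	0x4
#define MUSB_HSDMA_ADDRESS	0x8
#define MUSB_HSDMA_COUNT	0xc
#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \
	(MUSB_HSDMA_BASE + ((_bChannel) << 4) + (_offset))

int main(void)
{
	int ch = 2;

	printf("ch%d: control 0x%03x address 0x%03x count 0x%03x\n", ch,
		MUSB_HSDMA_CHANNEL_OFFSET(ch, MUSB_HSDMA_CONTROL),
		MUSB_HSDMA_CHANNEL_OFFSET(ch, MUSB_HSDMA_ADDRESS),
		MUSB_HSDMA_CHANNEL_OFFSET(ch, MUSB_HSDMA_COUNT));
	return 0;	/* ch2: 0x224, 0x228, 0x22c */
}
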
66
67struct musb_dma_controller;
68
69struct musb_dma_channel {
70 struct dma_channel Channel;
71 struct musb_dma_controller *controller;
72 u32 dwStartAddress;
73 u32 len;
74 u16 wMaxPacketSize;
75 u8 bIndex;
76 u8 epnum;
77 u8 transmit;
78};
79
80struct musb_dma_controller {
81 struct dma_controller Controller;
82 struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
83 void *pDmaPrivate;
84 void __iomem *pCoreBase;
85 u8 bChannelCount;
86 u8 bmUsedChannels;
87 u8 irq;
88};
89
90static int dma_controller_start(struct dma_controller *c)
91{
92 /* nothing to do */
93 return 0;
94}
95
96static void dma_channel_release(struct dma_channel *pChannel);
97
98static int dma_controller_stop(struct dma_controller *c)
99{
100 struct musb_dma_controller *controller =
101 container_of(c, struct musb_dma_controller, Controller);
102 struct musb *musb = (struct musb *) controller->pDmaPrivate;
103 struct dma_channel *pChannel;
104 u8 bBit;
105
106 if (controller->bmUsedChannels != 0) {
107 dev_err(musb->controller,
108 "Stopping DMA controller while channel active\n");
109
110 for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
111 if (controller->bmUsedChannels & (1 << bBit)) {
112 pChannel = &controller->aChannel[bBit].Channel;
113 dma_channel_release(pChannel);
114
115 if (!controller->bmUsedChannels)
116 break;
117 }
118 }
119 }
120 return 0;
121}
122
123static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
124 struct musb_hw_ep *hw_ep, u8 transmit)
125{
126 u8 bBit;
127 struct dma_channel *pChannel = NULL;
128 struct musb_dma_channel *pImplChannel = NULL;
129 struct musb_dma_controller *controller =
130 container_of(c, struct musb_dma_controller, Controller);
131
132 for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
133 if (!(controller->bmUsedChannels & (1 << bBit))) {
134 controller->bmUsedChannels |= (1 << bBit);
135 pImplChannel = &(controller->aChannel[bBit]);
136 pImplChannel->controller = controller;
137 pImplChannel->bIndex = bBit;
138 pImplChannel->epnum = hw_ep->epnum;
139 pImplChannel->transmit = transmit;
140 pChannel = &(pImplChannel->Channel);
141 pChannel->private_data = pImplChannel;
142 pChannel->status = MUSB_DMA_STATUS_FREE;
143 pChannel->max_len = 0x10000;
144 /* Tx => mode 1; Rx => mode 0 */
145 pChannel->desired_mode = transmit;
146 pChannel->actual_len = 0;
147 break;
148 }
149 }
150 return pChannel;
151}
152
153static void dma_channel_release(struct dma_channel *pChannel)
154{
155 struct musb_dma_channel *pImplChannel =
156 (struct musb_dma_channel *) pChannel->private_data;
157
158 pChannel->actual_len = 0;
159 pImplChannel->dwStartAddress = 0;
160 pImplChannel->len = 0;
161
162 pImplChannel->controller->bmUsedChannels &=
163 ~(1 << pImplChannel->bIndex);
164
165 pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
166}
167
168static void configure_channel(struct dma_channel *pChannel,
169 u16 packet_sz, u8 mode,
170 dma_addr_t dma_addr, u32 len)
171{
172 struct musb_dma_channel *pImplChannel =
173 (struct musb_dma_channel *) pChannel->private_data;
174 struct musb_dma_controller *controller = pImplChannel->controller;
175 void __iomem *mbase = controller->pCoreBase;
176 u8 bChannel = pImplChannel->bIndex;
177 u16 csr = 0;
178
179 DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
180 pChannel, packet_sz, dma_addr, len, mode);
181
182 if (mode) {
183 csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
184 BUG_ON(len < packet_sz);
185
186 if (packet_sz >= 64) {
187 csr |= MUSB_HSDMA_BURSTMODE_INCR16
188 << MUSB_HSDMA_BURSTMODE_SHIFT;
189 } else if (packet_sz >= 32) {
190 csr |= MUSB_HSDMA_BURSTMODE_INCR8
191 << MUSB_HSDMA_BURSTMODE_SHIFT;
192 } else if (packet_sz >= 16) {
193 csr |= MUSB_HSDMA_BURSTMODE_INCR4
194 << MUSB_HSDMA_BURSTMODE_SHIFT;
195 }
196 }
197
198 csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
199 | (1 << MUSB_HSDMA_ENABLE_SHIFT)
200 | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
201 | (pImplChannel->transmit
202 ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
203 : 0);
204
205 /* address/count */
206 musb_writel(mbase,
207 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
208 dma_addr);
209 musb_writel(mbase,
210 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
211 len);
212
213 /* control (this should start things) */
214 musb_writew(mbase,
215 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
216 csr);
217}
218
219static int dma_channel_program(struct dma_channel *pChannel,
220 u16 packet_sz, u8 mode,
221 dma_addr_t dma_addr, u32 len)
222{
223 struct musb_dma_channel *pImplChannel =
224 (struct musb_dma_channel *) pChannel->private_data;
225
226 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
227 pImplChannel->epnum,
228 pImplChannel->transmit ? "Tx" : "Rx",
229 packet_sz, dma_addr, len, mode);
230
231 BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
232 pChannel->status == MUSB_DMA_STATUS_BUSY);
233
234 pChannel->actual_len = 0;
235 pImplChannel->dwStartAddress = dma_addr;
236 pImplChannel->len = len;
237 pImplChannel->wMaxPacketSize = packet_sz;
238 pChannel->status = MUSB_DMA_STATUS_BUSY;
239
240 if ((mode == 1) && (len >= packet_sz))
241 configure_channel(pChannel, packet_sz, 1, dma_addr, len);
242 else
243 configure_channel(pChannel, packet_sz, 0, dma_addr, len);
244
245 return true;
246}
247
248static int dma_channel_abort(struct dma_channel *pChannel)
249{
250 struct musb_dma_channel *pImplChannel =
251 (struct musb_dma_channel *) pChannel->private_data;
252 u8 bChannel = pImplChannel->bIndex;
253 void __iomem *mbase = pImplChannel->controller->pCoreBase;
254 u16 csr;
255
256 if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
257 if (pImplChannel->transmit) {
258
259 csr = musb_readw(mbase,
260 MUSB_EP_OFFSET(pImplChannel->epnum,
261 MUSB_TXCSR));
262 csr &= ~(MUSB_TXCSR_AUTOSET |
263 MUSB_TXCSR_DMAENAB |
264 MUSB_TXCSR_DMAMODE);
265 musb_writew(mbase,
266 MUSB_EP_OFFSET(pImplChannel->epnum,
267 MUSB_TXCSR),
268 csr);
269 } else {
270 csr = musb_readw(mbase,
271 MUSB_EP_OFFSET(pImplChannel->epnum,
272 MUSB_RXCSR));
273 csr &= ~(MUSB_RXCSR_AUTOCLEAR |
274 MUSB_RXCSR_DMAENAB |
275 MUSB_RXCSR_DMAMODE);
276 musb_writew(mbase,
277 MUSB_EP_OFFSET(pImplChannel->epnum,
278 MUSB_RXCSR),
279 csr);
280 }
281
282 musb_writew(mbase,
283 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
284 0);
285 musb_writel(mbase,
286 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
287 0);
288 musb_writel(mbase,
289 MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
290 0);
291
292 pChannel->status = MUSB_DMA_STATUS_FREE;
293 }
294 return 0;
295}
296
297static irqreturn_t dma_controller_irq(int irq, void *private_data)
298{
299 struct musb_dma_controller *controller =
300 (struct musb_dma_controller *)private_data;
301 struct musb_dma_channel *pImplChannel;
302 struct musb *musb = controller->pDmaPrivate;
303 void __iomem *mbase = controller->pCoreBase;
304 struct dma_channel *pChannel;
305 u8 bChannel;
306 u16 csr;
307 u32 dwAddress;
308 u8 int_hsdma;
309 irqreturn_t retval = IRQ_NONE;
310 unsigned long flags;
311
312 spin_lock_irqsave(&musb->lock, flags);
313
314 int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
315 if (!int_hsdma)
316 goto done;
317
318 for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
319 if (int_hsdma & (1 << bChannel)) {
320 pImplChannel = (struct musb_dma_channel *)
321 &(controller->aChannel[bChannel]);
322 pChannel = &pImplChannel->Channel;
323
324 csr = musb_readw(mbase,
325 MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
326 MUSB_HSDMA_CONTROL));
327
328 if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
329 pImplChannel->Channel.status =
330 MUSB_DMA_STATUS_BUS_ABORT;
331 else {
332 u8 devctl;
333
334 dwAddress = musb_readl(mbase,
335 MUSB_HSDMA_CHANNEL_OFFSET(
336 bChannel,
337 MUSB_HSDMA_ADDRESS));
338 pChannel->actual_len = dwAddress
339 - pImplChannel->dwStartAddress;
340
341 DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
342 pChannel, pImplChannel->dwStartAddress,
343 dwAddress, pChannel->actual_len,
344 pImplChannel->len,
345 (pChannel->actual_len
346 < pImplChannel->len) ?
347 "=> reconfig 0" : "=> complete");
348
349 devctl = musb_readb(mbase, MUSB_DEVCTL);
350
351 pChannel->status = MUSB_DMA_STATUS_FREE;
352
353 /* completed */
354 if ((devctl & MUSB_DEVCTL_HM)
355 && (pImplChannel->transmit)
356 && ((pChannel->desired_mode == 0)
357 || (pChannel->actual_len &
358 (pImplChannel->wMaxPacketSize - 1)))
359 ) {
360 /* Send out the packet */
361 musb_ep_select(mbase,
362 pImplChannel->epnum);
363 musb_writew(mbase, MUSB_EP_OFFSET(
364 pImplChannel->epnum,
365 MUSB_TXCSR),
366 MUSB_TXCSR_TXPKTRDY);
367 } else
368 musb_dma_completion(
369 musb,
370 pImplChannel->epnum,
371 pImplChannel->transmit);
372 }
373 }
374 }
375 retval = IRQ_HANDLED;
376done:
377 spin_unlock_irqrestore(&musb->lock, flags);
378 return retval;
379}
380
381void dma_controller_destroy(struct dma_controller *c)
382{
383 struct musb_dma_controller *controller;
384
385 controller = container_of(c, struct musb_dma_controller, Controller);
386 if (!controller)
387 return;
388
389 if (controller->irq)
390 free_irq(controller->irq, c);
391
392 kfree(controller);
393}
394
395struct dma_controller *__init
396dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
397{
398 struct musb_dma_controller *controller;
399 struct device *dev = musb->controller;
400 struct platform_device *pdev = to_platform_device(dev);
401 int irq = platform_get_irq(pdev, 1);
402
403	if (irq <= 0) {		/* platform_get_irq() returns a negative errno on failure */
404 dev_err(dev, "No DMA interrupt line!\n");
405 return NULL;
406 }
407
408 controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
409 if (!controller)
410 return NULL;
411
412 controller->bChannelCount = MUSB_HSDMA_CHANNELS;
413 controller->pDmaPrivate = musb;
414 controller->pCoreBase = pCoreBase;
415
416 controller->Controller.start = dma_controller_start;
417 controller->Controller.stop = dma_controller_stop;
418 controller->Controller.channel_alloc = dma_channel_allocate;
419 controller->Controller.channel_release = dma_channel_release;
420 controller->Controller.channel_program = dma_channel_program;
421 controller->Controller.channel_abort = dma_channel_abort;
422
423 if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
424 musb->controller->bus_id, &controller->Controller)) {
425 dev_err(dev, "request_irq %d failed!\n", irq);
426 dma_controller_destroy(&controller->Controller);
427 return NULL;
428 }
429
430 controller->irq = irq;
431
432 return &controller->Controller;
433}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 000000000000..298b22e6ad0d
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright (C) 2005-2007 by Texas Instruments
3 * Some code has been taken from tusb6010.c
4 * Copyrights for that are attributable to:
5 * Copyright (C) 2006 Nokia Corporation
6 * Jarkko Nikula <jarkko.nikula@nokia.com>
7 * Tony Lindgren <tony@atomide.com>
8 *
9 * This file is part of the Inventra Controller Driver for Linux.
10 *
11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software
14 * Foundation.
15 *
16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/list.h>
34#include <linux/clk.h>
35#include <linux/io.h>
36
37#include <asm/mach-types.h>
38#include <asm/arch/hardware.h>
39#include <asm/arch/mux.h>
40
41#include "musb_core.h"
42#include "omap2430.h"
43
44#ifdef CONFIG_ARCH_OMAP3430
45#define get_cpu_rev() 2
46#endif
47
48#define MUSB_TIMEOUT_A_WAIT_BCON 1100
49
50static struct timer_list musb_idle_timer;
51
52static void musb_do_idle(unsigned long _musb)
53{
54 struct musb *musb = (void *)_musb;
55 unsigned long flags;
56 u8 power;
57 u8 devctl;
58
59 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
60
61 spin_lock_irqsave(&musb->lock, flags);
62
63 switch (musb->xceiv.state) {
64 case OTG_STATE_A_WAIT_BCON:
65 devctl &= ~MUSB_DEVCTL_SESSION;
66 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
67
68 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
69 if (devctl & MUSB_DEVCTL_BDEVICE) {
70 musb->xceiv.state = OTG_STATE_B_IDLE;
71 MUSB_DEV_MODE(musb);
72 } else {
73 musb->xceiv.state = OTG_STATE_A_IDLE;
74 MUSB_HST_MODE(musb);
75 }
76 break;
77#ifdef CONFIG_USB_MUSB_HDRC_HCD
78 case OTG_STATE_A_SUSPEND:
79 /* finish RESUME signaling? */
80 if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
81 power = musb_readb(musb->mregs, MUSB_POWER);
82 power &= ~MUSB_POWER_RESUME;
83 DBG(1, "root port resume stopped, power %02x\n", power);
84 musb_writeb(musb->mregs, MUSB_POWER, power);
85 musb->is_active = 1;
86 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
87 | MUSB_PORT_STAT_RESUME);
88 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
89 usb_hcd_poll_rh_status(musb_to_hcd(musb));
90 /* NOTE: it might really be A_WAIT_BCON ... */
91 musb->xceiv.state = OTG_STATE_A_HOST;
92 }
93 break;
94#endif
95#ifdef CONFIG_USB_MUSB_HDRC_HCD
96 case OTG_STATE_A_HOST:
97 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
98 if (devctl & MUSB_DEVCTL_BDEVICE)
99 musb->xceiv.state = OTG_STATE_B_IDLE;
100 else
101 musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
		break;
102#endif
103 default:
104 break;
105 }
106 spin_unlock_irqrestore(&musb->lock, flags);
107}
108
109
110void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
111{
112 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
113 static unsigned long last_timer;
114
115 if (timeout == 0)
116 timeout = default_timeout;
117
118 /* Never idle if active, or when VBUS timeout is not set as host */
119 if (musb->is_active || ((musb->a_wait_bcon == 0)
120 && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
121 DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
122 del_timer(&musb_idle_timer);
123 last_timer = jiffies;
124 return;
125 }
126
127 if (time_after(last_timer, timeout)) {
128 if (!timer_pending(&musb_idle_timer))
129 last_timer = timeout;
130 else {
131 DBG(4, "Longer idle timer already pending, ignoring\n");
132 return;
133 }
134 }
135 last_timer = timeout;
136
137	DBG(4, "%s inactive, setting idle timer for %lu ms\n",
138 otg_state_string(musb),
139 (unsigned long)jiffies_to_msecs(timeout - jiffies));
140 mod_timer(&musb_idle_timer, timeout);
141}
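
The last_timer bookkeeping implements a simple rule: when a timer with a later expiry is already pending, a new, earlier request is ignored rather than shortening the idle window. A userspace toy of just that comparison, with time_after() reimplemented using the kernel's wrap-safe signed-difference trick:

#include <stdio.h>

#define time_after(a, b)	((long)(b) - (long)(a) < 0)

int main(void)
{
	unsigned long last_timer = 1000;	/* expiry already programmed */
	unsigned long request = 900;		/* new, earlier timeout */
	int pending = 1;			/* timer_pending() said yes */

	if (time_after(last_timer, request) && pending)
		printf("keep expiry %lu, ignore %lu\n", last_timer, request);
	else
		printf("reprogram timer to %lu\n", request);
	return 0;
}
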
142
143void musb_platform_enable(struct musb *musb)
144{
145}
146void musb_platform_disable(struct musb *musb)
147{
148}
149static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
150{
151}
152
153static void omap_set_vbus(struct musb *musb, int is_on)
154{
155 u8 devctl;
156 /* HDRC controls CPEN, but beware current surges during device
157 * connect. They can trigger transient overcurrent conditions
158 * that must be ignored.
159 */
160
161 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
162
163 if (is_on) {
164 musb->is_active = 1;
165 musb->xceiv.default_a = 1;
166 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
167 devctl |= MUSB_DEVCTL_SESSION;
168
169 MUSB_HST_MODE(musb);
170 } else {
171 musb->is_active = 0;
172
173 /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
174 * jumping right to B_IDLE...
175 */
176
177 musb->xceiv.default_a = 0;
178 musb->xceiv.state = OTG_STATE_B_IDLE;
179 devctl &= ~MUSB_DEVCTL_SESSION;
180
181 MUSB_DEV_MODE(musb);
182 }
183 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
184
185 DBG(1, "VBUS %s, devctl %02x "
186 /* otg %3x conf %08x prcm %08x */ "\n",
187 otg_state_string(musb),
188 musb_readb(musb->mregs, MUSB_DEVCTL));
189}
190static int omap_set_power(struct otg_transceiver *x, unsigned mA)
191{
192 return 0;
193}
194
195static int musb_platform_resume(struct musb *musb);
196
197void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
198{
199 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
200
201 devctl |= MUSB_DEVCTL_SESSION;
202 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
203
204 switch (musb_mode) {
205 case MUSB_HOST:
206 otg_set_host(&musb->xceiv, musb->xceiv.host);
207 break;
208 case MUSB_PERIPHERAL:
209 otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
210 break;
211 case MUSB_OTG:
212 break;
213 }
214}
215
216int __init musb_platform_init(struct musb *musb)
217{
218 u32 l;
219
220#if defined(CONFIG_ARCH_OMAP2430)
221 omap_cfg_reg(AE5_2430_USB0HS_STP);
222#endif
223
224 musb_platform_resume(musb);
225
226 l = omap_readl(OTG_SYSCONFIG);
227 l &= ~ENABLEWAKEUP; /* disable wakeup */
228 l &= ~NOSTDBY; /* remove possible nostdby */
229 l |= SMARTSTDBY; /* enable smart standby */
230 l &= ~AUTOIDLE; /* disable auto idle */
231 l &= ~NOIDLE; /* remove possible noidle */
232 l |= SMARTIDLE; /* enable smart idle */
233 l |= AUTOIDLE; /* enable auto idle */
234 omap_writel(l, OTG_SYSCONFIG);
235
236 l = omap_readl(OTG_INTERFSEL);
237 l |= ULPI_12PIN;
238 omap_writel(l, OTG_INTERFSEL);
239
240 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
241 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
242 omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
243 omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
244 omap_readl(OTG_SIMENABLE));
245
246 omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
247
248 if (is_host_enabled(musb))
249 musb->board_set_vbus = omap_set_vbus;
250 if (is_peripheral_enabled(musb))
251 musb->xceiv.set_power = omap_set_power;
252 musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
253
254 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
255
256 return 0;
257}
258
259int musb_platform_suspend(struct musb *musb)
260{
261 u32 l;
262
263 if (!musb->clock)
264 return 0;
265
266 /* in any role */
267 l = omap_readl(OTG_FORCESTDBY);
268 l |= ENABLEFORCE; /* enable MSTANDBY */
269 omap_writel(l, OTG_FORCESTDBY);
270
271 l = omap_readl(OTG_SYSCONFIG);
272 l |= ENABLEWAKEUP; /* enable wakeup */
273 omap_writel(l, OTG_SYSCONFIG);
274
275 if (musb->xceiv.set_suspend)
276 musb->xceiv.set_suspend(&musb->xceiv, 1);
277
278 if (musb->set_clock)
279 musb->set_clock(musb->clock, 0);
280 else
281 clk_disable(musb->clock);
282
283 return 0;
284}
285
286static int musb_platform_resume(struct musb *musb)
287{
288 u32 l;
289
290 if (!musb->clock)
291 return 0;
292
293 if (musb->xceiv.set_suspend)
294 musb->xceiv.set_suspend(&musb->xceiv, 0);
295
296 if (musb->set_clock)
297 musb->set_clock(musb->clock, 1);
298 else
299 clk_enable(musb->clock);
300
301 l = omap_readl(OTG_SYSCONFIG);
302 l &= ~ENABLEWAKEUP; /* disable wakeup */
303 omap_writel(l, OTG_SYSCONFIG);
304
305 l = omap_readl(OTG_FORCESTDBY);
306 l &= ~ENABLEFORCE; /* disable MSTANDBY */
307 omap_writel(l, OTG_FORCESTDBY);
308
309 return 0;
310}
311
312
313int musb_platform_exit(struct musb *musb)
314{
315
316 omap_vbus_power(musb, 0 /*off*/, 1);
317
318 musb_platform_suspend(musb);
319
320 clk_put(musb->clock);
321	musb->clock = NULL;
322
323 return 0;
324}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 000000000000..786a62071f72
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * The Inventra Controller Driver for Linux is free software; you
5 * can redistribute it and/or modify it under the terms of the GNU
6 * General Public License version 2 as published by the Free Software
7 * Foundation.
8 */
9
10#ifndef __MUSB_OMAP243X_H__
11#define __MUSB_OMAP243X_H__
12
13#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
14#include <asm/arch/hardware.h>
15#include <asm/arch/usb.h>
16
17/*
18 * OMAP2430-specific definitions
19 */
20
21#define MENTOR_BASE_OFFSET 0
22#if defined(CONFIG_ARCH_OMAP2430)
23#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
24#elif defined(CONFIG_ARCH_OMAP3430)
25#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
26#endif
27#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
28#define OTG_REVISION OMAP_HSOTG(0x0)
29#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
30# define MIDLEMODE 12 /* bit position */
31# define FORCESTDBY (0 << MIDLEMODE)
32# define NOSTDBY (1 << MIDLEMODE)
33# define SMARTSTDBY (2 << MIDLEMODE)
34# define SIDLEMODE 3 /* bit position */
35# define FORCEIDLE (0 << SIDLEMODE)
36# define NOIDLE (1 << SIDLEMODE)
37# define SMARTIDLE (2 << SIDLEMODE)
38# define ENABLEWAKEUP (1 << 2)
39# define SOFTRST (1 << 1)
40# define AUTOIDLE (1 << 0)
41#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
42# define RESETDONE (1 << 0)
43#define OTG_INTERFSEL OMAP_HSOTG(0xc)
44# define EXTCP (1 << 2)
45# define PHYSEL 0 /* bit position */
46# define UTMI_8BIT (0 << PHYSEL)
47# define ULPI_12PIN (1 << PHYSEL)
48# define ULPI_8PIN (2 << PHYSEL)
49#define OTG_SIMENABLE OMAP_HSOTG(0x10)
50# define TM1 (1 << 0)
51#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
52# define ENABLEFORCE (1 << 0)
53
54#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
55
56#endif /* __MUSB_OMAP243X_H__ */
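
For a concrete view of what musb_platform_init() programs into OTG_SYSCONFIG with these fields, a standalone snippet (bit positions copied from above):

#include <stdio.h>

#define MIDLEMODE	12
#define SMARTSTDBY	(2 << MIDLEMODE)
#define SIDLEMODE	3
#define SMARTIDLE	(2 << SIDLEMODE)
#define AUTOIDLE	(1 << 0)

int main(void)
{
	unsigned long l = SMARTSTDBY | SMARTIDLE | AUTOIDLE;

	/* smart standby, smart idle, autoidle -> 0x02011 */
	printf("OTG_SYSCONFIG 0x%05lx (midle %lu, sidle %lu, auto %lu)\n",
		l, (l >> MIDLEMODE) & 3, (l >> SIDLEMODE) & 3, l & 1);
	return 0;
}
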
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 000000000000..b73b036f3d77
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
1/*
2 * TUSB6010 USB 2.0 OTG Dual Role controller
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Jarkko Nikula <jarkko.nikula@nokia.com>
6 * Tony Lindgren <tony@atomide.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Notes:
13 * - Driver assumes that interface to external host (main CPU) is
14 * configured for NOR FLASH interface instead of VLYNQ serial
15 * interface.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/usb.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25
26#include "musb_core.h"
27
28static void tusb_source_power(struct musb *musb, int is_on);
29
30#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
31#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
32
33/*
34 * Checks the revision. We need to use the DMA register as 3.0 does not
35 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
36 */
37u8 tusb_get_revision(struct musb *musb)
38{
39 void __iomem *tbase = musb->ctrl_base;
40 u32 die_id;
41 u8 rev;
42
43 rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
44 if (TUSB_REV_MAJOR(rev) == 3) {
45 die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
46 TUSB_DIDR1_HI));
47 if (die_id >= TUSB_DIDR1_HI_REV_31)
48 rev |= 1;
49 }
50
51 return rev;
52}
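
A hypothetical caller, showing how the composite value is meant to be consumed; the 3.1 check matters because the WBUS quirk further down is only applied to rev 3.0 silicon:

	u8 rev = tusb_get_revision(musb);

	/* major in the high nibble, minor in the low: 0x31 -> 3.1 */
	if (TUSB_REV_MAJOR(rev) == 3 && TUSB_REV_MINOR(rev) >= 1)
		pr_info("tusb: rev %d.%d, WBUS quirk not needed\n",
			TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
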
53
54static int __init tusb_print_revision(struct musb *musb)
55{
56 void __iomem *tbase = musb->ctrl_base;
57 u8 rev;
58
59 rev = tusb_get_revision(musb);
60
61 pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
62 "prcm",
63 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
64 TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
65 "int",
66 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
67 TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
68 "gpio",
69 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
70 TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
71 "dma",
72 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
73 TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
74 "dieid",
75 TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
76 "rev",
77 TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
78
79	return rev;	/* already read above; avoid a second register pass */
80}
81
82#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
83 | TUSB_PHY_OTG_CTRL_TESTM0)
84
85/*
86 * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
87 * Disables power detection in PHY for the duration of idle.
88 */
89static void tusb_wbus_quirk(struct musb *musb, int enabled)
90{
91 void __iomem *tbase = musb->ctrl_base;
92 static u32 phy_otg_ctrl, phy_otg_ena;
93 u32 tmp;
94
95 if (enabled) {
96 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
97 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
98 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
99 | phy_otg_ena | WBUS_QUIRK_MASK;
100 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
101 tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
102 tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
103 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
104 DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
105 musb_readl(tbase, TUSB_PHY_OTG_CTRL),
106 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
107 } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
108 & TUSB_PHY_OTG_CTRL_TESTM2) {
109 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
110 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
111 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
112 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
113 DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
114 musb_readl(tbase, TUSB_PHY_OTG_CTRL),
115 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
116 phy_otg_ctrl = 0;
117 phy_otg_ena = 0;
118 }
119}
120
121/*
122 * TUSB 6010 may use a parallel bus that doesn't support byte ops;
123 * so both loading and unloading FIFOs need explicit byte counts.
124 */
125
126static inline void
127tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
128{
129 u32 val;
130 int i;
131
132 if (len > 4) {
133 for (i = 0; i < (len >> 2); i++) {
134 memcpy(&val, buf, 4);
135 musb_writel(fifo, 0, val);
136 buf += 4;
137 }
138 len %= 4;
139 }
140 if (len > 0) {
141		/* Write the remaining 1-3 bytes to the FIFO */
142 memcpy(&val, buf, len);
143 musb_writel(fifo, 0, val);
144 }
145}
146
147static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
148 void __iomem *buf, u16 len)
149{
150 u32 val;
151 int i;
152
153 if (len > 4) {
154 for (i = 0; i < (len >> 2); i++) {
155 val = musb_readl(fifo, 0);
156 memcpy(buf, &val, 4);
157 buf += 4;
158 }
159 len %= 4;
160 }
161 if (len > 0) {
162		/* Read the remaining 1-3 bytes from the FIFO */
163 val = musb_readl(fifo, 0);
164 memcpy(buf, &val, len);
165 }
166}
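
To see the byte accounting concretely, a userspace toy that pushes a 7-byte buffer through the same word-then-tail split; the fake fifo_writel() just counts 32-bit accesses:

#include <stdio.h>
#include <string.h>

static unsigned writes;
static void fifo_writel(unsigned val) { (void)val; writes++; }

int main(void)
{
	const unsigned char buf[] = "ABCDEFG";	/* 7 payload bytes */
	int len = 7, i;
	unsigned val;

	for (i = 0; i < (len >> 2); i++) {	/* one full 32-bit word */
		memcpy(&val, buf + 4 * i, 4);
		fifo_writel(val);
	}
	if (len & 3) {				/* 3-byte tail, padded out */
		val = 0;
		memcpy(&val, buf + (len & ~3), len & 3);
		fifo_writel(val);
	}
	printf("%d bytes -> %u FIFO word writes\n", len, writes);
	return 0;	/* 7 bytes -> 2 writes */
}
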
167
168void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
169{
170 void __iomem *ep_conf = hw_ep->conf;
171 void __iomem *fifo = hw_ep->fifo;
172 u8 epnum = hw_ep->epnum;
173
174 prefetch(buf);
175
176 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
177 'T', epnum, fifo, len, buf);
178
179 if (epnum)
180 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
181 TUSB_EP_CONFIG_XFR_SIZE(len));
182 else
183 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
184 TUSB_EP0_CONFIG_XFR_SIZE(len));
185
186 if (likely((0x01 & (unsigned long) buf) == 0)) {
187
188 /* Best case is 32bit-aligned destination address */
189 if ((0x02 & (unsigned long) buf) == 0) {
190 if (len >= 4) {
191 writesl(fifo, buf, len >> 2);
192 buf += (len & ~0x03);
193 len &= 0x03;
194 }
195 } else {
196 if (len >= 2) {
197 u32 val;
198 int i;
199
200 /* Cannot use writesw, fifo is 32-bit */
201 for (i = 0; i < (len >> 2); i++) {
202 val = (u32)(*(u16 *)buf);
203 buf += 2;
204 val |= (*(u16 *)buf) << 16;
205 buf += 2;
206 musb_writel(fifo, 0, val);
207 }
208 len &= 0x03;
209 }
210 }
211 }
212
213 if (len > 0)
214 tusb_fifo_write_unaligned(fifo, buf, len);
215}
216
217void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
218{
219 void __iomem *ep_conf = hw_ep->conf;
220 void __iomem *fifo = hw_ep->fifo;
221 u8 epnum = hw_ep->epnum;
222
223 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
224 'R', epnum, fifo, len, buf);
225
226 if (epnum)
227 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
228 TUSB_EP_CONFIG_XFR_SIZE(len));
229 else
230 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
231
232 if (likely((0x01 & (unsigned long) buf) == 0)) {
233
234 /* Best case is 32bit-aligned destination address */
235 if ((0x02 & (unsigned long) buf) == 0) {
236 if (len >= 4) {
237 readsl(fifo, buf, len >> 2);
238 buf += (len & ~0x03);
239 len &= 0x03;
240 }
241 } else {
242 if (len >= 2) {
243 u32 val;
244 int i;
245
246 /* Cannot use readsw, fifo is 32-bit */
247 for (i = 0; i < (len >> 2); i++) {
248 val = musb_readl(fifo, 0);
249 *(u16 *)buf = (u16)(val & 0xffff);
250 buf += 2;
251 *(u16 *)buf = (u16)(val >> 16);
252 buf += 2;
253 }
254 len &= 0x03;
255 }
256 }
257 }
258
259 if (len > 0)
260 tusb_fifo_read_unaligned(fifo, buf, len);
261}
262
263#ifdef CONFIG_USB_GADGET_MUSB_HDRC
264
265/* This is used by gadget drivers, and OTG transceiver logic, allowing
266 * at most mA current to be drawn from VBUS during a Default-B session
267 * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
268 * mode), or low power Default-B sessions, something else supplies power.
269 * Caller must take care of locking.
270 */
271static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
272{
273 struct musb *musb = container_of(x, struct musb, xceiv);
274 void __iomem *tbase = musb->ctrl_base;
275 u32 reg;
276
277 /*
278 * Keep clock active when enabled. Note that this is not tied to
279 * drawing VBUS, as with OTG mA can be less than musb->min_power.
280 */
281 if (musb->set_clock) {
282 if (mA)
283 musb->set_clock(musb->clock, 1);
284 else
285 musb->set_clock(musb->clock, 0);
286 }
287
288 /* tps65030 seems to consume max 100mA, with maybe 60mA available
289 * (measured on one board) for things other than tps and tusb.
290 *
291 * Boards sharing the CPU clock with CLKIN will need to prevent
292 * certain idle sleep states while the USB link is active.
293 *
294 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
295 * The actual current usage would be very board-specific. For now,
296 * it's simpler to just use an aggregate (also board-specific).
297 */
298 if (x->default_a || mA < (musb->min_power << 1))
299 mA = 0;
300
301 reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
302 if (mA) {
303 musb->is_bus_powered = 1;
304 reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
305 } else {
306 musb->is_bus_powered = 0;
307 reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
308 }
309 musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
310
311 DBG(2, "draw max %d mA VBUS\n", mA);
312 return 0;
313}
314
315#else
316#define tusb_draw_power NULL
317#endif
318
319/* workaround for issue 13: change clock during chip idle
320 * (to be fixed in rev3 silicon) ... symptoms include disconnect
321 * or looping suspend/resume cycles
322 */
323static void tusb_set_clock_source(struct musb *musb, unsigned mode)
324{
325 void __iomem *tbase = musb->ctrl_base;
326 u32 reg;
327
328 reg = musb_readl(tbase, TUSB_PRCM_CONF);
329 reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
330
331 /* 0 = refclk (clkin, XI)
332 * 1 = PHY 60 MHz (internal PLL)
333 * 2 = not supported
334 * 3 = what?
335 */
336 if (mode > 0)
337 reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
338
339 musb_writel(tbase, TUSB_PRCM_CONF, reg);
340
341 /* FIXME tusb6010_platform_retime(mode == 0); */
342}
343
344/*
345 * Idle TUSB6010 until next wake-up event; NOR access always wakes.
346 * Other code ensures that we idle unless we're connected _and_ the
347 * USB link is not suspended ... and tells us the relevant wakeup
348 * events. SW_EN for voltage is handled separately.
349 */
350void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
351{
352 void __iomem *tbase = musb->ctrl_base;
353 u32 reg;
354
355 if ((wakeup_enables & TUSB_PRCM_WBUS)
356 && (tusb_get_revision(musb) == TUSB_REV_30))
357 tusb_wbus_quirk(musb, 1);
358
359 tusb_set_clock_source(musb, 0);
360
361 wakeup_enables |= TUSB_PRCM_WNORCS;
362 musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
363
364 /* REVISIT writeup of WID implies that if WID set and ID is grounded,
365 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
366 * Presumably that's mostly to save power, hence WID is immaterial ...
367 */
368
369 reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
370 /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
371 if (is_host_active(musb)) {
372 reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
373 reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
374 } else {
375 reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
376 reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
377 }
378 reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
379 musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
380
381 DBG(6, "idle, wake on %02x\n", wakeup_enables);
382}
383
384/*
385 * Updates cable VBUS status. Caller must take care of locking.
386 */
387int musb_platform_get_vbus_status(struct musb *musb)
388{
389 void __iomem *tbase = musb->ctrl_base;
390 u32 otg_stat, prcm_mngmt;
391 int ret = 0;
392
393 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
394 prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
395
396 /* Temporarily enable VBUS detection if it was disabled for
397 * suspend mode. Unless it's enabled otg_stat and devctl will
398 * not show correct VBUS state.
399 */
400 if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
401 u32 tmp = prcm_mngmt;
402 tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
403 musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
404 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
405 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
406 }
407
408 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
409 ret = 1;
410
411 return ret;
412}
413
414static struct timer_list musb_idle_timer;
415
416static void musb_do_idle(unsigned long _musb)
417{
418 struct musb *musb = (void *)_musb;
419 unsigned long flags;
420
421 spin_lock_irqsave(&musb->lock, flags);
422
423 switch (musb->xceiv.state) {
424 case OTG_STATE_A_WAIT_BCON:
425 if ((musb->a_wait_bcon != 0)
426 && (musb->idle_timeout == 0
427 || time_after(jiffies, musb->idle_timeout))) {
428 DBG(4, "Nothing connected %s, turning off VBUS\n",
429 otg_state_string(musb));
430 }
431 /* FALLTHROUGH */
432 case OTG_STATE_A_IDLE:
433 tusb_source_power(musb, 0);
434 default:
435 break;
436 }
437
438 if (!musb->is_active) {
439 u32 wakeups;
440
441 /* wait until khubd handles port change status */
442 if (is_host_active(musb) && (musb->port1_status >> 16))
443 goto done;
444
445#ifdef CONFIG_USB_GADGET_MUSB_HDRC
446 if (is_peripheral_enabled(musb) && !musb->gadget_driver)
447 wakeups = 0;
448 else {
449 wakeups = TUSB_PRCM_WHOSTDISCON
450 | TUSB_PRCM_WBUS
451 | TUSB_PRCM_WVBUS;
452 if (is_otg_enabled(musb))
453 wakeups |= TUSB_PRCM_WID;
454 }
455#else
456 wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
457#endif
458 tusb_allow_idle(musb, wakeups);
459 }
460done:
461 spin_unlock_irqrestore(&musb->lock, flags);
462}
463
464/*
465 * Maybe put TUSB6010 into idle mode depending on USB link status,
466 * like "disconnected" or "suspended". We'll be woken out of it by
467 * connect, resume, or disconnect.
468 *
469 * Needs to be called as the last function wherever there is
470 * register access to TUSB6010, because of NOR flash wake-up.
471 * Caller should own controller spinlock.
472 *
473 * Delay because peripheral enables D+ pullup 3msec after SE0, and
474 * we don't want to treat that full speed J as a wakeup event.
475 * ... peripherals must draw only suspend current after 10 msec.
476 */
477void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
478{
479 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
480 static unsigned long last_timer;
481
482 if (timeout == 0)
483 timeout = default_timeout;
484
485 /* Never idle if active, or when VBUS timeout is not set as host */
486 if (musb->is_active || ((musb->a_wait_bcon == 0)
487 && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
488 DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
489 del_timer(&musb_idle_timer);
490 last_timer = jiffies;
491 return;
492 }
493
494 if (time_after(last_timer, timeout)) {
495 if (!timer_pending(&musb_idle_timer))
496 last_timer = timeout;
497 else {
498 DBG(4, "Longer idle timer already pending, ignoring\n");
499 return;
500 }
501 }
502 last_timer = timeout;
503
504	DBG(4, "%s inactive, starting idle timer for %lu ms\n",
505 otg_state_string(musb),
506 (unsigned long)jiffies_to_msecs(timeout - jiffies));
507 mod_timer(&musb_idle_timer, timeout);
508}
509
510/* ticks of 60 MHz clock */
511#define DEVCLOCK 60000000
512#define OTG_TIMER_MS(msecs) ((msecs) \
513 ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
514 | TUSB_DEV_OTG_TIMER_ENABLE) \
515 : 0)
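/* Worked example (for illustration only): OTG_TIMER_MS(100) expands to
 * TUSB_DEV_OTG_TIMER_VAL((60000000/1000) * 100) | TUSB_DEV_OTG_TIMER_ENABLE,
 * i.e. 6000000 ticks of the 60 MHz clock with the enable bit (1 << 31)
 * set; OTG_TIMER_MS(0) yields 0, which leaves the timer disabled.
 */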
516
517static void tusb_source_power(struct musb *musb, int is_on)
518{
519 void __iomem *tbase = musb->ctrl_base;
520 u32 conf, prcm, timer;
521 u8 devctl;
522
523 /* HDRC controls CPEN, but beware current surges during device
524 * connect. They can trigger transient overcurrent conditions
525 * that must be ignored.
526 */
527
528 prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
529 conf = musb_readl(tbase, TUSB_DEV_CONF);
530 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
531
532 if (is_on) {
533 if (musb->set_clock)
534 musb->set_clock(musb->clock, 1);
535 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
536 musb->xceiv.default_a = 1;
537 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
538 devctl |= MUSB_DEVCTL_SESSION;
539
540 conf |= TUSB_DEV_CONF_USB_HOST_MODE;
541 MUSB_HST_MODE(musb);
542 } else {
543 u32 otg_stat;
544
545 timer = 0;
546
547 /* If ID pin is grounded, we want to be a_idle */
548 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
549 if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
550 switch (musb->xceiv.state) {
551 case OTG_STATE_A_WAIT_VRISE:
552 case OTG_STATE_A_WAIT_BCON:
553 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
554 break;
555 case OTG_STATE_A_WAIT_VFALL:
556 musb->xceiv.state = OTG_STATE_A_IDLE;
557 break;
558 default:
559 musb->xceiv.state = OTG_STATE_A_IDLE;
560 }
561 musb->is_active = 0;
562 musb->xceiv.default_a = 1;
563 MUSB_HST_MODE(musb);
564 } else {
565 musb->is_active = 0;
566 musb->xceiv.default_a = 0;
567 musb->xceiv.state = OTG_STATE_B_IDLE;
568 MUSB_DEV_MODE(musb);
569 }
570
571 devctl &= ~MUSB_DEVCTL_SESSION;
572 conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
573 if (musb->set_clock)
574 musb->set_clock(musb->clock, 0);
575 }
576 prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
577
578 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
579 musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
580 musb_writel(tbase, TUSB_DEV_CONF, conf);
581 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
582
583 DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
584 otg_state_string(musb),
585 musb_readb(musb->mregs, MUSB_DEVCTL),
586 musb_readl(tbase, TUSB_DEV_OTG_STAT),
587 conf, prcm);
588}
589
590/*
591 * Sets the mode to OTG, peripheral or host by changing the ID detection.
592 * Caller must take care of locking.
593 *
594 * Note that if a mini-A cable is plugged in, the ID line will stay down as
595 * the weak ID pull-up is not able to pull the ID up.
596 *
597 * REVISIT: It would be possible to add support for changing between host
598 * and peripheral modes in non-OTG configurations by reconfiguring hardware
599 * and then setting musb->board_mode. For now, only support OTG mode.
600 */
601void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
602{
603 void __iomem *tbase = musb->ctrl_base;
604 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
605
606 if (musb->board_mode != MUSB_OTG) {
607 ERR("Changing mode currently only supported in OTG mode\n");
608 return;
609 }
610
611 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
612 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
613 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
614 dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
615
616 switch (musb_mode) {
617
618#ifdef CONFIG_USB_MUSB_HDRC_HCD
619 case MUSB_HOST: /* Disable PHY ID detect, ground ID */
620 phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
621 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
622 dev_conf |= TUSB_DEV_CONF_ID_SEL;
623 dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
624 break;
625#endif
626
627#ifdef CONFIG_USB_GADGET_MUSB_HDRC
628 case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
629 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
630 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
631 dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
632 break;
633#endif
634
635#ifdef CONFIG_USB_MUSB_OTG
636 case MUSB_OTG: /* Use PHY ID detection */
637 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
638 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
639 dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
640 break;
641#endif
642
643 default:
644 DBG(2, "Trying to set unknown mode %i\n", musb_mode);
645 }
646
647 musb_writel(tbase, TUSB_PHY_OTG_CTRL,
648 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
649 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
650 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
651 musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
652
653 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
654 if ((musb_mode == MUSB_PERIPHERAL) &&
655 !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
656 INFO("Cannot be peripheral with mini-A cable "
657 "otg_stat: %08x\n", otg_stat);
658}
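/* Summary of the DEV_CONF ID bits written above (derived from the switch
 * cases, not an independent spec): MUSB_HOST sets ID_SEL and clears
 * SOFT_ID (ID forced to ground), MUSB_PERIPHERAL sets both ID_SEL and
 * SOFT_ID (ID held high via the pull-up), and MUSB_OTG clears both so
 * the PHY's own ID detection decides the role.
 */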
659
660static inline unsigned long
661tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
662{
663 u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
664 unsigned long idle_timeout = 0;
665
666 /* ID pin */
667 if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
668 int default_a;
669
670 if (is_otg_enabled(musb))
671 default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
672 else
673 default_a = is_host_enabled(musb);
674 DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
675 musb->xceiv.default_a = default_a;
676 tusb_source_power(musb, default_a);
677
678 /* Don't allow idling immediately */
679 if (default_a)
680 idle_timeout = jiffies + (HZ * 3);
681 }
682
683 /* VBUS state change */
684 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
685
686 /* B-dev state machine: no vbus ~= disconnect */
687 if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
688 || !is_host_enabled(musb)) {
689#ifdef CONFIG_USB_MUSB_HDRC_HCD
690 /* ? musb_root_disconnect(musb); */
691 musb->port1_status &=
692 ~(USB_PORT_STAT_CONNECTION
693 | USB_PORT_STAT_ENABLE
694 | USB_PORT_STAT_LOW_SPEED
695 | USB_PORT_STAT_HIGH_SPEED
696 | USB_PORT_STAT_TEST
697 );
698#endif
699
700 if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
701 DBG(1, "Forcing disconnect (no interrupt)\n");
702 if (musb->xceiv.state != OTG_STATE_B_IDLE) {
703 /* INTR_DISCONNECT can hide... */
704 musb->xceiv.state = OTG_STATE_B_IDLE;
705 musb->int_usb |= MUSB_INTR_DISCONNECT;
706 }
707 musb->is_active = 0;
708 }
709 DBG(2, "vbus change, %s, otg %03x\n",
710 otg_state_string(musb), otg_stat);
711 idle_timeout = jiffies + (1 * HZ);
712 schedule_work(&musb->irq_work);
713
714 } else /* A-dev state machine */ {
715 DBG(2, "vbus change, %s, otg %03x\n",
716 otg_state_string(musb), otg_stat);
717
718 switch (musb->xceiv.state) {
719 case OTG_STATE_A_IDLE:
720 DBG(2, "Got SRP, turning on VBUS\n");
721 musb_set_vbus(musb, 1);
722
723 /* CONNECT can wake if a_wait_bcon is set */
724 if (musb->a_wait_bcon != 0)
725 musb->is_active = 0;
726 else
727 musb->is_active = 1;
728
729 /*
730				 * OPT FS A TD.4.6 needs a few seconds for
731 * A_WAIT_VRISE
732 */
733 idle_timeout = jiffies + (2 * HZ);
734
735 break;
736 case OTG_STATE_A_WAIT_VRISE:
737 /* ignore; A-session-valid < VBUS_VALID/2,
738 * we monitor this with the timer
739 */
740 break;
741 case OTG_STATE_A_WAIT_VFALL:
742 /* REVISIT this irq triggers during short
743 * spikes caused by enumeration ...
744 */
745 if (musb->vbuserr_retry) {
746 musb->vbuserr_retry--;
747 tusb_source_power(musb, 1);
748 } else {
749 musb->vbuserr_retry
750 = VBUSERR_RETRY_COUNT;
751 tusb_source_power(musb, 0);
752 }
753 break;
754 default:
755 break;
756 }
757 }
758 }
759
760 /* OTG timer expiration */
761 if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
762 u8 devctl;
763
764 DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
765
766 switch (musb->xceiv.state) {
767 case OTG_STATE_A_WAIT_VRISE:
768 /* VBUS has probably been valid for a while now,
769 * but may well have bounced out of range a bit
770 */
771 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
772 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
773 if ((devctl & MUSB_DEVCTL_VBUS)
774 != MUSB_DEVCTL_VBUS) {
775 DBG(2, "devctl %02x\n", devctl);
776 break;
777 }
778 musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
779 musb->is_active = 0;
780 idle_timeout = jiffies
781 + msecs_to_jiffies(musb->a_wait_bcon);
782 } else {
783 /* REVISIT report overcurrent to hub? */
784 ERR("vbus too slow, devctl %02x\n", devctl);
785 tusb_source_power(musb, 0);
786 }
787 break;
788 case OTG_STATE_A_WAIT_BCON:
789 if (musb->a_wait_bcon != 0)
790 idle_timeout = jiffies
791 + msecs_to_jiffies(musb->a_wait_bcon);
792 break;
793 case OTG_STATE_A_SUSPEND:
794 break;
795 case OTG_STATE_B_WAIT_ACON:
796 break;
797 default:
798 break;
799 }
800 }
801 schedule_work(&musb->irq_work);
802
803 return idle_timeout;
804}
805
806static irqreturn_t tusb_interrupt(int irq, void *__hci)
807{
808 struct musb *musb = __hci;
809 void __iomem *tbase = musb->ctrl_base;
810 unsigned long flags, idle_timeout = 0;
811 u32 int_mask, int_src;
812
813 spin_lock_irqsave(&musb->lock, flags);
814
815 /* Mask all interrupts to allow using both edge and level GPIO irq */
816 int_mask = musb_readl(tbase, TUSB_INT_MASK);
817 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
818
819 int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
820 DBG(3, "TUSB IRQ %08x\n", int_src);
821
822 musb->int_usb = (u8) int_src;
823
824 /* Acknowledge wake-up source interrupts */
825 if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
826 u32 reg;
827 u32 i;
828
829 if (tusb_get_revision(musb) == TUSB_REV_30)
830 tusb_wbus_quirk(musb, 0);
831
832 /* there are issues re-locking the PLL on wakeup ... */
833
834 /* work around issue 8 */
835 for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
836 musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
837 musb_writel(tbase, TUSB_SCRATCH_PAD, i);
838 reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
839 if (reg == i)
840 break;
841 DBG(6, "TUSB NOR not ready\n");
842 }
843
844 /* work around issue 13 (2nd half) */
845 tusb_set_clock_source(musb, 1);
846
847 reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
848 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
849 if (reg & ~TUSB_PRCM_WNORCS) {
850 musb->is_active = 1;
851 schedule_work(&musb->irq_work);
852 }
853 DBG(3, "wake %sactive %02x\n",
854 musb->is_active ? "" : "in", reg);
855
856 /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
857 }
858
859 if (int_src & TUSB_INT_SRC_USB_IP_CONN)
860 del_timer(&musb_idle_timer);
861
862 /* OTG state change reports (annoyingly) not issued by Mentor core */
863 if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
864 | TUSB_INT_SRC_OTG_TIMEOUT
865 | TUSB_INT_SRC_ID_STATUS_CHNG))
866 idle_timeout = tusb_otg_ints(musb, int_src, tbase);
867
868 /* TX dma callback must be handled here, RX dma callback is
869 * handled in tusb_omap_dma_cb.
870 */
871 if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
872 u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
873 u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
874
875 DBG(3, "DMA IRQ %08x\n", dma_src);
876 real_dma_src = ~real_dma_src & dma_src;
877 if (tusb_dma_omap() && real_dma_src) {
878 int tx_source = (real_dma_src & 0xffff);
879 int i;
880
881 for (i = 1; i <= 15; i++) {
882 if (tx_source & (1 << i)) {
883 DBG(3, "completing ep%i %s\n", i, "tx");
884 musb_dma_completion(musb, i, 1);
885 }
886 }
887 }
888 musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
889 }
890
891 /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
892 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
893 u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
894
895 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
896 musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
897 musb->int_tx = (musb_src & 0xffff);
898 } else {
899 musb->int_rx = 0;
900 musb->int_tx = 0;
901 }
902
903 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
904 musb_interrupt(musb);
905
906 /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
907 musb_writel(tbase, TUSB_INT_SRC_CLEAR,
908 int_src & ~TUSB_INT_MASK_RESERVED_BITS);
909
910 musb_platform_try_idle(musb, idle_timeout);
911
912 musb_writel(tbase, TUSB_INT_MASK, int_mask);
913 spin_unlock_irqrestore(&musb->lock, flags);
914
915 return IRQ_HANDLED;
916}
917
918static int dma_off;
919
920/*
921 * Enables TUSB6010. Caller must take care of locking.
922 * REVISIT:
923 * - Check what is unnecessary in MGC_HdrcStart()
924 */
925void musb_platform_enable(struct musb *musb)
926{
927 void __iomem *tbase = musb->ctrl_base;
928
929 /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
930 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
931 musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
932
933 /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
934 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
935 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
936 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
937
938	/* Clear all subsystem interrupts */
939 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
940 musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
941 musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
942
943 /* Acknowledge pending interrupt(s) */
944 musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
945
946 /* Only 0 clock cycles for minimum interrupt de-assertion time and
947 * interrupt polarity active low seems to work reliably here */
948 musb_writel(tbase, TUSB_INT_CTRL_CONF,
949 TUSB_INT_CTRL_CONF_INT_RELCYC(0));
950
951 set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
952
953 /* maybe force into the Default-A OTG state machine */
954 if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
955 & TUSB_DEV_OTG_STAT_ID_STATUS))
956 musb_writel(tbase, TUSB_INT_SRC_SET,
957 TUSB_INT_SRC_ID_STATUS_CHNG);
958
959 if (is_dma_capable() && dma_off)
960 printk(KERN_WARNING "%s %s: dma not reactivated\n",
961 __FILE__, __func__);
962 else
963 dma_off = 1;
964}
965
966/*
967 * Disables TUSB6010. Caller must take care of locking.
968 */
969void musb_platform_disable(struct musb *musb)
970{
971 void __iomem *tbase = musb->ctrl_base;
972
973 /* FIXME stop DMA, IRQs, timers, ... */
974
975 /* disable all IRQs */
976 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
977 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
978 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
979 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
980
981 del_timer(&musb_idle_timer);
982
983 if (is_dma_capable() && !dma_off) {
984 printk(KERN_WARNING "%s %s: dma still active\n",
985 __FILE__, __func__);
986 dma_off = 1;
987 }
988}
989
990/*
991 * Sets up TUSB6010 CPU interface specific signals and registers
992 * Note: Settings optimized for OMAP24xx
993 */
994static void __init tusb_setup_cpu_interface(struct musb *musb)
995{
996 void __iomem *tbase = musb->ctrl_base;
997
998 /*
999 * Disable GPIO[5:0] pullups (used as output DMA requests)
1000 * Don't disable GPIO[7:6] as they are needed for wake-up.
1001 */
1002 musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
1003
1004 /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
1005 musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
1006
1007 /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
1008 musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
1009
1010 /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
1011	 * de-assertion time 2 system clocks (see p. 62) */
1012 musb_writel(tbase, TUSB_DMA_REQ_CONF,
1013 TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
1014 TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
1015 TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
1016
1017 /* Set 0 wait count for synchronous burst access */
1018 musb_writel(tbase, TUSB_WAIT_COUNT, 1);
1019}
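/* For reference, given the bitfield macros in tusb6010.h, the
 * TUSB_DMA_REQ_CONF value written above works out to
 * (2 << 26) | (0x3f << 20) | (2 << 16) == 0x0bf20000.
 */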
1020
1021static int __init tusb_start(struct musb *musb)
1022{
1023 void __iomem *tbase = musb->ctrl_base;
1024 int ret = 0;
1025 unsigned long flags;
1026 u32 reg;
1027
1028 if (musb->board_set_power)
1029 ret = musb->board_set_power(1);
1030 if (ret != 0) {
1031 printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
1032 return ret;
1033 }
1034
1035 spin_lock_irqsave(&musb->lock, flags);
1036
1037 if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
1038 TUSB_PROD_TEST_RESET_VAL) {
1039 printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
1040 goto err;
1041 }
1042
1043 ret = tusb_print_revision(musb);
1044 if (ret < 2) {
1045 printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
1046 ret);
1047 goto err;
1048 }
1049
1050 /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
1051	 * the NOR FLASH interface is used */
1052 musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
1053
1054 /* Select PHY free running 60MHz as a system clock */
1055 tusb_set_clock_source(musb, 1);
1056
1057 /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
1058 * power saving, enable VBus detect and session end comparators,
1059 * enable IDpullup, enable VBus charging */
1060 musb_writel(tbase, TUSB_PRCM_MNGMT,
1061 TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
1062 TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
1063 TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
1064 TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
1065 TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
1066 tusb_setup_cpu_interface(musb);
1067
1068 /* simplify: always sense/pullup ID pins, as if in OTG mode */
1069 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
1070 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
1071 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
1072
1073 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
1074 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
1075 musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
1076
1077 spin_unlock_irqrestore(&musb->lock, flags);
1078
1079 return 0;
1080
1081err:
1082 spin_unlock_irqrestore(&musb->lock, flags);
1083
1084 if (musb->board_set_power)
1085 musb->board_set_power(0);
1086
1087 return -ENODEV;
1088}
1089
1090int __init musb_platform_init(struct musb *musb)
1091{
1092 struct platform_device *pdev;
1093 struct resource *mem;
1094 void __iomem *sync;
1095 int ret;
1096
1097 pdev = to_platform_device(musb->controller);
1098
1099 /* dma address for async dma */
1100 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1101 musb->async = mem->start;
1102
1103 /* dma address for sync dma */
1104 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1105 if (!mem) {
1106 pr_debug("no sync dma resource?\n");
1107 return -ENODEV;
1108 }
1109 musb->sync = mem->start;
1110
1111 sync = ioremap(mem->start, mem->end - mem->start + 1);
1112 if (!sync) {
1113 pr_debug("ioremap for sync failed\n");
1114 return -ENOMEM;
1115 }
1116 musb->sync_va = sync;
1117
1118 /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
1119 * FIFOs at 0x600, TUSB at 0x800
1120 */
1121 musb->mregs += TUSB_BASE_OFFSET;
1122
1123 ret = tusb_start(musb);
1124 if (ret) {
1125 printk(KERN_ERR "Could not start tusb6010 (%d)\n",
1126 ret);
1127 return -ENODEV;
1128 }
1129 musb->isr = tusb_interrupt;
1130
1131 if (is_host_enabled(musb))
1132 musb->board_set_vbus = tusb_source_power;
1133 if (is_peripheral_enabled(musb))
1134 musb->xceiv.set_power = tusb_draw_power;
1135
1136 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
1137
1138 return ret;
1139}
1140
1141int musb_platform_exit(struct musb *musb)
1142{
1143 del_timer_sync(&musb_idle_timer);
1144
1145 if (musb->board_set_power)
1146 musb->board_set_power(0);
1147
1148 iounmap(musb->sync_va);
1149
1150 return 0;
1151}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 000000000000..ab8c96286ce6
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
1/*
2 * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Jarkko Nikula <jarkko.nikula@nokia.com>
6 * Tony Lindgren <tony@atomide.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __TUSB6010_H__
14#define __TUSB6010_H__
15
16extern u8 tusb_get_revision(struct musb *musb);
17
18#ifdef CONFIG_USB_TUSB6010
19#define musb_in_tusb() 1
20#else
21#define musb_in_tusb() 0
22#endif
23
24#ifdef CONFIG_USB_TUSB_OMAP_DMA
25#define tusb_dma_omap() 1
26#else
27#define tusb_dma_omap() 0
28#endif
29
30/* VLYNQ control register. 32-bit at offset 0x000 */
31#define TUSB_VLYNQ_CTRL 0x004
32
33/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
34#define TUSB_BASE_OFFSET 0x400
35
36/* FIFO registers 32-bit at offset 0x600 */
37#define TUSB_FIFO_BASE 0x600
38
39/* Device System & Control registers. 32-bit at offset 0x800 */
40#define TUSB_SYS_REG_BASE 0x800
41
42#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
43#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
44#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
45#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
46#define TUSB_DEV_CONF_ID_SEL (1 << 0)
47
48#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
49#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
50#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
51#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
52#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
53#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
54#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
55#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
56#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
57#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
58#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
59#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
60#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
61#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
62#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
63#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
64#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
65#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
66#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
67#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
68#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
69#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
70#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
71
72/* OTG status register */
73#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
74#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
75#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
76#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
77#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
78#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
79#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
80#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
81#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
82#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
83#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
84
85#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
86# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
87# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
88#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
89
90/* PRCM configuration register */
91#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
92#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
93#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
94
95/* PRCM management register */
96#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
97#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
98#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
99#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
100#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
101#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
102#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
103#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
104#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
105#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
106#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
107#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
108#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
109#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
110#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
111
112/* Wake-up source clear and mask registers */
113#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
114#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
115#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
116#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
117#define TUSB_PRCM_WGPIO_7 (1 << 12)
118#define TUSB_PRCM_WGPIO_6 (1 << 11)
119#define TUSB_PRCM_WGPIO_5 (1 << 10)
120#define TUSB_PRCM_WGPIO_4 (1 << 9)
121#define TUSB_PRCM_WGPIO_3 (1 << 8)
122#define TUSB_PRCM_WGPIO_2 (1 << 7)
123#define TUSB_PRCM_WGPIO_1 (1 << 6)
124#define TUSB_PRCM_WGPIO_0 (1 << 5)
125#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
126#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
127#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
128#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
129#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
130
131#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
132#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
133#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
134#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
135#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
136#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
137#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
138#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
139#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
140#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
141#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
142#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
143#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
144#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
145#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
146#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
147
148/* NOR flash interrupt source registers */
149#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
150#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
151#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
152#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
153#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
154#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
155#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
156#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
157#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
158#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
159#define TUSB_INT_SRC_DEV_READY (1 << 12)
160#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
161#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
162#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
163#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
164#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
165#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
166#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
167#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
168#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
169#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
170
171/* NOR flash interrupt registers reserved bits. Must be written as 0 */
172#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
173#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
174#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
175#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
176#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
177#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
178
179/* Reserved bits for NOR flash interrupt mask and clear register */
180#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
181 TUSB_INT_MASK_RESERVED_13 | \
182 TUSB_INT_MASK_RESERVED_8)
183
184/* Reserved bits for NOR flash interrupt status register */
185#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
186 TUSB_INT_SRC_RESERVED_18 | \
187 TUSB_INT_SRC_RESERVED_10)
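/* For reference (computed from the definitions above):
 * TUSB_INT_MASK_RESERVED_BITS == 0x7ffe2f00 and
 * TUSB_INT_SRC_RESERVED_BITS == 0x7cfc0c00.
 */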
188
189#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
190#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
191#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
192#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
193#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
194#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
195
196/* Offsets from each ep base register */
197#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
198#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
199#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
200
201#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
202#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
203#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
204
205/* Device System & Control register bitfields */
206#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
207#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
208#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
209#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
210#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
211#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
212#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
213#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
214#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
215#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
216#define TUSB_EP_CONFIG_SW_EN (1 << 31)
217#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
218#define TUSB_PROD_TEST_RESET_VAL 0xa596
219#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
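/* e.g. TUSB_EP_FIFO(2) == 0x600 + 2 * 0x20 == 0x640 */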
220
221#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
222#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
223#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf)
224#define TUSB_DIDR1_HI_REV_20 0
225#define TUSB_DIDR1_HI_REV_30 1
226#define TUSB_DIDR1_HI_REV_31 2
227
228#define TUSB_REV_10 0x10
229#define TUSB_REV_20 0x20
230#define TUSB_REV_30 0x30
231#define TUSB_REV_31 0x31
232
233#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 000000000000..52f7f29cebda
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
1/*
2 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/usb.h>
16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h>
18#include <asm/arch/dma.h>
19#include <asm/arch/mux.h>
20
21#include "musb_core.h"
22
23#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
24
25#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
26
27struct tusb_omap_dma_ch {
28 struct musb *musb;
29 void __iomem *tbase;
30 unsigned long phys_offset;
31 int epnum;
32 u8 tx;
33 struct musb_hw_ep *hw_ep;
34
35 int ch;
36 s8 dmareq;
37 s8 sync_dev;
38
39 struct tusb_omap_dma *tusb_dma;
40
41 void __iomem *dma_addr;
42
43 u32 len;
44 u16 packet_sz;
45 u16 transfer_packet_sz;
46 u32 transfer_len;
47 u32 completed_len;
48};
49
50struct tusb_omap_dma {
51 struct dma_controller controller;
52 struct musb *musb;
53 void __iomem *tbase;
54
55 int ch;
56 s8 dmareq;
57 s8 sync_dev;
58 unsigned multichannel:1;
59};
60
61static int tusb_omap_dma_start(struct dma_controller *c)
62{
63 struct tusb_omap_dma *tusb_dma;
64
65 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
66
67 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
68
69 return 0;
70}
71
72static int tusb_omap_dma_stop(struct dma_controller *c)
73{
74 struct tusb_omap_dma *tusb_dma;
75
76 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
77
78 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
79
80 return 0;
81}
82
83/*
84 * Allocate dmareq0 to the current channel unless it's already taken
85 */
86static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
87{
88 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
89
90 if (reg != 0) {
91 DBG(3, "ep%i dmareq0 is busy for ep%i\n",
92 chdat->epnum, reg & 0xf);
93 return -EAGAIN;
94 }
95
96 if (chdat->tx)
97 reg = (1 << 4) | chdat->epnum;
98 else
99 reg = chdat->epnum;
100
101 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
102
103 return 0;
104}
105
106static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
107{
108 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
109
110 if ((reg & 0xf) != chdat->epnum) {
111 printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
112 chdat->epnum, reg & 0xf);
113 return;
114 }
115 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
116}
117
118/*
119 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
120 * musb_gadget.c.
121 */
122static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
123{
124 struct dma_channel *channel = (struct dma_channel *)data;
125 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
126 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
127 struct musb *musb = chdat->musb;
128 struct musb_hw_ep *hw_ep = chdat->hw_ep;
129 void __iomem *ep_conf = hw_ep->conf;
130 void __iomem *mbase = musb->mregs;
131 unsigned long remaining, flags, pio;
132 int ch;
133
134 spin_lock_irqsave(&musb->lock, flags);
135
136 if (tusb_dma->multichannel)
137 ch = chdat->ch;
138 else
139 ch = tusb_dma->ch;
140
141 if (ch_status != OMAP_DMA_BLOCK_IRQ)
142 printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
143
144 DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
145 chdat->epnum, chdat->tx ? "tx" : "rx",
146 ch, ch_status);
147
148 if (chdat->tx)
149 remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
150 else
151 remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
152
153 remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
154
155 /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
156 if (unlikely(remaining > chdat->transfer_len)) {
157 DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
158 chdat->tx ? "tx" : "rx", chdat->ch,
159 remaining);
160 remaining = 0;
161 }
162
163 channel->actual_len = chdat->transfer_len - remaining;
164 pio = chdat->len - channel->actual_len;
165
166 DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
167
168 /* Transfer remaining 1 - 31 bytes */
169 if (pio > 0 && pio < 32) {
170 u8 *buf;
171
172 DBG(3, "Using PIO for remaining %lu bytes\n", pio);
173 buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
174 if (chdat->tx) {
175 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
176 chdat->transfer_len, DMA_TO_DEVICE);
177 musb_write_fifo(hw_ep, pio, buf);
178 } else {
179 musb_read_fifo(hw_ep, pio, buf);
180 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
181 chdat->transfer_len, DMA_FROM_DEVICE);
182 }
183 channel->actual_len += pio;
184 }
185
186 if (!tusb_dma->multichannel)
187 tusb_omap_free_shared_dmareq(chdat);
188
189 channel->status = MUSB_DMA_STATUS_FREE;
190
191 /* Handle only RX callbacks here. TX callbacks must be handled based
192 * on the TUSB DMA status interrupt.
193 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
194 * interrupt for RX and TX.
195 */
196 if (!chdat->tx)
197 musb_dma_completion(musb, chdat->epnum, chdat->tx);
198
199 /* We must terminate short tx transfers manually by setting TXPKTRDY.
200 * REVISIT: This same problem may occur with other MUSB dma as well.
201 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
202 */
203 if ((chdat->transfer_len < chdat->packet_sz)
204 || (chdat->transfer_len % chdat->packet_sz != 0)) {
205 u16 csr;
206
207 if (chdat->tx) {
208 DBG(3, "terminating short tx packet\n");
209 musb_ep_select(mbase, chdat->epnum);
210 csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
211 csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
212 | MUSB_TXCSR_P_WZC_BITS;
213 musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
214 }
215 }
216
217 spin_unlock_irqrestore(&musb->lock, flags);
218}
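/* Worked example of the DMA/PIO split above (a sketch based on the
 * len & ~0x1f rounding in tusb_omap_dma_program): for a 100 byte
 * request, transfer_len is 96, so once DMA completes with nothing
 * remaining, the final 4 bytes are moved by the PIO path in the
 * callback.
 */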
219
220static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
221 u8 rndis_mode, dma_addr_t dma_addr, u32 len)
222{
223 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
224 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
225 struct musb *musb = chdat->musb;
226 struct musb_hw_ep *hw_ep = chdat->hw_ep;
227 void __iomem *mbase = musb->mregs;
228 void __iomem *ep_conf = hw_ep->conf;
229 dma_addr_t fifo = hw_ep->fifo_sync;
230 struct omap_dma_channel_params dma_params;
231 u32 dma_remaining;
232 int src_burst, dst_burst;
233 u16 csr;
234 int ch;
235 s8 dmareq;
236 s8 sync_dev;
237
238 if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
239 return false;
240
241 /*
242 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
243 * register which will cause missed DMA interrupt. We could try to
244 * use a timer for the callback, but it is unsafe as the XFR_SIZE
245 * register is corrupt, and we won't know if the DMA worked.
246 */
247 if (dma_addr & 0x2)
248 return false;
249
250 /*
251 * Because of HW issue #10, it seems like mixing sync DMA and async
252 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
253 * using the channel for DMA.
254 */
255 if (chdat->tx)
256 dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
257 else
258 dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
259
260 dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
261 if (dma_remaining) {
262 DBG(2, "Busy %s dma ch%i, not using: %08x\n",
263 chdat->tx ? "tx" : "rx", chdat->ch,
264 dma_remaining);
265 return false;
266 }
267
268 chdat->transfer_len = len & ~0x1f;
269
270 if (len < packet_sz)
271 chdat->transfer_packet_sz = chdat->transfer_len;
272 else
273 chdat->transfer_packet_sz = packet_sz;
274
275 if (tusb_dma->multichannel) {
276 ch = chdat->ch;
277 dmareq = chdat->dmareq;
278 sync_dev = chdat->sync_dev;
279 } else {
280 if (tusb_omap_use_shared_dmareq(chdat) != 0) {
281 DBG(3, "could not get dma for ep%i\n", chdat->epnum);
282 return false;
283 }
284 if (tusb_dma->ch < 0) {
285 /* REVISIT: This should get blocked earlier, happens
286 * with MSC ErrorRecoveryTest
287 */
288 WARN_ON(1);
289 return false;
290 }
291
292 ch = tusb_dma->ch;
293 dmareq = tusb_dma->dmareq;
294 sync_dev = tusb_dma->sync_dev;
295 omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
296 }
297
298 chdat->packet_sz = packet_sz;
299 chdat->len = len;
300 channel->actual_len = 0;
301 chdat->dma_addr = (void __iomem *)dma_addr;
302 channel->status = MUSB_DMA_STATUS_BUSY;
303
304 /* Since we're recycling dma areas, we need to clean or invalidate */
305 if (chdat->tx)
306 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
307 else
308 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
309
310 /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
311 if ((dma_addr & 0x3) == 0) {
312 dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
313 dma_params.elem_count = 8; /* Elements in frame */
314 } else {
315 dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
316 dma_params.elem_count = 16; /* Elements in frame */
317 fifo = hw_ep->fifo_async;
318 }
319
320 dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
321
322 DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
323 chdat->epnum, chdat->tx ? "tx" : "rx",
324 ch, dma_addr, chdat->transfer_len, len,
325 chdat->transfer_packet_sz, packet_sz);
326
327 /*
328 * Prepare omap DMA for transfer
329 */
330 if (chdat->tx) {
331 dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
332 dma_params.src_start = (unsigned long)dma_addr;
333 dma_params.src_ei = 0;
334 dma_params.src_fi = 0;
335
336 dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
337 dma_params.dst_start = (unsigned long)fifo;
338 dma_params.dst_ei = 1;
339 dma_params.dst_fi = -31; /* Loop 32 byte window */
340
341 dma_params.trigger = sync_dev;
342 dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
343 dma_params.src_or_dst_synch = 0; /* Dest sync */
344
345 src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */
346 dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */
347 } else {
348 dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
349 dma_params.src_start = (unsigned long)fifo;
350 dma_params.src_ei = 1;
351 dma_params.src_fi = -31; /* Loop 32 byte window */
352
353 dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
354 dma_params.dst_start = (unsigned long)dma_addr;
355 dma_params.dst_ei = 0;
356 dma_params.dst_fi = 0;
357
358 dma_params.trigger = sync_dev;
359 dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
360 dma_params.src_or_dst_synch = 1; /* Source sync */
361
362 src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
363 dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
364 }
365
366 DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
367 chdat->epnum, chdat->tx ? "tx" : "rx",
368 (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
369 ((dma_addr & 0x3) == 0) ? "sync" : "async",
370 dma_params.src_start, dma_params.dst_start);
371
372 omap_set_dma_params(ch, &dma_params);
373 omap_set_dma_src_burst_mode(ch, src_burst);
374 omap_set_dma_dest_burst_mode(ch, dst_burst);
375 omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
376
377 /*
378 * Prepare MUSB for DMA transfer
379 */
380 if (chdat->tx) {
381 musb_ep_select(mbase, chdat->epnum);
382 csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
383 csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
384 | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
385 csr &= ~MUSB_TXCSR_P_UNDERRUN;
386 musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
387 } else {
388 musb_ep_select(mbase, chdat->epnum);
389 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
390 csr |= MUSB_RXCSR_DMAENAB;
391 csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
392 musb_writew(hw_ep->regs, MUSB_RXCSR,
393 csr | MUSB_RXCSR_P_WZC_BITS);
394 }
395
396 /*
397 * Start DMA transfer
398 */
399 omap_start_dma(ch);
400
401 if (chdat->tx) {
402 /* Send transfer_packet_sz packets at a time */
403 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
404 chdat->transfer_packet_sz);
405
406 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
407 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
408 } else {
409 /* Receive transfer_packet_sz packets at a time */
410 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
411 chdat->transfer_packet_sz << 16);
412
413 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
414 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
415 }
416
417 return true;
418}
419
420static int tusb_omap_dma_abort(struct dma_channel *channel)
421{
422 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
423 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
424
425 if (!tusb_dma->multichannel) {
426 if (tusb_dma->ch >= 0) {
427 omap_stop_dma(tusb_dma->ch);
428 omap_free_dma(tusb_dma->ch);
429 tusb_dma->ch = -1;
430 }
431
432 tusb_dma->dmareq = -1;
433 tusb_dma->sync_dev = -1;
434 }
435
436 channel->status = MUSB_DMA_STATUS_FREE;
437
438 return 0;
439}
440
441static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
442{
443 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
444 int i, dmareq_nr = -1;
445
446 const int sync_dev[6] = {
447 OMAP24XX_DMA_EXT_DMAREQ0,
448 OMAP24XX_DMA_EXT_DMAREQ1,
449 OMAP242X_DMA_EXT_DMAREQ2,
450 OMAP242X_DMA_EXT_DMAREQ3,
451 OMAP242X_DMA_EXT_DMAREQ4,
452 OMAP242X_DMA_EXT_DMAREQ5,
453 };
454
455 for (i = 0; i < MAX_DMAREQ; i++) {
456 int cur = (reg & (0xf << (i * 5))) >> (i * 5);
457 if (cur == 0) {
458 dmareq_nr = i;
459 break;
460 }
461 }
462
463 if (dmareq_nr == -1)
464 return -EAGAIN;
465
466 reg |= (chdat->epnum << (dmareq_nr * 5));
467 if (chdat->tx)
468 reg |= ((1 << 4) << (dmareq_nr * 5));
469 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
470
471 chdat->dmareq = dmareq_nr;
472 chdat->sync_dev = sync_dev[chdat->dmareq];
473
474 return 0;
475}
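/* TUSB_DMA_EP_MAP layout implied by the code above: each dmareq uses a
 * 5-bit slot, bits [3:0] holding the endpoint number and bit 4 the tx
 * flag. E.g. mapping ep3 tx to dmareq1 ORs in (3 | (1 << 4)) << 5,
 * i.e. 0x260.
 */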
476
477static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
478{
479 u32 reg;
480
481 if (!chdat || chdat->dmareq < 0)
482 return;
483
484 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
485 reg &= ~(0x1f << (chdat->dmareq * 5));
486 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
487
488 chdat->dmareq = -1;
489 chdat->sync_dev = -1;
490}
491
492static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
493
494static struct dma_channel *
495tusb_omap_dma_allocate(struct dma_controller *c,
496 struct musb_hw_ep *hw_ep,
497 u8 tx)
498{
499 int ret, i;
500 const char *dev_name;
501 struct tusb_omap_dma *tusb_dma;
502 struct musb *musb;
503 void __iomem *tbase;
504 struct dma_channel *channel = NULL;
505 struct tusb_omap_dma_ch *chdat = NULL;
506 u32 reg;
507
508 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
509 musb = tusb_dma->musb;
510 tbase = musb->ctrl_base;
511
512 reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
513 if (tx)
514 reg &= ~(1 << hw_ep->epnum);
515 else
516 reg &= ~(1 << (hw_ep->epnum + 15));
517 musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
518
519 /* REVISIT: Why does dmareq5 not work? */
520 if (hw_ep->epnum == 0) {
521 DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
522 return NULL;
523 }
524
525 for (i = 0; i < MAX_DMAREQ; i++) {
526 struct dma_channel *ch = dma_channel_pool[i];
527 if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
528 ch->status = MUSB_DMA_STATUS_FREE;
529 channel = ch;
530 chdat = ch->private_data;
531 break;
532 }
533 }
534
535 if (!channel)
536 return NULL;
537
538 if (tx) {
539 chdat->tx = 1;
540 dev_name = "TUSB transmit";
541 } else {
542 chdat->tx = 0;
543 dev_name = "TUSB receive";
544 }
545
546 chdat->musb = tusb_dma->musb;
547 chdat->tbase = tusb_dma->tbase;
548 chdat->hw_ep = hw_ep;
549 chdat->epnum = hw_ep->epnum;
550 chdat->dmareq = -1;
551 chdat->completed_len = 0;
552 chdat->tusb_dma = tusb_dma;
553
554 channel->max_len = 0x7fffffff;
555 channel->desired_mode = 0;
556 channel->actual_len = 0;
557
558 if (tusb_dma->multichannel) {
559 ret = tusb_omap_dma_allocate_dmareq(chdat);
560 if (ret != 0)
561 goto free_dmareq;
562
563 ret = omap_request_dma(chdat->sync_dev, dev_name,
564 tusb_omap_dma_cb, channel, &chdat->ch);
565 if (ret != 0)
566 goto free_dmareq;
567 } else if (tusb_dma->ch == -1) {
568 tusb_dma->dmareq = 0;
569 tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
570
571 /* Callback data gets set later in the shared dmareq case */
572 ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
573 tusb_omap_dma_cb, NULL, &tusb_dma->ch);
574 if (ret != 0)
575 goto free_dmareq;
576
577 chdat->dmareq = -1;
578 chdat->ch = -1;
579 }
580
581 DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
582 chdat->epnum,
583 chdat->tx ? "tx" : "rx",
584 chdat->ch >= 0 ? "dedicated" : "shared",
585 chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
586 chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
587 chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
588
589 return channel;
590
591free_dmareq:
592 tusb_omap_dma_free_dmareq(chdat);
593
594 DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
595 channel->status = MUSB_DMA_STATUS_UNKNOWN;
596
597 return NULL;
598}
599
600static void tusb_omap_dma_release(struct dma_channel *channel)
601{
602 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
603 struct musb *musb = chdat->musb;
604 void __iomem *tbase = musb->ctrl_base;
605 u32 reg;
606
607 DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
608
609 reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
610 if (chdat->tx)
611 reg |= (1 << chdat->epnum);
612 else
613 reg |= (1 << (chdat->epnum + 15));
614 musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
615
616 reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
617 if (chdat->tx)
618 reg |= (1 << chdat->epnum);
619 else
620 reg |= (1 << (chdat->epnum + 15));
621 musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
622
623 channel->status = MUSB_DMA_STATUS_UNKNOWN;
624
625 if (chdat->ch >= 0) {
626 omap_stop_dma(chdat->ch);
627 omap_free_dma(chdat->ch);
628 chdat->ch = -1;
629 }
630
631 if (chdat->dmareq >= 0)
632 tusb_omap_dma_free_dmareq(chdat);
633
634 channel = NULL;
635}
636
637void dma_controller_destroy(struct dma_controller *c)
638{
639 struct tusb_omap_dma *tusb_dma;
640 int i;
641
642 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
643 for (i = 0; i < MAX_DMAREQ; i++) {
644 struct dma_channel *ch = dma_channel_pool[i];
645 if (ch) {
646 kfree(ch->private_data);
647 kfree(ch);
648 }
649 }
650
651	if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
652 omap_free_dma(tusb_dma->ch);
653
654 kfree(tusb_dma);
655}
656
657struct dma_controller *__init
658dma_controller_create(struct musb *musb, void __iomem *base)
659{
660 void __iomem *tbase = musb->ctrl_base;
661 struct tusb_omap_dma *tusb_dma;
662 int i;
663
664 /* REVISIT: Get dmareq lines used from board-*.c */
665
666 musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
667 musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
668
669 musb_writel(tbase, TUSB_DMA_REQ_CONF,
670 TUSB_DMA_REQ_CONF_BURST_SIZE(2)
671 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
672 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
673
674 tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
675 if (!tusb_dma)
676 goto cleanup;
677
678 tusb_dma->musb = musb;
679 tusb_dma->tbase = musb->ctrl_base;
680
681 tusb_dma->ch = -1;
682 tusb_dma->dmareq = -1;
683 tusb_dma->sync_dev = -1;
684
685 tusb_dma->controller.start = tusb_omap_dma_start;
686 tusb_dma->controller.stop = tusb_omap_dma_stop;
687 tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
688 tusb_dma->controller.channel_release = tusb_omap_dma_release;
689 tusb_dma->controller.channel_program = tusb_omap_dma_program;
690 tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
691
692 if (tusb_get_revision(musb) >= TUSB_REV_30)
693 tusb_dma->multichannel = 1;
694
695 for (i = 0; i < MAX_DMAREQ; i++) {
696 struct dma_channel *ch;
697 struct tusb_omap_dma_ch *chdat;
698
699 ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
700 if (!ch)
701 goto cleanup;
702
703 dma_channel_pool[i] = ch;
704
705 chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
706 if (!chdat)
707 goto cleanup;
708
709 ch->status = MUSB_DMA_STATUS_UNKNOWN;
710 ch->private_data = chdat;
711 }
712
713 return &tusb_dma->controller;
714
715cleanup:
716 dma_controller_destroy(&tusb_dma->controller);
717
718 return NULL;
719}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c1767fc8..70338f4ec918 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED
499config USB_SERIAL_SIERRAWIRELESS 499config USB_SERIAL_SIERRAWIRELESS
500 tristate "USB Sierra Wireless Driver" 500 tristate "USB Sierra Wireless Driver"
501 help 501 help
502 Say M here if you want to use a Sierra Wireless device (if 502 Say M here if you want to use Sierra Wireless devices.
503 using an PC 5220 or AC580 please use the Airprime driver 503
504 instead). 504 Many deviecs have a feature known as TRU-Install, for those devices
505 to work properly the USB Storage Sierra feature must be enabled.
505 506
506 To compile this driver as a module, choose M here: the 507 To compile this driver as a module, choose M here: the
507 module will be called sierra. 508 module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 838717250145..984f6eff4c47 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = {
563 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 563 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
564 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 564 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
566 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
566 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 567 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
567 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 568 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
568 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 569 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = {
637 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, 638 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
638 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 639 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
639 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 640 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
641 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
640 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 642 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
641 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 643 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
642 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 644 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = {
646 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 648 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
647 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), 649 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
648 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 650 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
651 { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
652 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
653 { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
654 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
649 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 655 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
650 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 656 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
651 { }, /* Optional parameter entry */ 657 { }, /* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea44dcf9..382265bba969 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
524#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 524#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
525#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 525#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
526#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 526#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
527#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
527#define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */ 528#define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */
529#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
528 530
529/* 531/*
530 * Definitions for ID TECH (www.idt-net.com) devices 532 * Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
815#define OLIMEX_VID 0x15BA 817#define OLIMEX_VID 0x15BA
816#define OLIMEX_ARM_USB_OCD_PID 0x0003 818#define OLIMEX_ARM_USB_OCD_PID 0x0003
817 819
820/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
 821/* FTDI FT2232C dual-channel device, Side A=245 FIFO (JTAG), Side B=RS232 UART */
822#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
823#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
824
818/* www.elsterelectricity.com Elster Unicom III Optical Probe */ 825/* www.elsterelectricity.com Elster Unicom III Optical Probe */
819#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ 826#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
820 827
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95f2b0f..e143198aeb02 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -186,6 +186,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
186#define BANDRICH_VENDOR_ID 0x1A8D 186#define BANDRICH_VENDOR_ID 0x1A8D
187#define BANDRICH_PRODUCT_C100_1 0x1002 187#define BANDRICH_PRODUCT_C100_1 0x1002
188#define BANDRICH_PRODUCT_C100_2 0x1003 188#define BANDRICH_PRODUCT_C100_2 0x1003
189#define BANDRICH_PRODUCT_1004 0x1004
190#define BANDRICH_PRODUCT_1005 0x1005
191#define BANDRICH_PRODUCT_1006 0x1006
192#define BANDRICH_PRODUCT_1007 0x1007
193#define BANDRICH_PRODUCT_1008 0x1008
194#define BANDRICH_PRODUCT_1009 0x1009
195#define BANDRICH_PRODUCT_100A 0x100a
196
197#define BANDRICH_PRODUCT_100B 0x100b
198#define BANDRICH_PRODUCT_100C 0x100c
199#define BANDRICH_PRODUCT_100D 0x100d
200#define BANDRICH_PRODUCT_100E 0x100e
201
202#define BANDRICH_PRODUCT_100F 0x100f
203#define BANDRICH_PRODUCT_1010 0x1010
204#define BANDRICH_PRODUCT_1011 0x1011
205#define BANDRICH_PRODUCT_1012 0x1012
189 206
190#define AMOI_VENDOR_ID 0x1614 207#define AMOI_VENDOR_ID 0x1614
191#define AMOI_PRODUCT_9508 0x0800 208#define AMOI_PRODUCT_9508 0x0800
@@ -197,6 +214,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
197#define TELIT_VENDOR_ID 0x1bc7 214#define TELIT_VENDOR_ID 0x1bc7
198#define TELIT_PRODUCT_UC864E 0x1003 215#define TELIT_PRODUCT_UC864E 0x1003
199 216
217/* ZTE PRODUCTS */
218#define ZTE_VENDOR_ID 0x19d2
219#define ZTE_PRODUCT_MF628 0x0015
220
200static struct usb_device_id option_ids[] = { 221static struct usb_device_id option_ids[] = {
201 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 222 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
202 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 223 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -302,12 +323,28 @@ static struct usb_device_id option_ids[] = {
302 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, 323 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
303 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, 324 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
304 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, 325 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
326 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
327 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
328 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
329 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
330 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
331 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
332 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
333 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
334 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
335 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
336 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
337 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
338 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
339 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
340 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
305 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 341 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
306 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 342 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
307 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 343 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
308 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 344 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
309 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ 345 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
310 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 346 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
347 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
311 { } /* Terminating entry */ 348 { } /* Terminating entry */
312}; 349};
313MODULE_DEVICE_TABLE(usb, option_ids); 350MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +383,7 @@ static struct usb_serial_driver option_1port_device = {
346 .read_int_callback = option_instat_callback, 383 .read_int_callback = option_instat_callback,
347}; 384};
348 385
349#ifdef CONFIG_USB_DEBUG
350static int debug; 386static int debug;
351#else
352#define debug 0
353#endif
354 387
355/* per port private data */ 388/* per port private data */
356 389
@@ -954,8 +987,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
954MODULE_VERSION(DRIVER_VERSION); 987MODULE_VERSION(DRIVER_VERSION);
955MODULE_LICENSE("GPL"); 988MODULE_LICENSE("GPL");
956 989
957#ifdef CONFIG_USB_DEBUG
958module_param(debug, bool, S_IRUGO | S_IWUSR); 990module_param(debug, bool, S_IRUGO | S_IWUSR);
959MODULE_PARM_DESC(debug, "Debug messages"); 991MODULE_PARM_DESC(debug, "Debug messages");
960#endif
961
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446ad625..1ede1441cb1b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = {
90 { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, 90 { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, 91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, 92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
93 { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
94 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, 93 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
95 { } /* Terminating entry */ 94 { } /* Terminating entry */
96}; 95};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbcf7a22..a3bd039c78e9 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
107#define COREGA_VENDOR_ID 0x07aa 107#define COREGA_VENDOR_ID 0x07aa
108#define COREGA_PRODUCT_ID 0x002a 108#define COREGA_PRODUCT_ID 0x002a
109 109
110/* HL HL-340 (ID: 4348:5523) */
111#define HL340_VENDOR_ID 0x4348
112#define HL340_PRODUCT_ID 0x5523
113
114/* Y.C. Cable U.S.A., Inc - USB to RS-232 */ 110/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
115#define YCCABLE_VENDOR_ID 0x05ad 111#define YCCABLE_VENDOR_ID 0x05ad
116#define YCCABLE_PRODUCT_ID 0x0fba 112#define YCCABLE_PRODUCT_ID 0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f1523ec56..706033753adb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
 14 who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 14 who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
15*/ 15*/
16 16
17#define DRIVER_VERSION "v.1.2.9c" 17#define DRIVER_VERSION "v.1.2.13a"
18#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" 18#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
19#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 19#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
20 20
@@ -31,6 +31,7 @@
31#define SWIMS_USB_REQUEST_SetPower 0x00 31#define SWIMS_USB_REQUEST_SetPower 0x00
32#define SWIMS_USB_REQUEST_SetNmea 0x07 32#define SWIMS_USB_REQUEST_SetNmea 0x07
33#define SWIMS_USB_REQUEST_SetMode 0x0B 33#define SWIMS_USB_REQUEST_SetMode 0x0B
34#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
34#define SWIMS_SET_MODE_Modem 0x0001 35#define SWIMS_SET_MODE_Modem 0x0001
35 36
36/* per port private data */ 37/* per port private data */
@@ -40,18 +41,11 @@
40 41
41static int debug; 42static int debug;
42static int nmea; 43static int nmea;
43static int truinstall = 1;
44
45enum devicetype {
46 DEVICE_3_PORT = 0,
47 DEVICE_1_PORT = 1,
48 DEVICE_INSTALLER = 2,
49};
50 44
51static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) 45static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
52{ 46{
53 int result; 47 int result;
54 dev_dbg(&udev->dev, "%s", "SET POWER STATE\n"); 48 dev_dbg(&udev->dev, "%s", __func__);
55 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 49 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
56 SWIMS_USB_REQUEST_SetPower, /* __u8 request */ 50 SWIMS_USB_REQUEST_SetPower, /* __u8 request */
57 USB_TYPE_VENDOR, /* __u8 request type */ 51 USB_TYPE_VENDOR, /* __u8 request type */
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
63 return result; 57 return result;
64} 58}
65 59
66static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
67{
68 int result;
69 dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
70 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
71 SWIMS_USB_REQUEST_SetMode, /* __u8 request */
72 USB_TYPE_VENDOR, /* __u8 request type */
73 eSWocMode, /* __u16 value */
74 0x0000, /* __u16 index */
75 NULL, /* void *data */
76 0, /* __u16 size */
77 USB_CTRL_SET_TIMEOUT); /* int timeout */
78 return result;
79}
80
81static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) 60static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
82{ 61{
83 int result; 62 int result;
84 dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n"); 63 dev_dbg(&udev->dev, "%s", __func__);
85 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 64 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
86 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ 65 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
87 USB_TYPE_VENDOR, /* __u8 request type */ 66 USB_TYPE_VENDOR, /* __u8 request type */
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
97{ 76{
98 int result; 77 int result;
99 int *num_ports = usb_get_serial_data(serial); 78 int *num_ports = usb_get_serial_data(serial);
79 dev_dbg(&serial->dev->dev, "%s", __func__);
100 80
101 result = *num_ports; 81 result = *num_ports;
102 82
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
110 90
111static int sierra_calc_interface(struct usb_serial *serial) 91static int sierra_calc_interface(struct usb_serial *serial)
112{ 92{
113 int interface; 93 int interface;
114 struct usb_interface *p_interface; 94 struct usb_interface *p_interface;
115 struct usb_host_interface *p_host_interface; 95 struct usb_host_interface *p_host_interface;
96 dev_dbg(&serial->dev->dev, "%s", __func__);
116 97
117 /* Get the interface structure pointer from the serial struct */ 98 /* Get the interface structure pointer from the serial struct */
118 p_interface = serial->interface; 99 p_interface = serial->interface;
119 100
120 /* Get a pointer to the host interface structure */ 101 /* Get a pointer to the host interface structure */
121 p_host_interface = p_interface->cur_altsetting; 102 p_host_interface = p_interface->cur_altsetting;
122 103
123 /* read the interface descriptor for this active altsetting 104 /* read the interface descriptor for this active altsetting
124 * to find out the interface number we are on 105 * to find out the interface number we are on
125 */ 106 */
126 interface = p_host_interface->desc.bInterfaceNumber; 107 interface = p_host_interface->desc.bInterfaceNumber;
127 108
128 return interface; 109 return interface;
129} 110}
130 111
131static int sierra_probe(struct usb_serial *serial, 112static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial,
135 struct usb_device *udev; 116 struct usb_device *udev;
136 int *num_ports; 117 int *num_ports;
137 u8 ifnum; 118 u8 ifnum;
119 u8 numendpoints;
120
121 dev_dbg(&serial->dev->dev, "%s", __func__);
138 122
139 num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); 123 num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
140 if (!num_ports) 124 if (!num_ports)
141 return -ENOMEM; 125 return -ENOMEM;
142 126
143 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; 127 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
128 numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
144 udev = serial->dev; 129 udev = serial->dev;
145 130
146 /* Figure out the interface number from the serial structure */ 131 /* Figure out the interface number from the serial structure */
147 ifnum = sierra_calc_interface(serial); 132 ifnum = sierra_calc_interface(serial);
148
149 /*
150 * If this interface supports more than 1 alternate
151 * select the 2nd one
152 */
153 if (serial->interface->num_altsetting == 2) {
154 dev_dbg(&udev->dev,
155 "Selecting alt setting for interface %d\n",
156 ifnum);
157 133
158 /* We know the alternate setting is 1 for the MC8785 */ 134 /*
159 usb_set_interface(udev, ifnum, 1); 135 * If this interface supports more than 1 alternate
160 } 136 * select the 2nd one
137 */
138 if (serial->interface->num_altsetting == 2) {
139 dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
140 ifnum);
141 /* We know the alternate setting is 1 for the MC8785 */
142 usb_set_interface(udev, ifnum, 1);
143 }
161 144
162 /* Check if in installer mode */ 145 /* Dummy interface present on some SKUs should be ignored */
163 if (truinstall && id->driver_info == DEVICE_INSTALLER) { 146 if (ifnum == 0x99)
164 dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
165 result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
166 /* Don't bind to the device when in installer mode */
167 kfree(num_ports);
168 return -EIO;
169 } else if (id->driver_info == DEVICE_1_PORT)
170 *num_ports = 1;
171 else if (ifnum == 0x99)
172 *num_ports = 0; 147 *num_ports = 0;
148 else if (numendpoints <= 3)
149 *num_ports = 1;
173 else 150 else
174 *num_ports = 3; 151 *num_ports = (numendpoints-1)/2;
152
175 /* 153 /*
176 * save off our num_ports info so that we can use it in the 154 * save off our num_ports info so that we can use it in the
177 * calc_num_ports callback 155 * calc_num_ports callback
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = {
187 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 165 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
188 { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ 166 { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
189 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 167 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
168 { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
190 { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ 169 { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
191 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 170 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
192 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ 171 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
193 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ 172 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
194 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */ 173 /* Sierra Wireless C597 */
174 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
175 /* Sierra Wireless Device */
176 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
177 { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
195 178
196 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 179 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
197 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ 180 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
198 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 181 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
199 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ 182 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
200 { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ 183 { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
201 { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ 184 { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
202 { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ 185 { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
203 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 186 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
204 { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ 187 { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
205 { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ 188 { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
206 { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ 189 { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
207 { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/ 190 { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
191 { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
192 { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
193 { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
208 { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ 194 { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
209 { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ 195 { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
210 { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ 196 { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
211 { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ 197 { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
212 { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ 198 { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
213 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ 199 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
214 { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ 200 { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
215 { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ 201 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
216 202 /* Sierra Wireless C885 */
217 { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */ 203 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
218 { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */ 204 /* Sierra Wireless Device */
219 205 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
220 { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ 206 /* Sierra Wireless Device */
221 { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ 207 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
208
209 { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
210 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
222 211
223 { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
224 { } 212 { }
225}; 213};
226MODULE_DEVICE_TABLE(usb, id_table); 214MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty,
268 if (portdata->rts_state) 256 if (portdata->rts_state)
269 val |= 0x02; 257 val |= 0x02;
270 258
271 /* Determine which port is targeted */ 259 /* If composite device then properly report interface */
272 if (port->bulk_out_endpointAddress == 2) 260 if (serial->num_ports == 1)
273 interface = 0; 261 interface = sierra_calc_interface(serial);
274 else if (port->bulk_out_endpointAddress == 4) 262
275 interface = 1; 263 /* Otherwise we need to do non-composite mapping */
276 else if (port->bulk_out_endpointAddress == 5) 264 else {
277 interface = 2; 265 if (port->bulk_out_endpointAddress == 2)
266 interface = 0;
267 else if (port->bulk_out_endpointAddress == 4)
268 interface = 1;
269 else if (port->bulk_out_endpointAddress == 5)
270 interface = 2;
271 }
278 272
279 return usb_control_msg(serial->dev, 273 return usb_control_msg(serial->dev,
280 usb_rcvctrlpipe(serial->dev, 0), 274 usb_rcvctrlpipe(serial->dev, 0),
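For non-composite devices the bulk-out endpoint address maps directly onto a logical interface number. A standalone restatement of that mapping (hypothetical helper, not part of the patch; the -1 fallback for unknown endpoints is an assumption, since the driver leaves the value unset in that case):

    /* Sketch of the endpoint-to-interface mapping in sierra_send_setup(). */
    static int endpoint_to_interface(unsigned char bulk_out_ep)
    {
            switch (bulk_out_ep) {
            case 2: return 0;
            case 4: return 1;
            case 5: return 2;
            default: return -1;     /* assumption: unmapped endpoint */
            }
    }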
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial)
713static struct usb_serial_driver sierra_device = { 707static struct usb_serial_driver sierra_device = {
714 .driver = { 708 .driver = {
715 .owner = THIS_MODULE, 709 .owner = THIS_MODULE,
716 .name = "sierra1", 710 .name = "sierra",
717 }, 711 },
718 .description = "Sierra USB modem", 712 .description = "Sierra USB modem",
719 .id_table = id_table, 713 .id_table = id_table,
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
769MODULE_VERSION(DRIVER_VERSION); 763MODULE_VERSION(DRIVER_VERSION);
770MODULE_LICENSE("GPL"); 764MODULE_LICENSE("GPL");
771 765
772module_param(truinstall, bool, 0); 766module_param(nmea, bool, S_IRUGO | S_IWUSR);
773MODULE_PARM_DESC(truinstall, "TRU-Install support");
774
775module_param(nmea, bool, 0);
776MODULE_PARM_DESC(nmea, "NMEA streaming"); 767MODULE_PARM_DESC(nmea, "NMEA streaming");
777 768
778#ifdef CONFIG_USB_DEBUG
779module_param(debug, bool, S_IRUGO | S_IWUSR); 769module_param(debug, bool, S_IRUGO | S_IWUSR);
780MODULE_PARM_DESC(debug, "Debug messages"); 770MODULE_PARM_DESC(debug, "Debug messages");
781#endif
782
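The rewritten probe derives the port count from the endpoint descriptors instead of the per-device DEVICE_1_PORT/DEVICE_3_PORT tags it removes. A standalone restatement of the heuristic (hypothetical helper, not part of the patch; the idea is one interrupt endpoint plus a bulk-in/bulk-out pair per port):

    /* Sketch of the port-count logic introduced in sierra_probe(). */
    static int sierra_example_num_ports(unsigned char ifnum,
                                        unsigned char numendpoints)
    {
            if (ifnum == 0x99)              /* dummy interface on some SKUs */
                    return 0;
            if (numendpoints <= 3)          /* at most one bulk pair */
                    return 1;
            return (numendpoints - 1) / 2;  /* e.g. 7 endpoints -> 3 ports */
    }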
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531eedea..b157c48e8b78 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial)
122 122
123 dbg("%s", __func__); 123 dbg("%s", __func__);
124 124
125 if (serial == NULL)
126 return;
127
128 for (i = 0; i < serial->num_ports; ++i) 125 for (i = 0; i < serial->num_ports; ++i)
129 serial_table[serial->minor + i] = NULL; 126 serial_table[serial->minor + i] = NULL;
130} 127}
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref)
142 serial->type->shutdown(serial); 139 serial->type->shutdown(serial);
143 140
144 /* return the minor range that this device had */ 141 /* return the minor range that this device had */
145 return_serial(serial); 142 if (serial->minor != SERIAL_TTY_NO_MINOR)
143 return_serial(serial);
146 144
147 for (i = 0; i < serial->num_ports; ++i) 145 for (i = 0; i < serial->num_ports; ++i)
148 serial->port[i]->port.count = 0; 146 serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
575 serial->interface = interface; 573 serial->interface = interface;
576 kref_init(&serial->kref); 574 kref_init(&serial->kref);
577 mutex_init(&serial->disc_mutex); 575 mutex_init(&serial->disc_mutex);
576 serial->minor = SERIAL_TTY_NO_MINOR;
578 577
579 return serial; 578 return serial;
580} 579}
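The SERIAL_TTY_NO_MINOR sentinel lets destroy_serial() tell a device that never received a minor range apart from one that did. A minimal userspace sketch of the same idiom (the sentinel value 255 is illustrative; the kernel defines SERIAL_TTY_NO_MINOR in its own headers):

    #include <stdio.h>

    #define SERIAL_TTY_NO_MINOR 255         /* illustrative value */

    struct serial { unsigned char minor; };

    static void return_serial(struct serial *s)
    {
            printf("releasing minor range starting at %u\n", s->minor);
    }

    static void destroy_serial(struct serial *s)
    {
            /* Only give minors back if probe actually assigned some. */
            if (s->minor != SERIAL_TTY_NO_MINOR)
                    return_serial(s);
    }

    int main(void)
    {
            struct serial s = { .minor = SERIAL_TTY_NO_MINOR };
            destroy_serial(&s);             /* safe: never registered */
            return 0;
    }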
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d9249632ae1..c76034672c18 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA
146 on the resulting scsi device node returns the Karma to normal 146 on the resulting scsi device node returns the Karma to normal
147 operation. 147 operation.
148 148
149config USB_STORAGE_SIERRA
150 bool "Sierra Wireless TRU-Install Feature Support"
151 depends on USB_STORAGE
152 help
153 Say Y here to include additional code to support Sierra Wireless
154 products with the TRU-Install feature (e.g., AC597E, AC881U).
155
156 This code switches the Sierra Wireless device from being in
157 Mass Storage mode to Modem mode. It also has the ability to
158 support host software upgrades should full Linux support be added
159 to TRU-Install.
160
149config USB_STORAGE_CYPRESS_ATACB 161config USB_STORAGE_CYPRESS_ATACB
150 bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" 162 bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
151 depends on USB_STORAGE 163 depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c766c53..bc3415b475c9 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
21usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o 21usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
22usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o 22usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
23usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o 23usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
24usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
24usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o 25usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
25 26
26usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ 27usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 000000000000..4359a2cb42df
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
1#include <scsi/scsi.h>
2#include <scsi/scsi_host.h>
3#include <scsi/scsi_cmnd.h>
4#include <scsi/scsi_device.h>
5#include <linux/usb.h>
6
7#include "usb.h"
8#include "transport.h"
9#include "protocol.h"
10#include "scsiglue.h"
11#include "sierra_ms.h"
12#include "debug.h"
13
14#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
15#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
16#define SWIMS_USB_INDEX_SetMode 0x0000
17#define SWIMS_SET_MODE_Modem 0x0001
18
19#define TRU_NORMAL 0x01
20#define TRU_FORCE_MS 0x02
21#define TRU_FORCE_MODEM 0x03
22
23static unsigned int swi_tru_install = 1;
24module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
25MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
26 " 2=Force CD-Rom, 3=Force Modem)");
27
28struct swoc_info {
29 __u8 rev;
30 __u8 reserved[8];
31 __u16 LinuxSKU;
32 __u16 LinuxVer;
33 __u8 reserved2[47];
34} __attribute__((__packed__));
35
36static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
37{
38 if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
39 (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
40 return true;
41 else
42 return false;
43}
44
45static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
46{
47 int result;
48 US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
49 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
50 SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
51 USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
52 eSWocMode, /* __u16 value */
53 0x0000, /* __u16 index */
54 NULL, /* void *data */
55 0, /* __u16 size */
56 USB_CTRL_SET_TIMEOUT); /* int timeout */
57 return result;
58}
59
60
61static int sierra_get_swoc_info(struct usb_device *udev,
62 struct swoc_info *swocInfo)
63{
64 int result;
65
66 US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
67
68 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
69 SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
70 USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
71 0, /* __u16 value */
72 0, /* __u16 index */
73 (void *) swocInfo, /* void *data */
74 sizeof(struct swoc_info), /* __u16 size */
75 USB_CTRL_SET_TIMEOUT); /* int timeout */
76
77 swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
78 swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
79 return result;
80}
81
82static void debug_swoc(struct swoc_info *swocInfo)
83{
84 US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
85 US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
86 US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
87}
88
89
90static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
91 char *buf)
92{
93 struct swoc_info *swocInfo;
94 struct usb_interface *intf = to_usb_interface(dev);
95 struct usb_device *udev = interface_to_usbdev(intf);
96 int result;
97 if (swi_tru_install == TRU_FORCE_MS) {
98 result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
99 } else {
100 swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
101 if (!swocInfo) {
102 US_DEBUGP("SWIMS: Allocation failure\n");
103 snprintf(buf, PAGE_SIZE, "Error\n");
104 return -ENOMEM;
105 }
106 result = sierra_get_swoc_info(udev, swocInfo);
107 if (result < 0) {
108 US_DEBUGP("SWIMS: failed SWoC query\n");
109 kfree(swocInfo);
110 snprintf(buf, PAGE_SIZE, "Error\n");
111 return -EIO;
112 }
113 debug_swoc(swocInfo);
114 result = snprintf(buf, PAGE_SIZE,
115 "REV=%02d SKU=%04X VER=%04X\n",
116 swocInfo->rev,
117 swocInfo->LinuxSKU,
118 swocInfo->LinuxVer);
119 kfree(swocInfo);
120 }
121 return result;
122}
123static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
124
125int sierra_ms_init(struct us_data *us)
126{
127 int result, retries;
128 signed long delay_t;
129 struct swoc_info *swocInfo;
130 struct usb_device *udev;
131 struct Scsi_Host *sh;
132 struct scsi_device *sd;
133
134 delay_t = 2;
135 retries = 3;
136 result = 0;
137 udev = us->pusb_dev;
138
139 sh = us_to_host(us);
140 sd = scsi_get_host_dev(sh);
141
142 US_DEBUGP("SWIMS: sierra_ms_init called\n");
143
144 /* Force Modem mode */
145 if (swi_tru_install == TRU_FORCE_MODEM) {
146 US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
147 result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
148 if (result < 0)
149 US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
150 return -EIO;
151 }
152 /* Force Mass Storage mode (keep CD-Rom) */
153 else if (swi_tru_install == TRU_FORCE_MS) {
154 US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
155 goto complete;
156 }
157 /* Normal TRU-Install Logic */
158 else {
159 US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
160
161 swocInfo = kmalloc(sizeof(struct swoc_info),
162 GFP_KERNEL);
163 if (!swocInfo) {
164 US_DEBUGP("SWIMS: %s", "Allocation failure\n");
165 return -ENOMEM;
166 }
167
168 retries = 3;
169 do {
170 retries--;
171 result = sierra_get_swoc_info(udev, swocInfo);
172 if (result < 0) {
173 US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
174 schedule_timeout_uninterruptible(2*HZ);
175 }
176 } while (retries && result < 0);
177
178 if (result < 0) {
179 US_DEBUGP("SWIMS: %s",
180 "Completely failed SWoC query\n");
181 kfree(swocInfo);
182 return -EIO;
183 }
184
185 debug_swoc(swocInfo);
186
 187 /* If there is no Linux software on the TRU-Install device
188 * then switch to modem mode
189 */
190 if (!containsFullLinuxPackage(swocInfo)) {
191 US_DEBUGP("SWIMS: %s",
192 "Switching to Modem Mode\n");
193 result = sierra_set_ms_mode(udev,
194 SWIMS_SET_MODE_Modem);
195 if (result < 0)
196 US_DEBUGP("SWIMS: Failed to switch modem\n");
197 kfree(swocInfo);
198 return -EIO;
199 }
200 kfree(swocInfo);
201 }
202complete:
203 result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
204
205 return USB_STOR_TRANSPORT_GOOD;
206}
207
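Besides the mode switch, sierra_ms_init() exposes a truinst attribute on the USB interface. A hypothetical userspace reader (the sysfs path below is illustrative; the real path depends on where the interface sits in the device tree):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            /* Path is illustrative only. */
            FILE *f = fopen("/sys/bus/usb/devices/1-1:1.0/truinst", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("%s", buf);      /* e.g. "REV=02 SKU=2100 VER=0001" */
            fclose(f);
            return 0;
    }

At load time the swi_tru_install parameter selects the behaviour (1 = full SWoC logic, 2 = force mass storage, 3 = force modem), matching the TRU_* constants defined above.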
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 000000000000..bb48634ac1fc
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
1#ifndef _SIERRA_MS_H_
2#define _SIERRA_MS_H_
3extern int sierra_ms_init(struct us_data *us);
4#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb7b2b0..3523a0bfa0ff 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1032 1032
1033 /* try to compute the actual residue, based on how much data 1033 /* try to compute the actual residue, based on how much data
1034 * was really transferred and what the device tells us */ 1034 * was really transferred and what the device tells us */
1035 if (residue) { 1035 if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
1036 if (!(us->fflags & US_FL_IGNORE_RESIDUE)) { 1036
1037 /* Heuristically detect devices that generate bogus residues
1038 * by seeing what happens with INQUIRY and READ CAPACITY
1039 * commands.
1040 */
1041 if (bcs->Status == US_BULK_STAT_OK &&
1042 scsi_get_resid(srb) == 0 &&
1043 ((srb->cmnd[0] == INQUIRY &&
1044 transfer_length == 36) ||
1045 (srb->cmnd[0] == READ_CAPACITY &&
1046 transfer_length == 8))) {
1047 us->fflags |= US_FL_IGNORE_RESIDUE;
1048
1049 } else {
1037 residue = min(residue, transfer_length); 1050 residue = min(residue, transfer_length);
1038 scsi_set_resid(srb, max(scsi_get_resid(srb), 1051 scsi_set_resid(srb, max(scsi_get_resid(srb),
1039 (int) residue)); 1052 (int) residue));
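The residue heuristic reads as a single predicate: status is good, the SCSI layer saw a full transfer, yet the CSW still reports residue on a fixed-length INQUIRY (36 bytes) or READ CAPACITY (8 bytes). A standalone restatement (hypothetical helper; the opcode values come from the SCSI specification):

    #include <stdbool.h>

    #define INQUIRY         0x12    /* SCSI opcode */
    #define READ_CAPACITY   0x25    /* SCSI opcode */

    /* True when the device's reported residue cannot be trusted. */
    static bool residue_is_bogus(unsigned char opcode,
                                 unsigned int transfer_length,
                                 unsigned int residue,
                                 int scsi_resid, bool status_ok)
    {
            return status_ok && residue && scsi_resid == 0 &&
                   ((opcode == INQUIRY && transfer_length == 36) ||
                    (opcode == READ_CAPACITY && transfer_length == 8));
    }

Once the predicate fires the driver sets US_FL_IGNORE_RESIDUE, so later commands ignore the residue field entirely.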
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f55aa96..ba412e68d474 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
225 US_SC_DEVICE, US_PR_DEVICE, NULL, 225 US_SC_DEVICE, US_PR_DEVICE, NULL,
226 US_FL_MAX_SECTORS_64 ), 226 US_FL_MAX_SECTORS_64 ),
227 227
228/* Reported by Cedric Godin <cedric@belbone.be> */
229UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
230 "Nokia",
231 "5300",
232 US_SC_DEVICE, US_PR_DEVICE, NULL,
233 US_FL_FIX_CAPACITY ),
234
228/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ 235/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
229UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, 236UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
230 "SMSC", 237 "SMSC",
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
356 US_FL_FIX_CAPACITY), 363 US_FL_FIX_CAPACITY),
357 364
358/* Reported by Emil Larsson <emil@swip.net> */ 365/* Reported by Emil Larsson <emil@swip.net> */
359UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110, 366UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
360 "NIKON", 367 "NIKON",
361 "NIKON DSC D80", 368 "NIKON DSC D80",
362 US_SC_DEVICE, US_PR_DEVICE, NULL, 369 US_SC_DEVICE, US_PR_DEVICE, NULL,
363 US_FL_FIX_CAPACITY), 370 US_FL_FIX_CAPACITY),
364 371
365/* Reported by Ortwin Glueck <odi@odi.ch> */ 372/* Reported by Ortwin Glueck <odi@odi.ch> */
366UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110, 373UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
367 "NIKON", 374 "NIKON",
368 "NIKON DSC D40", 375 "NIKON DSC D40",
369 US_SC_DEVICE, US_PR_DEVICE, NULL, 376 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
1185 US_SC_DEVICE, US_PR_DEVICE, NULL, 1192 US_SC_DEVICE, US_PR_DEVICE, NULL,
1186 US_FL_FIX_INQUIRY ), 1193 US_FL_FIX_INQUIRY ),
1187 1194
1195/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
1196UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
1197 "Simple Tech/Datafab",
1198 "CF+SM Reader",
1199 US_SC_DEVICE, US_PR_DEVICE, NULL,
1200 US_FL_IGNORE_RESIDUE ),
1201
1188/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant 1202/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
1189 * to the USB storage specification in two ways: 1203 * to the USB storage specification in two ways:
1190 * - They tell us they are using transport protocol CBI. In reality they 1204 * - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1562 US_SC_DEVICE, US_PR_DEVICE, NULL, 1576 US_SC_DEVICE, US_PR_DEVICE, NULL,
1563 0), 1577 0),
1564 1578
1579#ifdef CONFIG_USB_STORAGE_SIERRA
1565/* Reported by Kevin Lloyd <linux@sierrawireless.com> 1580/* Reported by Kevin Lloyd <linux@sierrawireless.com>
1566 * Entry is needed for the initializer function override, 1581 * Entry is needed for the initializer function override,
1567 * which instructs the device to load as a modem 1582 * which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1570UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, 1585UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
1571 "Sierra Wireless", 1586 "Sierra Wireless",
1572 "USB MMC Storage", 1587 "USB MMC Storage",
1573 US_SC_DEVICE, US_PR_DEVICE, NULL, 1588 US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
1574 US_FL_IGNORE_DEVICE), 1589 0),
1590#endif
1575 1591
1576/* Reported by Jaco Kroon <jaco@kroon.co.za> 1592/* Reported by Jaco Kroon <jaco@kroon.co.za>
1577 * The usb-storage module found on the Digitech GNX4 (and supposedly other 1593 * The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
1743 US_FL_FIX_CAPACITY), 1759 US_FL_FIX_CAPACITY),
1744 1760
1745/* 1761/*
1762 * Patch by Jost Diederichs <jost@qdusa.com>
1763 */
1764UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
1765 "Motorola Inc.",
1766 "Motorola Phone (RAZRV3xx)",
1767 US_SC_DEVICE, US_PR_DEVICE, NULL,
1768 US_FL_FIX_CAPACITY),
1769
1770/*
1746 * Patch by Constantin Baranov <const@tltsu.ru> 1771 * Patch by Constantin Baranov <const@tltsu.ru>
1747 * Report by Andreas Koenecke. 1772 * Report by Andreas Koenecke.
1748 * Motorola ROKR Z6. 1773 * Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1767 US_SC_DEVICE, US_PR_DEVICE, NULL, 1792 US_SC_DEVICE, US_PR_DEVICE, NULL,
1768 US_FL_FIX_CAPACITY ), 1793 US_FL_FIX_CAPACITY ),
1769 1794
1795/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1796UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1797 "iRiver",
1798 "MP3 T10",
1799 US_SC_DEVICE, US_PR_DEVICE, NULL,
1800 US_FL_IGNORE_RESIDUE ),
1801
1770/* 1802/*
1771 * David Härdeman <david@2gen.com> 1803 * David Härdeman <david@2gen.com>
1772 * The key makes the SCSI stack print confusing (but harmless) messages 1804 * The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851be985..73679aa506de 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
102#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB 102#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
103#include "cypress_atacb.h" 103#include "cypress_atacb.h"
104#endif 104#endif
105#ifdef CONFIG_USB_STORAGE_SIERRA
106#include "sierra_ms.h"
107#endif
105 108
106/* Some informational data */ 109/* Some informational data */
107MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); 110MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c4e7d721bd8d..89d2fb7b991a 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -30,16 +30,16 @@
30 30
31static struct config_group *space_list; 31static struct config_group *space_list;
32static struct config_group *comm_list; 32static struct config_group *comm_list;
33static struct comm *local_comm; 33static struct dlm_comm *local_comm;
34 34
35struct clusters; 35struct dlm_clusters;
36struct cluster; 36struct dlm_cluster;
37struct spaces; 37struct dlm_spaces;
38struct space; 38struct dlm_space;
39struct comms; 39struct dlm_comms;
40struct comm; 40struct dlm_comm;
41struct nodes; 41struct dlm_nodes;
42struct node; 42struct dlm_node;
43 43
44static struct config_group *make_cluster(struct config_group *, const char *); 44static struct config_group *make_cluster(struct config_group *, const char *);
45static void drop_cluster(struct config_group *, struct config_item *); 45static void drop_cluster(struct config_group *, struct config_item *);
@@ -68,17 +68,22 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
68static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, 68static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
69 const char *buf, size_t len); 69 const char *buf, size_t len);
70 70
71static ssize_t comm_nodeid_read(struct comm *cm, char *buf); 71static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
72static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len); 72static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
73static ssize_t comm_local_read(struct comm *cm, char *buf); 73 size_t len);
74static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len); 74static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
75static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len); 75static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
76static ssize_t node_nodeid_read(struct node *nd, char *buf); 76 size_t len);
77static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len); 77static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
78static ssize_t node_weight_read(struct node *nd, char *buf); 78 size_t len);
79static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len); 79static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
80 80static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
81struct cluster { 81 size_t len);
82static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
83static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
84 size_t len);
85
86struct dlm_cluster {
82 struct config_group group; 87 struct config_group group;
83 unsigned int cl_tcp_port; 88 unsigned int cl_tcp_port;
84 unsigned int cl_buffer_size; 89 unsigned int cl_buffer_size;
@@ -109,11 +114,11 @@ enum {
109 114
110struct cluster_attribute { 115struct cluster_attribute {
111 struct configfs_attribute attr; 116 struct configfs_attribute attr;
112 ssize_t (*show)(struct cluster *, char *); 117 ssize_t (*show)(struct dlm_cluster *, char *);
113 ssize_t (*store)(struct cluster *, const char *, size_t); 118 ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
114}; 119};
115 120
116static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field, 121static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
117 int *info_field, int check_zero, 122 int *info_field, int check_zero,
118 const char *buf, size_t len) 123 const char *buf, size_t len)
119{ 124{
@@ -134,12 +139,12 @@ static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
134} 139}
135 140
136#define CLUSTER_ATTR(name, check_zero) \ 141#define CLUSTER_ATTR(name, check_zero) \
137static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len) \ 142static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
138{ \ 143{ \
139 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \ 144 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
140 check_zero, buf, len); \ 145 check_zero, buf, len); \
141} \ 146} \
142static ssize_t name##_read(struct cluster *cl, char *buf) \ 147static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \
143{ \ 148{ \
144 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \ 149 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
145} \ 150} \
@@ -181,8 +186,8 @@ enum {
181 186
182struct comm_attribute { 187struct comm_attribute {
183 struct configfs_attribute attr; 188 struct configfs_attribute attr;
184 ssize_t (*show)(struct comm *, char *); 189 ssize_t (*show)(struct dlm_comm *, char *);
185 ssize_t (*store)(struct comm *, const char *, size_t); 190 ssize_t (*store)(struct dlm_comm *, const char *, size_t);
186}; 191};
187 192
188static struct comm_attribute comm_attr_nodeid = { 193static struct comm_attribute comm_attr_nodeid = {
@@ -222,8 +227,8 @@ enum {
222 227
223struct node_attribute { 228struct node_attribute {
224 struct configfs_attribute attr; 229 struct configfs_attribute attr;
225 ssize_t (*show)(struct node *, char *); 230 ssize_t (*show)(struct dlm_node *, char *);
226 ssize_t (*store)(struct node *, const char *, size_t); 231 ssize_t (*store)(struct dlm_node *, const char *, size_t);
227}; 232};
228 233
229static struct node_attribute node_attr_nodeid = { 234static struct node_attribute node_attr_nodeid = {
@@ -248,26 +253,26 @@ static struct configfs_attribute *node_attrs[] = {
248 NULL, 253 NULL,
249}; 254};
250 255
251struct clusters { 256struct dlm_clusters {
252 struct configfs_subsystem subsys; 257 struct configfs_subsystem subsys;
253}; 258};
254 259
255struct spaces { 260struct dlm_spaces {
256 struct config_group ss_group; 261 struct config_group ss_group;
257}; 262};
258 263
259struct space { 264struct dlm_space {
260 struct config_group group; 265 struct config_group group;
261 struct list_head members; 266 struct list_head members;
262 struct mutex members_lock; 267 struct mutex members_lock;
263 int members_count; 268 int members_count;
264}; 269};
265 270
266struct comms { 271struct dlm_comms {
267 struct config_group cs_group; 272 struct config_group cs_group;
268}; 273};
269 274
270struct comm { 275struct dlm_comm {
271 struct config_item item; 276 struct config_item item;
272 int nodeid; 277 int nodeid;
273 int local; 278 int local;
@@ -275,11 +280,11 @@ struct comm {
275 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; 280 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
276}; 281};
277 282
278struct nodes { 283struct dlm_nodes {
279 struct config_group ns_group; 284 struct config_group ns_group;
280}; 285};
281 286
282struct node { 287struct dlm_node {
283 struct config_item item; 288 struct config_item item;
284 struct list_head list; /* space->members */ 289 struct list_head list; /* space->members */
285 int nodeid; 290 int nodeid;
@@ -372,38 +377,40 @@ static struct config_item_type node_type = {
372 .ct_owner = THIS_MODULE, 377 .ct_owner = THIS_MODULE,
373}; 378};
374 379
375static struct cluster *to_cluster(struct config_item *i) 380static struct dlm_cluster *to_cluster(struct config_item *i)
376{ 381{
377 return i ? container_of(to_config_group(i), struct cluster, group):NULL; 382 return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
383 NULL;
378} 384}
379 385
380static struct space *to_space(struct config_item *i) 386static struct dlm_space *to_space(struct config_item *i)
381{ 387{
382 return i ? container_of(to_config_group(i), struct space, group) : NULL; 388 return i ? container_of(to_config_group(i), struct dlm_space, group) :
389 NULL;
383} 390}
384 391
385static struct comm *to_comm(struct config_item *i) 392static struct dlm_comm *to_comm(struct config_item *i)
386{ 393{
387 return i ? container_of(i, struct comm, item) : NULL; 394 return i ? container_of(i, struct dlm_comm, item) : NULL;
388} 395}
389 396
390static struct node *to_node(struct config_item *i) 397static struct dlm_node *to_node(struct config_item *i)
391{ 398{
392 return i ? container_of(i, struct node, item) : NULL; 399 return i ? container_of(i, struct dlm_node, item) : NULL;
393} 400}
394 401
395static struct config_group *make_cluster(struct config_group *g, 402static struct config_group *make_cluster(struct config_group *g,
396 const char *name) 403 const char *name)
397{ 404{
398 struct cluster *cl = NULL; 405 struct dlm_cluster *cl = NULL;
399 struct spaces *sps = NULL; 406 struct dlm_spaces *sps = NULL;
400 struct comms *cms = NULL; 407 struct dlm_comms *cms = NULL;
401 void *gps = NULL; 408 void *gps = NULL;
402 409
403 cl = kzalloc(sizeof(struct cluster), GFP_KERNEL); 410 cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL);
404 gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); 411 gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
405 sps = kzalloc(sizeof(struct spaces), GFP_KERNEL); 412 sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL);
406 cms = kzalloc(sizeof(struct comms), GFP_KERNEL); 413 cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL);
407 414
408 if (!cl || !gps || !sps || !cms) 415 if (!cl || !gps || !sps || !cms)
409 goto fail; 416 goto fail;
@@ -443,7 +450,7 @@ static struct config_group *make_cluster(struct config_group *g,
443 450
444static void drop_cluster(struct config_group *g, struct config_item *i) 451static void drop_cluster(struct config_group *g, struct config_item *i)
445{ 452{
446 struct cluster *cl = to_cluster(i); 453 struct dlm_cluster *cl = to_cluster(i);
447 struct config_item *tmp; 454 struct config_item *tmp;
448 int j; 455 int j;
449 456
@@ -461,20 +468,20 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
461 468
462static void release_cluster(struct config_item *i) 469static void release_cluster(struct config_item *i)
463{ 470{
464 struct cluster *cl = to_cluster(i); 471 struct dlm_cluster *cl = to_cluster(i);
465 kfree(cl->group.default_groups); 472 kfree(cl->group.default_groups);
466 kfree(cl); 473 kfree(cl);
467} 474}
468 475
469static struct config_group *make_space(struct config_group *g, const char *name) 476static struct config_group *make_space(struct config_group *g, const char *name)
470{ 477{
471 struct space *sp = NULL; 478 struct dlm_space *sp = NULL;
472 struct nodes *nds = NULL; 479 struct dlm_nodes *nds = NULL;
473 void *gps = NULL; 480 void *gps = NULL;
474 481
475 sp = kzalloc(sizeof(struct space), GFP_KERNEL); 482 sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL);
476 gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL); 483 gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
477 nds = kzalloc(sizeof(struct nodes), GFP_KERNEL); 484 nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL);
478 485
479 if (!sp || !gps || !nds) 486 if (!sp || !gps || !nds)
480 goto fail; 487 goto fail;
@@ -500,7 +507,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
500 507
501static void drop_space(struct config_group *g, struct config_item *i) 508static void drop_space(struct config_group *g, struct config_item *i)
502{ 509{
503 struct space *sp = to_space(i); 510 struct dlm_space *sp = to_space(i);
504 struct config_item *tmp; 511 struct config_item *tmp;
505 int j; 512 int j;
506 513
@@ -517,16 +524,16 @@ static void drop_space(struct config_group *g, struct config_item *i)
517 524
518static void release_space(struct config_item *i) 525static void release_space(struct config_item *i)
519{ 526{
520 struct space *sp = to_space(i); 527 struct dlm_space *sp = to_space(i);
521 kfree(sp->group.default_groups); 528 kfree(sp->group.default_groups);
522 kfree(sp); 529 kfree(sp);
523} 530}
524 531
525static struct config_item *make_comm(struct config_group *g, const char *name) 532static struct config_item *make_comm(struct config_group *g, const char *name)
526{ 533{
527 struct comm *cm; 534 struct dlm_comm *cm;
528 535
529 cm = kzalloc(sizeof(struct comm), GFP_KERNEL); 536 cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL);
530 if (!cm) 537 if (!cm)
531 return ERR_PTR(-ENOMEM); 538 return ERR_PTR(-ENOMEM);
532 539
@@ -539,7 +546,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
539 546
540static void drop_comm(struct config_group *g, struct config_item *i) 547static void drop_comm(struct config_group *g, struct config_item *i)
541{ 548{
542 struct comm *cm = to_comm(i); 549 struct dlm_comm *cm = to_comm(i);
543 if (local_comm == cm) 550 if (local_comm == cm)
544 local_comm = NULL; 551 local_comm = NULL;
545 dlm_lowcomms_close(cm->nodeid); 552 dlm_lowcomms_close(cm->nodeid);
@@ -550,16 +557,16 @@ static void drop_comm(struct config_group *g, struct config_item *i)
550 557
551static void release_comm(struct config_item *i) 558static void release_comm(struct config_item *i)
552{ 559{
553 struct comm *cm = to_comm(i); 560 struct dlm_comm *cm = to_comm(i);
554 kfree(cm); 561 kfree(cm);
555} 562}
556 563
557static struct config_item *make_node(struct config_group *g, const char *name) 564static struct config_item *make_node(struct config_group *g, const char *name)
558{ 565{
559 struct space *sp = to_space(g->cg_item.ci_parent); 566 struct dlm_space *sp = to_space(g->cg_item.ci_parent);
560 struct node *nd; 567 struct dlm_node *nd;
561 568
562 nd = kzalloc(sizeof(struct node), GFP_KERNEL); 569 nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
563 if (!nd) 570 if (!nd)
564 return ERR_PTR(-ENOMEM); 571 return ERR_PTR(-ENOMEM);
565 572
@@ -578,8 +585,8 @@ static struct config_item *make_node(struct config_group *g, const char *name)
578 585
579static void drop_node(struct config_group *g, struct config_item *i) 586static void drop_node(struct config_group *g, struct config_item *i)
580{ 587{
581 struct space *sp = to_space(g->cg_item.ci_parent); 588 struct dlm_space *sp = to_space(g->cg_item.ci_parent);
582 struct node *nd = to_node(i); 589 struct dlm_node *nd = to_node(i);
583 590
584 mutex_lock(&sp->members_lock); 591 mutex_lock(&sp->members_lock);
585 list_del(&nd->list); 592 list_del(&nd->list);
@@ -591,11 +598,11 @@ static void drop_node(struct config_group *g, struct config_item *i)
591 598
592static void release_node(struct config_item *i) 599static void release_node(struct config_item *i)
593{ 600{
594 struct node *nd = to_node(i); 601 struct dlm_node *nd = to_node(i);
595 kfree(nd); 602 kfree(nd);
596} 603}
597 604
598static struct clusters clusters_root = { 605static struct dlm_clusters clusters_root = {
599 .subsys = { 606 .subsys = {
600 .su_group = { 607 .su_group = {
601 .cg_item = { 608 .cg_item = {
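The dlm rename is mechanical throughout: every configfs accessor still recovers its enclosing object from an embedded member via container_of. A generic userspace sketch of that pattern (hypothetical example types; the kernel macro adds type checking but is otherwise equivalent):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct item { int id; };

    struct dlm_node_example {               /* stand-in for struct dlm_node */
            struct item item;               /* embedded member */
            int nodeid;
    };

    static struct dlm_node_example *to_node_example(struct item *i)
    {
            return i ? container_of(i, struct dlm_node_example, item) : NULL;
    }

    int main(void)
    {
            struct dlm_node_example nd = { .nodeid = 7 };
            printf("%d\n", to_node_example(&nd.item)->nodeid);  /* prints 7 */
            return 0;
    }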
@@ -625,7 +632,7 @@ void dlm_config_exit(void)
625static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a, 632static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
626 char *buf) 633 char *buf)
627{ 634{
628 struct cluster *cl = to_cluster(i); 635 struct dlm_cluster *cl = to_cluster(i);
629 struct cluster_attribute *cla = 636 struct cluster_attribute *cla =
630 container_of(a, struct cluster_attribute, attr); 637 container_of(a, struct cluster_attribute, attr);
631 return cla->show ? cla->show(cl, buf) : 0; 638 return cla->show ? cla->show(cl, buf) : 0;
@@ -635,7 +642,7 @@ static ssize_t store_cluster(struct config_item *i,
635 struct configfs_attribute *a, 642 struct configfs_attribute *a,
636 const char *buf, size_t len) 643 const char *buf, size_t len)
637{ 644{
638 struct cluster *cl = to_cluster(i); 645 struct dlm_cluster *cl = to_cluster(i);
639 struct cluster_attribute *cla = 646 struct cluster_attribute *cla =
640 container_of(a, struct cluster_attribute, attr); 647 container_of(a, struct cluster_attribute, attr);
641 return cla->store ? cla->store(cl, buf, len) : -EINVAL; 648 return cla->store ? cla->store(cl, buf, len) : -EINVAL;
@@ -644,7 +651,7 @@ static ssize_t store_cluster(struct config_item *i,
644static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 651static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
645 char *buf) 652 char *buf)
646{ 653{
647 struct comm *cm = to_comm(i); 654 struct dlm_comm *cm = to_comm(i);
648 struct comm_attribute *cma = 655 struct comm_attribute *cma =
649 container_of(a, struct comm_attribute, attr); 656 container_of(a, struct comm_attribute, attr);
650 return cma->show ? cma->show(cm, buf) : 0; 657 return cma->show ? cma->show(cm, buf) : 0;
@@ -653,29 +660,31 @@ static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
653static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, 660static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
654 const char *buf, size_t len) 661 const char *buf, size_t len)
655{ 662{
656 struct comm *cm = to_comm(i); 663 struct dlm_comm *cm = to_comm(i);
657 struct comm_attribute *cma = 664 struct comm_attribute *cma =
658 container_of(a, struct comm_attribute, attr); 665 container_of(a, struct comm_attribute, attr);
659 return cma->store ? cma->store(cm, buf, len) : -EINVAL; 666 return cma->store ? cma->store(cm, buf, len) : -EINVAL;
660} 667}
661 668
662static ssize_t comm_nodeid_read(struct comm *cm, char *buf) 669static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
663{ 670{
664 return sprintf(buf, "%d\n", cm->nodeid); 671 return sprintf(buf, "%d\n", cm->nodeid);
665} 672}
666 673
667static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len) 674static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
675 size_t len)
668{ 676{
669 cm->nodeid = simple_strtol(buf, NULL, 0); 677 cm->nodeid = simple_strtol(buf, NULL, 0);
670 return len; 678 return len;
671} 679}
672 680
673static ssize_t comm_local_read(struct comm *cm, char *buf) 681static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
674{ 682{
675 return sprintf(buf, "%d\n", cm->local); 683 return sprintf(buf, "%d\n", cm->local);
676} 684}
677 685
678static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len) 686static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
687 size_t len)
679{ 688{
680 cm->local= simple_strtol(buf, NULL, 0); 689 cm->local= simple_strtol(buf, NULL, 0);
681 if (cm->local && !local_comm) 690 if (cm->local && !local_comm)
@@ -683,7 +692,7 @@ static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
683 return len; 692 return len;
684} 693}
685 694
686static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len) 695static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
687{ 696{
688 struct sockaddr_storage *addr; 697 struct sockaddr_storage *addr;
689 698
@@ -705,7 +714,7 @@ static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
705static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, 714static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
706 char *buf) 715 char *buf)
707{ 716{
708 struct node *nd = to_node(i); 717 struct dlm_node *nd = to_node(i);
709 struct node_attribute *nda = 718 struct node_attribute *nda =
710 container_of(a, struct node_attribute, attr); 719 container_of(a, struct node_attribute, attr);
711 return nda->show ? nda->show(nd, buf) : 0; 720 return nda->show ? nda->show(nd, buf) : 0;
@@ -714,29 +723,31 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
714static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, 723static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
715 const char *buf, size_t len) 724 const char *buf, size_t len)
716{ 725{
717 struct node *nd = to_node(i); 726 struct dlm_node *nd = to_node(i);
718 struct node_attribute *nda = 727 struct node_attribute *nda =
719 container_of(a, struct node_attribute, attr); 728 container_of(a, struct node_attribute, attr);
720 return nda->store ? nda->store(nd, buf, len) : -EINVAL; 729 return nda->store ? nda->store(nd, buf, len) : -EINVAL;
721} 730}
722 731
723static ssize_t node_nodeid_read(struct node *nd, char *buf) 732static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
724{ 733{
725 return sprintf(buf, "%d\n", nd->nodeid); 734 return sprintf(buf, "%d\n", nd->nodeid);
726} 735}
727 736
728static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len) 737static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
738 size_t len)
729{ 739{
730 nd->nodeid = simple_strtol(buf, NULL, 0); 740 nd->nodeid = simple_strtol(buf, NULL, 0);
731 return len; 741 return len;
732} 742}
733 743
734static ssize_t node_weight_read(struct node *nd, char *buf) 744static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
735{ 745{
736 return sprintf(buf, "%d\n", nd->weight); 746 return sprintf(buf, "%d\n", nd->weight);
737} 747}
738 748
739static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len) 749static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
750 size_t len)
740{ 751{
741 nd->weight = simple_strtol(buf, NULL, 0); 752 nd->weight = simple_strtol(buf, NULL, 0);
742 return len; 753 return len;
@@ -746,7 +757,7 @@ static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
746 * Functions for the dlm to get the info that's been configured 757 * Functions for the dlm to get the info that's been configured
747 */ 758 */
748 759
749static struct space *get_space(char *name) 760static struct dlm_space *get_space(char *name)
750{ 761{
751 struct config_item *i; 762 struct config_item *i;
752 763
@@ -760,15 +771,15 @@ static struct space *get_space(char *name)
760 return to_space(i); 771 return to_space(i);
761} 772}
762 773
763static void put_space(struct space *sp) 774static void put_space(struct dlm_space *sp)
764{ 775{
765 config_item_put(&sp->group.cg_item); 776 config_item_put(&sp->group.cg_item);
766} 777}
767 778
768static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr) 779static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
769{ 780{
770 struct config_item *i; 781 struct config_item *i;
771 struct comm *cm = NULL; 782 struct dlm_comm *cm = NULL;
772 int found = 0; 783 int found = 0;
773 784
774 if (!comm_list) 785 if (!comm_list)
@@ -801,7 +812,7 @@ static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
801 return cm; 812 return cm;
802} 813}
803 814
804static void put_comm(struct comm *cm) 815static void put_comm(struct dlm_comm *cm)
805{ 816{
806 config_item_put(&cm->item); 817 config_item_put(&cm->item);
807} 818}
@@ -810,8 +821,8 @@ static void put_comm(struct comm *cm)
810int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out, 821int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
811 int **new_out, int *new_count_out) 822 int **new_out, int *new_count_out)
812{ 823{
813 struct space *sp; 824 struct dlm_space *sp;
814 struct node *nd; 825 struct dlm_node *nd;
815 int i = 0, rv = 0, ids_count = 0, new_count = 0; 826 int i = 0, rv = 0, ids_count = 0, new_count = 0;
816 int *ids, *new; 827 int *ids, *new;
817 828
@@ -874,8 +885,8 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
874 885
875int dlm_node_weight(char *lsname, int nodeid) 886int dlm_node_weight(char *lsname, int nodeid)
876{ 887{
877 struct space *sp; 888 struct dlm_space *sp;
878 struct node *nd; 889 struct dlm_node *nd;
879 int w = -EEXIST; 890 int w = -EEXIST;
880 891
881 sp = get_space(lsname); 892 sp = get_space(lsname);
@@ -897,7 +908,7 @@ int dlm_node_weight(char *lsname, int nodeid)
897 908
898int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr) 909int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
899{ 910{
900 struct comm *cm = get_comm(nodeid, NULL); 911 struct dlm_comm *cm = get_comm(nodeid, NULL);
901 if (!cm) 912 if (!cm)
902 return -EEXIST; 913 return -EEXIST;
903 if (!cm->addr_count) 914 if (!cm->addr_count)
@@ -909,7 +920,7 @@ int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
909 920
910int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid) 921int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
911{ 922{
912 struct comm *cm = get_comm(0, addr); 923 struct dlm_comm *cm = get_comm(0, addr);
913 if (!cm) 924 if (!cm)
914 return -EEXIST; 925 return -EEXIST;
915 *nodeid = cm->nodeid; 926 *nodeid = cm->nodeid;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 929e48ae7591..34f14a14fb4e 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -527,8 +527,10 @@ static ssize_t device_write(struct file *file, const char __user *buf,
527 k32buf = (struct dlm_write_request32 *)kbuf; 527 k32buf = (struct dlm_write_request32 *)kbuf;
528 kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - 528 kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
529 sizeof(struct dlm_write_request32)), GFP_KERNEL); 529 sizeof(struct dlm_write_request32)), GFP_KERNEL);
530 if (!kbuf) 530 if (!kbuf) {
531 kfree(k32buf);
531 return -ENOMEM; 532 return -ENOMEM;
533 }
532 534
533 if (proc) 535 if (proc)
534 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 536 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
@@ -539,8 +541,10 @@ static ssize_t device_write(struct file *file, const char __user *buf,
539 541
540 /* do we really need this? can a write happen after a close? */ 542 /* do we really need this? can a write happen after a close? */
541 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) && 543 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
542 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) 544 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
543 return -EINVAL; 545 error = -EINVAL;
546 goto out_free;
547 }
544 548
545 sigfillset(&allsigs); 549 sigfillset(&allsigs);
546 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig); 550 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
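
[Editor's note: the two fs/dlm/user.c hunks above plug the same leak on two early-exit paths. Once the 32-bit request has been saved in k32buf and kbuf has been reallocated, any return that skips the cleanup leaks one buffer or the other. A minimal sketch of the pattern, with illustrative names rather than the kernel's:

#include <linux/slab.h>

/*
 * Illustrative only: once @k32buf holds the saved 32-bit request,
 * every error return must free it, or the buffer leaks.
 */
static char *grow_request(char *k32buf, size_t count)
{
	char *kbuf = kmalloc(count + 1, GFP_KERNEL);

	if (!kbuf) {
		kfree(k32buf);	/* the fix: release the saved buffer */
		return NULL;
	}
	return kbuf;
}

The second hunk applies the same rule to the early -EINVAL return by routing it through a shared out_free label instead of returning directly.]
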
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
deleted file mode 100644
index 3abe7e9ceb33..000000000000
--- a/fs/xfs/linux-2.6/sema.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_SUPPORT_SEMA_H__
19#define __XFS_SUPPORT_SEMA_H__
20
21#include <linux/time.h>
22#include <linux/wait.h>
23#include <linux/semaphore.h>
24#include <asm/atomic.h>
25
26/*
27 * sema_t structure just maps to struct semaphore in Linux kernel.
28 */
29
30typedef struct semaphore sema_t;
31
32#define initnsema(sp, val, name) sema_init(sp, val)
33#define psema(sp, b) down(sp)
34#define vsema(sp) up(sp)
35#define freesema(sema) do { } while (0)
36
37static inline int issemalocked(sema_t *sp)
38{
39 return down_trylock(sp) || (up(sp), 0);
40}
41
42/*
43 * Map cpsema (try to get the sema) to down_trylock. We need to switch
44 * the return values since cpsema returns 1 (acquired) 0 (failed) and
45 * down_trylock returns the reverse 0 (acquired) 1 (failed).
46 */
47static inline int cpsema(sema_t *sp)
48{
49 return down_trylock(sp) ? 0 : 1;
50}
51
52#endif /* __XFS_SUPPORT_SEMA_H__ */
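
[Editor's note: the deleted sema.h existed mainly to paper over the return-value mismatch between IRIX-style cpsema() (1 = acquired, 0 = failed) and Linux down_trylock() (0 = acquired, nonzero = failed); the remaining users are converted to struct completion in the xfs_buf.c and xfs_dquot.c hunks below. For reference, the same inversion in two lines, assuming only <linux/semaphore.h>:

#include <linux/semaphore.h>

/* down_trylock() returns 0 on success, so invert it for a boolean. */
static inline int sema_tryget(struct semaphore *sp)
{
	return !down_trylock(sp);
}
]
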
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fa47e43b8b41..f42f80a3b1fa 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -73,7 +73,6 @@ xfs_page_trace(
73 unsigned long pgoff) 73 unsigned long pgoff)
74{ 74{
75 xfs_inode_t *ip; 75 xfs_inode_t *ip;
76 bhv_vnode_t *vp = vn_from_inode(inode);
77 loff_t isize = i_size_read(inode); 76 loff_t isize = i_size_read(inode);
78 loff_t offset = page_offset(page); 77 loff_t offset = page_offset(page);
79 int delalloc = -1, unmapped = -1, unwritten = -1; 78 int delalloc = -1, unmapped = -1, unwritten = -1;
@@ -81,7 +80,7 @@ xfs_page_trace(
81 if (page_has_buffers(page)) 80 if (page_has_buffers(page))
82 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); 81 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
83 82
84 ip = xfs_vtoi(vp); 83 ip = XFS_I(inode);
85 if (!ip->i_rwtrace) 84 if (!ip->i_rwtrace)
86 return; 85 return;
87 86
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9cc8f0213095..986061ae1b9b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
58 bp, id, 58 bp, id,
59 (void *)(unsigned long)bp->b_flags, 59 (void *)(unsigned long)bp->b_flags,
60 (void *)(unsigned long)bp->b_hold.counter, 60 (void *)(unsigned long)bp->b_hold.counter,
61 (void *)(unsigned long)bp->b_sema.count.counter, 61 (void *)(unsigned long)bp->b_sema.count,
62 (void *)current, 62 (void *)current,
63 data, ra, 63 data, ra,
64 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), 64 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
@@ -253,7 +253,7 @@ _xfs_buf_initialize(
253 253
254 memset(bp, 0, sizeof(xfs_buf_t)); 254 memset(bp, 0, sizeof(xfs_buf_t));
255 atomic_set(&bp->b_hold, 1); 255 atomic_set(&bp->b_hold, 1);
256 init_MUTEX_LOCKED(&bp->b_iodonesema); 256 init_completion(&bp->b_iowait);
257 INIT_LIST_HEAD(&bp->b_list); 257 INIT_LIST_HEAD(&bp->b_list);
258 INIT_LIST_HEAD(&bp->b_hash_list); 258 INIT_LIST_HEAD(&bp->b_hash_list);
259 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ 259 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
@@ -838,6 +838,7 @@ xfs_buf_rele(
838 return; 838 return;
839 } 839 }
840 840
841 ASSERT(atomic_read(&bp->b_hold) > 0);
841 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { 842 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
842 if (bp->b_relse) { 843 if (bp->b_relse) {
843 atomic_inc(&bp->b_hold); 844 atomic_inc(&bp->b_hold);
@@ -851,11 +852,6 @@ xfs_buf_rele(
851 spin_unlock(&hash->bh_lock); 852 spin_unlock(&hash->bh_lock);
852 xfs_buf_free(bp); 853 xfs_buf_free(bp);
853 } 854 }
854 } else {
855 /*
856 * Catch reference count leaks
857 */
858 ASSERT(atomic_read(&bp->b_hold) >= 0);
859 } 855 }
860} 856}
861 857
@@ -1037,7 +1033,7 @@ xfs_buf_ioend(
1037 xfs_buf_iodone_work(&bp->b_iodone_work); 1033 xfs_buf_iodone_work(&bp->b_iodone_work);
1038 } 1034 }
1039 } else { 1035 } else {
1040 up(&bp->b_iodonesema); 1036 complete(&bp->b_iowait);
1041 } 1037 }
1042} 1038}
1043 1039
@@ -1275,7 +1271,7 @@ xfs_buf_iowait(
1275 XB_TRACE(bp, "iowait", 0); 1271 XB_TRACE(bp, "iowait", 0);
1276 if (atomic_read(&bp->b_io_remaining)) 1272 if (atomic_read(&bp->b_io_remaining))
1277 blk_run_address_space(bp->b_target->bt_mapping); 1273 blk_run_address_space(bp->b_target->bt_mapping);
1278 down(&bp->b_iodonesema); 1274 wait_for_completion(&bp->b_iowait);
1279 XB_TRACE(bp, "iowaited", (long)bp->b_error); 1275 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1280 return bp->b_error; 1276 return bp->b_error;
1281} 1277}
@@ -1799,7 +1795,7 @@ int __init
1799xfs_buf_init(void) 1795xfs_buf_init(void)
1800{ 1796{
1801#ifdef XFS_BUF_TRACE 1797#ifdef XFS_BUF_TRACE
1802 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); 1798 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
1803#endif 1799#endif
1804 1800
1805 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", 1801 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
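
[Editor's note: the b_iodonesema conversion above is the canonical semaphore-to-completion swap. A semaphore created locked, down()ed by one thread to wait and up()ed exactly once by another, is really a completion, and the completion API states that intent directly. A sketch of the resulting wait/signal pair, with hypothetical names:

#include <linux/completion.h>

struct io_request {
	struct completion done;	/* replaces the locked semaphore */
	int error;
};

static void io_request_init(struct io_request *req)
{
	init_completion(&req->done);	/* starts un-signalled, like init_MUTEX_LOCKED */
	req->error = 0;
}

static int io_request_wait(struct io_request *req)
{
	wait_for_completion(&req->done);	/* was down(&sema) */
	return req->error;
}

static void io_request_finish(struct io_request *req, int error)
{
	req->error = error;
	complete(&req->done);	/* was up(&sema) */
}
]
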
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 29d1d4adc078..fe0109956656 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -157,7 +157,7 @@ typedef struct xfs_buf {
157 xfs_buf_iodone_t b_iodone; /* I/O completion function */ 157 xfs_buf_iodone_t b_iodone; /* I/O completion function */
158 xfs_buf_relse_t b_relse; /* releasing function */ 158 xfs_buf_relse_t b_relse; /* releasing function */
159 xfs_buf_bdstrat_t b_strat; /* pre-write function */ 159 xfs_buf_bdstrat_t b_strat; /* pre-write function */
160 struct semaphore b_iodonesema; /* Semaphore for I/O waiters */ 160 struct completion b_iowait; /* queue for I/O waiters */
161 void *b_fspriv; 161 void *b_fspriv;
162 void *b_fspriv2; 162 void *b_fspriv2;
163 void *b_fspriv3; 163 void *b_fspriv3;
@@ -352,7 +352,7 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
352#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) 352#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
353#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) 353#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
354#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) 354#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
355#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema); 355#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait);
356 356
357#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) 357#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
358#define XFS_BUF_TARGET(bp) ((bp)->b_target) 358#define XFS_BUF_TARGET(bp) ((bp)->b_target)
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 987fe84f7b13..24fd598af846 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -139,7 +139,7 @@ xfs_nfs_get_inode(
139 } 139 }
140 140
141 xfs_iunlock(ip, XFS_ILOCK_SHARED); 141 xfs_iunlock(ip, XFS_ILOCK_SHARED);
142 return ip->i_vnode; 142 return VFS_I(ip);
143} 143}
144 144
145STATIC struct dentry * 145STATIC struct dentry *
@@ -167,7 +167,7 @@ xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
167 if (!inode) 167 if (!inode)
168 return NULL; 168 return NULL;
169 if (IS_ERR(inode)) 169 if (IS_ERR(inode))
170 return ERR_PTR(PTR_ERR(inode)); 170 return ERR_CAST(inode);
171 result = d_alloc_anon(inode); 171 result = d_alloc_anon(inode);
172 if (!result) { 172 if (!result) {
173 iput(inode); 173 iput(inode);
@@ -198,7 +198,7 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
198 if (!inode) 198 if (!inode)
199 return NULL; 199 return NULL;
200 if (IS_ERR(inode)) 200 if (IS_ERR(inode))
201 return ERR_PTR(PTR_ERR(inode)); 201 return ERR_CAST(inode);
202 result = d_alloc_anon(inode); 202 result = d_alloc_anon(inode);
203 if (!result) { 203 if (!result) {
204 iput(inode); 204 iput(inode);
@@ -219,9 +219,9 @@ xfs_fs_get_parent(
219 if (unlikely(error)) 219 if (unlikely(error))
220 return ERR_PTR(-error); 220 return ERR_PTR(-error);
221 221
222 parent = d_alloc_anon(cip->i_vnode); 222 parent = d_alloc_anon(VFS_I(cip));
223 if (unlikely(!parent)) { 223 if (unlikely(!parent)) {
224 iput(cip->i_vnode); 224 iput(VFS_I(cip));
225 return ERR_PTR(-ENOMEM); 225 return ERR_PTR(-ENOMEM);
226 } 226 }
227 return parent; 227 return parent;
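
[Editor's note: ERR_CAST() in the hunks above replaces the ERR_PTR(PTR_ERR(inode)) round trip. Both forward an encoded error from one pointer type to another, but ERR_CAST says so in one call and avoids decoding to a long and back. Usage, for reference:

#include <linux/err.h>
#include <linux/fs.h>

static struct dentry *wrap_inode(struct inode *inode)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* same bits, clearer intent */
	/* ... normal dentry setup would go here ... */
	return NULL;
}
]
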
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1eefe61f0e10..36caa6d957df 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -31,7 +31,7 @@ xfs_tosspages(
31 xfs_off_t last, 31 xfs_off_t last,
32 int fiopt) 32 int fiopt)
33{ 33{
34 struct address_space *mapping = ip->i_vnode->i_mapping; 34 struct address_space *mapping = VFS_I(ip)->i_mapping;
35 35
36 if (mapping->nrpages) 36 if (mapping->nrpages)
37 truncate_inode_pages(mapping, first); 37 truncate_inode_pages(mapping, first);
@@ -44,7 +44,7 @@ xfs_flushinval_pages(
44 xfs_off_t last, 44 xfs_off_t last,
45 int fiopt) 45 int fiopt)
46{ 46{
47 struct address_space *mapping = ip->i_vnode->i_mapping; 47 struct address_space *mapping = VFS_I(ip)->i_mapping;
48 int ret = 0; 48 int ret = 0;
49 49
50 if (mapping->nrpages) { 50 if (mapping->nrpages) {
@@ -64,7 +64,7 @@ xfs_flush_pages(
64 uint64_t flags, 64 uint64_t flags,
65 int fiopt) 65 int fiopt)
66{ 66{
67 struct address_space *mapping = ip->i_vnode->i_mapping; 67 struct address_space *mapping = VFS_I(ip)->i_mapping;
68 int ret = 0; 68 int ret = 0;
69 int ret2; 69 int ret2;
70 70
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index acb978d9d085..48799ba7e3e6 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -245,7 +245,7 @@ xfs_vget_fsop_handlereq(
245 245
246 xfs_iunlock(ip, XFS_ILOCK_SHARED); 246 xfs_iunlock(ip, XFS_ILOCK_SHARED);
247 247
248 *inode = XFS_ITOV(ip); 248 *inode = VFS_I(ip);
249 return 0; 249 return 0;
250} 250}
251 251
@@ -927,7 +927,7 @@ STATIC void
927xfs_diflags_to_linux( 927xfs_diflags_to_linux(
928 struct xfs_inode *ip) 928 struct xfs_inode *ip)
929{ 929{
930 struct inode *inode = XFS_ITOV(ip); 930 struct inode *inode = VFS_I(ip);
931 unsigned int xflags = xfs_ip2xflags(ip); 931 unsigned int xflags = xfs_ip2xflags(ip);
932 932
933 if (xflags & XFS_XFLAG_IMMUTABLE) 933 if (xflags & XFS_XFLAG_IMMUTABLE)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e88f51028086..91bcd979242c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,7 +62,7 @@ void
62xfs_synchronize_atime( 62xfs_synchronize_atime(
63 xfs_inode_t *ip) 63 xfs_inode_t *ip)
64{ 64{
65 struct inode *inode = ip->i_vnode; 65 struct inode *inode = VFS_I(ip);
66 66
67 if (inode) { 67 if (inode) {
68 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; 68 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
@@ -79,7 +79,7 @@ void
79xfs_mark_inode_dirty_sync( 79xfs_mark_inode_dirty_sync(
80 xfs_inode_t *ip) 80 xfs_inode_t *ip)
81{ 81{
82 struct inode *inode = ip->i_vnode; 82 struct inode *inode = VFS_I(ip);
83 83
84 if (inode) 84 if (inode)
85 mark_inode_dirty_sync(inode); 85 mark_inode_dirty_sync(inode);
@@ -89,36 +89,31 @@ xfs_mark_inode_dirty_sync(
89 * Change the requested timestamp in the given inode. 89 * Change the requested timestamp in the given inode.
90 * We don't lock across timestamp updates, and we don't log them but 90 * We don't lock across timestamp updates, and we don't log them but
91 * we do record the fact that there is dirty information in core. 91 * we do record the fact that there is dirty information in core.
92 *
93 * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
94 * with XFS_ICHGTIME_ACC to be sure that access time
95 * update will take. Calling first with XFS_ICHGTIME_ACC
96 * and then XFS_ICHGTIME_MOD may fail to modify the access
97 * timestamp if the filesystem is mounted noacctm.
98 */ 92 */
99void 93void
100xfs_ichgtime( 94xfs_ichgtime(
101 xfs_inode_t *ip, 95 xfs_inode_t *ip,
102 int flags) 96 int flags)
103{ 97{
104 struct inode *inode = vn_to_inode(XFS_ITOV(ip)); 98 struct inode *inode = VFS_I(ip);
105 timespec_t tv; 99 timespec_t tv;
100 int sync_it = 0;
101
102 tv = current_fs_time(inode->i_sb);
106 103
107 nanotime(&tv); 104 if ((flags & XFS_ICHGTIME_MOD) &&
108 if (flags & XFS_ICHGTIME_MOD) { 105 !timespec_equal(&inode->i_mtime, &tv)) {
109 inode->i_mtime = tv; 106 inode->i_mtime = tv;
110 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; 107 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
111 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; 108 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
109 sync_it = 1;
112 } 110 }
113 if (flags & XFS_ICHGTIME_ACC) { 111 if ((flags & XFS_ICHGTIME_CHG) &&
114 inode->i_atime = tv; 112 !timespec_equal(&inode->i_ctime, &tv)) {
115 ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
116 ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
117 }
118 if (flags & XFS_ICHGTIME_CHG) {
119 inode->i_ctime = tv; 113 inode->i_ctime = tv;
120 ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec; 114 ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
121 ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec; 115 ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
116 sync_it = 1;
122 } 117 }
123 118
124 /* 119 /*
@@ -130,55 +125,11 @@ xfs_ichgtime(
130 * ensure that the compiler does not reorder the update 125 * ensure that the compiler does not reorder the update
131 * of i_update_core above the timestamp updates above. 126 * of i_update_core above the timestamp updates above.
132 */ 127 */
133 SYNCHRONIZE(); 128 if (sync_it) {
134 ip->i_update_core = 1; 129 SYNCHRONIZE();
135 if (!(inode->i_state & I_NEW)) 130 ip->i_update_core = 1;
136 mark_inode_dirty_sync(inode); 131 mark_inode_dirty_sync(inode);
137}
138
139/*
140 * Variant on the above which avoids querying the system clock
141 * in situations where we know the Linux inode timestamps have
142 * just been updated (and so we can update our inode cheaply).
143 */
144void
145xfs_ichgtime_fast(
146 xfs_inode_t *ip,
147 struct inode *inode,
148 int flags)
149{
150 timespec_t *tvp;
151
152 /*
153 * Atime updates for read() & friends are handled lazily now, and
154 * explicit updates must go through xfs_ichgtime()
155 */
156 ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
157
158 if (flags & XFS_ICHGTIME_MOD) {
159 tvp = &inode->i_mtime;
160 ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
161 ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
162 } 132 }
163 if (flags & XFS_ICHGTIME_CHG) {
164 tvp = &inode->i_ctime;
165 ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
166 ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec;
167 }
168
169 /*
170 * We update the i_update_core field _after_ changing
171 * the timestamps in order to coordinate properly with
172 * xfs_iflush() so that we don't lose timestamp updates.
173 * This keeps us from having to hold the inode lock
174 * while doing this. We use the SYNCHRONIZE macro to
175 * ensure that the compiler does not reorder the update
176 * of i_update_core above the timestamp updates above.
177 */
178 SYNCHRONIZE();
179 ip->i_update_core = 1;
180 if (!(inode->i_state & I_NEW))
181 mark_inode_dirty_sync(inode);
182} 133}
183 134
184/* 135/*
@@ -299,7 +250,7 @@ xfs_vn_mknod(
299 if (unlikely(error)) 250 if (unlikely(error))
300 goto out_free_acl; 251 goto out_free_acl;
301 252
302 inode = ip->i_vnode; 253 inode = VFS_I(ip);
303 254
304 error = xfs_init_security(inode, dir); 255 error = xfs_init_security(inode, dir);
305 if (unlikely(error)) 256 if (unlikely(error))
@@ -366,7 +317,7 @@ xfs_vn_lookup(
366 return NULL; 317 return NULL;
367 } 318 }
368 319
369 return d_splice_alias(cip->i_vnode, dentry); 320 return d_splice_alias(VFS_I(cip), dentry);
370} 321}
371 322
372STATIC struct dentry * 323STATIC struct dentry *
@@ -399,12 +350,12 @@ xfs_vn_ci_lookup(
399 350
400 /* if exact match, just splice and exit */ 351 /* if exact match, just splice and exit */
401 if (!ci_name.name) 352 if (!ci_name.name)
402 return d_splice_alias(ip->i_vnode, dentry); 353 return d_splice_alias(VFS_I(ip), dentry);
403 354
404 /* else case-insensitive match... */ 355 /* else case-insensitive match... */
405 dname.name = ci_name.name; 356 dname.name = ci_name.name;
406 dname.len = ci_name.len; 357 dname.len = ci_name.len;
407 dentry = d_add_ci(ip->i_vnode, dentry, &dname); 358 dentry = d_add_ci(VFS_I(ip), dentry, &dname);
408 kmem_free(ci_name.name); 359 kmem_free(ci_name.name);
409 return dentry; 360 return dentry;
410} 361}
@@ -478,7 +429,7 @@ xfs_vn_symlink(
478 if (unlikely(error)) 429 if (unlikely(error))
479 goto out; 430 goto out;
480 431
481 inode = cip->i_vnode; 432 inode = VFS_I(cip);
482 433
483 error = xfs_init_security(inode, dir); 434 error = xfs_init_security(inode, dir);
484 if (unlikely(error)) 435 if (unlikely(error))
@@ -710,7 +661,7 @@ out_error:
710 return error; 661 return error;
711} 662}
712 663
713const struct inode_operations xfs_inode_operations = { 664static const struct inode_operations xfs_inode_operations = {
714 .permission = xfs_vn_permission, 665 .permission = xfs_vn_permission,
715 .truncate = xfs_vn_truncate, 666 .truncate = xfs_vn_truncate,
716 .getattr = xfs_vn_getattr, 667 .getattr = xfs_vn_getattr,
@@ -722,7 +673,7 @@ const struct inode_operations xfs_inode_operations = {
722 .fallocate = xfs_vn_fallocate, 673 .fallocate = xfs_vn_fallocate,
723}; 674};
724 675
725const struct inode_operations xfs_dir_inode_operations = { 676static const struct inode_operations xfs_dir_inode_operations = {
726 .create = xfs_vn_create, 677 .create = xfs_vn_create,
727 .lookup = xfs_vn_lookup, 678 .lookup = xfs_vn_lookup,
728 .link = xfs_vn_link, 679 .link = xfs_vn_link,
@@ -747,7 +698,7 @@ const struct inode_operations xfs_dir_inode_operations = {
747 .listxattr = xfs_vn_listxattr, 698 .listxattr = xfs_vn_listxattr,
748}; 699};
749 700
750const struct inode_operations xfs_dir_ci_inode_operations = { 701static const struct inode_operations xfs_dir_ci_inode_operations = {
751 .create = xfs_vn_create, 702 .create = xfs_vn_create,
752 .lookup = xfs_vn_ci_lookup, 703 .lookup = xfs_vn_ci_lookup,
753 .link = xfs_vn_link, 704 .link = xfs_vn_link,
@@ -772,7 +723,7 @@ const struct inode_operations xfs_dir_ci_inode_operations = {
772 .listxattr = xfs_vn_listxattr, 723 .listxattr = xfs_vn_listxattr,
773}; 724};
774 725
775const struct inode_operations xfs_symlink_inode_operations = { 726static const struct inode_operations xfs_symlink_inode_operations = {
776 .readlink = generic_readlink, 727 .readlink = generic_readlink,
777 .follow_link = xfs_vn_follow_link, 728 .follow_link = xfs_vn_follow_link,
778 .put_link = xfs_vn_put_link, 729 .put_link = xfs_vn_put_link,
@@ -784,3 +735,98 @@ const struct inode_operations xfs_symlink_inode_operations = {
784 .removexattr = generic_removexattr, 735 .removexattr = generic_removexattr,
785 .listxattr = xfs_vn_listxattr, 736 .listxattr = xfs_vn_listxattr,
786}; 737};
738
739STATIC void
740xfs_diflags_to_iflags(
741 struct inode *inode,
742 struct xfs_inode *ip)
743{
744 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
745 inode->i_flags |= S_IMMUTABLE;
746 else
747 inode->i_flags &= ~S_IMMUTABLE;
748 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
749 inode->i_flags |= S_APPEND;
750 else
751 inode->i_flags &= ~S_APPEND;
752 if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
753 inode->i_flags |= S_SYNC;
754 else
755 inode->i_flags &= ~S_SYNC;
756 if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
757 inode->i_flags |= S_NOATIME;
758 else
759 inode->i_flags &= ~S_NOATIME;
760}
761
762/*
763 * Initialize the Linux inode, set up the operation vectors and
764 * unlock the inode.
765 *
766 * When reading existing inodes from disk this is called directly
767 * from xfs_iget, when creating a new inode it is called from
768 * xfs_ialloc after setting up the inode.
769 */
770void
771xfs_setup_inode(
772 struct xfs_inode *ip)
773{
774 struct inode *inode = ip->i_vnode;
775
776 inode->i_mode = ip->i_d.di_mode;
777 inode->i_nlink = ip->i_d.di_nlink;
778 inode->i_uid = ip->i_d.di_uid;
779 inode->i_gid = ip->i_d.di_gid;
780
781 switch (inode->i_mode & S_IFMT) {
782 case S_IFBLK:
783 case S_IFCHR:
784 inode->i_rdev =
785 MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
786 sysv_minor(ip->i_df.if_u2.if_rdev));
787 break;
788 default:
789 inode->i_rdev = 0;
790 break;
791 }
792
793 inode->i_generation = ip->i_d.di_gen;
794 i_size_write(inode, ip->i_d.di_size);
795 inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
796 inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
797 inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
798 inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
799 inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
800 inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
801 xfs_diflags_to_iflags(inode, ip);
802 xfs_iflags_clear(ip, XFS_IMODIFIED);
803
804 switch (inode->i_mode & S_IFMT) {
805 case S_IFREG:
806 inode->i_op = &xfs_inode_operations;
807 inode->i_fop = &xfs_file_operations;
808 inode->i_mapping->a_ops = &xfs_address_space_operations;
809 break;
810 case S_IFDIR:
811 if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
812 inode->i_op = &xfs_dir_ci_inode_operations;
813 else
814 inode->i_op = &xfs_dir_inode_operations;
815 inode->i_fop = &xfs_dir_file_operations;
816 break;
817 case S_IFLNK:
818 inode->i_op = &xfs_symlink_inode_operations;
819 if (!(ip->i_df.if_flags & XFS_IFINLINE))
820 inode->i_mapping->a_ops = &xfs_address_space_operations;
821 break;
822 default:
823 inode->i_op = &xfs_inode_operations;
824 init_special_inode(inode, inode->i_mode, inode->i_rdev);
825 break;
826 }
827
828 xfs_iflags_clear(ip, XFS_INEW);
829 barrier();
830
831 unlock_new_inode(inode);
832}
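
[Editor's note: the rewritten xfs_ichgtime() above only dirties the inode when a timestamp actually moves. It samples current_fs_time() once, compares with timespec_equal(), and sets a sync_it flag rather than unconditionally rewriting and re-dirtying. The shape of that guard, sketched generically (the real function also orders i_update_core against the stores with SYNCHRONIZE()):

#include <linux/fs.h>
#include <linux/time.h>

/* Update mtime only if it changed; return whether a sync was needed. */
static int touch_mtime_if_changed(struct inode *inode)
{
	struct timespec tv = current_fs_time(inode->i_sb);

	if (timespec_equal(&inode->i_mtime, &tv))
		return 0;	/* nothing moved; skip the dirtying */

	inode->i_mtime = tv;
	mark_inode_dirty_sync(inode);
	return 1;
}
]
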
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index d97ba934a2ac..8b1a1e31dc21 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -18,10 +18,7 @@
18#ifndef __XFS_IOPS_H__ 18#ifndef __XFS_IOPS_H__
19#define __XFS_IOPS_H__ 19#define __XFS_IOPS_H__
20 20
21extern const struct inode_operations xfs_inode_operations; 21struct xfs_inode;
22extern const struct inode_operations xfs_dir_inode_operations;
23extern const struct inode_operations xfs_dir_ci_inode_operations;
24extern const struct inode_operations xfs_symlink_inode_operations;
25 22
26extern const struct file_operations xfs_file_operations; 23extern const struct file_operations xfs_file_operations;
27extern const struct file_operations xfs_dir_file_operations; 24extern const struct file_operations xfs_dir_file_operations;
@@ -29,14 +26,6 @@ extern const struct file_operations xfs_invis_file_operations;
29 26
30extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); 27extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
31 28
32struct xfs_inode; 29extern void xfs_setup_inode(struct xfs_inode *);
33extern void xfs_ichgtime(struct xfs_inode *, int);
34extern void xfs_ichgtime_fast(struct xfs_inode *, struct inode *, int);
35
36#define xfs_vtoi(vp) \
37 ((struct xfs_inode *)vn_to_inode(vp)->i_private)
38
39#define XFS_I(inode) \
40 ((struct xfs_inode *)(inode)->i_private)
41 30
42#endif /* __XFS_IOPS_H__ */ 31#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 4d45d9351a6c..cc0f7b3a9795 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -45,13 +45,13 @@
45#include <mrlock.h> 45#include <mrlock.h>
46#include <sv.h> 46#include <sv.h>
47#include <mutex.h> 47#include <mutex.h>
48#include <sema.h>
49#include <time.h> 48#include <time.h>
50 49
51#include <support/ktrace.h> 50#include <support/ktrace.h>
52#include <support/debug.h> 51#include <support/debug.h>
53#include <support/uuid.h> 52#include <support/uuid.h>
54 53
54#include <linux/semaphore.h>
55#include <linux/mm.h> 55#include <linux/mm.h>
56#include <linux/kernel.h> 56#include <linux/kernel.h>
57#include <linux/blkdev.h> 57#include <linux/blkdev.h>
@@ -126,8 +126,6 @@
126 126
127#define current_cpu() (raw_smp_processor_id()) 127#define current_cpu() (raw_smp_processor_id())
128#define current_pid() (current->pid) 128#define current_pid() (current->pid)
129#define current_fsuid(cred) (current->fsuid)
130#define current_fsgid(cred) (current->fsgid)
131#define current_test_flags(f) (current->flags & (f)) 129#define current_test_flags(f) (current->flags & (f))
132#define current_set_flags_nested(sp, f) \ 130#define current_set_flags_nested(sp, f) \
133 (*(sp) = current->flags, current->flags |= (f)) 131 (*(sp) = current->flags, current->flags |= (f))
@@ -180,7 +178,7 @@
180#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) 178#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
181#define xfs_stack_trace() dump_stack() 179#define xfs_stack_trace() dump_stack()
182#define xfs_itruncate_data(ip, off) \ 180#define xfs_itruncate_data(ip, off) \
183 (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) 181 (-vmtruncate(VFS_I(ip), (off)))
184 182
185 183
186/* Move the kernel do_div definition off to one side */ 184/* Move the kernel do_div definition off to one side */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 82333b3e118e..1957e5357d04 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -137,7 +137,7 @@ xfs_iozero(
137 struct address_space *mapping; 137 struct address_space *mapping;
138 int status; 138 int status;
139 139
140 mapping = ip->i_vnode->i_mapping; 140 mapping = VFS_I(ip)->i_mapping;
141 do { 141 do {
142 unsigned offset, bytes; 142 unsigned offset, bytes;
143 void *fsdata; 143 void *fsdata;
@@ -674,9 +674,7 @@ start:
674 */ 674 */
675 if (likely(!(ioflags & IO_INVIS) && 675 if (likely(!(ioflags & IO_INVIS) &&
676 !mnt_want_write(file->f_path.mnt))) { 676 !mnt_want_write(file->f_path.mnt))) {
677 file_update_time(file); 677 xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
678 xfs_ichgtime_fast(xip, inode,
679 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
680 mnt_drop_write(file->f_path.mnt); 678 mnt_drop_write(file->f_path.mnt);
681 } 679 }
682 680
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 30ae96397e31..73c65f19e549 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -581,118 +581,6 @@ xfs_max_file_offset(
581 return (((__uint64_t)pagefactor) << bitshift) - 1; 581 return (((__uint64_t)pagefactor) << bitshift) - 1;
582} 582}
583 583
584STATIC_INLINE void
585xfs_set_inodeops(
586 struct inode *inode)
587{
588 switch (inode->i_mode & S_IFMT) {
589 case S_IFREG:
590 inode->i_op = &xfs_inode_operations;
591 inode->i_fop = &xfs_file_operations;
592 inode->i_mapping->a_ops = &xfs_address_space_operations;
593 break;
594 case S_IFDIR:
595 if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
596 inode->i_op = &xfs_dir_ci_inode_operations;
597 else
598 inode->i_op = &xfs_dir_inode_operations;
599 inode->i_fop = &xfs_dir_file_operations;
600 break;
601 case S_IFLNK:
602 inode->i_op = &xfs_symlink_inode_operations;
603 if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE))
604 inode->i_mapping->a_ops = &xfs_address_space_operations;
605 break;
606 default:
607 inode->i_op = &xfs_inode_operations;
608 init_special_inode(inode, inode->i_mode, inode->i_rdev);
609 break;
610 }
611}
612
613STATIC_INLINE void
614xfs_revalidate_inode(
615 xfs_mount_t *mp,
616 bhv_vnode_t *vp,
617 xfs_inode_t *ip)
618{
619 struct inode *inode = vn_to_inode(vp);
620
621 inode->i_mode = ip->i_d.di_mode;
622 inode->i_nlink = ip->i_d.di_nlink;
623 inode->i_uid = ip->i_d.di_uid;
624 inode->i_gid = ip->i_d.di_gid;
625
626 switch (inode->i_mode & S_IFMT) {
627 case S_IFBLK:
628 case S_IFCHR:
629 inode->i_rdev =
630 MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
631 sysv_minor(ip->i_df.if_u2.if_rdev));
632 break;
633 default:
634 inode->i_rdev = 0;
635 break;
636 }
637
638 inode->i_generation = ip->i_d.di_gen;
639 i_size_write(inode, ip->i_d.di_size);
640 inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
641 inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
642 inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
643 inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
644 inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
645 inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
646 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
647 inode->i_flags |= S_IMMUTABLE;
648 else
649 inode->i_flags &= ~S_IMMUTABLE;
650 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
651 inode->i_flags |= S_APPEND;
652 else
653 inode->i_flags &= ~S_APPEND;
654 if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
655 inode->i_flags |= S_SYNC;
656 else
657 inode->i_flags &= ~S_SYNC;
658 if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
659 inode->i_flags |= S_NOATIME;
660 else
661 inode->i_flags &= ~S_NOATIME;
662 xfs_iflags_clear(ip, XFS_IMODIFIED);
663}
664
665void
666xfs_initialize_vnode(
667 struct xfs_mount *mp,
668 bhv_vnode_t *vp,
669 struct xfs_inode *ip)
670{
671 struct inode *inode = vn_to_inode(vp);
672
673 if (!ip->i_vnode) {
674 ip->i_vnode = vp;
675 inode->i_private = ip;
676 }
677
678 /*
679 * We need to set the ops vectors, and unlock the inode, but if
680 * we have been called during the new inode create process, it is
681 * too early to fill in the Linux inode. We will get called a
682 * second time once the inode is properly set up, and then we can
683 * finish our work.
684 */
685 if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
686 xfs_revalidate_inode(mp, vp, ip);
687 xfs_set_inodeops(inode);
688
689 xfs_iflags_clear(ip, XFS_INEW);
690 barrier();
691
692 unlock_new_inode(inode);
693 }
694}
695
696int 584int
697xfs_blkdev_get( 585xfs_blkdev_get(
698 xfs_mount_t *mp, 586 xfs_mount_t *mp,
@@ -982,26 +870,21 @@ STATIC struct inode *
982xfs_fs_alloc_inode( 870xfs_fs_alloc_inode(
983 struct super_block *sb) 871 struct super_block *sb)
984{ 872{
985 bhv_vnode_t *vp; 873 return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
986
987 vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
988 if (unlikely(!vp))
989 return NULL;
990 return vn_to_inode(vp);
991} 874}
992 875
993STATIC void 876STATIC void
994xfs_fs_destroy_inode( 877xfs_fs_destroy_inode(
995 struct inode *inode) 878 struct inode *inode)
996{ 879{
997 kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); 880 kmem_zone_free(xfs_vnode_zone, inode);
998} 881}
999 882
1000STATIC void 883STATIC void
1001xfs_fs_inode_init_once( 884xfs_fs_inode_init_once(
1002 void *vnode) 885 void *vnode)
1003{ 886{
1004 inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 887 inode_init_once((struct inode *)vnode);
1005} 888}
1006 889
1007/* 890/*
@@ -1106,7 +989,7 @@ void
1106xfs_flush_inode( 989xfs_flush_inode(
1107 xfs_inode_t *ip) 990 xfs_inode_t *ip)
1108{ 991{
1109 struct inode *inode = ip->i_vnode; 992 struct inode *inode = VFS_I(ip);
1110 993
1111 igrab(inode); 994 igrab(inode);
1112 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); 995 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
@@ -1131,7 +1014,7 @@ void
1131xfs_flush_device( 1014xfs_flush_device(
1132 xfs_inode_t *ip) 1015 xfs_inode_t *ip)
1133{ 1016{
1134 struct inode *inode = vn_to_inode(XFS_ITOV(ip)); 1017 struct inode *inode = VFS_I(ip);
1135 1018
1136 igrab(inode); 1019 igrab(inode);
1137 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); 1020 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
@@ -1201,6 +1084,15 @@ xfssyncd(
1201} 1084}
1202 1085
1203STATIC void 1086STATIC void
1087xfs_free_fsname(
1088 struct xfs_mount *mp)
1089{
1090 kfree(mp->m_fsname);
1091 kfree(mp->m_rtname);
1092 kfree(mp->m_logname);
1093}
1094
1095STATIC void
1204xfs_fs_put_super( 1096xfs_fs_put_super(
1205 struct super_block *sb) 1097 struct super_block *sb)
1206{ 1098{
@@ -1239,8 +1131,6 @@ xfs_fs_put_super(
1239 error = xfs_unmount_flush(mp, 0); 1131 error = xfs_unmount_flush(mp, 0);
1240 WARN_ON(error); 1132 WARN_ON(error);
1241 1133
1242 IRELE(rip);
1243
1244 /* 1134 /*
1245 * If we're forcing a shutdown, typically because of a media error, 1135 * If we're forcing a shutdown, typically because of a media error,
1246 * we want to make sure we invalidate dirty pages that belong to 1136 * we want to make sure we invalidate dirty pages that belong to
@@ -1257,10 +1147,12 @@ xfs_fs_put_super(
1257 } 1147 }
1258 1148
1259 xfs_unmountfs(mp); 1149 xfs_unmountfs(mp);
1150 xfs_freesb(mp);
1260 xfs_icsb_destroy_counters(mp); 1151 xfs_icsb_destroy_counters(mp);
1261 xfs_close_devices(mp); 1152 xfs_close_devices(mp);
1262 xfs_qmops_put(mp); 1153 xfs_qmops_put(mp);
1263 xfs_dmops_put(mp); 1154 xfs_dmops_put(mp);
1155 xfs_free_fsname(mp);
1264 kfree(mp); 1156 kfree(mp);
1265} 1157}
1266 1158
@@ -1517,6 +1409,8 @@ xfs_start_flags(
1517 struct xfs_mount_args *ap, 1409 struct xfs_mount_args *ap,
1518 struct xfs_mount *mp) 1410 struct xfs_mount *mp)
1519{ 1411{
1412 int error;
1413
1520 /* Values are in BBs */ 1414 /* Values are in BBs */
1521 if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { 1415 if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
1522 /* 1416 /*
@@ -1549,17 +1443,27 @@ xfs_start_flags(
1549 ap->logbufsize); 1443 ap->logbufsize);
1550 return XFS_ERROR(EINVAL); 1444 return XFS_ERROR(EINVAL);
1551 } 1445 }
1446
1447 error = ENOMEM;
1448
1552 mp->m_logbsize = ap->logbufsize; 1449 mp->m_logbsize = ap->logbufsize;
1553 mp->m_fsname_len = strlen(ap->fsname) + 1; 1450 mp->m_fsname_len = strlen(ap->fsname) + 1;
1554 mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); 1451
1555 strcpy(mp->m_fsname, ap->fsname); 1452 mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL);
1453 if (!mp->m_fsname)
1454 goto out;
1455
1556 if (ap->rtname[0]) { 1456 if (ap->rtname[0]) {
1557 mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP); 1457 mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL);
1558 strcpy(mp->m_rtname, ap->rtname); 1458 if (!mp->m_rtname)
1459 goto out_free_fsname;
1460
1559 } 1461 }
1462
1560 if (ap->logname[0]) { 1463 if (ap->logname[0]) {
1561 mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP); 1464 mp->m_logname = kstrdup(ap->logname, GFP_KERNEL);
1562 strcpy(mp->m_logname, ap->logname); 1465 if (!mp->m_logname)
1466 goto out_free_rtname;
1563 } 1467 }
1564 1468
1565 if (ap->flags & XFSMNT_WSYNC) 1469 if (ap->flags & XFSMNT_WSYNC)
@@ -1632,6 +1536,14 @@ xfs_start_flags(
1632 if (ap->flags & XFSMNT_DMAPI) 1536 if (ap->flags & XFSMNT_DMAPI)
1633 mp->m_flags |= XFS_MOUNT_DMAPI; 1537 mp->m_flags |= XFS_MOUNT_DMAPI;
1634 return 0; 1538 return 0;
1539
1540
1541 out_free_rtname:
1542 kfree(mp->m_rtname);
1543 out_free_fsname:
1544 kfree(mp->m_fsname);
1545 out:
1546 return error;
1635} 1547}
1636 1548
1637/* 1549/*
@@ -1792,10 +1704,10 @@ xfs_fs_fill_super(
1792 */ 1704 */
1793 error = xfs_start_flags(args, mp); 1705 error = xfs_start_flags(args, mp);
1794 if (error) 1706 if (error)
1795 goto out_destroy_counters; 1707 goto out_free_fsname;
1796 error = xfs_readsb(mp, flags); 1708 error = xfs_readsb(mp, flags);
1797 if (error) 1709 if (error)
1798 goto out_destroy_counters; 1710 goto out_free_fsname;
1799 error = xfs_finish_flags(args, mp); 1711 error = xfs_finish_flags(args, mp);
1800 if (error) 1712 if (error)
1801 goto out_free_sb; 1713 goto out_free_sb;
@@ -1811,7 +1723,7 @@ xfs_fs_fill_super(
1811 if (error) 1723 if (error)
1812 goto out_free_sb; 1724 goto out_free_sb;
1813 1725
1814 error = xfs_mountfs(mp, flags); 1726 error = xfs_mountfs(mp);
1815 if (error) 1727 if (error)
1816 goto out_filestream_unmount; 1728 goto out_filestream_unmount;
1817 1729
@@ -1825,7 +1737,7 @@ xfs_fs_fill_super(
1825 sb->s_time_gran = 1; 1737 sb->s_time_gran = 1;
1826 set_posix_acl_flag(sb); 1738 set_posix_acl_flag(sb);
1827 1739
1828 root = igrab(mp->m_rootip->i_vnode); 1740 root = igrab(VFS_I(mp->m_rootip));
1829 if (!root) { 1741 if (!root) {
1830 error = ENOENT; 1742 error = ENOENT;
1831 goto fail_unmount; 1743 goto fail_unmount;
@@ -1857,7 +1769,8 @@ xfs_fs_fill_super(
1857 xfs_filestream_unmount(mp); 1769 xfs_filestream_unmount(mp);
1858 out_free_sb: 1770 out_free_sb:
1859 xfs_freesb(mp); 1771 xfs_freesb(mp);
1860 out_destroy_counters: 1772 out_free_fsname:
1773 xfs_free_fsname(mp);
1861 xfs_icsb_destroy_counters(mp); 1774 xfs_icsb_destroy_counters(mp);
1862 xfs_close_devices(mp); 1775 xfs_close_devices(mp);
1863 out_put_qmops: 1776 out_put_qmops:
@@ -1890,10 +1803,8 @@ xfs_fs_fill_super(
1890 error = xfs_unmount_flush(mp, 0); 1803 error = xfs_unmount_flush(mp, 0);
1891 WARN_ON(error); 1804 WARN_ON(error);
1892 1805
1893 IRELE(mp->m_rootip);
1894
1895 xfs_unmountfs(mp); 1806 xfs_unmountfs(mp);
1896 goto out_destroy_counters; 1807 goto out_free_sb;
1897} 1808}
1898 1809
1899STATIC int 1810STATIC int
@@ -2014,7 +1925,7 @@ xfs_free_trace_bufs(void)
2014STATIC int __init 1925STATIC int __init
2015xfs_init_zones(void) 1926xfs_init_zones(void)
2016{ 1927{
2017 xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", 1928 xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode",
2018 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | 1929 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
2019 KM_ZONE_SPREAD, 1930 KM_ZONE_SPREAD,
2020 xfs_fs_inode_init_once); 1931 xfs_fs_inode_init_once);
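
[Editor's note: the xfs_start_flags() hunks above swap kmem_alloc()+strcpy() for kstrdup() and add the conventional goto unwind, so a failed copy frees whatever was duplicated before it; xfs_free_fsname() then mirrors the unwind at unmount. The idiom, with hypothetical field names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct names {
	char *fs;
	char *rt;
};

static int dup_names(struct names *n, const char *fs, const char *rt)
{
	n->fs = kstrdup(fs, GFP_KERNEL);
	if (!n->fs)
		goto out;
	if (rt) {
		n->rt = kstrdup(rt, GFP_KERNEL);
		if (!n->rt)
			goto out_free_fs;
	}
	return 0;

 out_free_fs:
	kfree(n->fs);
 out:
	return -ENOMEM;
}

Each label frees exactly what was allocated before the failing step, so the error paths stay correct as allocations are added or removed.]
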
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index b7d13da01bd6..fe2ef4e6a0f9 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -101,9 +101,6 @@ struct block_device;
101 101
102extern __uint64_t xfs_max_file_offset(unsigned int); 102extern __uint64_t xfs_max_file_offset(unsigned int);
103 103
104extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp,
105 struct xfs_inode *ip);
106
107extern void xfs_flush_inode(struct xfs_inode *); 104extern void xfs_flush_inode(struct xfs_inode *);
108extern void xfs_flush_device(struct xfs_inode *); 105extern void xfs_flush_device(struct xfs_inode *);
109 106
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 25488b6d9881..b52528bbbfff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -33,7 +33,7 @@
33 33
34 34
35/* 35/*
36 * Dedicated vnode inactive/reclaim sync semaphores. 36 * Dedicated vnode inactive/reclaim sync wait queues.
37 * Prime number of hash buckets since address is used as the key. 37 * Prime number of hash buckets since address is used as the key.
38 */ 38 */
39#define NVSYNC 37 39#define NVSYNC 37
@@ -82,24 +82,6 @@ vn_ioerror(
82 xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); 82 xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
83} 83}
84 84
85
86/*
87 * Add a reference to a referenced vnode.
88 */
89bhv_vnode_t *
90vn_hold(
91 bhv_vnode_t *vp)
92{
93 struct inode *inode;
94
95 XFS_STATS_INC(vn_hold);
96
97 inode = igrab(vn_to_inode(vp));
98 ASSERT(inode);
99
100 return vp;
101}
102
103#ifdef XFS_INODE_TRACE 85#ifdef XFS_INODE_TRACE
104 86
105/* 87/*
@@ -108,7 +90,7 @@ vn_hold(
108 */ 90 */
109static inline int xfs_icount(struct xfs_inode *ip) 91static inline int xfs_icount(struct xfs_inode *ip)
110{ 92{
111 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 93 struct inode *vp = VFS_I(ip);
112 94
113 if (vp) 95 if (vp)
114 return vn_count(vp); 96 return vn_count(vp);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 41ca2cec5d31..683ce16210ff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -22,20 +22,6 @@ struct file;
22struct xfs_iomap; 22struct xfs_iomap;
23struct attrlist_cursor_kern; 23struct attrlist_cursor_kern;
24 24
25typedef struct inode bhv_vnode_t;
26
27/*
28 * Vnode to Linux inode mapping.
29 */
30static inline bhv_vnode_t *vn_from_inode(struct inode *inode)
31{
32 return inode;
33}
34static inline struct inode *vn_to_inode(bhv_vnode_t *vnode)
35{
36 return vnode;
37}
38
39/* 25/*
40 * Return values for xfs_inactive. A return value of 26 * Return values for xfs_inactive. A return value of
41 * VN_INACTIVE_NOCACHE implies that the file system behavior 27 * VN_INACTIVE_NOCACHE implies that the file system behavior
@@ -76,57 +62,52 @@ extern void vn_iowait(struct xfs_inode *ip);
76extern void vn_iowake(struct xfs_inode *ip); 62extern void vn_iowake(struct xfs_inode *ip);
77extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); 63extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
78 64
79static inline int vn_count(bhv_vnode_t *vp) 65static inline int vn_count(struct inode *vp)
80{ 66{
81 return atomic_read(&vn_to_inode(vp)->i_count); 67 return atomic_read(&vp->i_count);
82} 68}
83 69
84/* 70#define IHOLD(ip) \
85 * Vnode reference counting functions (and macros for compatibility). 71do { \
86 */ 72 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
87extern bhv_vnode_t *vn_hold(bhv_vnode_t *); 73 atomic_inc(&(VFS_I(ip)->i_count)); \
74 xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
75} while (0)
88 76
89#if defined(XFS_INODE_TRACE) 77#define IRELE(ip) \
90#define VN_HOLD(vp) \ 78do { \
91 ((void)vn_hold(vp), \ 79 xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
92 xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) 80 iput(VFS_I(ip)); \
93#define VN_RELE(vp) \ 81} while (0)
94 (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \
95 iput(vn_to_inode(vp)))
96#else
97#define VN_HOLD(vp) ((void)vn_hold(vp))
98#define VN_RELE(vp) (iput(vn_to_inode(vp)))
99#endif
100 82
101static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp) 83static inline struct inode *vn_grab(struct inode *vp)
102{ 84{
103 struct inode *inode = igrab(vn_to_inode(vp)); 85 return igrab(vp);
104 return inode ? vn_from_inode(inode) : NULL;
105} 86}
106 87
107/* 88/*
108 * Dealing with bad inodes 89 * Dealing with bad inodes
109 */ 90 */
110static inline int VN_BAD(bhv_vnode_t *vp) 91static inline int VN_BAD(struct inode *vp)
111{ 92{
112 return is_bad_inode(vn_to_inode(vp)); 93 return is_bad_inode(vp);
113} 94}
114 95
115/* 96/*
116 * Extracting atime values in various formats 97 * Extracting atime values in various formats
117 */ 98 */
118static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime) 99static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime)
119{ 100{
120 bs_atime->tv_sec = vp->i_atime.tv_sec; 101 bs_atime->tv_sec = vp->i_atime.tv_sec;
121 bs_atime->tv_nsec = vp->i_atime.tv_nsec; 102 bs_atime->tv_nsec = vp->i_atime.tv_nsec;
122} 103}
123 104
124static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts) 105static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts)
125{ 106{
126 *ts = vp->i_atime; 107 *ts = vp->i_atime;
127} 108}
128 109
129static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) 110static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
130{ 111{
131 *tt = vp->i_atime.tv_sec; 112 *tt = vp->i_atime.tv_sec;
132} 113}
@@ -134,9 +115,9 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
134/* 115/*
135 * Some useful predicates. 116 * Some useful predicates.
136 */ 117 */
137#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) 118#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping)
138#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) 119#define VN_CACHED(vp) (vp->i_mapping->nrpages)
139#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ 120#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \
140 PAGECACHE_TAG_DIRTY) 121 PAGECACHE_TAG_DIRTY)
141 122
142 123
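
[Editor's note: the IHOLD()/IRELE() replacements above are multi-statement macros in the usual do { ... } while (0) form, which keeps all statements bound together when the macro sits under an un-braced if. A generic illustration of why the wrapper matters, with made-up names:

#include <linux/kernel.h>

/* Without do-while(0), only the first statement would fall under the if. */
#define COUNT_AND_TRACE(ctr, msg)		\
do {						\
	(ctr)++;				\
	pr_debug("%s: %d\n", (msg), (ctr));	\
} while (0)

static int nholds;

static void maybe_hold(int want_trace)
{
	if (want_trace)
		COUNT_AND_TRACE(nholds, "hold");	/* both statements guarded */
}
]
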
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index fc9f3fb39b7b..f2705f2fd43c 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,11 +101,18 @@ xfs_qm_dqinit(
 	if (brandnewdquot) {
 		dqp->dq_flnext = dqp->dq_flprev = dqp;
 		mutex_init(&dqp->q_qlock);
-		initnsema(&dqp->q_flock, 1, "fdq");
 		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
 
+		/*
+		 * Because we want to use a counting completion, complete
+		 * the flush completion once to allow a single access to
+		 * the flush completion without blocking.
+		 */
+		init_completion(&dqp->q_flush);
+		complete(&dqp->q_flush);
+
 #ifdef XFS_DQUOT_TRACE
-		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
+		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
 		xfs_dqtrace_entry(dqp, "DQINIT");
 #endif
 	} else {
@@ -150,7 +157,6 @@ xfs_qm_dqdestroy(
 	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
 
 	mutex_destroy(&dqp->q_qlock);
-	freesema(&dqp->q_flock);
 	sv_destroy(&dqp->q_pinwait);
 
 #ifdef XFS_DQUOT_TRACE
@@ -431,7 +437,7 @@ xfs_qm_dqalloc(
 	 * when it unlocks the inode. Since we want to keep the quota
 	 * inode around, we bump the vnode ref count now.
 	 */
-	VN_HOLD(XFS_ITOV(quotip));
+	IHOLD(quotip);
 
 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
 	nmaps = 1;
@@ -1211,7 +1217,7 @@ xfs_qm_dqflush(
 	int			error;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+	ASSERT(!completion_done(&dqp->q_flush));
 	xfs_dqtrace_entry(dqp, "DQFLUSH");
 
 	/*
@@ -1348,34 +1354,18 @@ xfs_qm_dqflush_done(
 	xfs_dqfunlock(dqp);
 }
 
-
-int
-xfs_qm_dqflock_nowait(
-	xfs_dquot_t *dqp)
-{
-	int locked;
-
-	locked = cpsema(&((dqp)->q_flock));
-
-	/* XXX ifdef these out */
-	if (locked)
-		(dqp)->dq_flags |= XFS_DQ_FLOCKED;
-	return (locked);
-}
-
-
 int
 xfs_qm_dqlock_nowait(
 	xfs_dquot_t *dqp)
 {
-	return (mutex_trylock(&((dqp)->q_qlock)));
+	return mutex_trylock(&dqp->q_qlock);
 }
 
 void
 xfs_dqlock(
 	xfs_dquot_t *dqp)
 {
-	mutex_lock(&(dqp->q_qlock));
+	mutex_lock(&dqp->q_qlock);
 }
 
 void
@@ -1468,7 +1458,7 @@ xfs_qm_dqpurge(
 	 * if we're turning off quotas. Basically, we need this flush
 	 * lock, and are willing to block on it.
 	 */
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		/*
 		 * Block on the flush lock after nudging dquot buffer,
 		 * if it is incore.
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index f7393bba4e95..8958d0faf8d3 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -82,7 +82,7 @@ typedef struct xfs_dquot {
 	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
 	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
 	mutex_t		 q_qlock;	/* quota lock */
-	sema_t		 q_flock;	/* flush lock */
+	struct completion q_flush;	/* flush completion queue */
 	uint		 q_pincount;	/* pin count for this dquot */
 	sv_t		 q_pinwait;	/* sync var for pinning */
 #ifdef XFS_DQUOT_TRACE
@@ -113,17 +113,25 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
 
 
 /*
- * The following three routines simply manage the q_flock
- * semaphore embedded in the dquot. This semaphore synchronizes
- * processes attempting to flush the in-core dquot back to disk.
+ * Manage the q_flush completion queue embedded in the dquot. This completion
+ * queue synchronizes processes attempting to flush the in-core dquot back to
+ * disk.
  */
-#define xfs_dqflock(dqp)	 { psema(&((dqp)->q_flock), PINOD | PRECALC);\
-			   (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp)	 { ASSERT(issemalocked(&((dqp)->q_flock))); \
-			   vsema(&((dqp)->q_flock)); \
-			   (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
+static inline void xfs_dqflock(xfs_dquot_t *dqp)
+{
+	wait_for_completion(&dqp->q_flush);
+}
+
+static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+{
+	return try_wait_for_completion(&dqp->q_flush);
+}
+
+static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+{
+	complete(&dqp->q_flush);
+}
 
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp)	(issemalocked(&((dqp)->q_flock)))
 #define XFS_DQ_IS_ON_FREELIST(dqp)  ((dqp)->dq_flnext != (dqp))
 #define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
 #define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
@@ -167,7 +175,6 @@ extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
 extern int		xfs_qm_dqpurge(xfs_dquot_t *);
 extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
 extern int		xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern int		xfs_qm_dqflock_nowait(xfs_dquot_t *);
 extern void		xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
 extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
 					xfs_disk_dquot_t *);
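
[Annotation, not part of the patch] The q_flush conversion above replaces the
old q_flock semaphore with a completion used as a single-holder flush lock:
init_completion() starts with a done-count of zero (locked), so the patch
calls complete() once at init time to seed one "token" (unlocked). A minimal
sketch of that pattern, using the same kernel completion API the patch uses
(the demo_* names are hypothetical):

	#include <linux/completion.h>

	struct demo_obj {
		struct completion flush;	/* done-count 1 == unlocked */
	};

	static void demo_init(struct demo_obj *obj)
	{
		init_completion(&obj->flush);
		complete(&obj->flush);		/* seed one token: unlocked */
	}

	static int demo_trylock(struct demo_obj *obj)
	{
		/* consume the token if present; never blocks */
		return try_wait_for_completion(&obj->flush);
	}

	static void demo_lock(struct demo_obj *obj)
	{
		wait_for_completion(&obj->flush);	/* block for the token */
	}

	static void demo_unlock(struct demo_obj *obj)
	{
		complete(&obj->flush);			/* return the token */
	}

	static int demo_is_locked(struct demo_obj *obj)
	{
		/* completion_done() is false while the token is held */
		return !completion_done(&obj->flush);
	}

This is why the asserts elsewhere in the patch read
!completion_done(&dqp->q_flush) where XFS_DQ_IS_FLUSH_LOCKED() used to be.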
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 08d2fc89e6a1..f028644caa5e 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -151,7 +151,7 @@ xfs_qm_dquot_logitem_push(
 	dqp = logitem->qli_dquot;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+	ASSERT(!completion_done(&dqp->q_flush));
 
 	/*
 	 * Since we were able to lock the dquot's flush lock and
@@ -245,7 +245,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	 * inode flush completed and the inode was taken off the AIL.
 	 * So, just get out.
 	 */
-	if (!issemalocked(&(dqp->q_flock)) ||
+	if (completion_done(&dqp->q_flush) ||
 	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
 		qip->qli_pushbuf_flag = 0;
 		xfs_dqunlock(dqp);
@@ -258,7 +258,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
-				  issemalocked(&(dqp->q_flock)));
+				  !completion_done(&dqp->q_flush));
 			qip->qli_pushbuf_flag = 0;
 			xfs_dqunlock(dqp);
 
@@ -317,7 +317,7 @@ xfs_qm_dquot_logitem_trylock(
 		return (XFS_ITEM_LOCKED);
 
 	retval = XFS_ITEM_SUCCESS;
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		/*
 		 * The dquot is already being flushed.	It may have been
 		 * flushed delayed write, however, and we don't want to
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 021934a3d456..df0ffef9775a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -310,8 +310,7 @@ xfs_qm_unmount_quotadestroy(
  */
 void
 xfs_qm_mount_quotas(
-	xfs_mount_t	*mp,
-	int		mfsi_flags)
+	xfs_mount_t	*mp)
 {
 	int		error = 0;
 	uint		sbf;
@@ -346,8 +345,7 @@ xfs_qm_mount_quotas(
 	/*
 	 * If any of the quotas are not consistent, do a quotacheck.
 	 */
-	if (XFS_QM_NEED_QUOTACHECK(mp) &&
-	    !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
+	if (XFS_QM_NEED_QUOTACHECK(mp)) {
 		error = xfs_qm_quotacheck(mp);
 		if (error) {
 			/* Quotacheck failed and disabled quotas. */
@@ -484,7 +482,7 @@ again:
 			xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
 			/* XXX a sentinel would be better */
 			recl = XFS_QI_MPLRECLAIMS(mp);
-			if (! xfs_qm_dqflock_nowait(dqp)) {
+			if (!xfs_dqflock_nowait(dqp)) {
 				/*
 				 * If we can't grab the flush lock then check
 				 * to see if the dquot has been flushed delayed
@@ -1062,7 +1060,7 @@ xfs_qm_sync(
 
 		/* XXX a sentinel would be better */
 		recl = XFS_QI_MPLRECLAIMS(mp);
-		if (! xfs_qm_dqflock_nowait(dqp)) {
+		if (!xfs_dqflock_nowait(dqp)) {
 			if (nowait) {
 				xfs_dqunlock(dqp);
 				continue;
@@ -2079,7 +2077,7 @@ xfs_qm_shake_freelist(
 	 * Try to grab the flush lock. If this dquot is in the process of
 	 * getting flushed to disk, we don't want to reclaim it.
 	 */
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		xfs_dqunlock(dqp);
 		dqp = dqp->dq_flnext;
 		continue;
@@ -2257,7 +2255,7 @@ xfs_qm_dqreclaim_one(void)
 	 * Try to grab the flush lock. If this dquot is in the process of
 	 * getting flushed to disk, we don't want to reclaim it.
 	 */
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		xfs_dqunlock(dqp);
 		continue;
 	}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index cd2300e374af..44f25349e478 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct {
 #define XFS_QM_RELE(xqm)	((xqm)->qm_nrefs--)
 
 extern void		xfs_qm_destroy_quotainfo(xfs_mount_t *);
-extern void		xfs_qm_mount_quotas(xfs_mount_t *, int);
+extern void		xfs_qm_mount_quotas(xfs_mount_t *);
 extern int		xfs_qm_quotacheck(xfs_mount_t *);
 extern void		xfs_qm_unmount_quotadestroy(xfs_mount_t *);
 extern int		xfs_qm_unmount_quotas(xfs_mount_t *);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index f4f6c4c861d7..eea2e60b456b 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -162,7 +162,7 @@ xfs_qm_newmount(
 		 * mounting, and get on with the boring life
 		 * without disk quotas.
 		 */
-		xfs_qm_mount_quotas(mp, 0);
+		xfs_qm_mount_quotas(mp);
 	} else {
 		/*
 		 * Clear the quota flags, but remember them. This
@@ -184,13 +184,12 @@ STATIC int
 xfs_qm_endmount(
 	xfs_mount_t	*mp,
 	uint		needquotamount,
-	uint		quotaflags,
-	int		mfsi_flags)
+	uint		quotaflags)
 {
 	if (needquotamount) {
 		ASSERT(mp->m_qflags == 0);
 		mp->m_qflags = quotaflags;
-		xfs_qm_mount_quotas(mp, mfsi_flags);
+		xfs_qm_mount_quotas(mp);
 	}
 
 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index adfb8723f65a..1a3b803dfa55 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1034,7 +1034,7 @@ xfs_qm_dqrele_all_inodes(
 {
 	xfs_inode_t	*ip, *topino;
 	uint		ireclaims;
-	bhv_vnode_t	*vp;
+	struct inode	*vp;
 	boolean_t	vnode_refd;
 
 	ASSERT(mp->m_quotainfo);
@@ -1059,7 +1059,7 @@ again:
 			ip = ip->i_mnext;
 			continue;
 		}
-		vp = XFS_ITOV_NULL(ip);
+		vp = VFS_I(ip);
 		if (!vp) {
 			ASSERT(ip->i_udquot == NULL);
 			ASSERT(ip->i_gdquot == NULL);
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 3e4648ad9cfc..b2f639a1416f 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -37,15 +37,15 @@
 #include <linux/capability.h>
 #include <linux/posix_acl_xattr.h>
 
-STATIC int	xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *);
+STATIC int	xfs_acl_setmode(struct inode *, xfs_acl_t *, int *);
 STATIC void     xfs_acl_filter_mode(mode_t, xfs_acl_t *);
 STATIC void	xfs_acl_get_endian(xfs_acl_t *);
 STATIC int	xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
 STATIC int	xfs_acl_invalid(xfs_acl_t *);
 STATIC void	xfs_acl_sync_mode(mode_t, xfs_acl_t *);
-STATIC void	xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *);
-STATIC void	xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *);
-STATIC int	xfs_acl_allow_set(bhv_vnode_t *, int);
+STATIC void	xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *);
+STATIC void	xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *);
+STATIC int	xfs_acl_allow_set(struct inode *, int);
 
 kmem_zone_t *xfs_acl_zone;
 
@@ -55,7 +55,7 @@ kmem_zone_t *xfs_acl_zone;
  */
 int
 xfs_acl_vhasacl_access(
-	bhv_vnode_t	*vp)
+	struct inode	*vp)
 {
 	int		error;
 
@@ -68,7 +68,7 @@ xfs_acl_vhasacl_access(
  */
 int
 xfs_acl_vhasacl_default(
-	bhv_vnode_t	*vp)
+	struct inode	*vp)
 {
 	int		error;
 
@@ -207,7 +207,7 @@ posix_acl_xfs_to_xattr(
 
 int
 xfs_acl_vget(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	void		*acl,
 	size_t		size,
 	int		kind)
@@ -217,7 +217,6 @@ xfs_acl_vget(
 	posix_acl_xattr_header	*ext_acl = acl;
 	int			flags = 0;
 
-	VN_HOLD(vp);
 	if(size) {
 		if (!(_ACL_ALLOC(xfs_acl))) {
 			error = ENOMEM;
@@ -239,11 +238,10 @@ xfs_acl_vget(
 			goto out;
 		}
 		if (kind == _ACL_TYPE_ACCESS)
-			xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
+			xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl);
 		error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
 	}
 out:
-	VN_RELE(vp);
 	if(xfs_acl)
 		_ACL_FREE(xfs_acl);
 	return -error;
@@ -251,28 +249,26 @@ out:
 
 int
 xfs_acl_vremove(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	int		kind)
 {
 	int		error;
 
-	VN_HOLD(vp);
 	error = xfs_acl_allow_set(vp, kind);
 	if (!error) {
-		error = xfs_attr_remove(xfs_vtoi(vp),
+		error = xfs_attr_remove(XFS_I(vp),
 					kind == _ACL_TYPE_DEFAULT?
 					SGI_ACL_DEFAULT: SGI_ACL_FILE,
 					ATTR_ROOT);
 		if (error == ENOATTR)
 			error = 0;	/* 'scool */
 	}
-	VN_RELE(vp);
 	return -error;
 }
 
 int
 xfs_acl_vset(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	void		*acl,
 	size_t		size,
 	int		kind)
@@ -298,7 +294,6 @@ xfs_acl_vset(
 		return 0;
 	}
 
-	VN_HOLD(vp);
 	error = xfs_acl_allow_set(vp, kind);
 
 	/* Incoming ACL exists, set file mode based on its value */
@@ -321,7 +316,6 @@ xfs_acl_vset(
 	}
 
 out:
-	VN_RELE(vp);
 	_ACL_FREE(xfs_acl);
 	return -error;
 }
@@ -363,7 +357,7 @@ xfs_acl_iaccess(
 
 STATIC int
 xfs_acl_allow_set(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	int		kind)
 {
 	if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
@@ -372,7 +366,7 @@ xfs_acl_allow_set(
 		return ENOTDIR;
 	if (vp->i_sb->s_flags & MS_RDONLY)
 		return EROFS;
-	if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
+	if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
 		return EPERM;
 	return 0;
 }
@@ -566,7 +560,7 @@ xfs_acl_get_endian(
  */
 STATIC void
 xfs_acl_get_attr(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*aclp,
 	int		kind,
 	int		flags,
@@ -576,7 +570,7 @@ xfs_acl_get_attr(
 
 	ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
 	flags |= ATTR_ROOT;
-	*error = xfs_attr_get(xfs_vtoi(vp),
+	*error = xfs_attr_get(XFS_I(vp),
 			      kind == _ACL_TYPE_ACCESS ?
 			      SGI_ACL_FILE : SGI_ACL_DEFAULT,
 			      (char *)aclp, &len, flags);
@@ -590,7 +584,7 @@ xfs_acl_get_attr(
  */
 STATIC void
 xfs_acl_set_attr(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*aclp,
 	int		kind,
 	int		*error)
@@ -615,7 +609,7 @@ xfs_acl_set_attr(
 		INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
 	}
 	INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
-	*error = xfs_attr_set(xfs_vtoi(vp),
+	*error = xfs_attr_set(XFS_I(vp),
 			      kind == _ACL_TYPE_ACCESS ?
 			      SGI_ACL_FILE: SGI_ACL_DEFAULT,
 			      (char *)newacl, len, ATTR_ROOT);
@@ -624,7 +618,7 @@ xfs_acl_set_attr(
 
 int
 xfs_acl_vtoacl(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*access_acl,
 	xfs_acl_t	*default_acl)
 {
@@ -639,7 +633,7 @@ xfs_acl_vtoacl(
 		if (error)
 			access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
 		else /* We have a good ACL and the file mode, synchronize. */
-			xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
+			xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl);
 	}
 
 	if (default_acl) {
@@ -656,7 +650,7 @@ xfs_acl_vtoacl(
  */
 int
 xfs_acl_inherit(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	mode_t		mode,
 	xfs_acl_t	*pdaclp)
 {
@@ -715,7 +709,7 @@ out_error:
  */
 STATIC int
 xfs_acl_setmode(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*acl,
 	int		*basicperms)
 {
@@ -734,7 +728,7 @@ xfs_acl_setmode(
 	 * mode. The m:: bits take precedence over the g:: bits.
 	 */
 	iattr.ia_valid = ATTR_MODE;
-	iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode;
+	iattr.ia_mode = XFS_I(vp)->i_d.di_mode;
 	iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
 	ap = acl->acl_entry;
 	for (i = 0; i < acl->acl_cnt; ++i) {
@@ -764,7 +758,7 @@ xfs_acl_setmode(
 	if (gap && nomask)
 		iattr.ia_mode |= gap->ae_perm << 3;
 
-	return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred);
+	return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred);
 }
 
 /*
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 323ee94cf831..a4e293b93efa 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -59,14 +59,14 @@ extern struct kmem_zone *xfs_acl_zone;
 		(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
 #define xfs_acl_zone_destroy(zone)	kmem_zone_destroy(zone)
 
-extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *);
+extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *);
 extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
-extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *);
-extern int xfs_acl_vhasacl_access(bhv_vnode_t *);
-extern int xfs_acl_vhasacl_default(bhv_vnode_t *);
-extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vremove(bhv_vnode_t *, int);
+extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *);
+extern int xfs_acl_vhasacl_access(struct inode *);
+extern int xfs_acl_vhasacl_default(struct inode *);
+extern int xfs_acl_vset(struct inode *, void *, size_t, int);
+extern int xfs_acl_vget(struct inode *, void *, size_t, int);
+extern int xfs_acl_vremove(struct inode *, int);
 
 #define _ACL_PERM_INVALID(perm)	((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
 
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index f9472a2076d4..0b3b5efe848c 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -92,16 +92,6 @@
 	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
     }
 
-/* define generic INT_ macros */
-
-#define INT_GET(reference,arch) \
-    (((arch) == ARCH_NOCONVERT) \
-	? \
-	    (reference) \
-	: \
-	    INT_SWAP((reference),(reference)) \
-    )
-
 /* does not return a value */
 #define INT_SET(reference,arch,valueref) \
     (__builtin_constant_p(valueref) ? \
@@ -112,64 +102,6 @@
 	) \
     )
 
-/* does not return a value */
-#define INT_MOD_EXPR(reference,arch,code) \
-    (((arch) == ARCH_NOCONVERT) \
-	? \
-	    (void)((reference) code) \
-	: \
-	    (void)( \
-		(reference) = INT_GET((reference),arch) , \
-		((reference) code), \
-		INT_SET(reference, arch, reference) \
-	    ) \
-    )
-
-/* does not return a value */
-#define INT_MOD(reference,arch,delta) \
-    (void)( \
-	INT_MOD_EXPR(reference,arch,+=(delta)) \
-    )
-
-/*
- * INT_COPY - copy a value between two locations with the
- *	      _same architecture_ but _potentially different sizes_
- *
- *	    if the types of the two parameters are equal or they are
- *		in native architecture, a simple copy is done
- *
- *	    otherwise, architecture conversions are done
- *
- */
-
-/* does not return a value */
-#define INT_COPY(dst,src,arch) \
-    ( \
-	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
-	? \
-	    (void)((dst) = (src)) \
-	: \
-	    INT_SET(dst, arch, INT_GET(src, arch)) \
-    )
-
-/*
- * INT_XLATE - copy a value in either direction between two locations
- *	       with different architectures
- *
- *		    dir < 0	- copy from memory to buffer (native to arch)
- *		    dir > 0	- copy from buffer to memory (arch to native)
- */
-
-/* does not return a value */
-#define INT_XLATE(buf,mem,dir,arch) {\
-    ASSERT(dir); \
-    if (dir>0) { \
-	(mem)=INT_GET(buf, arch); \
-    } else { \
-	INT_SET(buf, arch, mem); \
-    } \
-}
-
 /*
  * In directories inode numbers are stored as unaligned arrays of unsigned
  * 8bit integers on disk.
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 78de80e3caa2..f7cdc28aff41 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -194,6 +194,46 @@ xfs_attr_get(
 	return(error);
 }
 
+/*
+ * Calculate how many blocks we need for the new attribute,
+ */
+int
+xfs_attr_calc_size(
+	struct xfs_inode	*ip,
+	int			namelen,
+	int			valuelen,
+	int			*local)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	int			size;
+	int			nblks;
+
+	/*
+	 * Determine space new attribute will use, and if it would be
+	 * "local" or "remote" (note: local != inline).
+	 */
+	size = xfs_attr_leaf_newentsize(namelen, valuelen,
+					mp->m_sb.sb_blocksize, local);
+
+	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+	if (*local) {
+		if (size > (mp->m_sb.sb_blocksize >> 1)) {
+			/* Double split possible */
+			nblks *= 2;
+		}
+	} else {
+		/*
+		 * Out of line attribute, cannot double split, but
+		 * make room for the attribute value itself.
+		 */
+		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
+		nblks += dblocks;
+		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+	}
+
+	return nblks;
+}
+
 STATIC int
 xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 		 char *value, int valuelen, int flags)
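
[Annotation, not part of the patch] The new xfs_attr_calc_size() hoists the
reservation math out of xfs_attr_set_int(): a "local" entry larger than half
a block must reserve for a possible double leaf split, while a "remote" value
adds blocks for the value itself. A standalone sketch with made-up constants
(DAENTER_RES and b_to_fsb() stand in for XFS_DAENTER_SPACE_RES() and
XFS_B_TO_FSB(); real values depend on filesystem geometry, and the real
routine also adds XFS_NEXTENTADD_SPACE_RES() in the remote case):

	#include <stdio.h>

	#define BLOCKSIZE	4096
	#define DAENTER_RES	10	/* hypothetical da-tree reserve */

	static int b_to_fsb(int bytes)	/* round up to whole fs blocks */
	{
		return (bytes + BLOCKSIZE - 1) / BLOCKSIZE;
	}

	static int calc_size(int entsize, int valuelen, int local)
	{
		int nblks = DAENTER_RES;

		if (local) {
			/* big local entry may force a double leaf split */
			if (entsize > BLOCKSIZE / 2)
				nblks *= 2;
		} else {
			/* remote value: add blocks for the value itself */
			nblks += b_to_fsb(valuelen);
		}
		return nblks;
	}

	int main(void)
	{
		printf("small local attr: %d blocks\n", calc_size(100, 64, 1));
		printf("large local attr: %d blocks\n", calc_size(3000, 2900, 1));
		printf("remote 64k value: %d blocks\n", calc_size(0, 65536, 0));
		return 0;
	}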
@@ -202,10 +242,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 	xfs_fsblock_t	firstblock;
 	xfs_bmap_free_t flist;
 	int		error, err2, committed;
-	int		local, size;
-	uint		nblks;
 	xfs_mount_t	*mp = dp->i_mount;
 	int		rsvd = (flags & ATTR_ROOT) != 0;
+	int		local;
 
 	/*
 	 * Attach the dquots to the inode.
@@ -241,30 +280,8 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 	args.whichfork = XFS_ATTR_FORK;
 	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
 
-	/*
-	 * Determine space new attribute will use, and if it would be
-	 * "local" or "remote" (note: local != inline).
-	 */
-	size = xfs_attr_leaf_newentsize(name->len, valuelen,
-					mp->m_sb.sb_blocksize, &local);
-
-	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
-	if (local) {
-		if (size > (mp->m_sb.sb_blocksize >> 1)) {
-			/* Double split possible */
-			nblks <<= 1;
-		}
-	} else {
-		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
-		/* Out of line attribute, cannot double split, but make
-		 * room for the attribute value itself.
-		 */
-		nblks += dblocks;
-		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
-	}
-
 	/* Size is now blocks for attribute data */
-	args.total = nblks;
+	args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local);
 
 	/*
 	 * Start our first transaction of the day.
@@ -286,18 +303,17 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 	if (rsvd)
 		args.trans->t_flags |= XFS_TRANS_RESERVE;
 
-	if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
-			XFS_ATTRSET_LOG_RES(mp, nblks),
-			0, XFS_TRANS_PERM_LOG_RES,
-			XFS_ATTRSET_LOG_COUNT))) {
+	if ((error = xfs_trans_reserve(args.trans, args.total,
+			XFS_ATTRSET_LOG_RES(mp, args.total), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
 		xfs_trans_cancel(args.trans, 0);
 		return(error);
 	}
 	xfs_ilock(dp, XFS_ILOCK_EXCL);
 
-	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
+	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0,
 			rsvd ?  XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
 			XFS_QMOPT_RES_REGBLKS);
 	if (error) {
 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
@@ -384,7 +400,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 		 * Commit the leaf transformation. We'll need another (linked)
 		 * transaction to add the new attribute to the leaf.
 		 */
-		if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+
+		error = xfs_trans_roll(&args.trans, dp);
+		if (error)
 			goto out;
 
 	}
@@ -964,7 +982,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
 	 * Commit the current trans (including the inode) and start
 	 * a new one.
 	 */
-	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+	error = xfs_trans_roll(&args->trans, dp);
+	if (error)
 		return (error);
 
 	/*
@@ -978,7 +997,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
 	 * Commit the transaction that added the attr name so that
 	 * later routines can manage their own transactions.
 	 */
-	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+	error = xfs_trans_roll(&args->trans, dp);
+	if (error)
 		return (error);
 
 	/*
@@ -1067,7 +1087,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
 		/*
 		 * Commit the remove and start the next trans in series.
 		 */
-		error = xfs_attr_rolltrans(&args->trans, dp);
+		error = xfs_trans_roll(&args->trans, dp);
 
 	} else if (args->rmtblkno > 0) {
 		/*
@@ -1298,7 +1318,8 @@ restart:
 			 * Commit the node conversion and start the next
 			 * trans in the chain.
 			 */
-			if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			error = xfs_trans_roll(&args->trans, dp);
+			if (error)
 				goto out;
 
 			goto restart;
@@ -1349,7 +1370,8 @@ restart:
 		 * Commit the leaf addition or btree split and start the next
 		 * trans in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			goto out;
 
 		/*
@@ -1449,7 +1471,8 @@ restart:
 		/*
 		 * Commit and start the next trans in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			goto out;
 
 	} else if (args->rmtblkno > 0) {
@@ -1581,7 +1604,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
 		/*
 		 * Commit the Btree join operation and start a new trans.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			goto out;
 	}
 
@@ -2082,7 +2106,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
 		/*
 		 * Start the next trans in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			return (error);
 	}
 
@@ -2232,7 +2257,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
 		/*
 		 * Close out trans and start the next one in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, args->dp)))
+		error = xfs_trans_roll(&args->trans, args->dp);
+		if (error)
 			return (error);
 	}
 	return(0);
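
[Annotation, not part of the patch] Every xfs_attr_rolltrans() call site in
this file now uses the generic xfs_trans_roll(). The pattern both implement,
following the xfs_attr_rolltrans() body removed from fs/xfs/xfs_attr_leaf.c
below, is: log the inode, duplicate the transaction, commit the old one, then
re-reserve and rejoin so the inode stays locked across the whole chain. A
sketch of that pattern (hypothetical helper name, error handling trimmed,
same XFS transaction API the removed code used):

	static int roll_transaction(xfs_trans_t **transp, xfs_inode_t *dp)
	{
		xfs_trans_t	*trans = *transp;
		unsigned int	logres = trans->t_log_res;
		unsigned int	count = trans->t_log_count;
		int		error;

		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
		*transp = xfs_trans_dup(trans);		/* chain a new trans */
		error = xfs_trans_commit(trans, 0);	/* commit the old one */
		if (error)
			return error;	/* caller cancels the duplicate */
		trans = *transp;
		error = xfs_trans_reserve(trans, 0, logres, 0,
					  XFS_TRANS_PERM_LOG_RES, count);
		if (!error) {
			xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(trans, dp);	/* inode stays held */
		}
		return error;
	}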
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 8b2d31c19e4d..fb3b2a68b9b9 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -129,6 +129,7 @@ typedef struct xfs_attr_list_context {
 /*
  * Overall external interface routines.
  */
+int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
 int xfs_attr_inactive(struct xfs_inode *dp);
 int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 23ef5d7c87e1..79da6b2ea99e 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -2498,9 +2498,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
-
-	return(error);
+	return xfs_trans_roll(&args->trans, args->dp);
 }
 
 /*
@@ -2547,9 +2545,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
-
-	return(error);
+	return xfs_trans_roll(&args->trans, args->dp);
 }
 
 /*
@@ -2665,7 +2661,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
+	error = xfs_trans_roll(&args->trans, args->dp);
 
 	return(error);
 }
@@ -2723,7 +2719,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
 	/*
 	 * Commit the invalidate and start the next transaction.
 	 */
-	error = xfs_attr_rolltrans(trans, dp);
+	error = xfs_trans_roll(trans, dp);
 
 	return (error);
 }
@@ -2825,7 +2821,8 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
 		/*
 		 * Atomically commit the whole invalidate stuff.
 		 */
-		if ((error = xfs_attr_rolltrans(trans, dp)))
+		error = xfs_trans_roll(trans, dp);
+		if (error)
 			return (error);
 	}
 
@@ -2964,7 +2961,8 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
 		/*
 		 * Roll to next transaction.
 		 */
-		if ((error = xfs_attr_rolltrans(trans, dp)))
+		error = xfs_trans_roll(trans, dp);
+		if (error)
 			return (error);
 	}
 
@@ -2974,60 +2972,3 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
 
 	return(0);
 }
-
-
-/*
- * Roll from one trans in the sequence of PERMANENT transactions to the next.
- */
-int
-xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
-{
-	xfs_trans_t *trans;
-	unsigned int logres, count;
-	int	error;
-
-	/*
-	 * Ensure that the inode is always logged.
-	 */
-	trans = *transp;
-	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
-
-	/*
-	 * Copy the critical parameters from one trans to the next.
-	 */
-	logres = trans->t_log_res;
-	count = trans->t_log_count;
-	*transp = xfs_trans_dup(trans);
-
-	/*
-	 * Commit the current transaction.
-	 * If this commit failed, then it'd just unlock those items that
-	 * are not marked ihold. That also means that a filesystem shutdown
-	 * is in progress. The caller takes the responsibility to cancel
-	 * the duplicate transaction that gets returned.
-	 */
-	if ((error = xfs_trans_commit(trans, 0)))
-		return (error);
-
-	trans = *transp;
-
-	/*
-	 * Reserve space in the log for th next transaction.
-	 * This also pushes items in the "AIL", the list of logged items,
-	 * out to disk if they are taking up space at the tail of the log
-	 * that we want to use.  This requires that either nothing be locked
-	 * across this call, or that anything that is locked be logged in
-	 * the prior and the next transactions.
-	 */
-	error = xfs_trans_reserve(trans, 0, logres, 0,
-				  XFS_TRANS_PERM_LOG_RES, count);
-	/*
-	 *  Ensure that the inode is in the new transaction and locked.
-	 */
-	if (!error) {
-		xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
-		xfs_trans_ihold(trans, dp);
-	}
-	return (error);
-
-}
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 5ecf437b7825..83e9af417ca2 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -274,6 +274,4 @@ int	xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
 				   struct xfs_dabuf *leaf2_bp);
 int	xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
 					int *local);
-int	xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
-
 #endif	/* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index fab0b6d5a41b..48228848f5ae 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -25,109 +25,6 @@
  * XFS bit manipulation routines, used in non-realtime code.
  */
 
-#ifndef HAVE_ARCH_HIGHBIT
-/*
- * Index of high bit number in byte, -1 for none set, 0..7 otherwise.
- */
-static const char xfs_highbit[256] = {
-       -1, 0, 1, 1, 2, 2, 2, 2,			/* 00 .. 07 */
-	3, 3, 3, 3, 3, 3, 3, 3,			/* 08 .. 0f */
-	4, 4, 4, 4, 4, 4, 4, 4,			/* 10 .. 17 */
-	4, 4, 4, 4, 4, 4, 4, 4,			/* 18 .. 1f */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 20 .. 27 */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 28 .. 2f */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 30 .. 37 */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 38 .. 3f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 40 .. 47 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 48 .. 4f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 50 .. 57 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 58 .. 5f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 60 .. 67 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 68 .. 6f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 70 .. 77 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 78 .. 7f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 80 .. 87 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 88 .. 8f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 90 .. 97 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 98 .. 9f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* a0 .. a7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* a8 .. af */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* b0 .. b7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* b8 .. bf */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* c0 .. c7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* c8 .. cf */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* d0 .. d7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* d8 .. df */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* e0 .. e7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* e8 .. ef */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* f0 .. f7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* f8 .. ff */
-};
-#endif
-
-/*
- * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
- */
-inline int
-xfs_highbit32(
-	__uint32_t	v)
-{
-#ifdef HAVE_ARCH_HIGHBIT
-	return highbit32(v);
-#else
-	int		i;
-
-	if (v & 0xffff0000)
-		if (v & 0xff000000)
-			i = 24;
-		else
-			i = 16;
-	else if (v & 0x0000ffff)
-		if (v & 0x0000ff00)
-			i = 8;
-		else
-			i = 0;
-	else
-		return -1;
-	return i + xfs_highbit[(v >> i) & 0xff];
-#endif
-}
-
-/*
- * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_lowbit64(
-	__uint64_t	v)
-{
-	__uint32_t	w = (__uint32_t)v;
-	int		n = 0;
-
-	if (w) {	/* lower bits */
-		n = ffs(w);
-	} else {	/* upper bits */
-		w = (__uint32_t)(v >> 32);
-		if (w && (n = ffs(w)))
-			n += 32;
-	}
-	return n - 1;
-}
-
-/*
- * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_highbit64(
-	__uint64_t	v)
-{
-	__uint32_t	h = (__uint32_t)(v >> 32);
-
-	if (h)
-		return xfs_highbit32(h) + 32;
-	return xfs_highbit32((__uint32_t)v);
-}
-
-
 /*
  * Return whether bitmap is empty.
  * Size is number of words in the bitmap, which is padded to word boundary
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index 082641a9782c..8e0e463dae2d 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -47,13 +47,39 @@ static inline __uint64_t xfs_mask64lo(int n)
 }
 
 /* Get high bit set out of 32-bit argument, -1 if none set */
-extern int xfs_highbit32(__uint32_t v);
+static inline int xfs_highbit32(__uint32_t v)
+{
+	return fls(v) - 1;
+}
+
+/* Get high bit set out of 64-bit argument, -1 if none set */
+static inline int xfs_highbit64(__uint64_t v)
+{
+	return fls64(v) - 1;
+}
+
+/* Get low bit set out of 32-bit argument, -1 if none set */
+static inline int xfs_lowbit32(__uint32_t v)
+{
+	unsigned long	t = v;
+	return (v) ? find_first_bit(&t, 32) : -1;
+}
 
 /* Get low bit set out of 64-bit argument, -1 if none set */
-extern int xfs_lowbit64(__uint64_t v);
-
-/* Get high bit set out of 64-bit argument, -1 if none set */
-extern int xfs_highbit64(__uint64_t);
+static inline int xfs_lowbit64(__uint64_t v)
+{
+	__uint32_t	w = (__uint32_t)v;
+	int		n = 0;
+
+	if (w) {	/* lower bits */
+		n = ffs(w);
+	} else {	/* upper bits */
+		w = (__uint32_t)(v >> 32);
+		if (w && (n = ffs(w)))
+			n += 32;
+	}
+	return n - 1;
+}
 
 /* Return whether bitmap is empty (1 == empty) */
 extern int xfs_bitmap_empty(uint *map, uint size);
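
[Annotation, not part of the patch] The open-coded bit scans removed above
become thin wrappers over the kernel's fls()/fls64()/ffs(). A userspace
sketch checking that the fls()-based form keeps the old "-1 if none set"
contract (fls32() is a portable stand-in for the kernel's fls()):

	#include <assert.h>
	#include <stdint.h>

	static int fls32(uint32_t v)	/* 1-based highest set bit, 0 if none */
	{
		return v ? 32 - __builtin_clz(v) : 0;
	}

	static int highbit32(uint32_t v)	/* mirrors new xfs_highbit32() */
	{
		return fls32(v) - 1;
	}

	int main(void)
	{
		assert(highbit32(0) == -1);		/* none set */
		assert(highbit32(1) == 0);
		assert(highbit32(0x80000000u) == 31);
		return 0;
	}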
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3c4beb3a4326..a1aab9275d5a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -384,14 +384,14 @@ xfs_bmap_count_tree(
 	int			levelin,
 	int			*count);
 
-STATIC int
+STATIC void
 xfs_bmap_count_leaves(
 	xfs_ifork_t		*ifp,
 	xfs_extnum_t		idx,
 	int			numrecs,
 	int			*count);
 
-STATIC int
+STATIC void
 xfs_bmap_disk_count_leaves(
 	xfs_extnum_t		idx,
 	xfs_bmbt_block_t	*block,
@@ -4000,7 +4000,7 @@ xfs_bmap_add_attrfork(
 		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 	}
 	ASSERT(ip->i_d.di_anextents == 0);
-	VN_HOLD(XFS_ITOV(ip));
+	IHOLD(ip);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	switch (ip->i_d.di_format) {
@@ -6096,7 +6096,7 @@ xfs_bmap_get_bp(
 	tp = cur->bc_tp;
 	licp = &tp->t_items;
 	while (!bp && licp != NULL) {
-		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+		if (xfs_lic_are_all_free(licp)) {
 			licp = licp->lic_next;
 			continue;
 		}
@@ -6106,11 +6106,11 @@ xfs_bmap_get_bp(
 			xfs_buf_log_item_t	*bip;
 			xfs_buf_t		*lbp;
 
-			if (XFS_LIC_ISFREE(licp, i)) {
+			if (xfs_lic_isfree(licp, i)) {
 				continue;
 			}
 
-			lidp = XFS_LIC_SLOT(licp, i);
+			lidp = xfs_lic_slot(licp, i);
 			lip = lidp->lid_item;
 			if (lip->li_type != XFS_LI_BUF)
 				continue;
@@ -6367,13 +6367,9 @@ xfs_bmap_count_blocks(
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
-		if (unlikely(xfs_bmap_count_leaves(ifp, 0,
-			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
-			count) < 0)) {
-			XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
-					 XFS_ERRLEVEL_LOW, mp);
-			return XFS_ERROR(EFSCORRUPTED);
-		}
+		xfs_bmap_count_leaves(ifp, 0,
+			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
+			count);
 		return 0;
 	}
 
@@ -6454,13 +6450,7 @@ xfs_bmap_count_tree(
 	for (;;) {
 		nextbno = be64_to_cpu(block->bb_rightsib);
 		numrecs = be16_to_cpu(block->bb_numrecs);
-		if (unlikely(xfs_bmap_disk_count_leaves(0,
-				block, numrecs, count) < 0)) {
-			xfs_trans_brelse(tp, bp);
-			XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
-					 XFS_ERRLEVEL_LOW, mp);
-			return XFS_ERROR(EFSCORRUPTED);
-		}
+		xfs_bmap_disk_count_leaves(0, block, numrecs, count);
 		xfs_trans_brelse(tp, bp);
 		if (nextbno == NULLFSBLOCK)
 			break;
@@ -6478,7 +6468,7 @@ xfs_bmap_count_tree(
 /*
  * Count leaf blocks given a range of extent records.
  */
-STATIC int
+STATIC void
 xfs_bmap_count_leaves(
 	xfs_ifork_t		*ifp,
 	xfs_extnum_t		idx,
@@ -6491,14 +6481,13 @@ xfs_bmap_count_leaves(
 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
 		*count += xfs_bmbt_get_blockcount(frp);
 	}
-	return 0;
 }
 
 /*
  * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
-STATIC int
+STATIC void
 xfs_bmap_disk_count_leaves(
 	xfs_extnum_t		idx,
 	xfs_bmbt_block_t	*block,
@@ -6512,5 +6501,4 @@ xfs_bmap_disk_count_leaves(
 		frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
 		*count += xfs_bmbt_disk_get_blockcount(frp);
 	}
-	return 0;
 }
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index aeb87ca69fcc..cc593a84c345 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -46,38 +46,11 @@ kmem_zone_t *xfs_btree_cur_zone;
 /*
  * Btree magic numbers.
  */
-const __uint32_t xfs_magics[XFS_BTNUM_MAX] =
-{
+const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
 	XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
 };
 
 /*
- * Prototypes for internal routines.
- */
-
-/*
- * Checking routine: return maxrecs for the block.
- */
-STATIC int				/* number of records fitting in block */
-xfs_btree_maxrecs(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	xfs_btree_block_t	*block);/* generic btree block pointer */
-
-/*
- * Internal routines.
- */
-
-/*
- * Retrieve the block pointer from the cursor at the given level.
- * This may be a bmap btree root or from a buffer.
- */
-STATIC xfs_btree_block_t *		/* generic btree block pointer */
-xfs_btree_get_block(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	int			level,	/* level in btree */
-	struct xfs_buf		**bpp);	/* buffer containing the block */
-
-/*
  * Checking routine: return maxrecs for the block.
  */
 STATIC int				/* number of records fitting in block */
@@ -457,35 +430,6 @@ xfs_btree_dup_cursor(
 }
 
 /*
- * Change the cursor to point to the first record at the given level.
- * Other levels are unaffected.
- */
-int					/* success=1, failure=0 */
-xfs_btree_firstrec(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	int			level)	/* level to change */
-{
-	xfs_btree_block_t	*block;	/* generic btree block pointer */
-	xfs_buf_t		*bp;	/* buffer containing block */
-
-	/*
-	 * Get the block pointer for this level.
-	 */
-	block = xfs_btree_get_block(cur, level, &bp);
-	xfs_btree_check_block(cur, block, level, bp);
-	/*
-	 * It's empty, there is no such record.
-	 */
-	if (!block->bb_h.bb_numrecs)
-		return 0;
-	/*
-	 * Set the ptr value to 1, that's the first record/key.
-	 */
-	cur->bc_ptrs[level] = 1;
-	return 1;
-}
-
-/*
  * Retrieve the block pointer from the cursor at the given level.
  * This may be a bmap btree root or from a buffer.
  */
@@ -626,6 +570,13 @@ xfs_btree_init_cursor(
 		cur->bc_private.a.agbp = agbp;
 		cur->bc_private.a.agno = agno;
 		break;
+	case XFS_BTNUM_INO:
+		/*
+		 * Inode allocation btree fields.
+		 */
+		cur->bc_private.a.agbp = agbp;
+		cur->bc_private.a.agno = agno;
+		break;
 	case XFS_BTNUM_BMAP:
 		/*
 		 * Bmap btree fields.
@@ -638,13 +589,6 @@ xfs_btree_init_cursor(
 		cur->bc_private.b.flags = 0;
 		cur->bc_private.b.whichfork = whichfork;
 		break;
-	case XFS_BTNUM_INO:
-		/*
-		 * Inode allocation btree fields.
-		 */
-		cur->bc_private.i.agbp = agbp;
-		cur->bc_private.i.agno = agno;
-		break;
 	default:
 		ASSERT(0);
 	}
@@ -671,6 +615,35 @@ xfs_btree_islastblock(
 }
 
 /*
+ * Change the cursor to point to the first record at the given level.
+ * Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_firstrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level)	/* level to change */
+{
+	xfs_btree_block_t	*block;	/* generic btree block pointer */
+	xfs_buf_t		*bp;	/* buffer containing block */
+
+	/*
+	 * Get the block pointer for this level.
+	 */
+	block = xfs_btree_get_block(cur, level, &bp);
+	xfs_btree_check_block(cur, block, level, bp);
+	/*
+	 * It's empty, there is no such record.
+	 */
+	if (!block->bb_h.bb_numrecs)
+		return 0;
+	/*
+	 * Set the ptr value to 1, that's the first record/key.
+	 */
+	cur->bc_ptrs[level] = 1;
+	return 1;
+}
+
+/*
  * Change the cursor to point to the last record in the current block
  * at the given level.  Other levels are unaffected.
  */
@@ -890,12 +863,12 @@ xfs_btree_readahead_core(
 	case XFS_BTNUM_INO:
 		i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
 		if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
-			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
					     be32_to_cpu(i->bb_leftsib), 1);
 			rval++;
 		}
 		if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
-			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
					     be32_to_cpu(i->bb_rightsib), 1);
 			rval++;
 		}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7440b78f9cec..1f528a2a3754 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -158,8 +158,8 @@ typedef struct xfs_btree_cur
 	__uint8_t	bc_blocklog;	/* log2(blocksize) of btree blocks */
 	xfs_btnum_t	bc_btnum;	/* identifies which btree type */
 	union {
-		struct {			/* needed for BNO, CNT */
-			struct xfs_buf	*agbp;	/* agf buffer pointer */
+		struct {			/* needed for BNO, CNT, INO */
+			struct xfs_buf	*agbp;	/* agf/agi buffer pointer */
 			xfs_agnumber_t	agno;	/* ag number */
 		} a;
 		struct {			/* needed for BMAP */
@@ -172,10 +172,6 @@ typedef struct xfs_btree_cur
 			char		flags;		/* flags */
 #define	XFS_BTCUR_BPRV_WASDEL	1			/* was delayed */
 		} b;
-		struct {			/* needed for INO */
-			struct xfs_buf	*agbp;	/* agi buffer pointer */
-			xfs_agnumber_t	agno;	/* ag number */
-		} i;
 	} bc_private;	/* per-btree type data */
 } xfs_btree_cur_t;
 
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d86ca2c03a70..608c30c3f76b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -737,7 +737,7 @@ xfs_buf_item_init(
 	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
 	bip->bli_format.blf_map_size = map_size;
 #ifdef XFS_BLI_TRACE
-	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
+	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
 #endif
 
 #ifdef XFS_TRANS_DEBUG
@@ -1056,7 +1056,7 @@ xfs_buf_iodone_callbacks(
 		   anyway. */
 		XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
 		XFS_BUF_DONE(bp);
-		XFS_BUF_V_IODONESEMA(bp);
+		XFS_BUF_FINISH_IOWAIT(bp);
 	}
 	return;
 }
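
[Annotation, not part of the patch] The KM_SLEEP to KM_NOFS switches in this
patch (here and in xfs_dquot.c) matter because a plain sleeping allocation
may recurse into filesystem reclaim while the caller already holds fs locks;
KM_NOFS masks out __GFP_FS underneath (the GFP_NOFS behavior), which forbids
that re-entry. A hypothetical allocation for comparison:

	#include <linux/slab.h>

	static void *trace_buf_alloc(size_t size)
	{
		/* GFP_NOFS: reclaim may not re-enter the filesystem */
		return kmalloc(size, GFP_NOFS);
	}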
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 2211e885ef24..760f4c5b5160 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -128,10 +128,8 @@ xfs_swap_extents(
128 xfs_swapext_t *sxp) 128 xfs_swapext_t *sxp)
129{ 129{
130 xfs_mount_t *mp; 130 xfs_mount_t *mp;
131 xfs_inode_t *ips[2];
132 xfs_trans_t *tp; 131 xfs_trans_t *tp;
133 xfs_bstat_t *sbp = &sxp->sx_stat; 132 xfs_bstat_t *sbp = &sxp->sx_stat;
134 bhv_vnode_t *vp, *tvp;
135 xfs_ifork_t *tempifp, *ifp, *tifp; 133 xfs_ifork_t *tempifp, *ifp, *tifp;
136 int ilf_fields, tilf_fields; 134 int ilf_fields, tilf_fields;
137 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; 135 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
@@ -150,19 +148,8 @@ xfs_swap_extents(
150 } 148 }
151 149
152 sbp = &sxp->sx_stat; 150 sbp = &sxp->sx_stat;
153 vp = XFS_ITOV(ip);
154 tvp = XFS_ITOV(tip);
155
156 /* Lock in i_ino order */
157 if (ip->i_ino < tip->i_ino) {
158 ips[0] = ip;
159 ips[1] = tip;
160 } else {
161 ips[0] = tip;
162 ips[1] = ip;
163 }
164 151
165 xfs_lock_inodes(ips, 2, lock_flags); 152 xfs_lock_two_inodes(ip, tip, lock_flags);
166 locked = 1; 153 locked = 1;
167 154
168 /* Verify that both files have the same format */ 155 /* Verify that both files have the same format */
@@ -184,7 +171,7 @@ xfs_swap_extents(
184 goto error0; 171 goto error0;
185 } 172 }
186 173
187 if (VN_CACHED(tvp) != 0) { 174 if (VN_CACHED(VFS_I(tip)) != 0) {
188 xfs_inval_cached_trace(tip, 0, -1, 0, -1); 175 xfs_inval_cached_trace(tip, 0, -1, 0, -1);
189 error = xfs_flushinval_pages(tip, 0, -1, 176 error = xfs_flushinval_pages(tip, 0, -1,
190 FI_REMAPF_LOCKED); 177 FI_REMAPF_LOCKED);
@@ -193,7 +180,7 @@ xfs_swap_extents(
193 } 180 }
194 181
195 /* Verify O_DIRECT for ftmp */ 182 /* Verify O_DIRECT for ftmp */
196 if (VN_CACHED(tvp) != 0) { 183 if (VN_CACHED(VFS_I(tip)) != 0) {
197 error = XFS_ERROR(EINVAL); 184 error = XFS_ERROR(EINVAL);
198 goto error0; 185 goto error0;
199 } 186 }
@@ -237,7 +224,7 @@ xfs_swap_extents(
237 * vop_read (or write in the case of autogrow) they block on the iolock 224 * vop_read (or write in the case of autogrow) they block on the iolock
238 * until we have switched the extents. 225 * until we have switched the extents.
239 */ 226 */
240 if (VN_MAPPED(vp)) { 227 if (VN_MAPPED(VFS_I(ip))) {
241 error = XFS_ERROR(EBUSY); 228 error = XFS_ERROR(EBUSY);
242 goto error0; 229 goto error0;
243 } 230 }
@@ -265,7 +252,7 @@ xfs_swap_extents(
265 locked = 0; 252 locked = 0;
266 goto error0; 253 goto error0;
267 } 254 }
268 xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); 255 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
269 256
270 /* 257 /*
271 * Count the number of extended attribute blocks 258 * Count the number of extended attribute blocks
@@ -350,15 +337,11 @@ xfs_swap_extents(
350 break; 337 break;
351 } 338 }
352 339
353 /*
354 * Increment vnode ref counts since xfs_trans_commit &
355 * xfs_trans_cancel will both unlock the inodes and
356 * decrement the associated ref counts.
357 */
358 VN_HOLD(vp);
359 VN_HOLD(tvp);
360 340
341 IHOLD(ip);
361 xfs_trans_ijoin(tp, ip, lock_flags); 342 xfs_trans_ijoin(tp, ip, lock_flags);
343
344 IHOLD(tip);
362 xfs_trans_ijoin(tp, tip, lock_flags); 345 xfs_trans_ijoin(tp, tip, lock_flags);
363 346
364 xfs_trans_log_inode(tp, ip, ilf_fields); 347 xfs_trans_log_inode(tp, ip, ilf_fields);
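
The removed lines above open-coded the i_ino comparison at each call site; xfs_lock_two_inodes() centralizes that ordering. A sketch of the idea, assuming the helper simply sorts by inode number before taking the locks (a simplification of the real routine added elsewhere in this series):

	/*
	 * Sketch: always lock the lower-numbered inode first, so two tasks
	 * operating on the same pair can never deadlock ABBA-style.
	 */
	static void
	sketch_lock_two_inodes(xfs_inode_t *ip0, xfs_inode_t *ip1, uint lock_flags)
	{
		xfs_inode_t	*tmp;

		if (ip0->i_ino > ip1->i_ino) {
			tmp = ip0;
			ip0 = ip1;
			ip1 = tmp;
		}
		xfs_ilock(ip0, lock_flags);
		xfs_ilock(ip1, lock_flags);
	}
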
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index f66756cfb5e8..f227ecd1a294 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,9 +58,6 @@ xfs_error_trap(int e)
58 } 58 }
59 return e; 59 return e;
60} 60}
61#endif
62
63#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
64 61
65int xfs_etest[XFS_NUM_INJECT_ERROR]; 62int xfs_etest[XFS_NUM_INJECT_ERROR];
66int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; 63int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
@@ -154,7 +151,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
154 151
155 return 0; 152 return 0;
156} 153}
157#endif /* DEBUG || INDUCE_IO_ERROR */ 154#endif /* DEBUG */
158 155
159static void 156static void
160xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) 157xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap)
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index d8559d132efa..11543f10b0c6 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -125,22 +125,14 @@ extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
125#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10) 125#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
126#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT 126#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
127 127
128#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) 128#ifdef DEBUG
129extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); 129extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
130 130
131#define XFS_NUM_INJECT_ERROR 10 131#define XFS_NUM_INJECT_ERROR 10
132
133#ifdef __ANSI_CPP__
134#define XFS_TEST_ERROR(expr, mp, tag, rf) \
135 ((expr) || \
136 xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \
137 (rf)))
138#else
139#define XFS_TEST_ERROR(expr, mp, tag, rf) \ 132#define XFS_TEST_ERROR(expr, mp, tag, rf) \
140 ((expr) || \ 133 ((expr) || \
141 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ 134 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
142 (rf))) 135 (rf)))
143#endif /* __ANSI_CPP__ */
144 136
145extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); 137extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
146extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); 138extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
@@ -148,7 +140,7 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
148#define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) 140#define XFS_TEST_ERROR(expr, mp, tag, rf) (expr)
149#define xfs_errortag_add(tag, mp) (ENOSYS) 141#define xfs_errortag_add(tag, mp) (ENOSYS)
150#define xfs_errortag_clearall(mp, loud) (ENOSYS) 142#define xfs_errortag_clearall(mp, loud) (ENOSYS)
151#endif /* (DEBUG || INDUCE_IO_ERROR) */ 143#endif /* DEBUG */
152 144
153/* 145/*
154 * XFS panic tags -- allow a call to xfs_cmn_err() be turned into 146 * XFS panic tags -- allow a call to xfs_cmn_err() be turned into
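
With the __ANSI_CPP__ branch removed, XFS_TEST_ERROR() has a single definition under DEBUG. A usage sketch (the call site below is illustrative, not part of this patch): when injection is armed for the tag on this filesystem, the macro reports an error even though the real corruption test is false.

	/* Illustrative call site using tags defined in xfs_error.h. */
	if (XFS_TEST_ERROR(be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC,
			   mp, XFS_ERRTAG_IALLOC_READ_AGI,
			   XFS_RANDOM_IALLOC_READ_AGI))
		return XFS_ERROR(EFSCORRUPTED);
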
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index c38fd14fca29..f3bb75da384e 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -400,7 +400,7 @@ xfs_filestream_init(void)
400 if (!item_zone) 400 if (!item_zone)
401 return -ENOMEM; 401 return -ENOMEM;
402#ifdef XFS_FILESTREAMS_TRACE 402#ifdef XFS_FILESTREAMS_TRACE
403 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP); 403 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
404#endif 404#endif
405 return 0; 405 return 0;
406} 406}
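
The KM_SLEEP-to-KM_NOFS conversions scattered through this diff keep allocations made in filesystem context (trace buffers, extent lists) from recursing back into the filesystem during memory reclaim. Roughly, the flag translates as in this simplified sketch of the XFS kmem flag conversion (the real kmem_flags_convert() handles more cases):

	#include <linux/gfp.h>

	/* Simplified sketch only; KM_* flags come from the XFS kmem header. */
	static inline gfp_t
	sketch_kmem_flags_convert(unsigned int km_flags)
	{
		if (km_flags & KM_NOFS)
			return GFP_NOFS;	/* may sleep, but never re-enters the fs */
		return GFP_KERNEL;		/* may sleep and may write back via the fs */
	}
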
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index e5310c90e50f..83502f3edef0 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -181,7 +181,7 @@ xfs_inobt_delrec(
181 * then we can get rid of this level. 181 * then we can get rid of this level.
182 */ 182 */
183 if (numrecs == 1 && level > 0) { 183 if (numrecs == 1 && level > 0) {
184 agbp = cur->bc_private.i.agbp; 184 agbp = cur->bc_private.a.agbp;
185 agi = XFS_BUF_TO_AGI(agbp); 185 agi = XFS_BUF_TO_AGI(agbp);
186 /* 186 /*
187 * pp is still set to the first pointer in the block. 187 * pp is still set to the first pointer in the block.
@@ -194,7 +194,7 @@ xfs_inobt_delrec(
194 * Free the block. 194 * Free the block.
195 */ 195 */
196 if ((error = xfs_free_extent(cur->bc_tp, 196 if ((error = xfs_free_extent(cur->bc_tp,
197 XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1))) 197 XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1)))
198 return error; 198 return error;
199 xfs_trans_binval(cur->bc_tp, bp); 199 xfs_trans_binval(cur->bc_tp, bp);
200 xfs_ialloc_log_agi(cur->bc_tp, agbp, 200 xfs_ialloc_log_agi(cur->bc_tp, agbp,
@@ -379,7 +379,7 @@ xfs_inobt_delrec(
379 rrecs = be16_to_cpu(right->bb_numrecs); 379 rrecs = be16_to_cpu(right->bb_numrecs);
380 rbp = bp; 380 rbp = bp;
381 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, 381 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
382 cur->bc_private.i.agno, lbno, 0, &lbp, 382 cur->bc_private.a.agno, lbno, 0, &lbp,
383 XFS_INO_BTREE_REF))) 383 XFS_INO_BTREE_REF)))
384 return error; 384 return error;
385 left = XFS_BUF_TO_INOBT_BLOCK(lbp); 385 left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -401,7 +401,7 @@ xfs_inobt_delrec(
401 lrecs = be16_to_cpu(left->bb_numrecs); 401 lrecs = be16_to_cpu(left->bb_numrecs);
402 lbp = bp; 402 lbp = bp;
403 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, 403 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
404 cur->bc_private.i.agno, rbno, 0, &rbp, 404 cur->bc_private.a.agno, rbno, 0, &rbp,
405 XFS_INO_BTREE_REF))) 405 XFS_INO_BTREE_REF)))
406 return error; 406 return error;
407 right = XFS_BUF_TO_INOBT_BLOCK(rbp); 407 right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -484,7 +484,7 @@ xfs_inobt_delrec(
484 xfs_buf_t *rrbp; 484 xfs_buf_t *rrbp;
485 485
486 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, 486 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
487 cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0, 487 cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
488 &rrbp, XFS_INO_BTREE_REF))) 488 &rrbp, XFS_INO_BTREE_REF)))
489 return error; 489 return error;
490 rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); 490 rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
@@ -497,7 +497,7 @@ xfs_inobt_delrec(
497 * Free the deleting block. 497 * Free the deleting block.
498 */ 498 */
499 if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp, 499 if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp,
500 cur->bc_private.i.agno, rbno), 1))) 500 cur->bc_private.a.agno, rbno), 1)))
501 return error; 501 return error;
502 xfs_trans_binval(cur->bc_tp, rbp); 502 xfs_trans_binval(cur->bc_tp, rbp);
503 /* 503 /*
@@ -854,7 +854,7 @@ xfs_inobt_lookup(
854 { 854 {
855 xfs_agi_t *agi; /* a.g. inode header */ 855 xfs_agi_t *agi; /* a.g. inode header */
856 856
857 agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); 857 agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
858 agno = be32_to_cpu(agi->agi_seqno); 858 agno = be32_to_cpu(agi->agi_seqno);
859 agbno = be32_to_cpu(agi->agi_root); 859 agbno = be32_to_cpu(agi->agi_root);
860 } 860 }
@@ -1089,7 +1089,7 @@ xfs_inobt_lshift(
1089 * Set up the left neighbor as "left". 1089 * Set up the left neighbor as "left".
1090 */ 1090 */
1091 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, 1091 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
1092 cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib), 1092 cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
1093 0, &lbp, XFS_INO_BTREE_REF))) 1093 0, &lbp, XFS_INO_BTREE_REF)))
1094 return error; 1094 return error;
1095 left = XFS_BUF_TO_INOBT_BLOCK(lbp); 1095 left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -1207,10 +1207,10 @@ xfs_inobt_newroot(
1207 /* 1207 /*
1208 * Get a block & a buffer. 1208 * Get a block & a buffer.
1209 */ 1209 */
1210 agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); 1210 agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
1211 args.tp = cur->bc_tp; 1211 args.tp = cur->bc_tp;
1212 args.mp = cur->bc_mp; 1212 args.mp = cur->bc_mp;
1213 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, 1213 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno,
1214 be32_to_cpu(agi->agi_root)); 1214 be32_to_cpu(agi->agi_root));
1215 args.mod = args.minleft = args.alignment = args.total = args.wasdel = 1215 args.mod = args.minleft = args.alignment = args.total = args.wasdel =
1216 args.isfl = args.userdata = args.minalignslop = 0; 1216 args.isfl = args.userdata = args.minalignslop = 0;
@@ -1233,7 +1233,7 @@ xfs_inobt_newroot(
1233 */ 1233 */
1234 agi->agi_root = cpu_to_be32(args.agbno); 1234 agi->agi_root = cpu_to_be32(args.agbno);
1235 be32_add_cpu(&agi->agi_level, 1); 1235 be32_add_cpu(&agi->agi_level, 1);
1236 xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, 1236 xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp,
1237 XFS_AGI_ROOT | XFS_AGI_LEVEL); 1237 XFS_AGI_ROOT | XFS_AGI_LEVEL);
1238 /* 1238 /*
1239 * At the previous root level there are now two blocks: the old 1239 * At the previous root level there are now two blocks: the old
@@ -1376,7 +1376,7 @@ xfs_inobt_rshift(
1376 * Set up the right neighbor as "right". 1376 * Set up the right neighbor as "right".
1377 */ 1377 */
1378 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, 1378 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
1379 cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 1379 cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
1380 0, &rbp, XFS_INO_BTREE_REF))) 1380 0, &rbp, XFS_INO_BTREE_REF)))
1381 return error; 1381 return error;
1382 right = XFS_BUF_TO_INOBT_BLOCK(rbp); 1382 right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -1492,7 +1492,7 @@ xfs_inobt_split(
1492 * Allocate the new block. 1492 * Allocate the new block.
1493 * If we can't do it, we're toast. Give up. 1493 * If we can't do it, we're toast. Give up.
1494 */ 1494 */
1495 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno); 1495 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno);
1496 args.mod = args.minleft = args.alignment = args.total = args.wasdel = 1496 args.mod = args.minleft = args.alignment = args.total = args.wasdel =
1497 args.isfl = args.userdata = args.minalignslop = 0; 1497 args.isfl = args.userdata = args.minalignslop = 0;
1498 args.minlen = args.maxlen = args.prod = 1; 1498 args.minlen = args.maxlen = args.prod = 1;
@@ -1725,7 +1725,7 @@ xfs_inobt_decrement(
1725 1725
1726 agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); 1726 agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
1727 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, 1727 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
1728 cur->bc_private.i.agno, agbno, 0, &bp, 1728 cur->bc_private.a.agno, agbno, 0, &bp,
1729 XFS_INO_BTREE_REF))) 1729 XFS_INO_BTREE_REF)))
1730 return error; 1730 return error;
1731 lev--; 1731 lev--;
@@ -1897,7 +1897,7 @@ xfs_inobt_increment(
1897 1897
1898 agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); 1898 agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
1899 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, 1899 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
1900 cur->bc_private.i.agno, agbno, 0, &bp, 1900 cur->bc_private.a.agno, agbno, 0, &bp,
1901 XFS_INO_BTREE_REF))) 1901 XFS_INO_BTREE_REF)))
1902 return error; 1902 return error;
1903 lev--; 1903 lev--;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b07604b94d9f..e229e9e001c2 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -216,7 +216,14 @@ finish_inode:
216 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 216 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
217 init_waitqueue_head(&ip->i_ipin_wait); 217 init_waitqueue_head(&ip->i_ipin_wait);
218 atomic_set(&ip->i_pincount, 0); 218 atomic_set(&ip->i_pincount, 0);
219 initnsema(&ip->i_flock, 1, "xfsfino"); 219
220 /*
221 * Because we want to use a counting completion, complete
222 * the flush completion once to allow a single access to
223 * the flush completion without blocking.
224 */
225 init_completion(&ip->i_flush);
226 complete(&ip->i_flush);
220 227
221 if (lock_flags) 228 if (lock_flags)
222 xfs_ilock(ip, lock_flags); 229 xfs_ilock(ip, lock_flags);
@@ -288,10 +295,17 @@ finish_inode:
288 *ipp = ip; 295 *ipp = ip;
289 296
290 /* 297 /*
 298 * Set up the XFS inode with the Linux inode.

299 */
300 ip->i_vnode = inode;
301 inode->i_private = ip;
302
303 /*
291 * If we have a real type for an on-disk inode, we can set ops(&unlock) 304 * If we have a real type for an on-disk inode, we can set ops(&unlock)
292 * now. If it's a new inode being created, xfs_ialloc will handle it. 305 * now. If it's a new inode being created, xfs_ialloc will handle it.
293 */ 306 */
294 xfs_initialize_vnode(mp, inode, ip); 307 if (ip->i_d.di_mode != 0)
308 xfs_setup_inode(ip);
295 return 0; 309 return 0;
296} 310}
297 311
@@ -411,10 +425,11 @@ xfs_iput(xfs_inode_t *ip,
411 * Special iput for brand-new inodes that are still locked 425 * Special iput for brand-new inodes that are still locked
412 */ 426 */
413void 427void
414xfs_iput_new(xfs_inode_t *ip, 428xfs_iput_new(
415 uint lock_flags) 429 xfs_inode_t *ip,
430 uint lock_flags)
416{ 431{
417 struct inode *inode = ip->i_vnode; 432 struct inode *inode = VFS_I(ip);
418 433
419 xfs_itrace_entry(ip); 434 xfs_itrace_entry(ip);
420 435
@@ -775,26 +790,3 @@ xfs_isilocked(
775} 790}
776#endif 791#endif
777 792
778/*
779 * The following three routines simply manage the i_flock
780 * semaphore embedded in the inode. This semaphore synchronizes
781 * processes attempting to flush the in-core inode back to disk.
782 */
783void
784xfs_iflock(xfs_inode_t *ip)
785{
786 psema(&(ip->i_flock), PINOD|PLTWAIT);
787}
788
789int
790xfs_iflock_nowait(xfs_inode_t *ip)
791{
792 return (cpsema(&(ip->i_flock)));
793}
794
795void
796xfs_ifunlock(xfs_inode_t *ip)
797{
798 ASSERT(issemalocked(&(ip->i_flock)));
799 vsema(&(ip->i_flock));
800}
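
The comment in the xfs_iget.c hunk above describes the trick: completing the completion once at init time leaves one "token" outstanding, so the flush lock starts out unlocked. A standalone sketch of the resulting lock/unlock discipline, matching the inline helpers added to xfs_inode.h later in this diff (names and placement here are illustrative):

	#include <linux/completion.h>

	static struct completion flush;

	static void flush_lock_demo(void)
	{
		init_completion(&flush);	/* count 0 would mean "locked" */
		complete(&flush);		/* post one token: lock starts free */

		wait_for_completion(&flush);	/* xfs_iflock(): consume the token */
		/* ... flush the in-core inode back to disk ... */
		complete(&flush);		/* xfs_ifunlock(): wake the next flusher */
	}
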
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index bedc66163176..00e80df9dd9d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -580,8 +580,8 @@ xfs_iformat_extents(
580 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); 580 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
581 for (i = 0; i < nex; i++, dp++) { 581 for (i = 0; i < nex; i++, dp++) {
582 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 582 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
583 ep->l0 = be64_to_cpu(get_unaligned(&dp->l0)); 583 ep->l0 = get_unaligned_be64(&dp->l0);
584 ep->l1 = be64_to_cpu(get_unaligned(&dp->l1)); 584 ep->l1 = get_unaligned_be64(&dp->l1);
585 } 585 }
586 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); 586 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
587 if (whichfork != XFS_DATA_FORK || 587 if (whichfork != XFS_DATA_FORK ||
@@ -835,22 +835,22 @@ xfs_iread(
835 * Do this before xfs_iformat in case it adds entries. 835 * Do this before xfs_iformat in case it adds entries.
836 */ 836 */
837#ifdef XFS_INODE_TRACE 837#ifdef XFS_INODE_TRACE
838 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP); 838 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
839#endif 839#endif
840#ifdef XFS_BMAP_TRACE 840#ifdef XFS_BMAP_TRACE
841 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); 841 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
842#endif 842#endif
843#ifdef XFS_BMBT_TRACE 843#ifdef XFS_BMBT_TRACE
844 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP); 844 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
845#endif 845#endif
846#ifdef XFS_RW_TRACE 846#ifdef XFS_RW_TRACE
847 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP); 847 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
848#endif 848#endif
849#ifdef XFS_ILOCK_TRACE 849#ifdef XFS_ILOCK_TRACE
850 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP); 850 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
851#endif 851#endif
852#ifdef XFS_DIR2_TRACE 852#ifdef XFS_DIR2_TRACE
853 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP); 853 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
854#endif 854#endif
855 855
856 /* 856 /*
@@ -1046,9 +1046,9 @@ xfs_ialloc(
1046{ 1046{
1047 xfs_ino_t ino; 1047 xfs_ino_t ino;
1048 xfs_inode_t *ip; 1048 xfs_inode_t *ip;
1049 bhv_vnode_t *vp;
1050 uint flags; 1049 uint flags;
1051 int error; 1050 int error;
1051 timespec_t tv;
1052 1052
1053 /* 1053 /*
1054 * Call the space management code to pick 1054 * Call the space management code to pick
@@ -1077,13 +1077,12 @@ xfs_ialloc(
1077 } 1077 }
1078 ASSERT(ip != NULL); 1078 ASSERT(ip != NULL);
1079 1079
1080 vp = XFS_ITOV(ip);
1081 ip->i_d.di_mode = (__uint16_t)mode; 1080 ip->i_d.di_mode = (__uint16_t)mode;
1082 ip->i_d.di_onlink = 0; 1081 ip->i_d.di_onlink = 0;
1083 ip->i_d.di_nlink = nlink; 1082 ip->i_d.di_nlink = nlink;
1084 ASSERT(ip->i_d.di_nlink == nlink); 1083 ASSERT(ip->i_d.di_nlink == nlink);
1085 ip->i_d.di_uid = current_fsuid(cr); 1084 ip->i_d.di_uid = current_fsuid();
1086 ip->i_d.di_gid = current_fsgid(cr); 1085 ip->i_d.di_gid = current_fsgid();
1087 ip->i_d.di_projid = prid; 1086 ip->i_d.di_projid = prid;
1088 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 1087 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1089 1088
@@ -1130,7 +1129,13 @@ xfs_ialloc(
1130 ip->i_size = 0; 1129 ip->i_size = 0;
1131 ip->i_d.di_nextents = 0; 1130 ip->i_d.di_nextents = 0;
1132 ASSERT(ip->i_d.di_nblocks == 0); 1131 ASSERT(ip->i_d.di_nblocks == 0);
1133 xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD); 1132
1133 nanotime(&tv);
1134 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
1135 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
1136 ip->i_d.di_atime = ip->i_d.di_mtime;
1137 ip->i_d.di_ctime = ip->i_d.di_mtime;
1138
1134 /* 1139 /*
1135 * di_gen will have been taken care of in xfs_iread. 1140 * di_gen will have been taken care of in xfs_iread.
1136 */ 1141 */
@@ -1220,7 +1225,7 @@ xfs_ialloc(
1220 xfs_trans_log_inode(tp, ip, flags); 1225 xfs_trans_log_inode(tp, ip, flags);
1221 1226
1222 /* now that we have an i_mode we can setup inode ops and unlock */ 1227 /* now that we have an i_mode we can setup inode ops and unlock */
1223 xfs_initialize_vnode(tp->t_mountp, vp, ip); 1228 xfs_setup_inode(ip);
1224 1229
1225 *ipp = ip; 1230 *ipp = ip;
1226 return 0; 1231 return 0;
@@ -1399,7 +1404,6 @@ xfs_itruncate_start(
1399 xfs_fsize_t last_byte; 1404 xfs_fsize_t last_byte;
1400 xfs_off_t toss_start; 1405 xfs_off_t toss_start;
1401 xfs_mount_t *mp; 1406 xfs_mount_t *mp;
1402 bhv_vnode_t *vp;
1403 int error = 0; 1407 int error = 0;
1404 1408
1405 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1409 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
@@ -1408,7 +1412,6 @@ xfs_itruncate_start(
1408 (flags == XFS_ITRUNC_MAYBE)); 1412 (flags == XFS_ITRUNC_MAYBE));
1409 1413
1410 mp = ip->i_mount; 1414 mp = ip->i_mount;
1411 vp = XFS_ITOV(ip);
1412 1415
1413 /* wait for the completion of any pending DIOs */ 1416 /* wait for the completion of any pending DIOs */
1414 if (new_size < ip->i_size) 1417 if (new_size < ip->i_size)
@@ -1457,7 +1460,7 @@ xfs_itruncate_start(
1457 1460
1458#ifdef DEBUG 1461#ifdef DEBUG
1459 if (new_size == 0) { 1462 if (new_size == 0) {
1460 ASSERT(VN_CACHED(vp) == 0); 1463 ASSERT(VN_CACHED(VFS_I(ip)) == 0);
1461 } 1464 }
1462#endif 1465#endif
1463 return error; 1466 return error;
@@ -2630,7 +2633,6 @@ xfs_idestroy(
2630 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 2633 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2631 mrfree(&ip->i_lock); 2634 mrfree(&ip->i_lock);
2632 mrfree(&ip->i_iolock); 2635 mrfree(&ip->i_iolock);
2633 freesema(&ip->i_flock);
2634 2636
2635#ifdef XFS_INODE_TRACE 2637#ifdef XFS_INODE_TRACE
2636 ktrace_free(ip->i_trace); 2638 ktrace_free(ip->i_trace);
@@ -3048,10 +3050,10 @@ cluster_corrupt_out:
3048/* 3050/*
3049 * xfs_iflush() will write a modified inode's changes out to the 3051 * xfs_iflush() will write a modified inode's changes out to the
3050 * inode's on disk home. The caller must have the inode lock held 3052 * inode's on disk home. The caller must have the inode lock held
3051 * in at least shared mode and the inode flush semaphore must be 3053 * in at least shared mode and the inode flush completion must be
3052 * held as well. The inode lock will still be held upon return from 3054 * active as well. The inode lock will still be held upon return from
3053 * the call and the caller is free to unlock it. 3055 * the call and the caller is free to unlock it.
3054 * The inode flush lock will be unlocked when the inode reaches the disk. 3056 * The inode flush will be completed when the inode reaches the disk.
3055 * The flags indicate how the inode's buffer should be written out. 3057 * The flags indicate how the inode's buffer should be written out.
3056 */ 3058 */
3057int 3059int
@@ -3070,7 +3072,7 @@ xfs_iflush(
3070 XFS_STATS_INC(xs_iflush_count); 3072 XFS_STATS_INC(xs_iflush_count);
3071 3073
3072 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3074 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3073 ASSERT(issemalocked(&(ip->i_flock))); 3075 ASSERT(!completion_done(&ip->i_flush));
3074 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3076 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3075 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3077 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3076 3078
@@ -3233,7 +3235,7 @@ xfs_iflush_int(
3233#endif 3235#endif
3234 3236
3235 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3237 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3236 ASSERT(issemalocked(&(ip->i_flock))); 3238 ASSERT(!completion_done(&ip->i_flush));
3237 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3239 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3238 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3240 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3239 3241
@@ -3465,7 +3467,6 @@ xfs_iflush_all(
3465 xfs_mount_t *mp) 3467 xfs_mount_t *mp)
3466{ 3468{
3467 xfs_inode_t *ip; 3469 xfs_inode_t *ip;
3468 bhv_vnode_t *vp;
3469 3470
3470 again: 3471 again:
3471 XFS_MOUNT_ILOCK(mp); 3472 XFS_MOUNT_ILOCK(mp);
@@ -3480,14 +3481,13 @@ xfs_iflush_all(
3480 continue; 3481 continue;
3481 } 3482 }
3482 3483
3483 vp = XFS_ITOV_NULL(ip); 3484 if (!VFS_I(ip)) {
3484 if (!vp) {
3485 XFS_MOUNT_IUNLOCK(mp); 3485 XFS_MOUNT_IUNLOCK(mp);
3486 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); 3486 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3487 goto again; 3487 goto again;
3488 } 3488 }
3489 3489
3490 ASSERT(vn_count(vp) == 0); 3490 ASSERT(vn_count(VFS_I(ip)) == 0);
3491 3491
3492 ip = ip->i_mnext; 3492 ip = ip->i_mnext;
3493 } while (ip != mp->m_inodes); 3493 } while (ip != mp->m_inodes);
@@ -3707,7 +3707,7 @@ xfs_iext_add_indirect_multi(
3707 * (all extents past */ 3707 * (all extents past */
3708 if (nex2) { 3708 if (nex2) {
3709 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3709 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3710 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); 3710 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3711 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3711 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3712 erp->er_extcount -= nex2; 3712 erp->er_extcount -= nex2;
3713 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3713 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
@@ -4007,8 +4007,7 @@ xfs_iext_realloc_direct(
4007 ifp->if_u1.if_extents = 4007 ifp->if_u1.if_extents =
4008 kmem_realloc(ifp->if_u1.if_extents, 4008 kmem_realloc(ifp->if_u1.if_extents,
4009 rnew_size, 4009 rnew_size,
4010 ifp->if_real_bytes, 4010 ifp->if_real_bytes, KM_NOFS);
4011 KM_SLEEP);
4012 } 4011 }
4013 if (rnew_size > ifp->if_real_bytes) { 4012 if (rnew_size > ifp->if_real_bytes) {
4014 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 4013 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
@@ -4067,7 +4066,7 @@ xfs_iext_inline_to_direct(
4067 xfs_ifork_t *ifp, /* inode fork pointer */ 4066 xfs_ifork_t *ifp, /* inode fork pointer */
4068 int new_size) /* number of extents in file */ 4067 int new_size) /* number of extents in file */
4069{ 4068{
4070 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP); 4069 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
4071 memset(ifp->if_u1.if_extents, 0, new_size); 4070 memset(ifp->if_u1.if_extents, 0, new_size);
4072 if (ifp->if_bytes) { 4071 if (ifp->if_bytes) {
4073 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 4072 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
@@ -4099,7 +4098,7 @@ xfs_iext_realloc_indirect(
4099 } else { 4098 } else {
4100 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 4099 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4101 kmem_realloc(ifp->if_u1.if_ext_irec, 4100 kmem_realloc(ifp->if_u1.if_ext_irec,
4102 new_size, size, KM_SLEEP); 4101 new_size, size, KM_NOFS);
4103 } 4102 }
4104} 4103}
4105 4104
@@ -4341,11 +4340,10 @@ xfs_iext_irec_init(
4341 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4340 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4342 ASSERT(nextents <= XFS_LINEAR_EXTS); 4341 ASSERT(nextents <= XFS_LINEAR_EXTS);
4343 4342
4344 erp = (xfs_ext_irec_t *) 4343 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
4345 kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4346 4344
4347 if (nextents == 0) { 4345 if (nextents == 0) {
4348 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4346 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4349 } else if (!ifp->if_real_bytes) { 4347 } else if (!ifp->if_real_bytes) {
4350 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 4348 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4351 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 4349 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
@@ -4393,7 +4391,7 @@ xfs_iext_irec_new(
4393 4391
4394 /* Initialize new extent record */ 4392 /* Initialize new extent record */
4395 erp = ifp->if_u1.if_ext_irec; 4393 erp = ifp->if_u1.if_ext_irec;
4396 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4394 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4397 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4395 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4398 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 4396 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4399 erp[erp_idx].er_extcount = 0; 4397 erp[erp_idx].er_extcount = 0;
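
Earlier in this file, xfs_iformat_extents() collapses be64_to_cpu(get_unaligned(...)) into get_unaligned_be64(), which performs the unaligned load and the byte swap in one helper. Conceptually it behaves like the sketch below (this is not the real implementation, which is per-architecture):

	#include <linux/string.h>
	#include <asm/byteorder.h>

	/* Conceptual expansion only. */
	static inline __uint64_t
	sketch_get_unaligned_be64(const void *p)
	{
		__uint64_t v;

		memcpy(&v, p, sizeof(v));	/* byte-wise load: safe at any alignment */
		return be64_to_cpu(v);		/* on-disk big-endian to CPU order */
	}
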
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 17a04b6321ed..1420c49674d7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -87,8 +87,7 @@ typedef struct xfs_ifork {
87 * Flags for xfs_ichgtime(). 87 * Flags for xfs_ichgtime().
88 */ 88 */
89#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ 89#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
90#define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */ 90#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
91#define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */
92 91
93/* 92/*
94 * Per-fork incore inode flags. 93 * Per-fork incore inode flags.
@@ -204,7 +203,7 @@ typedef struct xfs_inode {
204 struct xfs_inode *i_mprev; /* ptr to prev inode */ 203 struct xfs_inode *i_mprev; /* ptr to prev inode */
205 struct xfs_mount *i_mount; /* fs mount struct ptr */ 204 struct xfs_mount *i_mount; /* fs mount struct ptr */
206 struct list_head i_reclaim; /* reclaim list */ 205 struct list_head i_reclaim; /* reclaim list */
207 bhv_vnode_t *i_vnode; /* vnode backpointer */ 206 struct inode *i_vnode; /* vnode backpointer */
208 struct xfs_dquot *i_udquot; /* user dquot */ 207 struct xfs_dquot *i_udquot; /* user dquot */
209 struct xfs_dquot *i_gdquot; /* group dquot */ 208 struct xfs_dquot *i_gdquot; /* group dquot */
210 209
@@ -223,7 +222,7 @@ typedef struct xfs_inode {
223 struct xfs_inode_log_item *i_itemp; /* logging information */ 222 struct xfs_inode_log_item *i_itemp; /* logging information */
224 mrlock_t i_lock; /* inode lock */ 223 mrlock_t i_lock; /* inode lock */
225 mrlock_t i_iolock; /* inode IO lock */ 224 mrlock_t i_iolock; /* inode IO lock */
226 sema_t i_flock; /* inode flush lock */ 225 struct completion i_flush; /* inode flush completion q */
227 atomic_t i_pincount; /* inode pin count */ 226 atomic_t i_pincount; /* inode pin count */
228 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ 227 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
229 spinlock_t i_flags_lock; /* inode i_flags lock */ 228 spinlock_t i_flags_lock; /* inode i_flags lock */
@@ -263,6 +262,18 @@ typedef struct xfs_inode {
263#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ 262#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
264 (ip)->i_size : (ip)->i_d.di_size; 263 (ip)->i_size : (ip)->i_d.di_size;
265 264
265/* Convert from vfs inode to xfs inode */
266static inline struct xfs_inode *XFS_I(struct inode *inode)
267{
268 return (struct xfs_inode *)inode->i_private;
269}
270
271/* convert from xfs inode to vfs inode */
272static inline struct inode *VFS_I(struct xfs_inode *ip)
273{
274 return (struct inode *)ip->i_vnode;
275}
276
266/* 277/*
267 * i_flags helper functions 278 * i_flags helper functions
268 */ 279 */
@@ -439,9 +450,6 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
439#define XFS_ITRUNC_DEFINITE 0x1 450#define XFS_ITRUNC_DEFINITE 0x1
440#define XFS_ITRUNC_MAYBE 0x2 451#define XFS_ITRUNC_MAYBE 0x2
441 452
442#define XFS_ITOV(ip) ((ip)->i_vnode)
443#define XFS_ITOV_NULL(ip) ((ip)->i_vnode)
444
445/* 453/*
446 * For multiple groups support: if S_ISGID bit is set in the parent 454 * For multiple groups support: if S_ISGID bit is set in the parent
447 * directory, group of new file is set to that of the parent, and 455 * directory, group of new file is set to that of the parent, and
@@ -473,11 +481,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint);
473void xfs_iunlock(xfs_inode_t *, uint); 481void xfs_iunlock(xfs_inode_t *, uint);
474void xfs_ilock_demote(xfs_inode_t *, uint); 482void xfs_ilock_demote(xfs_inode_t *, uint);
475int xfs_isilocked(xfs_inode_t *, uint); 483int xfs_isilocked(xfs_inode_t *, uint);
476void xfs_iflock(xfs_inode_t *);
477int xfs_iflock_nowait(xfs_inode_t *);
478uint xfs_ilock_map_shared(xfs_inode_t *); 484uint xfs_ilock_map_shared(xfs_inode_t *);
479void xfs_iunlock_map_shared(xfs_inode_t *, uint); 485void xfs_iunlock_map_shared(xfs_inode_t *, uint);
480void xfs_ifunlock(xfs_inode_t *);
481void xfs_ireclaim(xfs_inode_t *); 486void xfs_ireclaim(xfs_inode_t *);
482int xfs_finish_reclaim(xfs_inode_t *, int, int); 487int xfs_finish_reclaim(xfs_inode_t *, int, int);
483int xfs_finish_reclaim_all(struct xfs_mount *, int); 488int xfs_finish_reclaim_all(struct xfs_mount *, int);
@@ -522,6 +527,7 @@ void xfs_iflush_all(struct xfs_mount *);
522void xfs_ichgtime(xfs_inode_t *, int); 527void xfs_ichgtime(xfs_inode_t *, int);
523xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); 528xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
524void xfs_lock_inodes(xfs_inode_t **, int, uint); 529void xfs_lock_inodes(xfs_inode_t **, int, uint);
530void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
525 531
526void xfs_synchronize_atime(xfs_inode_t *); 532void xfs_synchronize_atime(xfs_inode_t *);
527void xfs_mark_inode_dirty_sync(xfs_inode_t *); 533void xfs_mark_inode_dirty_sync(xfs_inode_t *);
@@ -570,6 +576,26 @@ extern struct kmem_zone *xfs_ifork_zone;
570extern struct kmem_zone *xfs_inode_zone; 576extern struct kmem_zone *xfs_inode_zone;
571extern struct kmem_zone *xfs_ili_zone; 577extern struct kmem_zone *xfs_ili_zone;
572 578
579/*
580 * Manage the i_flush queue embedded in the inode. This completion
581 * queue synchronizes processes attempting to flush the in-core
582 * inode back to disk.
583 */
584static inline void xfs_iflock(xfs_inode_t *ip)
585{
586 wait_for_completion(&ip->i_flush);
587}
588
589static inline int xfs_iflock_nowait(xfs_inode_t *ip)
590{
591 return try_wait_for_completion(&ip->i_flush);
592}
593
594static inline void xfs_ifunlock(xfs_inode_t *ip)
595{
596 complete(&ip->i_flush);
597}
598
573#endif /* __KERNEL__ */ 599#endif /* __KERNEL__ */
574 600
575#endif /* __XFS_INODE_H__ */ 601#endif /* __XFS_INODE_H__ */
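
With XFS_ITOV and XFS_ITOV_NULL gone, conversions between the two inode types go through the typed inline helpers added above. A hedged usage sketch (the function below is illustrative, not from this patch):

	static int
	sketch_uses_helpers(struct inode *inode)
	{
		struct xfs_inode *ip = XFS_I(inode);	/* VFS side -> XFS inode */

		/* ...and back again whenever VFS-level state is needed: */
		return VN_CACHED(VFS_I(ip)) != 0;
	}
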
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eee08a32c26..97c7452e2620 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -779,11 +779,10 @@ xfs_inode_item_pushbuf(
779 ASSERT(iip->ili_push_owner == current_pid()); 779 ASSERT(iip->ili_push_owner == current_pid());
780 780
781 /* 781 /*
782 * If flushlock isn't locked anymore, chances are that the 782 * If a flush is not in progress anymore, chances are that the
783 * inode flush completed and the inode was taken off the AIL. 783 * inode was taken off the AIL. So, just get out.
784 * So, just get out.
785 */ 784 */
786 if (!issemalocked(&(ip->i_flock)) || 785 if (completion_done(&ip->i_flush) ||
787 ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { 786 ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
788 iip->ili_pushbuf_flag = 0; 787 iip->ili_pushbuf_flag = 0;
789 xfs_iunlock(ip, XFS_ILOCK_SHARED); 788 xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -805,7 +804,7 @@ xfs_inode_item_pushbuf(
805 * If not, we can flush it async. 804 * If not, we can flush it async.
806 */ 805 */
807 dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && 806 dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
808 issemalocked(&(ip->i_flock))); 807 !completion_done(&ip->i_flush));
809 iip->ili_pushbuf_flag = 0; 808 iip->ili_pushbuf_flag = 0;
810 xfs_iunlock(ip, XFS_ILOCK_SHARED); 809 xfs_iunlock(ip, XFS_ILOCK_SHARED);
811 xfs_buftrace("INODE ITEM PUSH", bp); 810 xfs_buftrace("INODE ITEM PUSH", bp);
@@ -858,7 +857,7 @@ xfs_inode_item_push(
858 ip = iip->ili_inode; 857 ip = iip->ili_inode;
859 858
860 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); 859 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
861 ASSERT(issemalocked(&(ip->i_flock))); 860 ASSERT(!completion_done(&ip->i_flush));
862 /* 861 /*
863 * Since we were able to lock the inode's flush lock and 862 * Since we were able to lock the inode's flush lock and
864 * we found it on the AIL, the inode must be dirty. This 863 * we found it on the AIL, the inode must be dirty. This
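
xfs_inode_item_pushbuf() above only needs to observe whether a flush is still in flight, so it uses completion_done(), which inspects the count without consuming it; the trylock path elsewhere uses try_wait_for_completion(), which takes the token on success. A sketch of the distinction (illustrative fragment, not from this patch):

	/* Non-destructive test: true when the flush lock is free. */
	if (completion_done(&ip->i_flush))
		return;		/* flush finished; item is likely off the AIL */

	/* Destructive trylock: nonzero return means we now hold the token. */
	if (try_wait_for_completion(&ip->i_flush)) {
		/* ... flush, then xfs_ifunlock(ip) to return the token ... */
	}
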
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 9a3ef9dcaeb9..cf6754a3c5b3 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -59,7 +59,6 @@ xfs_bulkstat_one_iget(
59{ 59{
60 xfs_icdinode_t *dic; /* dinode core info pointer */ 60 xfs_icdinode_t *dic; /* dinode core info pointer */
61 xfs_inode_t *ip; /* incore inode pointer */ 61 xfs_inode_t *ip; /* incore inode pointer */
62 bhv_vnode_t *vp;
63 int error; 62 int error;
64 63
65 error = xfs_iget(mp, NULL, ino, 64 error = xfs_iget(mp, NULL, ino,
@@ -72,7 +71,6 @@ xfs_bulkstat_one_iget(
72 ASSERT(ip != NULL); 71 ASSERT(ip != NULL);
73 ASSERT(ip->i_blkno != (xfs_daddr_t)0); 72 ASSERT(ip->i_blkno != (xfs_daddr_t)0);
74 73
75 vp = XFS_ITOV(ip);
76 dic = &ip->i_d; 74 dic = &ip->i_d;
77 75
78 /* xfs_iget returns the following without needing 76 /* xfs_iget returns the following without needing
@@ -85,7 +83,7 @@ xfs_bulkstat_one_iget(
85 buf->bs_uid = dic->di_uid; 83 buf->bs_uid = dic->di_uid;
86 buf->bs_gid = dic->di_gid; 84 buf->bs_gid = dic->di_gid;
87 buf->bs_size = dic->di_size; 85 buf->bs_size = dic->di_size;
88 vn_atime_to_bstime(vp, &buf->bs_atime); 86 vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
89 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; 87 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
90 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; 88 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
91 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; 89 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 91b00a5686cd..ccba14eb9dbe 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -160,7 +160,7 @@ void
160xlog_trace_iclog(xlog_in_core_t *iclog, uint state) 160xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
161{ 161{
162 if (!iclog->ic_trace) 162 if (!iclog->ic_trace)
163 iclog->ic_trace = ktrace_alloc(256, KM_SLEEP); 163 iclog->ic_trace = ktrace_alloc(256, KM_NOFS);
164 ktrace_enter(iclog->ic_trace, 164 ktrace_enter(iclog->ic_trace,
165 (void *)((unsigned long)state), 165 (void *)((unsigned long)state),
166 (void *)((unsigned long)current_pid()), 166 (void *)((unsigned long)current_pid()),
@@ -336,15 +336,12 @@ xfs_log_done(xfs_mount_t *mp,
336 } else { 336 } else {
337 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); 337 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
338 xlog_regrant_reserve_log_space(log, ticket); 338 xlog_regrant_reserve_log_space(log, ticket);
339 } 339 /* If this ticket was a permanent reservation and we aren't
340 340 * trying to release it, reset the inited flags; so next time
341 /* If this ticket was a permanent reservation and we aren't 341 * we write, a start record will be written out.
342 * trying to release it, reset the inited flags; so next time 342 */
343 * we write, a start record will be written out.
344 */
345 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
346 (flags & XFS_LOG_REL_PERM_RESERV) == 0)
347 ticket->t_flags |= XLOG_TIC_INITED; 343 ticket->t_flags |= XLOG_TIC_INITED;
344 }
348 345
349 return lsn; 346 return lsn;
350} /* xfs_log_done */ 347} /* xfs_log_done */
@@ -357,11 +354,11 @@ xfs_log_done(xfs_mount_t *mp,
357 * Asynchronous forces are implemented by setting the WANT_SYNC 354 * Asynchronous forces are implemented by setting the WANT_SYNC
358 * bit in the appropriate in-core log and then returning. 355 * bit in the appropriate in-core log and then returning.
359 * 356 *
360 * Synchronous forces are implemented with a semaphore. All callers 357 * Synchronous forces are implemented with a signal variable. All callers
 361 * to force a given lsn to disk will wait on a semaphore attached to the 358 * to force a given lsn to disk will wait on the sv attached to the
 362 * specific in-core log. When a given in-core log finally completes its 359 * specific in-core log. When a given in-core log finally completes its
363 * write to disk, that thread will wake up all threads waiting on the 360 * write to disk, that thread will wake up all threads waiting on the
364 * semaphore. 361 * sv.
365 */ 362 */
366int 363int
367_xfs_log_force( 364_xfs_log_force(
@@ -588,12 +585,12 @@ error:
588 * mp - ubiquitous xfs mount point structure 585 * mp - ubiquitous xfs mount point structure
589 */ 586 */
590int 587int
591xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags) 588xfs_log_mount_finish(xfs_mount_t *mp)
592{ 589{
593 int error; 590 int error;
594 591
595 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 592 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
596 error = xlog_recover_finish(mp->m_log, mfsi_flags); 593 error = xlog_recover_finish(mp->m_log);
597 else { 594 else {
598 error = 0; 595 error = 0;
599 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 596 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
@@ -707,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
707 if (!(iclog->ic_state == XLOG_STATE_ACTIVE || 704 if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
708 iclog->ic_state == XLOG_STATE_DIRTY)) { 705 iclog->ic_state == XLOG_STATE_DIRTY)) {
709 if (!XLOG_FORCED_SHUTDOWN(log)) { 706 if (!XLOG_FORCED_SHUTDOWN(log)) {
710 sv_wait(&iclog->ic_forcesema, PMEM, 707 sv_wait(&iclog->ic_force_wait, PMEM,
711 &log->l_icloglock, s); 708 &log->l_icloglock, s);
712 } else { 709 } else {
713 spin_unlock(&log->l_icloglock); 710 spin_unlock(&log->l_icloglock);
@@ -748,7 +745,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
748 || iclog->ic_state == XLOG_STATE_DIRTY 745 || iclog->ic_state == XLOG_STATE_DIRTY
749 || iclog->ic_state == XLOG_STATE_IOERROR) ) { 746 || iclog->ic_state == XLOG_STATE_IOERROR) ) {
750 747
751 sv_wait(&iclog->ic_forcesema, PMEM, 748 sv_wait(&iclog->ic_force_wait, PMEM,
752 &log->l_icloglock, s); 749 &log->l_icloglock, s);
753 } else { 750 } else {
754 spin_unlock(&log->l_icloglock); 751 spin_unlock(&log->l_icloglock);
@@ -838,7 +835,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
838 break; 835 break;
839 tail_lsn = 0; 836 tail_lsn = 0;
840 free_bytes -= tic->t_unit_res; 837 free_bytes -= tic->t_unit_res;
841 sv_signal(&tic->t_sema); 838 sv_signal(&tic->t_wait);
842 tic = tic->t_next; 839 tic = tic->t_next;
843 } while (tic != log->l_write_headq); 840 } while (tic != log->l_write_headq);
844 } 841 }
@@ -859,7 +856,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
859 break; 856 break;
860 tail_lsn = 0; 857 tail_lsn = 0;
861 free_bytes -= need_bytes; 858 free_bytes -= need_bytes;
862 sv_signal(&tic->t_sema); 859 sv_signal(&tic->t_wait);
863 tic = tic->t_next; 860 tic = tic->t_next;
864 } while (tic != log->l_reserve_headq); 861 } while (tic != log->l_reserve_headq);
865 } 862 }
@@ -1285,8 +1282,8 @@ xlog_alloc_log(xfs_mount_t *mp,
1285 1282
1286 ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); 1283 ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
1287 ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); 1284 ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
1288 sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force"); 1285 sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
1289 sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write"); 1286 sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
1290 1287
1291 iclogp = &iclog->ic_next; 1288 iclogp = &iclog->ic_next;
1292 } 1289 }
@@ -1565,8 +1562,8 @@ xlog_dealloc_log(xlog_t *log)
1565 1562
1566 iclog = log->l_iclog; 1563 iclog = log->l_iclog;
1567 for (i=0; i<log->l_iclog_bufs; i++) { 1564 for (i=0; i<log->l_iclog_bufs; i++) {
1568 sv_destroy(&iclog->ic_forcesema); 1565 sv_destroy(&iclog->ic_force_wait);
1569 sv_destroy(&iclog->ic_writesema); 1566 sv_destroy(&iclog->ic_write_wait);
1570 xfs_buf_free(iclog->ic_bp); 1567 xfs_buf_free(iclog->ic_bp);
1571#ifdef XFS_LOG_TRACE 1568#ifdef XFS_LOG_TRACE
1572 if (iclog->ic_trace != NULL) { 1569 if (iclog->ic_trace != NULL) {
@@ -1976,7 +1973,7 @@ xlog_write(xfs_mount_t * mp,
1976/* Clean iclogs starting from the head. This ordering must be 1973/* Clean iclogs starting from the head. This ordering must be
1977 * maintained, so an iclog doesn't become ACTIVE beyond one that 1974 * maintained, so an iclog doesn't become ACTIVE beyond one that
1978 * is SYNCING. This is also required to maintain the notion that we use 1975 * is SYNCING. This is also required to maintain the notion that we use
 1979 * a counting semaphore to hold off would-be writers to the log when every 1976 * an ordered wait queue to hold off would-be writers to the log when every
1980 * iclog is trying to sync to disk. 1977 * iclog is trying to sync to disk.
1981 * 1978 *
1982 * State Change: DIRTY -> ACTIVE 1979 * State Change: DIRTY -> ACTIVE
@@ -2240,7 +2237,7 @@ xlog_state_do_callback(
2240 xlog_state_clean_log(log); 2237 xlog_state_clean_log(log);
2241 2238
2242 /* wake up threads waiting in xfs_log_force() */ 2239 /* wake up threads waiting in xfs_log_force() */
2243 sv_broadcast(&iclog->ic_forcesema); 2240 sv_broadcast(&iclog->ic_force_wait);
2244 2241
2245 iclog = iclog->ic_next; 2242 iclog = iclog->ic_next;
2246 } while (first_iclog != iclog); 2243 } while (first_iclog != iclog);
@@ -2302,8 +2299,7 @@ xlog_state_do_callback(
2302 * the second completion goes through. 2299 * the second completion goes through.
2303 * 2300 *
2304 * Callbacks could take time, so they are done outside the scope of the 2301 * Callbacks could take time, so they are done outside the scope of the
2305 * global state machine log lock. Assume that the calls to cvsema won't 2302 * global state machine log lock.
2306 * take a long time. At least we know it won't sleep.
2307 */ 2303 */
2308STATIC void 2304STATIC void
2309xlog_state_done_syncing( 2305xlog_state_done_syncing(
@@ -2339,7 +2335,7 @@ xlog_state_done_syncing(
2339 * iclog buffer, we wake them all, one will get to do the 2335 * iclog buffer, we wake them all, one will get to do the
2340 * I/O, the others get to wait for the result. 2336 * I/O, the others get to wait for the result.
2341 */ 2337 */
2342 sv_broadcast(&iclog->ic_writesema); 2338 sv_broadcast(&iclog->ic_write_wait);
2343 spin_unlock(&log->l_icloglock); 2339 spin_unlock(&log->l_icloglock);
2344 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2340 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
2345} /* xlog_state_done_syncing */ 2341} /* xlog_state_done_syncing */
@@ -2347,11 +2343,9 @@ xlog_state_done_syncing(
2347 2343
2348/* 2344/*
2349 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2345 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2350 * sleep. The flush semaphore is set to the number of in-core buffers and 2346 * sleep. We wait on the flush queue on the head iclog as that should be
2351 * decremented around disk syncing. Therefore, if all buffers are syncing, 2347 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2352 * this semaphore will cause new writes to sleep until a sync completes. 2348 * we will wait here and all new writes will sleep until a sync completes.
2353 * Otherwise, this code just does p() followed by v(). This approximates
2354 * a sleep/wakeup except we can't race.
2355 * 2349 *
2356 * The in-core logs are used in a circular fashion. They are not used 2350 * The in-core logs are used in a circular fashion. They are not used
2357 * out-of-order even when an iclog past the head is free. 2351 * out-of-order even when an iclog past the head is free.
@@ -2508,7 +2502,7 @@ xlog_grant_log_space(xlog_t *log,
2508 goto error_return; 2502 goto error_return;
2509 2503
2510 XFS_STATS_INC(xs_sleep_logspace); 2504 XFS_STATS_INC(xs_sleep_logspace);
2511 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2505 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2512 /* 2506 /*
2513 * If we got an error, and the filesystem is shutting down, 2507 * If we got an error, and the filesystem is shutting down,
2514 * we'll catch it down below. So just continue... 2508 * we'll catch it down below. So just continue...
@@ -2534,7 +2528,7 @@ redo:
2534 xlog_trace_loggrant(log, tic, 2528 xlog_trace_loggrant(log, tic,
2535 "xlog_grant_log_space: sleep 2"); 2529 "xlog_grant_log_space: sleep 2");
2536 XFS_STATS_INC(xs_sleep_logspace); 2530 XFS_STATS_INC(xs_sleep_logspace);
2537 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2531 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2538 2532
2539 if (XLOG_FORCED_SHUTDOWN(log)) { 2533 if (XLOG_FORCED_SHUTDOWN(log)) {
2540 spin_lock(&log->l_grant_lock); 2534 spin_lock(&log->l_grant_lock);
@@ -2633,7 +2627,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2633 if (free_bytes < ntic->t_unit_res) 2627 if (free_bytes < ntic->t_unit_res)
2634 break; 2628 break;
2635 free_bytes -= ntic->t_unit_res; 2629 free_bytes -= ntic->t_unit_res;
2636 sv_signal(&ntic->t_sema); 2630 sv_signal(&ntic->t_wait);
2637 ntic = ntic->t_next; 2631 ntic = ntic->t_next;
2638 } while (ntic != log->l_write_headq); 2632 } while (ntic != log->l_write_headq);
2639 2633
@@ -2644,7 +2638,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2644 xlog_trace_loggrant(log, tic, 2638 xlog_trace_loggrant(log, tic,
2645 "xlog_regrant_write_log_space: sleep 1"); 2639 "xlog_regrant_write_log_space: sleep 1");
2646 XFS_STATS_INC(xs_sleep_logspace); 2640 XFS_STATS_INC(xs_sleep_logspace);
2647 sv_wait(&tic->t_sema, PINOD|PLTWAIT, 2641 sv_wait(&tic->t_wait, PINOD|PLTWAIT,
2648 &log->l_grant_lock, s); 2642 &log->l_grant_lock, s);
2649 2643
2650 /* If we're shutting down, this tic is already 2644 /* If we're shutting down, this tic is already
@@ -2673,7 +2667,7 @@ redo:
2673 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2667 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2674 xlog_ins_ticketq(&log->l_write_headq, tic); 2668 xlog_ins_ticketq(&log->l_write_headq, tic);
2675 XFS_STATS_INC(xs_sleep_logspace); 2669 XFS_STATS_INC(xs_sleep_logspace);
2676 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2670 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2677 2671
2678 /* If we're shutting down, this tic is already off the queue */ 2672 /* If we're shutting down, this tic is already off the queue */
2679 if (XLOG_FORCED_SHUTDOWN(log)) { 2673 if (XLOG_FORCED_SHUTDOWN(log)) {
@@ -2916,7 +2910,7 @@ xlog_state_switch_iclogs(xlog_t *log,
 2916 * 2. the current iclog is dirty, and the previous iclog is in the 2910 * 2. the current iclog is dirty, and the previous iclog is in the
2917 * active or dirty state. 2911 * active or dirty state.
2918 * 2912 *
2919 * We may sleep (call psema) if: 2913 * We may sleep if:
2920 * 2914 *
2921 * 1. the current iclog is not in the active nor dirty state. 2915 * 1. the current iclog is not in the active nor dirty state.
 2922 * 2. the current iclog is dirty, and the previous iclog is not in the 2916 * 2. the current iclog is dirty, and the previous iclog is not in the
@@ -3013,7 +3007,7 @@ maybe_sleep:
3013 return XFS_ERROR(EIO); 3007 return XFS_ERROR(EIO);
3014 } 3008 }
3015 XFS_STATS_INC(xs_log_force_sleep); 3009 XFS_STATS_INC(xs_log_force_sleep);
3016 sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s); 3010 sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
3017 /* 3011 /*
3018 * No need to grab the log lock here since we're 3012 * No need to grab the log lock here since we're
3019 * only deciding whether or not to return EIO 3013 * only deciding whether or not to return EIO
@@ -3096,7 +3090,7 @@ try_again:
3096 XLOG_STATE_SYNCING))) { 3090 XLOG_STATE_SYNCING))) {
3097 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3091 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3098 XFS_STATS_INC(xs_log_force_sleep); 3092 XFS_STATS_INC(xs_log_force_sleep);
3099 sv_wait(&iclog->ic_prev->ic_writesema, PSWP, 3093 sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
3100 &log->l_icloglock, s); 3094 &log->l_icloglock, s);
3101 *log_flushed = 1; 3095 *log_flushed = 1;
3102 already_slept = 1; 3096 already_slept = 1;
@@ -3116,7 +3110,7 @@ try_again:
3116 !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { 3110 !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3117 3111
3118 /* 3112 /*
3119 * Don't wait on the forcesema if we know that we've 3113 * Don't wait on completion if we know that we've
3120 * gotten a log write error. 3114 * gotten a log write error.
3121 */ 3115 */
3122 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3116 if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -3124,7 +3118,7 @@ try_again:
3124 return XFS_ERROR(EIO); 3118 return XFS_ERROR(EIO);
3125 } 3119 }
3126 XFS_STATS_INC(xs_log_force_sleep); 3120 XFS_STATS_INC(xs_log_force_sleep);
3127 sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s); 3121 sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
3128 /* 3122 /*
3129 * No need to grab the log lock here since we're 3123 * No need to grab the log lock here since we're
3130 * only deciding whether or not to return EIO 3124 * only deciding whether or not to return EIO
@@ -3180,7 +3174,7 @@ STATIC void
3180xlog_ticket_put(xlog_t *log, 3174xlog_ticket_put(xlog_t *log,
3181 xlog_ticket_t *ticket) 3175 xlog_ticket_t *ticket)
3182{ 3176{
3183 sv_destroy(&ticket->t_sema); 3177 sv_destroy(&ticket->t_wait);
3184 kmem_zone_free(xfs_log_ticket_zone, ticket); 3178 kmem_zone_free(xfs_log_ticket_zone, ticket);
3185} /* xlog_ticket_put */ 3179} /* xlog_ticket_put */
3186 3180
@@ -3270,7 +3264,7 @@ xlog_ticket_get(xlog_t *log,
3270 tic->t_trans_type = 0; 3264 tic->t_trans_type = 0;
3271 if (xflags & XFS_LOG_PERM_RESERV) 3265 if (xflags & XFS_LOG_PERM_RESERV)
3272 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3266 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3273 sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); 3267 sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
3274 3268
3275 xlog_tic_reset_res(tic); 3269 xlog_tic_reset_res(tic);
3276 3270
@@ -3557,14 +3551,14 @@ xfs_log_force_umount(
3557 */ 3551 */
3558 if ((tic = log->l_reserve_headq)) { 3552 if ((tic = log->l_reserve_headq)) {
3559 do { 3553 do {
3560 sv_signal(&tic->t_sema); 3554 sv_signal(&tic->t_wait);
3561 tic = tic->t_next; 3555 tic = tic->t_next;
3562 } while (tic != log->l_reserve_headq); 3556 } while (tic != log->l_reserve_headq);
3563 } 3557 }
3564 3558
3565 if ((tic = log->l_write_headq)) { 3559 if ((tic = log->l_write_headq)) {
3566 do { 3560 do {
3567 sv_signal(&tic->t_sema); 3561 sv_signal(&tic->t_wait);
3568 tic = tic->t_next; 3562 tic = tic->t_next;
3569 } while (tic != log->l_write_headq); 3563 } while (tic != log->l_write_headq);
3570 } 3564 }
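
The t_sema/ic_forcesema renames throughout this file reflect what sv_t actually is: a sleep/wakeup primitive over a wait queue, not a counted semaphore. A rough model of sv_wait()/sv_signal() under that assumption (simplified; the real sv_t lives in XFS's Linux support code and also restores the lock/priority arguments it is passed):

	#include <linux/wait.h>
	#include <linux/spinlock.h>
	#include <linux/sched.h>

	/* Rough model only. */
	typedef struct { wait_queue_head_t waiters; } sv_sketch_t;

	static void
	sv_sketch_wait(sv_sketch_t *sv, spinlock_t *lock)
	{
		DEFINE_WAIT(wait);

		prepare_to_wait_exclusive(&sv->waiters, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(lock);	/* queued before dropping the lock: no lost wakeup */
		schedule();
		finish_wait(&sv->waiters, &wait);	/* caller returns unlocked */
	}

	static void
	sv_sketch_signal(sv_sketch_t *sv)
	{
		wake_up(&sv->waiters);	/* wake one exclusive sleeper */
	}
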
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d1d678ecb63e..d47b91f10822 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -149,7 +149,7 @@ int xfs_log_mount(struct xfs_mount *mp,
149 struct xfs_buftarg *log_target, 149 struct xfs_buftarg *log_target,
150 xfs_daddr_t start_block, 150 xfs_daddr_t start_block,
151 int num_bblocks); 151 int num_bblocks);
152int xfs_log_mount_finish(struct xfs_mount *mp, int); 152int xfs_log_mount_finish(struct xfs_mount *mp);
153void xfs_log_move_tail(struct xfs_mount *mp, 153void xfs_log_move_tail(struct xfs_mount *mp,
154 xfs_lsn_t tail_lsn); 154 xfs_lsn_t tail_lsn);
155int xfs_log_notify(struct xfs_mount *mp, 155int xfs_log_notify(struct xfs_mount *mp,
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 6245913196b4..c8a5b22ee3e3 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -241,7 +241,7 @@ typedef struct xlog_res {
241} xlog_res_t; 241} xlog_res_t;
242 242
243typedef struct xlog_ticket { 243typedef struct xlog_ticket {
244 sv_t t_sema; /* sleep on this semaphore : 20 */ 244 sv_t t_wait; /* ticket wait queue : 20 */
245 struct xlog_ticket *t_next; /* :4|8 */ 245 struct xlog_ticket *t_next; /* :4|8 */
246 struct xlog_ticket *t_prev; /* :4|8 */ 246 struct xlog_ticket *t_prev; /* :4|8 */
247 xlog_tid_t t_tid; /* transaction identifier : 4 */ 247 xlog_tid_t t_tid; /* transaction identifier : 4 */
@@ -314,7 +314,7 @@ typedef struct xlog_rec_ext_header {
314 * xlog_rec_header_t into the reserved space. 314 * xlog_rec_header_t into the reserved space.
315 * - ic_data follows, so a write to disk can start at the beginning of 315 * - ic_data follows, so a write to disk can start at the beginning of
316 * the iclog. 316 * the iclog.
317 * - ic_forcesema is used to implement synchronous forcing of the iclog to disk. 317 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
318 * - ic_next is the pointer to the next iclog in the ring. 318 * - ic_next is the pointer to the next iclog in the ring.
319 * - ic_bp is a pointer to the buffer used to write this incore log to disk. 319 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
320 * - ic_log is a pointer back to the global log structure. 320 * - ic_log is a pointer back to the global log structure.
@@ -339,8 +339,8 @@ typedef struct xlog_rec_ext_header {
339 * and move everything else out to subsequent cachelines. 339 * and move everything else out to subsequent cachelines.
340 */ 340 */
341typedef struct xlog_iclog_fields { 341typedef struct xlog_iclog_fields {
342 sv_t ic_forcesema; 342 sv_t ic_force_wait;
343 sv_t ic_writesema; 343 sv_t ic_write_wait;
344 struct xlog_in_core *ic_next; 344 struct xlog_in_core *ic_next;
345 struct xlog_in_core *ic_prev; 345 struct xlog_in_core *ic_prev;
346 struct xfs_buf *ic_bp; 346 struct xfs_buf *ic_bp;
@@ -377,8 +377,8 @@ typedef struct xlog_in_core {
377/* 377/*
378 * Defines to save our code from this glop. 378 * Defines to save our code from this glop.
379 */ 379 */
380#define ic_forcesema hic_fields.ic_forcesema 380#define ic_force_wait hic_fields.ic_force_wait
381#define ic_writesema hic_fields.ic_writesema 381#define ic_write_wait hic_fields.ic_write_wait
382#define ic_next hic_fields.ic_next 382#define ic_next hic_fields.ic_next
383#define ic_prev hic_fields.ic_prev 383#define ic_prev hic_fields.ic_prev
384#define ic_bp hic_fields.ic_bp 384#define ic_bp hic_fields.ic_bp
@@ -468,7 +468,7 @@ extern int xlog_find_tail(xlog_t *log,
468 xfs_daddr_t *head_blk, 468 xfs_daddr_t *head_blk,
469 xfs_daddr_t *tail_blk); 469 xfs_daddr_t *tail_blk);
470extern int xlog_recover(xlog_t *log); 470extern int xlog_recover(xlog_t *log);
471extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); 471extern int xlog_recover_finish(xlog_t *log);
472extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 472extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
473extern void xlog_recover_process_iunlinks(xlog_t *log); 473extern void xlog_recover_process_iunlinks(xlog_t *log);
474 474
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9eb722ec744e..82d46ce69d5f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3940,8 +3940,7 @@ xlog_recover(
3940 */ 3940 */
3941int 3941int
3942xlog_recover_finish( 3942xlog_recover_finish(
3943 xlog_t *log, 3943 xlog_t *log)
3944 int mfsi_flags)
3945{ 3944{
3946 /* 3945 /*
3947 * Now we're ready to do the transactions needed for the 3946 * Now we're ready to do the transactions needed for the
@@ -3969,9 +3968,7 @@ xlog_recover_finish(
3969 xfs_log_force(log->l_mp, (xfs_lsn_t)0, 3968 xfs_log_force(log->l_mp, (xfs_lsn_t)0,
3970 (XFS_LOG_FORCE | XFS_LOG_SYNC)); 3969 (XFS_LOG_FORCE | XFS_LOG_SYNC));
3971 3970
3972 if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) { 3971 xlog_recover_process_iunlinks(log);
3973 xlog_recover_process_iunlinks(log);
3974 }
3975 3972
3976 xlog_recover_check_summary(log); 3973 xlog_recover_check_summary(log);
3977 3974
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 6c5d1325e7f6..a4503f5e9497 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -128,7 +128,7 @@ static const struct {
128 * initialized. 128 * initialized.
129 */ 129 */
130STATIC void 130STATIC void
131xfs_mount_free( 131xfs_free_perag(
132 xfs_mount_t *mp) 132 xfs_mount_t *mp)
133{ 133{
134 if (mp->m_perag) { 134 if (mp->m_perag) {
@@ -139,20 +139,6 @@ xfs_mount_free(
139 kmem_free(mp->m_perag[agno].pagb_list); 139 kmem_free(mp->m_perag[agno].pagb_list);
140 kmem_free(mp->m_perag); 140 kmem_free(mp->m_perag);
141 } 141 }
142
143 spinlock_destroy(&mp->m_ail_lock);
144 spinlock_destroy(&mp->m_sb_lock);
145 mutex_destroy(&mp->m_ilock);
146 mutex_destroy(&mp->m_growlock);
147 if (mp->m_quotainfo)
148 XFS_QM_DONE(mp);
149
150 if (mp->m_fsname != NULL)
151 kmem_free(mp->m_fsname);
152 if (mp->m_rtname != NULL)
153 kmem_free(mp->m_rtname);
154 if (mp->m_logname != NULL)
155 kmem_free(mp->m_logname);
156} 142}
157 143
158/* 144/*
@@ -704,11 +690,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
704 * Update alignment values based on mount options and sb values 690 * Update alignment values based on mount options and sb values
705 */ 691 */
706STATIC int 692STATIC int
707xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags) 693xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
708{ 694{
709 xfs_sb_t *sbp = &(mp->m_sb); 695 xfs_sb_t *sbp = &(mp->m_sb);
710 696
711 if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { 697 if (mp->m_dalign) {
712 /* 698 /*
713 * If stripe unit and stripe width are not multiples 699 * If stripe unit and stripe width are not multiples
714 * of the fs blocksize turn off alignment. 700 * of the fs blocksize turn off alignment.
@@ -864,7 +850,7 @@ xfs_set_inoalignment(xfs_mount_t *mp)
864 * Check that the data (and log if separate) are an ok size. 850 * Check that the data (and log if separate) are an ok size.
865 */ 851 */
866STATIC int 852STATIC int
867xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) 853xfs_check_sizes(xfs_mount_t *mp)
868{ 854{
869 xfs_buf_t *bp; 855 xfs_buf_t *bp;
870 xfs_daddr_t d; 856 xfs_daddr_t d;
@@ -887,8 +873,7 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
887 return error; 873 return error;
888 } 874 }
889 875
890 if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && 876 if (mp->m_logdev_targp != mp->m_ddev_targp) {
891 mp->m_logdev_targp != mp->m_ddev_targp) {
892 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 877 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
893 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 878 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
894 cmn_err(CE_WARN, "XFS: size check 3 failed"); 879 cmn_err(CE_WARN, "XFS: size check 3 failed");
@@ -923,15 +908,13 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
923 */ 908 */
924int 909int
925xfs_mountfs( 910xfs_mountfs(
926 xfs_mount_t *mp, 911 xfs_mount_t *mp)
927 int mfsi_flags)
928{ 912{
929 xfs_sb_t *sbp = &(mp->m_sb); 913 xfs_sb_t *sbp = &(mp->m_sb);
930 xfs_inode_t *rip; 914 xfs_inode_t *rip;
931 __uint64_t resblks; 915 __uint64_t resblks;
932 __int64_t update_flags = 0LL; 916 __int64_t update_flags = 0LL;
933 uint quotamount, quotaflags; 917 uint quotamount, quotaflags;
934 int agno;
935 int uuid_mounted = 0; 918 int uuid_mounted = 0;
936 int error = 0; 919 int error = 0;
937 920
@@ -985,7 +968,7 @@ xfs_mountfs(
985 * allocator alignment is within an ag, therefore ag has 968 * allocator alignment is within an ag, therefore ag has
986 * to be aligned at stripe boundary. 969 * to be aligned at stripe boundary.
987 */ 970 */
988 error = xfs_update_alignment(mp, mfsi_flags, &update_flags); 971 error = xfs_update_alignment(mp, &update_flags);
989 if (error) 972 if (error)
990 goto error1; 973 goto error1;
991 974
@@ -1004,8 +987,7 @@ xfs_mountfs(
1004 * since a single partition filesystem is identical to a single 987 * since a single partition filesystem is identical to a single
1005 * partition volume/filesystem. 988 * partition volume/filesystem.
1006 */ 989 */
1007 if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && 990 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
1008 (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
1009 if (xfs_uuid_mount(mp)) { 991 if (xfs_uuid_mount(mp)) {
1010 error = XFS_ERROR(EINVAL); 992 error = XFS_ERROR(EINVAL);
1011 goto error1; 993 goto error1;
@@ -1033,7 +1015,7 @@ xfs_mountfs(
1033 /* 1015 /*
1034 * Check that the data (and log if separate) are an ok size. 1016 * Check that the data (and log if separate) are an ok size.
1035 */ 1017 */
1036 error = xfs_check_sizes(mp, mfsi_flags); 1018 error = xfs_check_sizes(mp);
1037 if (error) 1019 if (error)
1038 goto error1; 1020 goto error1;
1039 1021
@@ -1047,13 +1029,6 @@ xfs_mountfs(
1047 } 1029 }
1048 1030
1049 /* 1031 /*
1050 * For client case we are done now
1051 */
1052 if (mfsi_flags & XFS_MFSI_CLIENT) {
1053 return 0;
1054 }
1055
1056 /*
1057 * Copies the low order bits of the timestamp and the randomly 1032 * Copies the low order bits of the timestamp and the randomly
1058 * set "sequence" number out of a UUID. 1033 * set "sequence" number out of a UUID.
1059 */ 1034 */
@@ -1077,8 +1052,10 @@ xfs_mountfs(
1077 * Allocate and initialize the per-ag data. 1052 * Allocate and initialize the per-ag data.
1078 */ 1053 */
1079 init_rwsem(&mp->m_peraglock); 1054 init_rwsem(&mp->m_peraglock);
1080 mp->m_perag = 1055 mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
1081 kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP); 1056 KM_MAYFAIL);
1057 if (!mp->m_perag)
1058 goto error1;
1082 1059
1083 mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); 1060 mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
1084 1061
@@ -1190,7 +1167,7 @@ xfs_mountfs(
1190 * delayed until after the root and real-time bitmap inodes 1167 * delayed until after the root and real-time bitmap inodes
1191 * were consistently read in. 1168 * were consistently read in.
1192 */ 1169 */
1193 error = xfs_log_mount_finish(mp, mfsi_flags); 1170 error = xfs_log_mount_finish(mp);
1194 if (error) { 1171 if (error) {
1195 cmn_err(CE_WARN, "XFS: log mount finish failed"); 1172 cmn_err(CE_WARN, "XFS: log mount finish failed");
1196 goto error4; 1173 goto error4;
@@ -1199,7 +1176,7 @@ xfs_mountfs(
1199 /* 1176 /*
1200 * Complete the quota initialisation, post-log-replay component. 1177 * Complete the quota initialisation, post-log-replay component.
1201 */ 1178 */
1202 error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags); 1179 error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
1203 if (error) 1180 if (error)
1204 goto error4; 1181 goto error4;
1205 1182
@@ -1233,12 +1210,7 @@ xfs_mountfs(
1233 error3: 1210 error3:
1234 xfs_log_unmount_dealloc(mp); 1211 xfs_log_unmount_dealloc(mp);
1235 error2: 1212 error2:
1236 for (agno = 0; agno < sbp->sb_agcount; agno++) 1213 xfs_free_perag(mp);
1237 if (mp->m_perag[agno].pagb_list)
1238 kmem_free(mp->m_perag[agno].pagb_list);
1239 kmem_free(mp->m_perag);
1240 mp->m_perag = NULL;
1241 /* FALLTHROUGH */
1242 error1: 1214 error1:
1243 if (uuid_mounted) 1215 if (uuid_mounted)
1244 uuid_table_remove(&mp->m_sb.sb_uuid); 1216 uuid_table_remove(&mp->m_sb.sb_uuid);
@@ -1246,16 +1218,17 @@ xfs_mountfs(
1246} 1218}
1247 1219
1248/* 1220/*
1249 * xfs_unmountfs
1250 *
1251 * This flushes out the inodes, dquots and the superblock, unmounts the 1221 * This flushes out the inodes, dquots and the superblock, unmounts the
1252 * log and makes sure that incore structures are freed. 1222 * log and makes sure that incore structures are freed.
1253 */ 1223 */
1254int 1224void
1255xfs_unmountfs(xfs_mount_t *mp) 1225xfs_unmountfs(
1226 struct xfs_mount *mp)
1256{ 1227{
1257 __uint64_t resblks; 1228 __uint64_t resblks;
1258 int error = 0; 1229 int error;
1230
1231 IRELE(mp->m_rootip);
1259 1232
1260 /* 1233 /*
1261 * We can potentially deadlock here if we have an inode cluster 1234 * We can potentially deadlock here if we have an inode cluster
@@ -1312,8 +1285,6 @@ xfs_unmountfs(xfs_mount_t *mp)
1312 xfs_unmountfs_wait(mp); /* wait for async bufs */ 1285 xfs_unmountfs_wait(mp); /* wait for async bufs */
1313 xfs_log_unmount(mp); /* Done! No more fs ops. */ 1286 xfs_log_unmount(mp); /* Done! No more fs ops. */
1314 1287
1315 xfs_freesb(mp);
1316
1317 /* 1288 /*
1318 * All inodes from this mount point should be freed. 1289 * All inodes from this mount point should be freed.
1319 */ 1290 */
@@ -1322,11 +1293,12 @@ xfs_unmountfs(xfs_mount_t *mp)
1322 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) 1293 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
1323 uuid_table_remove(&mp->m_sb.sb_uuid); 1294 uuid_table_remove(&mp->m_sb.sb_uuid);
1324 1295
1325#if defined(DEBUG) || defined(INDUCE_IO_ERROR) 1296#if defined(DEBUG)
1326 xfs_errortag_clearall(mp, 0); 1297 xfs_errortag_clearall(mp, 0);
1327#endif 1298#endif
1328 xfs_mount_free(mp); 1299 xfs_free_perag(mp);
1329 return 0; 1300 if (mp->m_quotainfo)
1301 XFS_QM_DONE(mp);
1330} 1302}
1331 1303
1332STATIC void 1304STATIC void
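
One behavioral change above is worth calling out: the per-ag array is now allocated with KM_MAYFAIL, so the allocation can return NULL and xfs_mountfs() must unwind through error1 instead of relying on a sleeping allocation that never fails. A minimal standalone sketch of the same check-and-unwind pattern (mount_perag() and the element size are stand-ins, not XFS code):

    #include <stdlib.h>

    /* The allocation is allowed to fail, so the caller must test for
     * NULL and take the error path. */
    static int mount_perag(size_t agcount, void **peragp)
    {
            *peragp = calloc(agcount, 128 /* stand-in for sizeof(xfs_perag_t) */);
            if (!*peragp)
                    return -1;      /* corresponds to the goto error1 above */
            return 0;
    }
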
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5269bd6e3df0..f3c1024b1241 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -114,7 +114,7 @@ struct xfs_dqtrxops;
114struct xfs_quotainfo; 114struct xfs_quotainfo;
115 115
116typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); 116typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
117typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int); 117typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
118typedef int (*xfs_qmunmount_t)(struct xfs_mount *); 118typedef int (*xfs_qmunmount_t)(struct xfs_mount *);
119typedef void (*xfs_qmdone_t)(struct xfs_mount *); 119typedef void (*xfs_qmdone_t)(struct xfs_mount *);
120typedef void (*xfs_dqrele_t)(struct xfs_dquot *); 120typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
@@ -158,8 +158,8 @@ typedef struct xfs_qmops {
158 158
159#define XFS_QM_INIT(mp, mnt, fl) \ 159#define XFS_QM_INIT(mp, mnt, fl) \
160 (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl) 160 (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl)
161#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \ 161#define XFS_QM_MOUNT(mp, mnt, fl) \
162 (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl, mfsi_flags) 162 (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl)
163#define XFS_QM_UNMOUNT(mp) \ 163#define XFS_QM_UNMOUNT(mp) \
164 (*(mp)->m_qm_ops->xfs_qmunmount)(mp) 164 (*(mp)->m_qm_ops->xfs_qmunmount)(mp)
165#define XFS_QM_DONE(mp) \ 165#define XFS_QM_DONE(mp) \
@@ -442,13 +442,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
442/* 442/*
443 * Flags for xfs_mountfs 443 * Flags for xfs_mountfs
444 */ 444 */
445#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */
446#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */
447/* XFS_MFSI_RRINODES */
448#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */
449 /* log recovery */
450#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */
451/* XFS_MFSI_CONVERT_SUNIT */
452#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ 445#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */
453 446
454#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) 447#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d)
@@ -517,10 +510,10 @@ typedef struct xfs_mod_sb {
517 510
518extern void xfs_mod_sb(xfs_trans_t *, __int64_t); 511extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
519extern int xfs_log_sbcount(xfs_mount_t *, uint); 512extern int xfs_log_sbcount(xfs_mount_t *, uint);
520extern int xfs_mountfs(xfs_mount_t *mp, int); 513extern int xfs_mountfs(xfs_mount_t *mp);
521extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); 514extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
522 515
523extern int xfs_unmountfs(xfs_mount_t *); 516extern void xfs_unmountfs(xfs_mount_t *);
524extern int xfs_unmountfs_writesb(xfs_mount_t *); 517extern int xfs_unmountfs_writesb(xfs_mount_t *);
525extern int xfs_unmount_flush(xfs_mount_t *, int); 518extern int xfs_unmount_flush(xfs_mount_t *, int);
526extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); 519extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index bf87a5913504..e2f68de16159 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -74,18 +74,6 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
74 */ 74 */
75 75
76/* 76/*
77 * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set.
78 */
79STATIC int
80xfs_lowbit32(
81 __uint32_t v)
82{
83 if (v)
84 return ffs(v) - 1;
85 return -1;
86}
87
88/*
89 * Allocate space to the bitmap or summary file, and zero it, for growfs. 77 * Allocate space to the bitmap or summary file, and zero it, for growfs.
90 */ 78 */
91STATIC int /* error */ 79STATIC int /* error */
@@ -450,6 +438,7 @@ xfs_rtallocate_extent_near(
450 } 438 }
451 bbno = XFS_BITTOBLOCK(mp, bno); 439 bbno = XFS_BITTOBLOCK(mp, bno);
452 i = 0; 440 i = 0;
441 ASSERT(minlen != 0);
453 log2len = xfs_highbit32(minlen); 442 log2len = xfs_highbit32(minlen);
454 /* 443 /*
455 * Loop over all bitmap blocks (bbno + i is current block). 444 * Loop over all bitmap blocks (bbno + i is current block).
@@ -618,6 +607,8 @@ xfs_rtallocate_extent_size(
618 xfs_suminfo_t sum; /* summary information for extents */ 607 xfs_suminfo_t sum; /* summary information for extents */
619 608
620 ASSERT(minlen % prod == 0 && maxlen % prod == 0); 609 ASSERT(minlen % prod == 0 && maxlen % prod == 0);
610 ASSERT(maxlen != 0);
611
621 /* 612 /*
622 * Loop over all the levels starting with maxlen. 613 * Loop over all the levels starting with maxlen.
623 * At each level, look at all the bitmap blocks, to see if there 614 * At each level, look at all the bitmap blocks, to see if there
@@ -675,6 +666,9 @@ xfs_rtallocate_extent_size(
675 *rtblock = NULLRTBLOCK; 666 *rtblock = NULLRTBLOCK;
676 return 0; 667 return 0;
677 } 668 }
669 ASSERT(minlen != 0);
670 ASSERT(maxlen != 0);
671
678 /* 672 /*
679 * Loop over sizes, from maxlen down to minlen. 673 * Loop over sizes, from maxlen down to minlen.
680 * This time, when we do the allocations, allow smaller ones 674 * This time, when we do the allocations, allow smaller ones
@@ -1961,6 +1955,7 @@ xfs_growfs_rt(
1961 nsbp->sb_blocksize * nsbp->sb_rextsize); 1955 nsbp->sb_blocksize * nsbp->sb_rextsize);
1962 nsbp->sb_rextents = nsbp->sb_rblocks; 1956 nsbp->sb_rextents = nsbp->sb_rblocks;
1963 do_div(nsbp->sb_rextents, nsbp->sb_rextsize); 1957 do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
1958 ASSERT(nsbp->sb_rextents != 0);
1964 nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); 1959 nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
1965 nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; 1960 nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
1966 nrsumsize = 1961 nrsumsize =
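
The new ASSERTs in this file all guard xfs_highbit32(), whose result is meaningless for a zero argument, while the open-coded xfs_lowbit32() helper above is dropped in favor of a shared copy. A standalone sketch of both helpers' semantics, using GCC/Clang builtins (the names here are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* highbit32(): index of the most significant set bit; undefined for 0,
     * hence the ASSERTs added above before each caller. */
    static int highbit32(uint32_t v)
    {
            assert(v != 0);
            return 31 - __builtin_clz(v);   /* GCC/Clang builtin */
    }

    /* lowbit32(): mirrors the removed xfs_lowbit32() helper. */
    static int lowbit32(uint32_t v)
    {
            return v ? __builtin_ffs(v) - 1 : -1;   /* -1 if no bit is set */
    }
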
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index b0f31c09a76d..3a82576dde9a 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -314,7 +314,7 @@ xfs_bioerror_relse(
314 * ASYNC buffers. 314 * ASYNC buffers.
315 */ 315 */
316 XFS_BUF_ERROR(bp, EIO); 316 XFS_BUF_ERROR(bp, EIO);
317 XFS_BUF_V_IODONESEMA(bp); 317 XFS_BUF_FINISH_IOWAIT(bp);
318 } else { 318 } else {
319 xfs_buf_relse(bp); 319 xfs_buf_relse(bp);
320 } 320 }
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e4ebddd3c500..4e1c22a23be5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -43,6 +43,7 @@
43#include "xfs_quota.h" 43#include "xfs_quota.h"
44#include "xfs_trans_priv.h" 44#include "xfs_trans_priv.h"
45#include "xfs_trans_space.h" 45#include "xfs_trans_space.h"
46#include "xfs_inode_item.h"
46 47
47 48
48STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); 49STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *);
@@ -253,7 +254,7 @@ _xfs_trans_alloc(
253 tp->t_mountp = mp; 254 tp->t_mountp = mp;
254 tp->t_items_free = XFS_LIC_NUM_SLOTS; 255 tp->t_items_free = XFS_LIC_NUM_SLOTS;
255 tp->t_busy_free = XFS_LBC_NUM_SLOTS; 256 tp->t_busy_free = XFS_LBC_NUM_SLOTS;
256 XFS_LIC_INIT(&(tp->t_items)); 257 xfs_lic_init(&(tp->t_items));
257 XFS_LBC_INIT(&(tp->t_busy)); 258 XFS_LBC_INIT(&(tp->t_busy));
258 return tp; 259 return tp;
259} 260}
@@ -282,7 +283,7 @@ xfs_trans_dup(
282 ntp->t_mountp = tp->t_mountp; 283 ntp->t_mountp = tp->t_mountp;
283 ntp->t_items_free = XFS_LIC_NUM_SLOTS; 284 ntp->t_items_free = XFS_LIC_NUM_SLOTS;
284 ntp->t_busy_free = XFS_LBC_NUM_SLOTS; 285 ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
285 XFS_LIC_INIT(&(ntp->t_items)); 286 xfs_lic_init(&(ntp->t_items));
286 XFS_LBC_INIT(&(ntp->t_busy)); 287 XFS_LBC_INIT(&(ntp->t_busy));
287 288
288 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 289 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1169,7 +1170,7 @@ xfs_trans_cancel(
1169 while (licp != NULL) { 1170 while (licp != NULL) {
1170 lidp = licp->lic_descs; 1171 lidp = licp->lic_descs;
1171 for (i = 0; i < licp->lic_unused; i++, lidp++) { 1172 for (i = 0; i < licp->lic_unused; i++, lidp++) {
1172 if (XFS_LIC_ISFREE(licp, i)) { 1173 if (xfs_lic_isfree(licp, i)) {
1173 continue; 1174 continue;
1174 } 1175 }
1175 1176
@@ -1216,6 +1217,68 @@ xfs_trans_free(
1216 kmem_zone_free(xfs_trans_zone, tp); 1217 kmem_zone_free(xfs_trans_zone, tp);
1217} 1218}
1218 1219
1220/*
1221 * Roll from one trans in the sequence of PERMANENT transactions to
1222 * the next: permanent transactions are only flushed out when
 1223 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
 1224 * chunks of it go to the log as soon as possible. So we commit the
1225 * chunk we've been working on and get a new transaction to continue.
1226 */
1227int
1228xfs_trans_roll(
1229 struct xfs_trans **tpp,
1230 struct xfs_inode *dp)
1231{
1232 struct xfs_trans *trans;
1233 unsigned int logres, count;
1234 int error;
1235
1236 /*
1237 * Ensure that the inode is always logged.
1238 */
1239 trans = *tpp;
1240 xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
1241
1242 /*
1243 * Copy the critical parameters from one trans to the next.
1244 */
1245 logres = trans->t_log_res;
1246 count = trans->t_log_count;
1247 *tpp = xfs_trans_dup(trans);
1248
1249 /*
1250 * Commit the current transaction.
 1251 * If this commit fails, it just unlocks those items that are
 1252 * not marked ihold, which means a filesystem shutdown is in
 1253 * progress. The caller takes the responsibility to cancel
1254 * the duplicate transaction that gets returned.
1255 */
1256 error = xfs_trans_commit(trans, 0);
1257 if (error)
 1258 return error;
1259
1260 trans = *tpp;
1261
1262 /*
 1263 * Reserve space in the log for the next transaction.
1264 * This also pushes items in the "AIL", the list of logged items,
1265 * out to disk if they are taking up space at the tail of the log
1266 * that we want to use. This requires that either nothing be locked
1267 * across this call, or that anything that is locked be logged in
1268 * the prior and the next transactions.
1269 */
1270 error = xfs_trans_reserve(trans, 0, logres, 0,
1271 XFS_TRANS_PERM_LOG_RES, count);
1272 /*
1273 * Ensure that the inode is in the new transaction and locked.
1274 */
1275 if (error)
1276 return error;
1277
1278 xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
1279 xfs_trans_ihold(trans, dp);
1280 return 0;
1281}
1219 1282
1220/* 1283/*
1221 * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). 1284 * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
@@ -1253,7 +1316,7 @@ xfs_trans_committed(
1253 * Special case the chunk embedded in the transaction. 1316 * Special case the chunk embedded in the transaction.
1254 */ 1317 */
1255 licp = &(tp->t_items); 1318 licp = &(tp->t_items);
1256 if (!(XFS_LIC_ARE_ALL_FREE(licp))) { 1319 if (!(xfs_lic_are_all_free(licp))) {
1257 xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); 1320 xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
1258 } 1321 }
1259 1322
@@ -1262,7 +1325,7 @@ xfs_trans_committed(
1262 */ 1325 */
1263 licp = licp->lic_next; 1326 licp = licp->lic_next;
1264 while (licp != NULL) { 1327 while (licp != NULL) {
1265 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 1328 ASSERT(!xfs_lic_are_all_free(licp));
1266 xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); 1329 xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
1267 next_licp = licp->lic_next; 1330 next_licp = licp->lic_next;
1268 kmem_free(licp); 1331 kmem_free(licp);
@@ -1325,7 +1388,7 @@ xfs_trans_chunk_committed(
1325 1388
1326 lidp = licp->lic_descs; 1389 lidp = licp->lic_descs;
1327 for (i = 0; i < licp->lic_unused; i++, lidp++) { 1390 for (i = 0; i < licp->lic_unused; i++, lidp++) {
1328 if (XFS_LIC_ISFREE(licp, i)) { 1391 if (xfs_lic_isfree(licp, i)) {
1329 continue; 1392 continue;
1330 } 1393 }
1331 1394
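
xfs_trans_roll(), added above, packages a common pattern: commit the current chunk of a permanent-reservation transaction, duplicate it, re-reserve log space, and rejoin the inode. A hedged sketch of how a caller might drive it (more_work() and do_one_chunk() are invented stand-ins for the caller's own loop, not XFS functions):

    /* Stand-ins for the caller's own logic; not XFS functions. */
    extern int more_work(struct xfs_inode *ip);
    extern int do_one_chunk(struct xfs_trans *tp, struct xfs_inode *ip);

    int do_long_update(struct xfs_trans **tpp, struct xfs_inode *ip)
    {
            int error;

            while (more_work(ip)) {
                    error = do_one_chunk(*tpp, ip);
                    if (error)
                            return error;   /* caller cancels *tpp */
                    /*
                     * Commit this chunk; on success *tpp is a fresh,
                     * re-reserved transaction with ip joined and held again.
                     */
                    error = xfs_trans_roll(tpp, ip);
                    if (error)
                            return error;
            }
            return 0;
    }
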
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 0804207c7391..74c80bd2b0ec 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -210,62 +210,52 @@ typedef struct xfs_log_item_chunk {
210 * lic_unused to the right value (0 matches all free). The 210 * lic_unused to the right value (0 matches all free). The
211 * lic_descs.lid_index values are set up as each desc is allocated. 211 * lic_descs.lid_index values are set up as each desc is allocated.
212 */ 212 */
213#define XFS_LIC_INIT(cp) xfs_lic_init(cp)
214static inline void xfs_lic_init(xfs_log_item_chunk_t *cp) 213static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
215{ 214{
216 cp->lic_free = XFS_LIC_FREEMASK; 215 cp->lic_free = XFS_LIC_FREEMASK;
217} 216}
218 217
219#define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot)
220static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot) 218static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
221{ 219{
222 cp->lic_descs[slot].lid_index = (unsigned char)(slot); 220 cp->lic_descs[slot].lid_index = (unsigned char)(slot);
223} 221}
224 222
225#define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp)
226static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp) 223static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
227{ 224{
228 return cp->lic_free & XFS_LIC_FREEMASK; 225 return cp->lic_free & XFS_LIC_FREEMASK;
229} 226}
230 227
231#define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp)
232static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp) 228static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
233{ 229{
234 cp->lic_free = XFS_LIC_FREEMASK; 230 cp->lic_free = XFS_LIC_FREEMASK;
235} 231}
236 232
237#define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp)
238static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp) 233static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
239{ 234{
240 return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK); 235 return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
241} 236}
242 237
243#define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot)
244static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot) 238static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
245{ 239{
246 return (cp->lic_free & (1 << slot)); 240 return (cp->lic_free & (1 << slot));
247} 241}
248 242
249#define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot)
250static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot) 243static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
251{ 244{
252 cp->lic_free &= ~(1 << slot); 245 cp->lic_free &= ~(1 << slot);
253} 246}
254 247
255#define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot)
256static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot) 248static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
257{ 249{
258 cp->lic_free |= 1 << slot; 250 cp->lic_free |= 1 << slot;
259} 251}
260 252
261#define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot)
262static inline xfs_log_item_desc_t * 253static inline xfs_log_item_desc_t *
263xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot) 254xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
264{ 255{
265 return &(cp->lic_descs[slot]); 256 return &(cp->lic_descs[slot]);
266} 257}
267 258
268#define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp)
269static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) 259static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
270{ 260{
271 return (uint)dp->lid_index; 261 return (uint)dp->lid_index;
@@ -278,7 +268,6 @@ static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
278 * All of this yields the address of the chunk, which is 268 * All of this yields the address of the chunk, which is
279 * cast to a chunk pointer. 269 * cast to a chunk pointer.
280 */ 270 */
281#define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp)
282static inline xfs_log_item_chunk_t * 271static inline xfs_log_item_chunk_t *
283xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) 272xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
284{ 273{
@@ -986,6 +975,7 @@ int _xfs_trans_commit(xfs_trans_t *,
986 int *); 975 int *);
987#define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) 976#define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL)
988void xfs_trans_cancel(xfs_trans_t *, int); 977void xfs_trans_cancel(xfs_trans_t *, int);
978int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
989int xfs_trans_ail_init(struct xfs_mount *); 979int xfs_trans_ail_init(struct xfs_mount *);
990void xfs_trans_ail_destroy(struct xfs_mount *); 980void xfs_trans_ail_destroy(struct xfs_mount *);
991void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); 981void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
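
The macro-to-inline conversion above preserves the chunk free-mask convention: a set bit in lic_free means the corresponding slot is free, so claiming a slot clears its bit and releasing sets it again. A self-contained demonstration of that invariant (FREEMASK is an illustrative stand-in for XFS_LIC_FREEMASK):

    #include <assert.h>

    #define FREEMASK 0x7fff     /* stand-in for XFS_LIC_FREEMASK */

    struct chunk { unsigned int free; };

    int main(void)
    {
            struct chunk c = { .free = FREEMASK };   /* xfs_lic_init() */

            assert((c.free & FREEMASK) == FREEMASK); /* all slots free */
            c.free &= ~(1 << 3);                     /* xfs_lic_claim(&c, 3) */
            assert(!(c.free & (1 << 3)));            /* slot 3 no longer free */
            c.free |= 1 << 3;                        /* xfs_lic_relse(&c, 3) */
            assert((c.free & FREEMASK) == FREEMASK); /* all free again */
            return 0;
    }
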
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index cb0c5839154b..4e855b5ced66 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -1021,16 +1021,16 @@ xfs_trans_buf_item_match(
1021 bp = NULL; 1021 bp = NULL;
1022 len = BBTOB(len); 1022 len = BBTOB(len);
1023 licp = &tp->t_items; 1023 licp = &tp->t_items;
1024 if (!XFS_LIC_ARE_ALL_FREE(licp)) { 1024 if (!xfs_lic_are_all_free(licp)) {
1025 for (i = 0; i < licp->lic_unused; i++) { 1025 for (i = 0; i < licp->lic_unused; i++) {
1026 /* 1026 /*
1027 * Skip unoccupied slots. 1027 * Skip unoccupied slots.
1028 */ 1028 */
1029 if (XFS_LIC_ISFREE(licp, i)) { 1029 if (xfs_lic_isfree(licp, i)) {
1030 continue; 1030 continue;
1031 } 1031 }
1032 1032
1033 lidp = XFS_LIC_SLOT(licp, i); 1033 lidp = xfs_lic_slot(licp, i);
1034 blip = (xfs_buf_log_item_t *)lidp->lid_item; 1034 blip = (xfs_buf_log_item_t *)lidp->lid_item;
1035 if (blip->bli_item.li_type != XFS_LI_BUF) { 1035 if (blip->bli_item.li_type != XFS_LI_BUF) {
1036 continue; 1036 continue;
@@ -1074,7 +1074,7 @@ xfs_trans_buf_item_match_all(
1074 bp = NULL; 1074 bp = NULL;
1075 len = BBTOB(len); 1075 len = BBTOB(len);
1076 for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { 1076 for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
1077 if (XFS_LIC_ARE_ALL_FREE(licp)) { 1077 if (xfs_lic_are_all_free(licp)) {
1078 ASSERT(licp == &tp->t_items); 1078 ASSERT(licp == &tp->t_items);
1079 ASSERT(licp->lic_next == NULL); 1079 ASSERT(licp->lic_next == NULL);
1080 return NULL; 1080 return NULL;
@@ -1083,11 +1083,11 @@ xfs_trans_buf_item_match_all(
1083 /* 1083 /*
1084 * Skip unoccupied slots. 1084 * Skip unoccupied slots.
1085 */ 1085 */
1086 if (XFS_LIC_ISFREE(licp, i)) { 1086 if (xfs_lic_isfree(licp, i)) {
1087 continue; 1087 continue;
1088 } 1088 }
1089 1089
1090 lidp = XFS_LIC_SLOT(licp, i); 1090 lidp = xfs_lic_slot(licp, i);
1091 blip = (xfs_buf_log_item_t *)lidp->lid_item; 1091 blip = (xfs_buf_log_item_t *)lidp->lid_item;
1092 if (blip->bli_item.li_type != XFS_LI_BUF) { 1092 if (blip->bli_item.li_type != XFS_LI_BUF) {
1093 continue; 1093 continue;
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index db5c83595526..3c666e8317f8 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -53,11 +53,11 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
53 * Initialize the chunk, and then 53 * Initialize the chunk, and then
54 * claim the first slot in the newly allocated chunk. 54 * claim the first slot in the newly allocated chunk.
55 */ 55 */
56 XFS_LIC_INIT(licp); 56 xfs_lic_init(licp);
57 XFS_LIC_CLAIM(licp, 0); 57 xfs_lic_claim(licp, 0);
58 licp->lic_unused = 1; 58 licp->lic_unused = 1;
59 XFS_LIC_INIT_SLOT(licp, 0); 59 xfs_lic_init_slot(licp, 0);
60 lidp = XFS_LIC_SLOT(licp, 0); 60 lidp = xfs_lic_slot(licp, 0);
61 61
62 /* 62 /*
63 * Link in the new chunk and update the free count. 63 * Link in the new chunk and update the free count.
@@ -88,14 +88,14 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
88 */ 88 */
89 licp = &tp->t_items; 89 licp = &tp->t_items;
90 while (licp != NULL) { 90 while (licp != NULL) {
91 if (XFS_LIC_VACANCY(licp)) { 91 if (xfs_lic_vacancy(licp)) {
92 if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { 92 if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
93 i = licp->lic_unused; 93 i = licp->lic_unused;
94 ASSERT(XFS_LIC_ISFREE(licp, i)); 94 ASSERT(xfs_lic_isfree(licp, i));
95 break; 95 break;
96 } 96 }
97 for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { 97 for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
98 if (XFS_LIC_ISFREE(licp, i)) 98 if (xfs_lic_isfree(licp, i))
99 break; 99 break;
100 } 100 }
101 ASSERT(i <= XFS_LIC_MAX_SLOT); 101 ASSERT(i <= XFS_LIC_MAX_SLOT);
@@ -108,12 +108,12 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
108 * If we find a free descriptor, claim it, 108 * If we find a free descriptor, claim it,
109 * initialize it, and return it. 109 * initialize it, and return it.
110 */ 110 */
111 XFS_LIC_CLAIM(licp, i); 111 xfs_lic_claim(licp, i);
112 if (licp->lic_unused <= i) { 112 if (licp->lic_unused <= i) {
113 licp->lic_unused = i + 1; 113 licp->lic_unused = i + 1;
114 XFS_LIC_INIT_SLOT(licp, i); 114 xfs_lic_init_slot(licp, i);
115 } 115 }
116 lidp = XFS_LIC_SLOT(licp, i); 116 lidp = xfs_lic_slot(licp, i);
117 tp->t_items_free--; 117 tp->t_items_free--;
118 lidp->lid_item = lip; 118 lidp->lid_item = lip;
119 lidp->lid_flags = 0; 119 lidp->lid_flags = 0;
@@ -136,9 +136,9 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
136 xfs_log_item_chunk_t *licp; 136 xfs_log_item_chunk_t *licp;
137 xfs_log_item_chunk_t **licpp; 137 xfs_log_item_chunk_t **licpp;
138 138
139 slot = XFS_LIC_DESC_TO_SLOT(lidp); 139 slot = xfs_lic_desc_to_slot(lidp);
140 licp = XFS_LIC_DESC_TO_CHUNK(lidp); 140 licp = xfs_lic_desc_to_chunk(lidp);
141 XFS_LIC_RELSE(licp, slot); 141 xfs_lic_relse(licp, slot);
142 lidp->lid_item->li_desc = NULL; 142 lidp->lid_item->li_desc = NULL;
143 tp->t_items_free++; 143 tp->t_items_free++;
144 144
@@ -154,7 +154,7 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
154 * Also decrement the transaction structure's count of free items 154 * Also decrement the transaction structure's count of free items
155 * by the number in a chunk since we are freeing an empty chunk. 155 * by the number in a chunk since we are freeing an empty chunk.
156 */ 156 */
157 if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) { 157 if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
158 licpp = &(tp->t_items.lic_next); 158 licpp = &(tp->t_items.lic_next);
159 while (*licpp != licp) { 159 while (*licpp != licp) {
160 ASSERT(*licpp != NULL); 160 ASSERT(*licpp != NULL);
@@ -207,20 +207,20 @@ xfs_trans_first_item(xfs_trans_t *tp)
207 /* 207 /*
208 * If it's not in the first chunk, skip to the second. 208 * If it's not in the first chunk, skip to the second.
209 */ 209 */
210 if (XFS_LIC_ARE_ALL_FREE(licp)) { 210 if (xfs_lic_are_all_free(licp)) {
211 licp = licp->lic_next; 211 licp = licp->lic_next;
212 } 212 }
213 213
214 /* 214 /*
215 * Return the first non-free descriptor in the chunk. 215 * Return the first non-free descriptor in the chunk.
216 */ 216 */
217 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 217 ASSERT(!xfs_lic_are_all_free(licp));
218 for (i = 0; i < licp->lic_unused; i++) { 218 for (i = 0; i < licp->lic_unused; i++) {
219 if (XFS_LIC_ISFREE(licp, i)) { 219 if (xfs_lic_isfree(licp, i)) {
220 continue; 220 continue;
221 } 221 }
222 222
223 return XFS_LIC_SLOT(licp, i); 223 return xfs_lic_slot(licp, i);
224 } 224 }
225 cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); 225 cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
226 return NULL; 226 return NULL;
@@ -242,18 +242,18 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
242 xfs_log_item_chunk_t *licp; 242 xfs_log_item_chunk_t *licp;
243 int i; 243 int i;
244 244
245 licp = XFS_LIC_DESC_TO_CHUNK(lidp); 245 licp = xfs_lic_desc_to_chunk(lidp);
246 246
247 /* 247 /*
248 * First search the rest of the chunk. The for loop keeps us 248 * First search the rest of the chunk. The for loop keeps us
249 * from referencing things beyond the end of the chunk. 249 * from referencing things beyond the end of the chunk.
250 */ 250 */
251 for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) { 251 for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
252 if (XFS_LIC_ISFREE(licp, i)) { 252 if (xfs_lic_isfree(licp, i)) {
253 continue; 253 continue;
254 } 254 }
255 255
256 return XFS_LIC_SLOT(licp, i); 256 return xfs_lic_slot(licp, i);
257 } 257 }
258 258
259 /* 259 /*
@@ -266,13 +266,13 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
266 } 266 }
267 267
268 licp = licp->lic_next; 268 licp = licp->lic_next;
269 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 269 ASSERT(!xfs_lic_are_all_free(licp));
270 for (i = 0; i < licp->lic_unused; i++) { 270 for (i = 0; i < licp->lic_unused; i++) {
271 if (XFS_LIC_ISFREE(licp, i)) { 271 if (xfs_lic_isfree(licp, i)) {
272 continue; 272 continue;
273 } 273 }
274 274
275 return XFS_LIC_SLOT(licp, i); 275 return xfs_lic_slot(licp, i);
276 } 276 }
277 ASSERT(0); 277 ASSERT(0);
278 /* NOTREACHED */ 278 /* NOTREACHED */
@@ -300,9 +300,9 @@ xfs_trans_free_items(
300 /* 300 /*
301 * Special case the embedded chunk so we don't free it below. 301 * Special case the embedded chunk so we don't free it below.
302 */ 302 */
303 if (!XFS_LIC_ARE_ALL_FREE(licp)) { 303 if (!xfs_lic_are_all_free(licp)) {
304 (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); 304 (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
305 XFS_LIC_ALL_FREE(licp); 305 xfs_lic_all_free(licp);
306 licp->lic_unused = 0; 306 licp->lic_unused = 0;
307 } 307 }
308 licp = licp->lic_next; 308 licp = licp->lic_next;
@@ -311,7 +311,7 @@ xfs_trans_free_items(
311 * Unlock each item in each chunk and free the chunks. 311 * Unlock each item in each chunk and free the chunks.
312 */ 312 */
313 while (licp != NULL) { 313 while (licp != NULL) {
314 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 314 ASSERT(!xfs_lic_are_all_free(licp));
315 (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); 315 (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
316 next_licp = licp->lic_next; 316 next_licp = licp->lic_next;
317 kmem_free(licp); 317 kmem_free(licp);
@@ -347,7 +347,7 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
347 /* 347 /*
348 * Special case the embedded chunk so we don't free. 348 * Special case the embedded chunk so we don't free.
349 */ 349 */
350 if (!XFS_LIC_ARE_ALL_FREE(licp)) { 350 if (!xfs_lic_are_all_free(licp)) {
351 freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); 351 freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
352 } 352 }
353 licpp = &(tp->t_items.lic_next); 353 licpp = &(tp->t_items.lic_next);
@@ -358,10 +358,10 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
358 * and free empty chunks. 358 * and free empty chunks.
359 */ 359 */
360 while (licp != NULL) { 360 while (licp != NULL) {
361 ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); 361 ASSERT(!xfs_lic_are_all_free(licp));
362 freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); 362 freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
363 next_licp = licp->lic_next; 363 next_licp = licp->lic_next;
364 if (XFS_LIC_ARE_ALL_FREE(licp)) { 364 if (xfs_lic_are_all_free(licp)) {
365 *licpp = next_licp; 365 *licpp = next_licp;
366 kmem_free(licp); 366 kmem_free(licp);
367 freed -= XFS_LIC_NUM_SLOTS; 367 freed -= XFS_LIC_NUM_SLOTS;
@@ -402,7 +402,7 @@ xfs_trans_unlock_chunk(
402 freed = 0; 402 freed = 0;
403 lidp = licp->lic_descs; 403 lidp = licp->lic_descs;
404 for (i = 0; i < licp->lic_unused; i++, lidp++) { 404 for (i = 0; i < licp->lic_unused; i++, lidp++) {
405 if (XFS_LIC_ISFREE(licp, i)) { 405 if (xfs_lic_isfree(licp, i)) {
406 continue; 406 continue;
407 } 407 }
408 lip = lidp->lid_item; 408 lip = lidp->lid_item;
@@ -421,7 +421,7 @@ xfs_trans_unlock_chunk(
421 */ 421 */
422 if (!(freeing_chunk) && 422 if (!(freeing_chunk) &&
423 (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { 423 (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
424 XFS_LIC_RELSE(licp, i); 424 xfs_lic_relse(licp, i);
425 freed++; 425 freed++;
426 } 426 }
427 } 427 }
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 98e5f110ba5f..35d4d414bcc2 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -237,7 +237,7 @@ xfs_droplink(
237 237
238 ASSERT (ip->i_d.di_nlink > 0); 238 ASSERT (ip->i_d.di_nlink > 0);
239 ip->i_d.di_nlink--; 239 ip->i_d.di_nlink--;
240 drop_nlink(ip->i_vnode); 240 drop_nlink(VFS_I(ip));
241 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 241 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
242 242
243 error = 0; 243 error = 0;
@@ -301,7 +301,7 @@ xfs_bumplink(
301 301
302 ASSERT(ip->i_d.di_nlink > 0); 302 ASSERT(ip->i_d.di_nlink > 0);
303 ip->i_d.di_nlink++; 303 ip->i_d.di_nlink++;
304 inc_nlink(ip->i_vnode); 304 inc_nlink(VFS_I(ip));
305 if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && 305 if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) &&
306 (ip->i_d.di_nlink > XFS_MAXLINK_1)) { 306 (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
307 /* 307 /*
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f316cb85d8e2..ef321225d269 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,9 +18,6 @@
18#ifndef __XFS_UTILS_H__ 18#ifndef __XFS_UTILS_H__
19#define __XFS_UTILS_H__ 19#define __XFS_UTILS_H__
20 20
21#define IRELE(ip) VN_RELE(XFS_ITOV(ip))
22#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip))
23
24extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); 21extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
25extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, 22extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
26 xfs_dev_t, cred_t *, prid_t, int, 23 xfs_dev_t, cred_t *, prid_t, int,
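
IRELE()/IHOLD() leave this header because, with the XFS_ITOV()-to-VFS_I() conversion seen throughout this patch, they can be expressed directly against the Linux inode (their replacement is outside these hunks). At this point in the series the VFS inode is still reached through a pointer in the XFS inode, so VFS_I() is assumed to look roughly like the following; this is a sketch inferred from the patch (see xfs_reclaim() below clearing ip->i_vnode), not the literal definition:

    static inline struct inode *VFS_I(struct xfs_inode *ip)
    {
            return ip->i_vnode;     /* later kernels embed the inode instead */
    }
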
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 4a9a43315a86..439dd3939dda 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -128,7 +128,6 @@ xfs_unmount_flush(
128 xfs_inode_t *rip = mp->m_rootip; 128 xfs_inode_t *rip = mp->m_rootip;
129 xfs_inode_t *rbmip; 129 xfs_inode_t *rbmip;
130 xfs_inode_t *rsumip = NULL; 130 xfs_inode_t *rsumip = NULL;
131 bhv_vnode_t *rvp = XFS_ITOV(rip);
132 int error; 131 int error;
133 132
134 xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); 133 xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
@@ -146,7 +145,7 @@ xfs_unmount_flush(
146 if (error == EFSCORRUPTED) 145 if (error == EFSCORRUPTED)
147 goto fscorrupt_out; 146 goto fscorrupt_out;
148 147
149 ASSERT(vn_count(XFS_ITOV(rbmip)) == 1); 148 ASSERT(vn_count(VFS_I(rbmip)) == 1);
150 149
151 rsumip = mp->m_rsumip; 150 rsumip = mp->m_rsumip;
152 xfs_ilock(rsumip, XFS_ILOCK_EXCL); 151 xfs_ilock(rsumip, XFS_ILOCK_EXCL);
@@ -157,7 +156,7 @@ xfs_unmount_flush(
157 if (error == EFSCORRUPTED) 156 if (error == EFSCORRUPTED)
158 goto fscorrupt_out; 157 goto fscorrupt_out;
159 158
160 ASSERT(vn_count(XFS_ITOV(rsumip)) == 1); 159 ASSERT(vn_count(VFS_I(rsumip)) == 1);
161 } 160 }
162 161
163 /* 162 /*
@@ -167,7 +166,7 @@ xfs_unmount_flush(
167 if (error == EFSCORRUPTED) 166 if (error == EFSCORRUPTED)
168 goto fscorrupt_out2; 167 goto fscorrupt_out2;
169 168
170 if (vn_count(rvp) != 1 && !relocation) { 169 if (vn_count(VFS_I(rip)) != 1 && !relocation) {
171 xfs_iunlock(rip, XFS_ILOCK_EXCL); 170 xfs_iunlock(rip, XFS_ILOCK_EXCL);
172 return XFS_ERROR(EBUSY); 171 return XFS_ERROR(EBUSY);
173 } 172 }
@@ -284,7 +283,7 @@ xfs_sync_inodes(
284 int *bypassed) 283 int *bypassed)
285{ 284{
286 xfs_inode_t *ip = NULL; 285 xfs_inode_t *ip = NULL;
287 bhv_vnode_t *vp = NULL; 286 struct inode *vp = NULL;
288 int error; 287 int error;
289 int last_error; 288 int last_error;
290 uint64_t fflag; 289 uint64_t fflag;
@@ -404,7 +403,7 @@ xfs_sync_inodes(
404 continue; 403 continue;
405 } 404 }
406 405
407 vp = XFS_ITOV_NULL(ip); 406 vp = VFS_I(ip);
408 407
409 /* 408 /*
410 * If the vnode is gone then this is being torn down, 409 * If the vnode is gone then this is being torn down,
@@ -479,7 +478,7 @@ xfs_sync_inodes(
479 IPOINTER_INSERT(ip, mp); 478 IPOINTER_INSERT(ip, mp);
480 xfs_ilock(ip, lock_flags); 479 xfs_ilock(ip, lock_flags);
481 480
482 ASSERT(vp == XFS_ITOV(ip)); 481 ASSERT(vp == VFS_I(ip));
483 ASSERT(ip->i_mount == mp); 482 ASSERT(ip->i_mount == mp);
484 483
485 vnode_refed = B_TRUE; 484 vnode_refed = B_TRUE;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 76a1166af822..aa238c8fbd7a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -83,7 +83,7 @@ xfs_setattr(
83 cred_t *credp) 83 cred_t *credp)
84{ 84{
85 xfs_mount_t *mp = ip->i_mount; 85 xfs_mount_t *mp = ip->i_mount;
86 struct inode *inode = XFS_ITOV(ip); 86 struct inode *inode = VFS_I(ip);
87 int mask = iattr->ia_valid; 87 int mask = iattr->ia_valid;
88 xfs_trans_t *tp; 88 xfs_trans_t *tp;
89 int code; 89 int code;
@@ -182,7 +182,7 @@ xfs_setattr(
182 xfs_ilock(ip, lock_flags); 182 xfs_ilock(ip, lock_flags);
183 183
184 /* boolean: are we the file owner? */ 184 /* boolean: are we the file owner? */
185 file_owner = (current_fsuid(credp) == ip->i_d.di_uid); 185 file_owner = (current_fsuid() == ip->i_d.di_uid);
186 186
187 /* 187 /*
188 * Change various properties of a file. 188 * Change various properties of a file.
@@ -513,7 +513,6 @@ xfs_setattr(
513 ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; 513 ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
514 ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; 514 ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
515 ip->i_update_core = 1; 515 ip->i_update_core = 1;
516 timeflags &= ~XFS_ICHGTIME_ACC;
517 } 516 }
518 if (mask & ATTR_MTIME) { 517 if (mask & ATTR_MTIME) {
519 inode->i_mtime = iattr->ia_mtime; 518 inode->i_mtime = iattr->ia_mtime;
@@ -714,7 +713,7 @@ xfs_fsync(
714 return XFS_ERROR(EIO); 713 return XFS_ERROR(EIO);
715 714
716 /* capture size updates in I/O completion before writing the inode. */ 715 /* capture size updates in I/O completion before writing the inode. */
717 error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); 716 error = filemap_fdatawait(VFS_I(ip)->i_mapping);
718 if (error) 717 if (error)
719 return XFS_ERROR(error); 718 return XFS_ERROR(error);
720 719
@@ -1160,7 +1159,6 @@ int
1160xfs_release( 1159xfs_release(
1161 xfs_inode_t *ip) 1160 xfs_inode_t *ip)
1162{ 1161{
1163 bhv_vnode_t *vp = XFS_ITOV(ip);
1164 xfs_mount_t *mp = ip->i_mount; 1162 xfs_mount_t *mp = ip->i_mount;
1165 int error; 1163 int error;
1166 1164
@@ -1195,13 +1193,13 @@ xfs_release(
1195 * be exposed to that problem. 1193 * be exposed to that problem.
1196 */ 1194 */
1197 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1195 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1198 if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0) 1196 if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
1199 xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); 1197 xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
1200 } 1198 }
1201 1199
1202 if (ip->i_d.di_nlink != 0) { 1200 if (ip->i_d.di_nlink != 0) {
1203 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1201 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1204 ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || 1202 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
1205 ip->i_delayed_blks > 0)) && 1203 ip->i_delayed_blks > 0)) &&
1206 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1204 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
1207 (!(ip->i_d.di_flags & 1205 (!(ip->i_d.di_flags &
@@ -1227,7 +1225,6 @@ int
1227xfs_inactive( 1225xfs_inactive(
1228 xfs_inode_t *ip) 1226 xfs_inode_t *ip)
1229{ 1227{
1230 bhv_vnode_t *vp = XFS_ITOV(ip);
1231 xfs_bmap_free_t free_list; 1228 xfs_bmap_free_t free_list;
1232 xfs_fsblock_t first_block; 1229 xfs_fsblock_t first_block;
1233 int committed; 1230 int committed;
@@ -1242,7 +1239,7 @@ xfs_inactive(
1242 * If the inode is already free, then there can be nothing 1239 * If the inode is already free, then there can be nothing
1243 * to clean up here. 1240 * to clean up here.
1244 */ 1241 */
1245 if (ip->i_d.di_mode == 0 || VN_BAD(vp)) { 1242 if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) {
1246 ASSERT(ip->i_df.if_real_bytes == 0); 1243 ASSERT(ip->i_df.if_real_bytes == 0);
1247 ASSERT(ip->i_df.if_broot_bytes == 0); 1244 ASSERT(ip->i_df.if_broot_bytes == 0);
1248 return VN_INACTIVE_CACHE; 1245 return VN_INACTIVE_CACHE;
@@ -1272,7 +1269,7 @@ xfs_inactive(
1272 1269
1273 if (ip->i_d.di_nlink != 0) { 1270 if (ip->i_d.di_nlink != 0) {
1274 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1271 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1275 ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || 1272 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
1276 ip->i_delayed_blks > 0)) && 1273 ip->i_delayed_blks > 0)) &&
1277 (ip->i_df.if_flags & XFS_IFEXTENTS) && 1274 (ip->i_df.if_flags & XFS_IFEXTENTS) &&
1278 (!(ip->i_d.di_flags & 1275 (!(ip->i_d.di_flags &
@@ -1536,7 +1533,7 @@ xfs_create(
1536 * Make sure that we have allocated dquot(s) on disk. 1533 * Make sure that we have allocated dquot(s) on disk.
1537 */ 1534 */
1538 error = XFS_QM_DQVOPALLOC(mp, dp, 1535 error = XFS_QM_DQVOPALLOC(mp, dp,
1539 current_fsuid(credp), current_fsgid(credp), prid, 1536 current_fsuid(), current_fsgid(), prid,
1540 XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); 1537 XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
1541 if (error) 1538 if (error)
1542 goto std_return; 1539 goto std_return;
@@ -1708,111 +1705,6 @@ std_return:
1708} 1705}
1709 1706
1710#ifdef DEBUG 1707#ifdef DEBUG
1711/*
1712 * Some counters to see if (and how often) we are hitting some deadlock
1713 * prevention code paths.
1714 */
1715
1716int xfs_rm_locks;
1717int xfs_rm_lock_delays;
1718int xfs_rm_attempts;
1719#endif
1720
1721/*
1722 * The following routine will lock the inodes associated with the
1723 * directory and the named entry in the directory. The locks are
1724 * acquired in increasing inode number.
1725 *
1726 * If the entry is "..", then only the directory is locked. The
1727 * vnode ref count will still include that from the .. entry in
1728 * this case.
1729 *
1730 * There is a deadlock we need to worry about. If the locked directory is
1731 * in the AIL, it might be blocking up the log. The next inode we lock
1732 * could already be locked by another thread waiting for log space (e.g.
1733 * a permanent log reservation with a long running transaction (see
1734 * xfs_itruncate_finish)). To solve this, we must check if the directory
1735 * is in the AIL and use lock_nowait. If we can't lock, we need to
1736 * drop the inode lock on the directory and try again. xfs_iunlock will
1737 * potentially push the tail if we were holding up the log.
1738 */
1739STATIC int
1740xfs_lock_dir_and_entry(
1741 xfs_inode_t *dp,
1742 xfs_inode_t *ip) /* inode of entry 'name' */
1743{
1744 int attempts;
1745 xfs_ino_t e_inum;
1746 xfs_inode_t *ips[2];
1747 xfs_log_item_t *lp;
1748
1749#ifdef DEBUG
1750 xfs_rm_locks++;
1751#endif
1752 attempts = 0;
1753
1754again:
1755 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1756
1757 e_inum = ip->i_ino;
1758
1759 xfs_itrace_ref(ip);
1760
1761 /*
1762 * We want to lock in increasing inum. Since we've already
1763 * acquired the lock on the directory, we may need to release
1764 * it if the inum of the entry turns out to be less.
1765 */
1766 if (e_inum > dp->i_ino) {
1767 /*
1768 * We are already in the right order, so just
1769 * lock on the inode of the entry.
1770 * We need to use nowait if dp is in the AIL.
1771 */
1772
1773 lp = (xfs_log_item_t *)dp->i_itemp;
1774 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1775 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1776 attempts++;
1777#ifdef DEBUG
1778 xfs_rm_attempts++;
1779#endif
1780
1781 /*
1782 * Unlock dp and try again.
1783 * xfs_iunlock will try to push the tail
1784 * if the inode is in the AIL.
1785 */
1786
1787 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1788
1789 if ((attempts % 5) == 0) {
1790 delay(1); /* Don't just spin the CPU */
1791#ifdef DEBUG
1792 xfs_rm_lock_delays++;
1793#endif
1794 }
1795 goto again;
1796 }
1797 } else {
1798 xfs_ilock(ip, XFS_ILOCK_EXCL);
1799 }
1800 } else if (e_inum < dp->i_ino) {
1801 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1802
1803 ips[0] = ip;
1804 ips[1] = dp;
1805 xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
1806 }
1807 /* else e_inum == dp->i_ino */
1808 /* This can happen if we're asked to lock /x/..
1809 * the entry is "..", which is also the parent directory.
1810 */
1811
1812 return 0;
1813}
1814
1815#ifdef DEBUG
1816int xfs_locked_n; 1708int xfs_locked_n;
1817int xfs_small_retries; 1709int xfs_small_retries;
1818int xfs_middle_retries; 1710int xfs_middle_retries;
@@ -1946,6 +1838,45 @@ again:
1946#endif 1838#endif
1947} 1839}
1948 1840
1841void
1842xfs_lock_two_inodes(
1843 xfs_inode_t *ip0,
1844 xfs_inode_t *ip1,
1845 uint lock_mode)
1846{
1847 xfs_inode_t *temp;
1848 int attempts = 0;
1849 xfs_log_item_t *lp;
1850
1851 ASSERT(ip0->i_ino != ip1->i_ino);
1852
1853 if (ip0->i_ino > ip1->i_ino) {
1854 temp = ip0;
1855 ip0 = ip1;
1856 ip1 = temp;
1857 }
1858
1859 again:
1860 xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
1861
1862 /*
1863 * If the first lock we have locked is in the AIL, we must TRY to get
1864 * the second lock. If we can't get it, we must release the first one
1865 * and try again.
1866 */
1867 lp = (xfs_log_item_t *)ip0->i_itemp;
1868 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1869 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
1870 xfs_iunlock(ip0, lock_mode);
1871 if ((++attempts % 5) == 0)
1872 delay(1); /* Don't just spin the CPU */
1873 goto again;
1874 }
1875 } else {
1876 xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
1877 }
1878}
1879
1949int 1880int
1950xfs_remove( 1881xfs_remove(
1951 xfs_inode_t *dp, 1882 xfs_inode_t *dp,
@@ -2018,9 +1949,7 @@ xfs_remove(
2018 goto out_trans_cancel; 1949 goto out_trans_cancel;
2019 } 1950 }
2020 1951
2021 error = xfs_lock_dir_and_entry(dp, ip); 1952 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2022 if (error)
2023 goto out_trans_cancel;
2024 1953
2025 /* 1954 /*
2026 * At this point, we've gotten both the directory and the entry 1955 * At this point, we've gotten both the directory and the entry
@@ -2047,9 +1976,6 @@ xfs_remove(
2047 } 1976 }
2048 } 1977 }
2049 1978
2050 /*
2051 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
2052 */
2053 XFS_BMAP_INIT(&free_list, &first_block); 1979 XFS_BMAP_INIT(&free_list, &first_block);
2054 error = xfs_dir_removename(tp, dp, name, ip->i_ino, 1980 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2055 &first_block, &free_list, resblks); 1981 &first_block, &free_list, resblks);
@@ -2155,7 +2081,6 @@ xfs_link(
2155{ 2081{
2156 xfs_mount_t *mp = tdp->i_mount; 2082 xfs_mount_t *mp = tdp->i_mount;
2157 xfs_trans_t *tp; 2083 xfs_trans_t *tp;
2158 xfs_inode_t *ips[2];
2159 int error; 2084 int error;
2160 xfs_bmap_free_t free_list; 2085 xfs_bmap_free_t free_list;
2161 xfs_fsblock_t first_block; 2086 xfs_fsblock_t first_block;
@@ -2203,15 +2128,7 @@ xfs_link(
2203 goto error_return; 2128 goto error_return;
2204 } 2129 }
2205 2130
2206 if (sip->i_ino < tdp->i_ino) { 2131 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
2207 ips[0] = sip;
2208 ips[1] = tdp;
2209 } else {
2210 ips[0] = tdp;
2211 ips[1] = sip;
2212 }
2213
2214 xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
2215 2132
2216 /* 2133 /*
2217 * Increment vnode ref counts since xfs_trans_commit & 2134 * Increment vnode ref counts since xfs_trans_commit &
@@ -2352,7 +2269,7 @@ xfs_mkdir(
2352 * Make sure that we have allocated dquot(s) on disk. 2269 * Make sure that we have allocated dquot(s) on disk.
2353 */ 2270 */
2354 error = XFS_QM_DQVOPALLOC(mp, dp, 2271 error = XFS_QM_DQVOPALLOC(mp, dp,
2355 current_fsuid(credp), current_fsgid(credp), prid, 2272 current_fsuid(), current_fsgid(), prid,
2356 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 2273 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2357 if (error) 2274 if (error)
2358 goto std_return; 2275 goto std_return;
@@ -2578,7 +2495,7 @@ xfs_symlink(
2578 * Make sure that we have allocated dquot(s) on disk. 2495 * Make sure that we have allocated dquot(s) on disk.
2579 */ 2496 */
2580 error = XFS_QM_DQVOPALLOC(mp, dp, 2497 error = XFS_QM_DQVOPALLOC(mp, dp,
2581 current_fsuid(credp), current_fsgid(credp), prid, 2498 current_fsuid(), current_fsgid(), prid,
2582 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 2499 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2583 if (error) 2500 if (error)
2584 goto std_return; 2501 goto std_return;
@@ -2873,14 +2790,13 @@ int
2873xfs_reclaim( 2790xfs_reclaim(
2874 xfs_inode_t *ip) 2791 xfs_inode_t *ip)
2875{ 2792{
2876 bhv_vnode_t *vp = XFS_ITOV(ip);
2877 2793
2878 xfs_itrace_entry(ip); 2794 xfs_itrace_entry(ip);
2879 2795
2880 ASSERT(!VN_MAPPED(vp)); 2796 ASSERT(!VN_MAPPED(VFS_I(ip)));
2881 2797
2882 /* bad inode, get out here ASAP */ 2798 /* bad inode, get out here ASAP */
2883 if (VN_BAD(vp)) { 2799 if (VN_BAD(VFS_I(ip))) {
2884 xfs_ireclaim(ip); 2800 xfs_ireclaim(ip);
2885 return 0; 2801 return 0;
2886 } 2802 }
@@ -2917,7 +2833,7 @@ xfs_reclaim(
2917 XFS_MOUNT_ILOCK(mp); 2833 XFS_MOUNT_ILOCK(mp);
2918 spin_lock(&ip->i_flags_lock); 2834 spin_lock(&ip->i_flags_lock);
2919 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 2835 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
2920 vn_to_inode(vp)->i_private = NULL; 2836 VFS_I(ip)->i_private = NULL;
2921 ip->i_vnode = NULL; 2837 ip->i_vnode = NULL;
2922 spin_unlock(&ip->i_flags_lock); 2838 spin_unlock(&ip->i_flags_lock);
2923 list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); 2839 list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
@@ -2933,7 +2849,7 @@ xfs_finish_reclaim(
2933 int sync_mode) 2849 int sync_mode)
2934{ 2850{
2935 xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); 2851 xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
2936 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2852 struct inode *vp = VFS_I(ip);
2937 2853
2938 if (vp && VN_BAD(vp)) 2854 if (vp && VN_BAD(vp))
2939 goto reclaim; 2855 goto reclaim;
@@ -3321,7 +3237,6 @@ xfs_free_file_space(
3321 xfs_off_t len, 3237 xfs_off_t len,
3322 int attr_flags) 3238 int attr_flags)
3323{ 3239{
3324 bhv_vnode_t *vp;
3325 int committed; 3240 int committed;
3326 int done; 3241 int done;
3327 xfs_off_t end_dmi_offset; 3242 xfs_off_t end_dmi_offset;
@@ -3341,7 +3256,6 @@ xfs_free_file_space(
3341 xfs_trans_t *tp; 3256 xfs_trans_t *tp;
3342 int need_iolock = 1; 3257 int need_iolock = 1;
3343 3258
3344 vp = XFS_ITOV(ip);
3345 mp = ip->i_mount; 3259 mp = ip->i_mount;
3346 3260
3347 xfs_itrace_entry(ip); 3261 xfs_itrace_entry(ip);
@@ -3378,7 +3292,7 @@ xfs_free_file_space(
3378 rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 3292 rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
3379 ioffset = offset & ~(rounding - 1); 3293 ioffset = offset & ~(rounding - 1);
3380 3294
3381 if (VN_CACHED(vp) != 0) { 3295 if (VN_CACHED(VFS_I(ip)) != 0) {
3382 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); 3296 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
3383 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); 3297 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
3384 if (error) 3298 if (error)
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 22aa58ca1991..dcc812067394 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -31,9 +31,6 @@
31#define ALIAS_TABLE_ENTRY_SIZE 2 31#define ALIAS_TABLE_ENTRY_SIZE 2
32#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) 32#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
33 33
34/* helper macros */
35#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
36
37/* Length of the MMIO region for the AMD IOMMU */ 34/* Length of the MMIO region for the AMD IOMMU */
38#define MMIO_REGION_LENGTH 0x4000 35#define MMIO_REGION_LENGTH 0x4000
39 36
@@ -69,6 +66,9 @@
69#define MMIO_EVT_TAIL_OFFSET 0x2018 66#define MMIO_EVT_TAIL_OFFSET 0x2018
70#define MMIO_STATUS_OFFSET 0x2020 67#define MMIO_STATUS_OFFSET 0x2020
71 68
69/* MMIO status bits */
70#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
71
72/* feature control bits */ 72/* feature control bits */
73#define CONTROL_IOMMU_EN 0x00ULL 73#define CONTROL_IOMMU_EN 0x00ULL
74#define CONTROL_HT_TUN_EN 0x01ULL 74#define CONTROL_HT_TUN_EN 0x01ULL
@@ -89,6 +89,7 @@
89#define CMD_INV_IOMMU_PAGES 0x03 89#define CMD_INV_IOMMU_PAGES 0x03
90 90
91#define CMD_COMPL_WAIT_STORE_MASK 0x01 91#define CMD_COMPL_WAIT_STORE_MASK 0x01
92#define CMD_COMPL_WAIT_INT_MASK 0x02
92#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 93#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
93#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 94#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
94 95
@@ -99,6 +100,7 @@
99#define DEV_ENTRY_TRANSLATION 0x01 100#define DEV_ENTRY_TRANSLATION 0x01
100#define DEV_ENTRY_IR 0x3d 101#define DEV_ENTRY_IR 0x3d
101#define DEV_ENTRY_IW 0x3e 102#define DEV_ENTRY_IW 0x3e
103#define DEV_ENTRY_NO_PAGE_FAULT 0x62
102#define DEV_ENTRY_EX 0x67 104#define DEV_ENTRY_EX 0x67
103#define DEV_ENTRY_SYSMGT1 0x68 105#define DEV_ENTRY_SYSMGT1 0x68
104#define DEV_ENTRY_SYSMGT2 0x69 106#define DEV_ENTRY_SYSMGT2 0x69
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 0048fb77afc4..56d00e31aec0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
15#include <linux/regset.h> 15#include <linux/regset.h>
16#include <linux/hardirq.h>
16#include <asm/asm.h> 17#include <asm/asm.h>
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/sigcontext.h> 19#include <asm/sigcontext.h>
@@ -234,6 +235,37 @@ static inline void kernel_fpu_end(void)
234 preempt_enable(); 235 preempt_enable();
235} 236}
236 237
238/*
 239 * Some instructions, like VIA's padlock instructions, generate a spurious
 240 * DNA fault but don't modify SSE registers. These instructions also get
 241 * used from interrupt context. To keep such in-interrupt kernel FPU use
 242 * from interacting wrongly with other user/kernel FPU usage, they should
 243 * only be used bracketed by irq_ts_save()/irq_ts_restore().
244 */
245static inline int irq_ts_save(void)
246{
247 /*
248 * If we are in process context, we are ok to take a spurious DNA fault.
 249 * Otherwise, doing clts() in process context requires preemption to
 250 * be disabled or some heavy lifting like kernel_fpu_begin().
251 */
252 if (!in_interrupt())
253 return 0;
254
255 if (read_cr0() & X86_CR0_TS) {
256 clts();
257 return 1;
258 }
259
260 return 0;
261}
262
263static inline void irq_ts_restore(int TS_state)
264{
265 if (TS_state)
266 stts();
267}
268
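
A caller such as a padlock crypto driver is expected to bracket the
fault-prone instructions with the pair above. A hedged sketch, where
do_padlock_op() is a hypothetical stand-in for the real instruction
sequence:

	int ts_state;

	ts_state = irq_ts_save();
	do_padlock_op(data);            /* may raise a spurious DNA fault */
	irq_ts_restore(ts_state);
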
237#ifdef CONFIG_X86_64 269#ifdef CONFIG_X86_64
238 270
239static inline void save_init_fpu(struct task_struct *tsk) 271static inline void save_init_fpu(struct task_struct *tsk)
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d12498ec8a4e..ee48ef8fb2ea 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -101,6 +101,24 @@ static inline int crypto_ahash_digest(struct ahash_request *req)
101 return crt->digest(req); 101 return crt->digest(req);
102} 102}
103 103
104static inline int crypto_ahash_init(struct ahash_request *req)
105{
106 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
107 return crt->init(req);
108}
109
110static inline int crypto_ahash_update(struct ahash_request *req)
111{
112 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
113 return crt->update(req);
114}
115
116static inline int crypto_ahash_final(struct ahash_request *req)
117{
118 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
119 return crt->final(req);
120}
121
104static inline void ahash_request_set_tfm(struct ahash_request *req, 122static inline void ahash_request_set_tfm(struct ahash_request *req,
105 struct crypto_ahash *tfm) 123 struct crypto_ahash *tfm)
106{ 124{
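
The three new wrappers complete the usual init/update/final flow for async
hashes. A hedged usage sketch (hash_one_request() is illustrative, the
request is assumed to be prepared via ahash_request_set_crypt(), and real
callers must also handle -EINPROGRESS completions, elided here):

	static int hash_one_request(struct ahash_request *req)
	{
		int err;

		err = crypto_ahash_init(req);
		if (err)
			return err;
		err = crypto_ahash_update(req); /* consumes the prepared sg list */
		if (err)
			return err;
		return crypto_ahash_final(req); /* writes out the digest */
	}
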
diff --git a/include/linux/completion.h b/include/linux/completion.h
index d2961b66d53d..57faa60de9bd 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -55,4 +55,49 @@ extern void complete_all(struct completion *);
55 55
56#define INIT_COMPLETION(x) ((x).done = 0) 56#define INIT_COMPLETION(x) ((x).done = 0)
57 57
58
59/**
60 * try_wait_for_completion - try to decrement a completion without blocking
61 * @x: completion structure
62 *
63 * Returns: 0 if a decrement cannot be done without blocking
64 * 1 if a decrement succeeded.
65 *
66 * If a completion is being used as a counting completion,
67 * attempt to decrement the counter without blocking. This
68 * enables us to avoid waiting if the resource the completion
69 * is protecting is not available.
70 */
71static inline bool try_wait_for_completion(struct completion *x)
72{
73 int ret = 1;
74
75 spin_lock_irq(&x->wait.lock);
76 if (!x->done)
77 ret = 0;
78 else
79 x->done--;
80 spin_unlock_irq(&x->wait.lock);
81 return ret;
82}
83
84/**
85 * completion_done - Test to see if a completion has any waiters
86 * @x: completion structure
87 *
88 * Returns: 0 if there are waiters (wait_for_completion() in progress)
89 * 1 if there are no waiters.
90 *
91 */
92static inline bool completion_done(struct completion *x)
93{
94 int ret = 1;
95
96 spin_lock_irq(&x->wait.lock);
97 if (!x->done)
98 ret = 0;
99 spin_unlock_irq(&x->wait.lock);
100 return ret;
101}
102
58#endif 103#endif
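
As a usage sketch only (not part of this patch): a pool guarded by a
counting completion can now be polled without sleeping, falling back when
no unit is available (try_get_unit() is an illustrative name):

	static int try_get_unit(struct completion *pool)
	{
		if (try_wait_for_completion(pool))
			return 0;       /* decremented without blocking */
		return -EBUSY;          /* nothing available right now */
	}
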
diff --git a/include/linux/cred.h b/include/linux/cred.h
new file mode 100644
index 000000000000..b69222cc1fd2
--- /dev/null
+++ b/include/linux/cred.h
@@ -0,0 +1,50 @@
1/* Credentials management
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_CRED_H
13#define _LINUX_CRED_H
14
15#define get_current_user() (get_uid(current->user))
16
17#define task_uid(task) ((task)->uid)
18#define task_gid(task) ((task)->gid)
19#define task_euid(task) ((task)->euid)
20#define task_egid(task) ((task)->egid)
21
22#define current_uid() (current->uid)
23#define current_gid() (current->gid)
24#define current_euid() (current->euid)
25#define current_egid() (current->egid)
26#define current_suid() (current->suid)
27#define current_sgid() (current->sgid)
28#define current_fsuid() (current->fsuid)
29#define current_fsgid() (current->fsgid)
30#define current_cap() (current->cap_effective)
31
32#define current_uid_gid(_uid, _gid) \
33do { \
34 *(_uid) = current->uid; \
35 *(_gid) = current->gid; \
36} while(0)
37
38#define current_euid_egid(_uid, _gid) \
39do { \
40 *(_uid) = current->euid; \
41 *(_gid) = current->egid; \
42} while(0)
43
44#define current_fsuid_fsgid(_uid, _gid) \
45do { \
46 *(_uid) = current->fsuid; \
47 *(_gid) = current->fsgid; \
48} while(0)
49
50#endif /* _LINUX_CRED_H */
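
These wrappers are what the XFS hunks above switch to: the credentials are
read from current rather than from an explicitly passed cred. A hedged
sketch of a caller (fill_owner() is illustrative):

	#include <linux/cred.h>

	/* Record the calling task's filesystem credentials. */
	static void fill_owner(uid_t *uid, gid_t *gid)
	{
		current_fsuid_fsgid(uid, gid);
	}
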
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5850bfb968a8..cfb0d87b99fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
87#include <linux/task_io_accounting.h> 87#include <linux/task_io_accounting.h>
88#include <linux/kobject.h> 88#include <linux/kobject.h>
89#include <linux/latencytop.h> 89#include <linux/latencytop.h>
90#include <linux/cred.h>
90 91
91#include <asm/processor.h> 92#include <asm/processor.h>
92 93
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cfcc45b3bef0..358661c9990e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
901static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 901static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
902{ 902{
903 if (len > skb_headlen(skb) && 903 if (len > skb_headlen(skb) &&
904 !__pskb_pull_tail(skb, len-skb_headlen(skb))) 904 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
905 return NULL; 905 return NULL;
906 skb->len -= len; 906 skb->len -= len;
907 return skb->data += len; 907 return skb->data += len;
@@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
918 return 1; 918 return 1;
919 if (unlikely(len > skb->len)) 919 if (unlikely(len > skb->len))
920 return 0; 920 return 0;
921 return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; 921 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
922} 922}
923 923
924/** 924/**
@@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1321 unsigned int size = skb->len; 1321 unsigned int size = skb->len;
1322 if (likely(size >= len)) 1322 if (likely(size >= len))
1323 return 0; 1323 return 0;
1324 return skb_pad(skb, len-size); 1324 return skb_pad(skb, len - size);
1325} 1325}
1326 1326
1327static inline int skb_add_data(struct sk_buff *skb, 1327static inline int skb_add_data(struct sk_buff *skb,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5811c5da69f9..0924cd9c30f6 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -110,6 +110,8 @@ enum usb_interface_condition {
110 * @sysfs_files_created: sysfs attributes exist 110 * @sysfs_files_created: sysfs attributes exist
111 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup 111 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
112 * capability during autosuspend. 112 * capability during autosuspend.
113 * @needs_binding: flag set when the driver should be re-probed or unbound
114 * following a reset or suspend operation it doesn't support.
113 * @dev: driver model's view of this device 115 * @dev: driver model's view of this device
114 * @usb_dev: if an interface is bound to the USB major, this will point 116 * @usb_dev: if an interface is bound to the USB major, this will point
115 * to the sysfs representation for that device. 117 * to the sysfs representation for that device.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
new file mode 100644
index 000000000000..630962c04ca4
--- /dev/null
+++ b/include/linux/usb/musb.h
@@ -0,0 +1,98 @@
1/*
 2 * This is used for both the host and peripheral modes of the driver for
3 * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
4 *
5 * Board initialization should put one of these into dev->platform_data,
6 * probably on some platform_device named "musb_hdrc". It encapsulates
7 * key configuration differences between boards.
8 */
9
10/* The USB role is defined by the connector used on the board, so long as
11 * standards are being followed. (Developer boards sometimes won't.)
12 */
13enum musb_mode {
14 MUSB_UNDEFINED = 0,
15 MUSB_HOST, /* A or Mini-A connector */
16 MUSB_PERIPHERAL, /* B or Mini-B connector */
17 MUSB_OTG /* Mini-AB connector */
18};
19
20struct clk;
21
22struct musb_hdrc_eps_bits {
23 const char name[16];
24 u8 bits;
25};
26
27struct musb_hdrc_config {
28 /* MUSB configuration-specific details */
29 unsigned multipoint:1; /* multipoint device */
30 unsigned dyn_fifo:1; /* supports dynamic fifo sizing */
31 unsigned soft_con:1; /* soft connect required */
 32 unsigned utm_16:1; /* utm data width is 16 bits */
33 unsigned big_endian:1; /* true if CPU uses big-endian */
34 unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
35 unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
36 unsigned high_iso_tx:1; /* Tx ep required for HB iso */
 37 unsigned high_iso_rx:1; /* Rx ep required for HB iso */
38 unsigned dma:1; /* supports DMA */
39 unsigned vendor_req:1; /* vendor registers required */
40
41 u8 num_eps; /* number of endpoints _with_ ep0 */
42 u8 dma_channels; /* number of dma channels */
43 u8 dyn_fifo_size; /* dynamic size in bytes */
44 u8 vendor_ctrl; /* vendor control reg width */
 45 u8 vendor_stat; /* vendor status reg width */
46 u8 dma_req_chan; /* bitmask for required dma channels */
47 u8 ram_bits; /* ram address size */
48
49 struct musb_hdrc_eps_bits *eps_bits;
50};
51
52struct musb_hdrc_platform_data {
53 /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
54 u8 mode;
55
56 /* for clk_get() */
57 const char *clock;
58
59 /* (HOST or OTG) switch VBUS on/off */
60 int (*set_vbus)(struct device *dev, int is_on);
61
62 /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
63 u8 power;
64
65 /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */
66 u8 min_power;
67
68 /* (HOST or OTG) msec/2 after VBUS on till power good */
69 u8 potpgt;
70
71 /* Power the device on or off */
72 int (*set_power)(int state);
73
74 /* Turn device clock on or off */
75 int (*set_clock)(struct clk *clock, int is_on);
76
77 /* MUSB configuration-specific details */
78 struct musb_hdrc_config *config;
79};
80
81
82/* TUSB 6010 support */
83
84#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */
85#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */
86#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */
87
88#ifdef CONFIG_ARCH_OMAP2
89
90extern int __init tusb6010_setup_interface(
91 struct musb_hdrc_platform_data *data,
92 unsigned ps_refclk, unsigned waitpin,
93 unsigned async_cs, unsigned sync_cs,
94 unsigned irq, unsigned dmachan);
95
96extern int tusb6010_platform_retime(unsigned is_refclk);
97
98#endif /* OMAP2 */
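
Per the header comment, board code hands this structure over as platform
data on a device named "musb_hdrc". A hedged sketch of that wiring; every
value below is made up for illustration:

	#include <linux/platform_device.h>
	#include <linux/usb/musb.h>

	static struct musb_hdrc_config board_musb_config = {
		.multipoint     = 1,
		.dyn_fifo       = 1,
		.num_eps        = 16,   /* counts ep0, per the field comment */
		.ram_bits       = 12,
	};

	static struct musb_hdrc_platform_data board_musb_data = {
		.mode           = MUSB_OTG,
		.power          = 50,   /* 100 mA, in 2 mA units */
		.config         = &board_musb_config,
	};

	static struct platform_device board_musb_device = {
		.name   = "musb_hdrc",
		.id     = -1,
		.dev    = {
			.platform_data = &board_musb_data,
		},
	};
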
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 09a3e6a7518f..655341d0f534 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -17,7 +17,8 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18 18
19#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ 19#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
20#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ 20#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
21#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */
21 22
22/* The maximum number of ports one device can grab at once */ 23/* The maximum number of ports one device can grab at once */
23#define MAX_NUM_PORTS 8 24#define MAX_NUM_PORTS 8
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2f8b3c06a101..bc391ba101e9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -38,11 +38,6 @@ struct route_info {
38#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 38#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
39 39
40 40
41#ifdef CONFIG_IPV6_MULTIPLE_TABLES
42extern struct rt6_info *ip6_prohibit_entry;
43extern struct rt6_info *ip6_blk_hole_entry;
44#endif
45
46extern void ip6_route_input(struct sk_buff *skb); 41extern void ip6_route_input(struct sk_buff *skb);
47 42
48extern struct dst_entry * ip6_route_output(struct net *net, 43extern struct dst_entry * ip6_route_output(struct net *net,
@@ -118,7 +113,6 @@ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
118extern void rt6_ifdown(struct net *net, struct net_device *dev); 113extern void rt6_ifdown(struct net *net, struct net_device *dev);
119extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); 114extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
120 115
121extern rwlock_t rt6_lock;
122 116
123/* 117/*
124 * Store a destination cache entry in a socket 118 * Store a destination cache entry in a socket
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index cbb59ebed4ae..7312c3dd309f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -140,8 +140,24 @@ struct ip_vs_seq {
140 140
141 141
142/* 142/*
143 * IPVS statistics object 143 * IPVS statistics objects
144 */ 144 */
145struct ip_vs_estimator {
146 struct list_head list;
147
148 u64 last_inbytes;
149 u64 last_outbytes;
150 u32 last_conns;
151 u32 last_inpkts;
152 u32 last_outpkts;
153
154 u32 cps;
155 u32 inpps;
156 u32 outpps;
157 u32 inbps;
158 u32 outbps;
159};
160
145struct ip_vs_stats 161struct ip_vs_stats
146{ 162{
147 __u32 conns; /* connections scheduled */ 163 __u32 conns; /* connections scheduled */
@@ -156,7 +172,15 @@ struct ip_vs_stats
156 __u32 inbps; /* current in byte rate */ 172 __u32 inbps; /* current in byte rate */
157 __u32 outbps; /* current out byte rate */ 173 __u32 outbps; /* current out byte rate */
158 174
175 /*
176 * Don't add anything before the lock, because we use memcpy() to copy
177 * the members before the lock to struct ip_vs_stats_user in
178 * ip_vs_ctl.c.
179 */
180
159 spinlock_t lock; /* spin lock */ 181 spinlock_t lock; /* spin lock */
182
183 struct ip_vs_estimator est; /* estimator */
160}; 184};
161 185
162struct dst_entry; 186struct dst_entry;
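
The layout constraint in the comment above corresponds to a copy of this
shape in ip_vs_ctl.c (sketched here, not quoted from it): everything before
lock mirrors struct ip_vs_stats_user, so lock must stay the first non-copied
member.

	/* Sketch only, not the literal ip_vs_ctl.c code. */
	static void stats_to_user(struct ip_vs_stats_user *dst,
				  struct ip_vs_stats *src)
	{
		spin_lock_bh(&src->lock);
		memcpy(dst, src, (char *)&src->lock - (char *)src);
		spin_unlock_bh(&src->lock);
	}
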
@@ -440,7 +464,7 @@ struct ip_vs_app
440 */ 464 */
441extern const char *ip_vs_proto_name(unsigned proto); 465extern const char *ip_vs_proto_name(unsigned proto);
442extern void ip_vs_init_hash_table(struct list_head *table, int rows); 466extern void ip_vs_init_hash_table(struct list_head *table, int rows);
443#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0])) 467#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
444 468
445#define IP_VS_APP_TYPE_FTP 1 469#define IP_VS_APP_TYPE_FTP 1
446 470
@@ -620,7 +644,7 @@ extern int sysctl_ip_vs_expire_quiescent_template;
620extern int sysctl_ip_vs_sync_threshold[2]; 644extern int sysctl_ip_vs_sync_threshold[2];
621extern int sysctl_ip_vs_nat_icmp_send; 645extern int sysctl_ip_vs_nat_icmp_send;
622extern struct ip_vs_stats ip_vs_stats; 646extern struct ip_vs_stats ip_vs_stats;
623extern struct ctl_path net_vs_ctl_path[]; 647extern const struct ctl_path net_vs_ctl_path[];
624 648
625extern struct ip_vs_service * 649extern struct ip_vs_service *
626ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); 650ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
@@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
659/* 683/*
660 * IPVS rate estimator prototypes (from ip_vs_est.c) 684 * IPVS rate estimator prototypes (from ip_vs_est.c)
661 */ 685 */
662extern int ip_vs_new_estimator(struct ip_vs_stats *stats); 686extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
663extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); 687extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
664extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); 688extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
665 689
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6affcfaa123e..853fe83d9f37 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,10 @@ extern void __qdisc_run(struct Qdisc *q);
89 89
90static inline void qdisc_run(struct Qdisc *q) 90static inline void qdisc_run(struct Qdisc *q)
91{ 91{
92 if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) 92 struct netdev_queue *txq = q->dev_queue;
93
94 if (!netif_tx_queue_stopped(txq) &&
95 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
93 __qdisc_run(q); 96 __qdisc_run(q);
94} 97}
95 98
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe8266be1..a89f32fa94f6 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
99 99
100static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; 100static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
101 101
102/* Protects against NULL dereference */ 102/* Protects against NULL dereference and RCU write-side */
103static DEFINE_RWLOCK(est_lock); 103static DEFINE_RWLOCK(est_lock);
104 104
105static void est_timer(unsigned long arg) 105static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
185 est->last_packets = bstats->packets; 185 est->last_packets = bstats->packets;
186 est->avpps = rate_est->pps<<10; 186 est->avpps = rate_est->pps<<10;
187 187
188 write_lock_bh(&est_lock);
188 if (!elist[idx].timer.function) { 189 if (!elist[idx].timer.function) {
189 INIT_LIST_HEAD(&elist[idx].list); 190 INIT_LIST_HEAD(&elist[idx].list);
190 setup_timer(&elist[idx].timer, est_timer, idx); 191 setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
194 mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); 195 mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
195 196
196 list_add_rcu(&est->list, &elist[idx].list); 197 list_add_rcu(&est->list, &elist[idx].list);
198 write_unlock_bh(&est_lock);
197 return 0; 199 return 0;
198} 200}
199 201
@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
212 * Removes the rate estimator specified by &bstats and &rate_est 214 * Removes the rate estimator specified by &bstats and &rate_est
213 * and deletes the timer. 215 * and deletes the timer.
214 * 216 *
215 * NOTE: Called under rtnl_mutex
216 */ 217 */
217void gen_kill_estimator(struct gnet_stats_basic *bstats, 218void gen_kill_estimator(struct gnet_stats_basic *bstats,
218 struct gnet_stats_rate_est *rate_est) 219 struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
226 if (!elist[idx].timer.function) 227 if (!elist[idx].timer.function)
227 continue; 228 continue;
228 229
230 write_lock_bh(&est_lock);
229 list_for_each_entry_safe(e, n, &elist[idx].list, list) { 231 list_for_each_entry_safe(e, n, &elist[idx].list, list) {
230 if (e->rate_est != rate_est || e->bstats != bstats) 232 if (e->rate_est != rate_est || e->bstats != bstats)
231 continue; 233 continue;
232 234
233 write_lock_bh(&est_lock);
234 e->bstats = NULL; 235 e->bstats = NULL;
235 write_unlock_bh(&est_lock);
236 236
237 list_del_rcu(&e->list); 237 list_del_rcu(&e->list);
238 call_rcu(&e->e_rcu, __gen_kill_estimator); 238 call_rcu(&e->e_rcu, __gen_kill_estimator);
239 } 239 }
240 write_unlock_bh(&est_lock);
240 } 241 }
241} 242}
242 243
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 526236453908..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
1961 */ 1961 */
1962static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 1962static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1963{ 1963{
1964 int ntxq;
1965
1964 if (!pkt_dev->odev) { 1966 if (!pkt_dev->odev) {
1965 printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in " 1967 printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
1966 "setup_inject.\n"); 1968 "setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1969 return; 1971 return;
1970 } 1972 }
1971 1973
1974 /* make sure that we don't pick a non-existing transmit queue */
1975 ntxq = pkt_dev->odev->real_num_tx_queues;
1976 if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
1977 printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
1978 "disabled because CPU count (%d) exceeds number ",
1979 num_online_cpus());
1980 printk(KERN_WARNING "pktgen: WARNING: of tx queues "
1981 "(%d) on %s \n", ntxq, pkt_dev->odev->name);
1982 pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
1983 }
1984 if (ntxq <= pkt_dev->queue_map_min) {
1985 printk(KERN_WARNING "pktgen: WARNING: Requested "
1986 "queue_map_min (%d) exceeds number of tx\n",
1987 pkt_dev->queue_map_min);
1988 printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
1989 "%s, resetting\n", ntxq, pkt_dev->odev->name);
1990 pkt_dev->queue_map_min = ntxq - 1;
1991 }
1992 if (ntxq <= pkt_dev->queue_map_max) {
1993 printk(KERN_WARNING "pktgen: WARNING: Requested "
1994 "queue_map_max (%d) exceeds number of tx\n",
1995 pkt_dev->queue_map_max);
1996 printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
1997 "%s, resetting\n", ntxq, pkt_dev->odev->name);
1998 pkt_dev->queue_map_max = ntxq - 1;
1999 }
2000
1972 /* Default to the interface's mac if not explicitly set. */ 2001 /* Default to the interface's mac if not explicitly set. */
1973 2002
1974 if (is_zero_ether_addr(pkt_dev->src_mac)) 2003 if (is_zero_ether_addr(pkt_dev->src_mac))
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b622d9744856..1ca3b26eed0f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
474 474
475 if (copy_from_user(&opt, optval, sizeof(opt))) 475 if (copy_from_user(&opt, optval, sizeof(opt)))
476 return -EFAULT; 476 return -EFAULT;
477 /*
478 * rfc4340: 6.1. Change Options
479 */
480 if (opt.dccpsf_len < 1)
481 return -EINVAL;
477 482
478 val = kmalloc(opt.dccpsf_len, GFP_KERNEL); 483 val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
479 if (!val) 484 if (!val)
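
Generalized, the fix has this shape (a hedged sketch; dup_user_opt() is an
illustrative helper, not dccp code): validate a user-controlled length
before it reaches kmalloc().

	static void *dup_user_opt(const void __user *uptr, unsigned int len)
	{
		void *buf;

		if (len < 1)            /* rfc4340 6.1: at least one value */
			return ERR_PTR(-EINVAL);
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(buf, uptr, len)) {
			kfree(buf);
			return ERR_PTR(-EFAULT);
		}
		return buf;
	}
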
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece53606..f70fac612596 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
289 struct rtable *rt; 289 struct rtable *rt;
290 struct iphdr *pip; 290 struct iphdr *pip;
291 struct igmpv3_report *pig; 291 struct igmpv3_report *pig;
292 struct net *net = dev_net(dev);
292 293
293 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 294 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
294 if (skb == NULL) 295 if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
299 .nl_u = { .ip4_u = { 300 .nl_u = { .ip4_u = {
300 .daddr = IGMPV3_ALL_MCR } }, 301 .daddr = IGMPV3_ALL_MCR } },
301 .proto = IPPROTO_IGMP }; 302 .proto = IPPROTO_IGMP };
302 if (ip_route_output_key(&init_net, &rt, &fl)) { 303 if (ip_route_output_key(net, &rt, &fl)) {
303 kfree_skb(skb); 304 kfree_skb(skb);
304 return NULL; 305 return NULL;
305 } 306 }
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
629 struct igmphdr *ih; 630 struct igmphdr *ih;
630 struct rtable *rt; 631 struct rtable *rt;
631 struct net_device *dev = in_dev->dev; 632 struct net_device *dev = in_dev->dev;
633 struct net *net = dev_net(dev);
632 __be32 group = pmc ? pmc->multiaddr : 0; 634 __be32 group = pmc ? pmc->multiaddr : 0;
633 __be32 dst; 635 __be32 dst;
634 636
@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
643 struct flowi fl = { .oif = dev->ifindex, 645 struct flowi fl = { .oif = dev->ifindex,
644 .nl_u = { .ip4_u = { .daddr = dst } }, 646 .nl_u = { .ip4_u = { .daddr = dst } },
645 .proto = IPPROTO_IGMP }; 647 .proto = IPPROTO_IGMP };
646 if (ip_route_output_key(&init_net, &rt, &fl)) 648 if (ip_route_output_key(net, &rt, &fl))
647 return -1; 649 return -1;
648 } 650 }
649 if (rt->rt_src == 0) { 651 if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1196 1198
1197 ASSERT_RTNL(); 1199 ASSERT_RTNL();
1198 1200
1199 if (!net_eq(dev_net(in_dev->dev), &init_net))
1200 return;
1201
1202 for (im=in_dev->mc_list; im; im=im->next) { 1201 for (im=in_dev->mc_list; im; im=im->next) {
1203 if (im->multiaddr == addr) { 1202 if (im->multiaddr == addr) {
1204 im->users++; 1203 im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1278 1277
1279 ASSERT_RTNL(); 1278 ASSERT_RTNL();
1280 1279
1281 if (!net_eq(dev_net(in_dev->dev), &init_net))
1282 return;
1283
1284 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1280 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
1285 if (i->multiaddr==addr) { 1281 if (i->multiaddr==addr) {
1286 if (--i->users == 0) { 1282 if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
1308 1304
1309 ASSERT_RTNL(); 1305 ASSERT_RTNL();
1310 1306
1311 if (!net_eq(dev_net(in_dev->dev), &init_net))
1312 return;
1313
1314 for (i=in_dev->mc_list; i; i=i->next) 1307 for (i=in_dev->mc_list; i; i=i->next)
1315 igmp_group_dropped(i); 1308 igmp_group_dropped(i);
1316 1309
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
1331{ 1324{
1332 ASSERT_RTNL(); 1325 ASSERT_RTNL();
1333 1326
1334 if (!net_eq(dev_net(in_dev->dev), &init_net))
1335 return;
1336
1337 in_dev->mc_tomb = NULL; 1327 in_dev->mc_tomb = NULL;
1338#ifdef CONFIG_IP_MULTICAST 1328#ifdef CONFIG_IP_MULTICAST
1339 in_dev->mr_gq_running = 0; 1329 in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
1357 1347
1358 ASSERT_RTNL(); 1348 ASSERT_RTNL();
1359 1349
1360 if (!net_eq(dev_net(in_dev->dev), &init_net))
1361 return;
1362
1363 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1350 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1364 1351
1365 for (i=in_dev->mc_list; i; i=i->next) 1352 for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1376 1363
1377 ASSERT_RTNL(); 1364 ASSERT_RTNL();
1378 1365
1379 if (!net_eq(dev_net(in_dev->dev), &init_net))
1380 return;
1381
1382 /* Deactivate timers */ 1366 /* Deactivate timers */
1383 ip_mc_down(in_dev); 1367 ip_mc_down(in_dev);
1384 1368
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1395 write_unlock_bh(&in_dev->mc_list_lock); 1379 write_unlock_bh(&in_dev->mc_list_lock);
1396} 1380}
1397 1381
1398static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr) 1382static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1399{ 1383{
1400 struct flowi fl = { .nl_u = { .ip4_u = 1384 struct flowi fl = { .nl_u = { .ip4_u =
1401 { .daddr = imr->imr_multiaddr.s_addr } } }; 1385 { .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
1404 struct in_device *idev = NULL; 1388 struct in_device *idev = NULL;
1405 1389
1406 if (imr->imr_ifindex) { 1390 if (imr->imr_ifindex) {
1407 idev = inetdev_by_index(&init_net, imr->imr_ifindex); 1391 idev = inetdev_by_index(net, imr->imr_ifindex);
1408 if (idev) 1392 if (idev)
1409 __in_dev_put(idev); 1393 __in_dev_put(idev);
1410 return idev; 1394 return idev;
1411 } 1395 }
1412 if (imr->imr_address.s_addr) { 1396 if (imr->imr_address.s_addr) {
1413 dev = ip_dev_find(&init_net, imr->imr_address.s_addr); 1397 dev = ip_dev_find(net, imr->imr_address.s_addr);
1414 if (!dev) 1398 if (!dev)
1415 return NULL; 1399 return NULL;
1416 dev_put(dev); 1400 dev_put(dev);
1417 } 1401 }
1418 1402
1419 if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) { 1403 if (!dev && !ip_route_output_key(net, &rt, &fl)) {
1420 dev = rt->u.dst.dev; 1404 dev = rt->u.dst.dev;
1421 ip_rt_put(rt); 1405 ip_rt_put(rt);
1422 } 1406 }
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1754 struct ip_mc_socklist *iml=NULL, *i; 1738 struct ip_mc_socklist *iml=NULL, *i;
1755 struct in_device *in_dev; 1739 struct in_device *in_dev;
1756 struct inet_sock *inet = inet_sk(sk); 1740 struct inet_sock *inet = inet_sk(sk);
1741 struct net *net = sock_net(sk);
1757 int ifindex; 1742 int ifindex;
1758 int count = 0; 1743 int count = 0;
1759 1744
1760 if (!ipv4_is_multicast(addr)) 1745 if (!ipv4_is_multicast(addr))
1761 return -EINVAL; 1746 return -EINVAL;
1762 1747
1763 if (!net_eq(sock_net(sk), &init_net))
1764 return -EPROTONOSUPPORT;
1765
1766 rtnl_lock(); 1748 rtnl_lock();
1767 1749
1768 in_dev = ip_mc_find_dev(imr); 1750 in_dev = ip_mc_find_dev(net, imr);
1769 1751
1770 if (!in_dev) { 1752 if (!in_dev) {
1771 iml = NULL; 1753 iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1827 struct inet_sock *inet = inet_sk(sk); 1809 struct inet_sock *inet = inet_sk(sk);
1828 struct ip_mc_socklist *iml, **imlp; 1810 struct ip_mc_socklist *iml, **imlp;
1829 struct in_device *in_dev; 1811 struct in_device *in_dev;
1812 struct net *net = sock_net(sk);
1830 __be32 group = imr->imr_multiaddr.s_addr; 1813 __be32 group = imr->imr_multiaddr.s_addr;
1831 u32 ifindex; 1814 u32 ifindex;
1832 int ret = -EADDRNOTAVAIL; 1815 int ret = -EADDRNOTAVAIL;
1833 1816
1834 if (!net_eq(sock_net(sk), &init_net))
1835 return -EPROTONOSUPPORT;
1836
1837 rtnl_lock(); 1817 rtnl_lock();
1838 in_dev = ip_mc_find_dev(imr); 1818 in_dev = ip_mc_find_dev(net, imr);
1839 ifindex = imr->imr_ifindex; 1819 ifindex = imr->imr_ifindex;
1840 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1820 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
1841 if (iml->multi.imr_multiaddr.s_addr != group) 1821 if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1873 struct in_device *in_dev = NULL; 1853 struct in_device *in_dev = NULL;
1874 struct inet_sock *inet = inet_sk(sk); 1854 struct inet_sock *inet = inet_sk(sk);
1875 struct ip_sf_socklist *psl; 1855 struct ip_sf_socklist *psl;
1856 struct net *net = sock_net(sk);
1876 int leavegroup = 0; 1857 int leavegroup = 0;
1877 int i, j, rv; 1858 int i, j, rv;
1878 1859
1879 if (!ipv4_is_multicast(addr)) 1860 if (!ipv4_is_multicast(addr))
1880 return -EINVAL; 1861 return -EINVAL;
1881 1862
1882 if (!net_eq(sock_net(sk), &init_net))
1883 return -EPROTONOSUPPORT;
1884
1885 rtnl_lock(); 1863 rtnl_lock();
1886 1864
1887 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; 1865 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
1888 imr.imr_address.s_addr = mreqs->imr_interface; 1866 imr.imr_address.s_addr = mreqs->imr_interface;
1889 imr.imr_ifindex = ifindex; 1867 imr.imr_ifindex = ifindex;
1890 in_dev = ip_mc_find_dev(&imr); 1868 in_dev = ip_mc_find_dev(net, &imr);
1891 1869
1892 if (!in_dev) { 1870 if (!in_dev) {
1893 err = -ENODEV; 1871 err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2007 struct in_device *in_dev; 1985 struct in_device *in_dev;
2008 struct inet_sock *inet = inet_sk(sk); 1986 struct inet_sock *inet = inet_sk(sk);
2009 struct ip_sf_socklist *newpsl, *psl; 1987 struct ip_sf_socklist *newpsl, *psl;
1988 struct net *net = sock_net(sk);
2010 int leavegroup = 0; 1989 int leavegroup = 0;
2011 1990
2012 if (!ipv4_is_multicast(addr)) 1991 if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2015 msf->imsf_fmode != MCAST_EXCLUDE) 1994 msf->imsf_fmode != MCAST_EXCLUDE)
2016 return -EINVAL; 1995 return -EINVAL;
2017 1996
2018 if (!net_eq(sock_net(sk), &init_net))
2019 return -EPROTONOSUPPORT;
2020
2021 rtnl_lock(); 1997 rtnl_lock();
2022 1998
2023 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 1999 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
2024 imr.imr_address.s_addr = msf->imsf_interface; 2000 imr.imr_address.s_addr = msf->imsf_interface;
2025 imr.imr_ifindex = ifindex; 2001 imr.imr_ifindex = ifindex;
2026 in_dev = ip_mc_find_dev(&imr); 2002 in_dev = ip_mc_find_dev(net, &imr);
2027 2003
2028 if (!in_dev) { 2004 if (!in_dev) {
2029 err = -ENODEV; 2005 err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2094 struct in_device *in_dev; 2070 struct in_device *in_dev;
2095 struct inet_sock *inet = inet_sk(sk); 2071 struct inet_sock *inet = inet_sk(sk);
2096 struct ip_sf_socklist *psl; 2072 struct ip_sf_socklist *psl;
2073 struct net *net = sock_net(sk);
2097 2074
2098 if (!ipv4_is_multicast(addr)) 2075 if (!ipv4_is_multicast(addr))
2099 return -EINVAL; 2076 return -EINVAL;
2100 2077
2101 if (!net_eq(sock_net(sk), &init_net))
2102 return -EPROTONOSUPPORT;
2103
2104 rtnl_lock(); 2078 rtnl_lock();
2105 2079
2106 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2080 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
2107 imr.imr_address.s_addr = msf->imsf_interface; 2081 imr.imr_address.s_addr = msf->imsf_interface;
2108 imr.imr_ifindex = 0; 2082 imr.imr_ifindex = 0;
2109 in_dev = ip_mc_find_dev(&imr); 2083 in_dev = ip_mc_find_dev(net, &imr);
2110 2084
2111 if (!in_dev) { 2085 if (!in_dev) {
2112 err = -ENODEV; 2086 err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2163 if (!ipv4_is_multicast(addr)) 2137 if (!ipv4_is_multicast(addr))
2164 return -EINVAL; 2138 return -EINVAL;
2165 2139
2166 if (!net_eq(sock_net(sk), &init_net))
2167 return -EPROTONOSUPPORT;
2168
2169 rtnl_lock(); 2140 rtnl_lock();
2170 2141
2171 err = -EADDRNOTAVAIL; 2142 err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
2246{ 2217{
2247 struct inet_sock *inet = inet_sk(sk); 2218 struct inet_sock *inet = inet_sk(sk);
2248 struct ip_mc_socklist *iml; 2219 struct ip_mc_socklist *iml;
2220 struct net *net = sock_net(sk);
2249 2221
2250 if (inet->mc_list == NULL) 2222 if (inet->mc_list == NULL)
2251 return; 2223 return;
2252 2224
2253 if (!net_eq(sock_net(sk), &init_net))
2254 return;
2255
2256 rtnl_lock(); 2225 rtnl_lock();
2257 while ((iml = inet->mc_list) != NULL) { 2226 while ((iml = inet->mc_list) != NULL) {
2258 struct in_device *in_dev; 2227 struct in_device *in_dev;
2259 inet->mc_list = iml->next; 2228 inet->mc_list = iml->next;
2260 2229
2261 in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex); 2230 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2262 (void) ip_mc_leave_src(sk, iml, in_dev); 2231 (void) ip_mc_leave_src(sk, iml, in_dev);
2263 if (in_dev != NULL) { 2232 if (in_dev != NULL) {
2264 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2233 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
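
The conversion in this file is mechanical: each &init_net becomes the
namespace of the object at hand - dev_net(dev) on device paths, sock_net(sk)
on socket paths - which is what lets the !net_eq(..., &init_net) bail-outs
go away. In sketch form (mc_pick_net() is an illustrative helper, not igmp
code):

	static struct net *mc_pick_net(struct sock *sk, struct net_device *dev)
	{
		return sk ? sock_net(sk) : dev_net(dev);
	}
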
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a1a702..201b8ea3020d 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
608} 608}
609 609
610 610
611int ip_vs_app_init(void) 611int __init ip_vs_app_init(void)
612{ 612{
613 /* we will replace it with proc_net_ipvs_create() soon */ 613 /* we will replace it with proc_net_ipvs_create() soon */
614 proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); 614 proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae47a77f..44a6872dc245 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
965} 965}
966 966
967 967
968int ip_vs_conn_init(void) 968int __init ip_vs_conn_init(void)
969{ 969{
970 int idx; 970 int idx;
971 971
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0b4dd6..6379705a8dcb 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@ static void
683ip_vs_zero_stats(struct ip_vs_stats *stats) 683ip_vs_zero_stats(struct ip_vs_stats *stats)
684{ 684{
685 spin_lock_bh(&stats->lock); 685 spin_lock_bh(&stats->lock);
686 memset(stats, 0, (char *)&stats->lock - (char *)stats); 686
687 spin_unlock_bh(&stats->lock); 687 stats->conns = 0;
688 stats->inpkts = 0;
689 stats->outpkts = 0;
690 stats->inbytes = 0;
691 stats->outbytes = 0;
692
693 stats->cps = 0;
694 stats->inpps = 0;
695 stats->outpps = 0;
696 stats->inbps = 0;
697 stats->outbps = 0;
698
688 ip_vs_zero_estimator(stats); 699 ip_vs_zero_estimator(stats);
700
701 spin_unlock_bh(&stats->lock);
689} 702}
690 703
691/* 704/*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
1589 { .ctl_name = 0 } 1602 { .ctl_name = 0 }
1590}; 1603};
1591 1604
1592struct ctl_path net_vs_ctl_path[] = { 1605const struct ctl_path net_vs_ctl_path[] = {
1593 { .procname = "net", .ctl_name = CTL_NET, }, 1606 { .procname = "net", .ctl_name = CTL_NET, },
1594 { .procname = "ipv4", .ctl_name = NET_IPV4, }, 1607 { .procname = "ipv4", .ctl_name = NET_IPV4, },
1595 { .procname = "vs", }, 1608 { .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
1784 1797
1785#endif 1798#endif
1786 1799
1787struct ip_vs_stats ip_vs_stats; 1800struct ip_vs_stats ip_vs_stats = {
1801 .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
1802};
1788 1803
1789#ifdef CONFIG_PROC_FS 1804#ifdef CONFIG_PROC_FS
1790static int ip_vs_stats_show(struct seq_file *seq, void *v) 1805static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
2306}; 2321};
2307 2322
2308 2323
2309int ip_vs_control_init(void) 2324int __init ip_vs_control_init(void)
2310{ 2325{
2311 int ret; 2326 int ret;
2312 int idx; 2327 int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
2333 INIT_LIST_HEAD(&ip_vs_rtable[idx]); 2348 INIT_LIST_HEAD(&ip_vs_rtable[idx]);
2334 } 2349 }
2335 2350
2336 memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
2337 spin_lock_init(&ip_vs_stats.lock);
2338 ip_vs_new_estimator(&ip_vs_stats); 2351 ip_vs_new_estimator(&ip_vs_stats);
2339 2352
2340 /* Hook the defense timer */ 2353 /* Hook the defense timer */
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc1503ed20..fa66824d264f 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
233 .name = "dh", 233 .name = "dh",
234 .refcnt = ATOMIC_INIT(0), 234 .refcnt = ATOMIC_INIT(0),
235 .module = THIS_MODULE, 235 .module = THIS_MODULE,
236 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
236 .init_service = ip_vs_dh_init_svc, 237 .init_service = ip_vs_dh_init_svc,
237 .done_service = ip_vs_dh_done_svc, 238 .done_service = ip_vs_dh_done_svc,
238 .update_service = ip_vs_dh_update_svc, 239 .update_service = ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
242 243
243static int __init ip_vs_dh_init(void) 244static int __init ip_vs_dh_init(void)
244{ 245{
245 INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
246 return register_ip_vs_scheduler(&ip_vs_dh_scheduler); 246 return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
247} 247}
248 248
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eedd6dbb..5a20f93bd7f9 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/list.h>
20 21
21#include <net/ip_vs.h> 22#include <net/ip_vs.h>
22 23
@@ -44,28 +45,11 @@
44 */ 45 */
45 46
46 47
47struct ip_vs_estimator 48static void estimation_timer(unsigned long arg);
48{
49 struct ip_vs_estimator *next;
50 struct ip_vs_stats *stats;
51
52 u32 last_conns;
53 u32 last_inpkts;
54 u32 last_outpkts;
55 u64 last_inbytes;
56 u64 last_outbytes;
57
58 u32 cps;
59 u32 inpps;
60 u32 outpps;
61 u32 inbps;
62 u32 outbps;
63};
64
65 49
66static struct ip_vs_estimator *est_list = NULL; 50static LIST_HEAD(est_list);
67static DEFINE_RWLOCK(est_lock); 51static DEFINE_SPINLOCK(est_lock);
68static struct timer_list est_timer; 52static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
69 53
70static void estimation_timer(unsigned long arg) 54static void estimation_timer(unsigned long arg)
71{ 55{
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
76 u64 n_inbytes, n_outbytes; 60 u64 n_inbytes, n_outbytes;
77 u32 rate; 61 u32 rate;
78 62
79 read_lock(&est_lock); 63 spin_lock(&est_lock);
80 for (e = est_list; e; e = e->next) { 64 list_for_each_entry(e, &est_list, list) {
81 s = e->stats; 65 s = container_of(e, struct ip_vs_stats, est);
82 66
83 spin_lock(&s->lock); 67 spin_lock(&s->lock);
84 n_conns = s->conns; 68 n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
114 s->outbps = (e->outbps+0xF)>>5; 98 s->outbps = (e->outbps+0xF)>>5;
115 spin_unlock(&s->lock); 99 spin_unlock(&s->lock);
116 } 100 }
117 read_unlock(&est_lock); 101 spin_unlock(&est_lock);
118 mod_timer(&est_timer, jiffies + 2*HZ); 102 mod_timer(&est_timer, jiffies + 2*HZ);
119} 103}
120 104
121int ip_vs_new_estimator(struct ip_vs_stats *stats) 105void ip_vs_new_estimator(struct ip_vs_stats *stats)
122{ 106{
123 struct ip_vs_estimator *est; 107 struct ip_vs_estimator *est = &stats->est;
124 108
125 est = kzalloc(sizeof(*est), GFP_KERNEL); 109 INIT_LIST_HEAD(&est->list);
126 if (est == NULL)
127 return -ENOMEM;
128 110
129 est->stats = stats;
130 est->last_conns = stats->conns; 111 est->last_conns = stats->conns;
131 est->cps = stats->cps<<10; 112 est->cps = stats->cps<<10;
132 113
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
142 est->last_outbytes = stats->outbytes; 123 est->last_outbytes = stats->outbytes;
143 est->outbps = stats->outbps<<5; 124 est->outbps = stats->outbps<<5;
144 125
145 write_lock_bh(&est_lock); 126 spin_lock_bh(&est_lock);
146 est->next = est_list; 127 if (list_empty(&est_list))
147 if (est->next == NULL) { 128 mod_timer(&est_timer, jiffies + 2 * HZ);
148 setup_timer(&est_timer, estimation_timer, 0); 129 list_add(&est->list, &est_list);
149 est_timer.expires = jiffies + 2*HZ; 130 spin_unlock_bh(&est_lock);
150 add_timer(&est_timer);
151 }
152 est_list = est;
153 write_unlock_bh(&est_lock);
154 return 0;
155} 131}
156 132
157void ip_vs_kill_estimator(struct ip_vs_stats *stats) 133void ip_vs_kill_estimator(struct ip_vs_stats *stats)
158{ 134{
159 struct ip_vs_estimator *est, **pest; 135 struct ip_vs_estimator *est = &stats->est;
160 int killed = 0; 136
161 137 spin_lock_bh(&est_lock);
162 write_lock_bh(&est_lock); 138 list_del(&est->list);
163 pest = &est_list; 139 while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
164 while ((est=*pest) != NULL) { 140 spin_unlock_bh(&est_lock);
165 if (est->stats != stats) { 141 cpu_relax();
166 pest = &est->next; 142 spin_lock_bh(&est_lock);
167 continue;
168 }
169 *pest = est->next;
170 kfree(est);
171 killed++;
172 } 143 }
173 if (killed && est_list == NULL) 144 spin_unlock_bh(&est_lock);
174 del_timer_sync(&est_timer);
175 write_unlock_bh(&est_lock);
176} 145}
177 146
178void ip_vs_zero_estimator(struct ip_vs_stats *stats) 147void ip_vs_zero_estimator(struct ip_vs_stats *stats)
179{ 148{
180 struct ip_vs_estimator *e; 149 struct ip_vs_estimator *est = &stats->est;
181 150
 182 write_lock_bh(&est_lock); 151 /* set counters zero; caller must hold stats->lock */
183 for (e = est_list; e; e = e->next) { 152 est->last_inbytes = 0;
184 if (e->stats != stats) 153 est->last_outbytes = 0;
185 continue; 154 est->last_conns = 0;
186 155 est->last_inpkts = 0;
187 /* set counters zero */ 156 est->last_outpkts = 0;
188 e->last_conns = 0; 157 est->cps = 0;
189 e->last_inpkts = 0; 158 est->inpps = 0;
190 e->last_outpkts = 0; 159 est->outpps = 0;
191 e->last_inbytes = 0; 160 est->inbps = 0;
192 e->last_outbytes = 0; 161 est->outbps = 0;
193 e->cps = 0;
194 e->inpps = 0;
195 e->outpps = 0;
196 e->inbps = 0;
197 e->outbps = 0;
198 }
199 write_unlock_bh(&est_lock);
200} 162}
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db4b180..7a6a319f544a 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
539 .name = "lblc", 539 .name = "lblc",
540 .refcnt = ATOMIC_INIT(0), 540 .refcnt = ATOMIC_INIT(0),
541 .module = THIS_MODULE, 541 .module = THIS_MODULE,
542 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
542 .init_service = ip_vs_lblc_init_svc, 543 .init_service = ip_vs_lblc_init_svc,
543 .done_service = ip_vs_lblc_done_svc, 544 .done_service = ip_vs_lblc_done_svc,
544 .update_service = ip_vs_lblc_update_svc, 545 .update_service = ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
550{ 551{
551 int ret; 552 int ret;
552 553
553 INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
554 sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); 554 sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
555 ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler); 555 ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
556 if (ret) 556 if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb45138..c234e73968a6 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
728 .name = "lblcr", 728 .name = "lblcr",
729 .refcnt = ATOMIC_INIT(0), 729 .refcnt = ATOMIC_INIT(0),
730 .module = THIS_MODULE, 730 .module = THIS_MODULE,
731 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
731 .init_service = ip_vs_lblcr_init_svc, 732 .init_service = ip_vs_lblcr_init_svc,
732 .done_service = ip_vs_lblcr_done_svc, 733 .done_service = ip_vs_lblcr_done_svc,
733 .update_service = ip_vs_lblcr_update_svc, 734 .update_service = ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
739{ 740{
740 int ret; 741 int ret;
741 742
742 INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
743 sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); 743 sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
744 ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 744 ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
745 if (ret) 745 if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e065d5..ebcdbf75ac65 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
98 .name = "lc", 98 .name = "lc",
99 .refcnt = ATOMIC_INIT(0), 99 .refcnt = ATOMIC_INIT(0),
100 .module = THIS_MODULE, 100 .module = THIS_MODULE,
101 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
101 .init_service = ip_vs_lc_init_svc, 102 .init_service = ip_vs_lc_init_svc,
102 .done_service = ip_vs_lc_done_svc, 103 .done_service = ip_vs_lc_done_svc,
103 .update_service = ip_vs_lc_update_svc, 104 .update_service = ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
107 108
108static int __init ip_vs_lc_init(void) 109static int __init ip_vs_lc_init(void)
109{ 110{
110 INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
111 return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ; 111 return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
112} 112}
113 113
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf258d420..92f3a6770031 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
136 .name = "nq", 136 .name = "nq",
137 .refcnt = ATOMIC_INIT(0), 137 .refcnt = ATOMIC_INIT(0),
138 .module = THIS_MODULE, 138 .module = THIS_MODULE,
139 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
139 .init_service = ip_vs_nq_init_svc, 140 .init_service = ip_vs_nq_init_svc,
140 .done_service = ip_vs_nq_done_svc, 141 .done_service = ip_vs_nq_done_svc,
141 .update_service = ip_vs_nq_update_svc, 142 .update_service = ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
145 146
146static int __init ip_vs_nq_init(void) 147static int __init ip_vs_nq_init(void)
147{ 148{
148 INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
149 return register_ip_vs_scheduler(&ip_vs_nq_scheduler); 149 return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
150} 150}
151 151
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f23d65..6099a88fc200 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
43/* 43/*
44 * register an ipvs protocol 44 * register an ipvs protocol
45 */ 45 */
46static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp) 46static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
47{ 47{
48 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 48 unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
49 49
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
190} 190}
191 191
192 192
193int ip_vs_protocol_init(void) 193int __init ip_vs_protocol_init(void)
194{ 194{
195 char protocols[64]; 195 char protocols[64];
196#define REGISTER_PROTOCOL(p) \ 196#define REGISTER_PROTOCOL(p) \
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d39e61..358110d17e59 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
94 .name = "rr", /* name */ 94 .name = "rr", /* name */
95 .refcnt = ATOMIC_INIT(0), 95 .refcnt = ATOMIC_INIT(0),
96 .module = THIS_MODULE, 96 .module = THIS_MODULE,
97 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
97 .init_service = ip_vs_rr_init_svc, 98 .init_service = ip_vs_rr_init_svc,
98 .done_service = ip_vs_rr_done_svc, 99 .done_service = ip_vs_rr_done_svc,
99 .update_service = ip_vs_rr_update_svc, 100 .update_service = ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
102 103
103static int __init ip_vs_rr_init(void) 104static int __init ip_vs_rr_init(void)
104{ 105{
105 INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
106 return register_ip_vs_scheduler(&ip_vs_rr_scheduler); 106 return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
107} 107}
108 108
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b64767309855..a46ad9e35016 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 
         write_lock_bh(&__ip_vs_sched_lock);
 
-        if (scheduler->n_list.next != &scheduler->n_list) {
+        if (!list_empty(&scheduler->n_list)) {
                 write_unlock_bh(&__ip_vs_sched_lock);
                 ip_vs_use_count_dec();
                 IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
         }
 
         write_lock_bh(&__ip_vs_sched_lock);
-        if (scheduler->n_list.next == &scheduler->n_list) {
+        if (list_empty(&scheduler->n_list)) {
                 write_unlock_bh(&__ip_vs_sched_lock);
                 IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
                           "is not in the list. failed\n", scheduler->name);
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d31358181..77663d84cbd1 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
         .name = "sed",
         .refcnt = ATOMIC_INIT(0),
         .module = THIS_MODULE,
+        .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
         .init_service = ip_vs_sed_init_svc,
         .done_service = ip_vs_sed_done_svc,
         .update_service = ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
 
 static int __init ip_vs_sed_init(void)
 {
-        INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
         return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac65001..7b979e228056 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
         .name = "sh",
         .refcnt = ATOMIC_INIT(0),
         .module = THIS_MODULE,
+        .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
         .init_service = ip_vs_sh_init_svc,
         .done_service = ip_vs_sh_done_svc,
         .update_service = ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
 
 static int __init ip_vs_sh_init(void)
 {
-        INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
         return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c286..a652da2c3200 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
          * progress of stopping the master sync daemon.
          */
 
-        spin_lock(&ip_vs_sync_lock);
+        spin_lock_bh(&ip_vs_sync_lock);
         ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-        spin_unlock(&ip_vs_sync_lock);
+        spin_unlock_bh(&ip_vs_sync_lock);
         kthread_stop(sync_master_thread);
         sync_master_thread = NULL;
 } else if (state == IP_VS_STATE_BACKUP) {
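ip_vs_sync_lock is also taken from bottom-half context, so process-context code must use the _bh variants: otherwise a softirq arriving on the same CPU can spin on a lock that CPU already holds. A schematic kernel-style fragment of the rule; the lock and functions below are hypothetical placeholders and this is not a standalone program:

/* Fragment (kernel context): if a lock is shared with softirq code,
 * process-context users take it with spin_lock_bh(), which also disables
 * bottom halves locally for the duration of the critical section. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical shared lock */

static void example_softirq_path(void)  /* runs in BH context */
{
        spin_lock(&example_lock);       /* BHs are already off here */
        /* ... touch shared state ... */
        spin_unlock(&example_lock);
}

static void example_task_path(void)     /* runs in process context */
{
        spin_lock_bh(&example_lock);    /* also masks BHs on this CPU */
        /* ... touch the same shared state ... */
        spin_unlock_bh(&example_lock);
}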
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb4eca1..9b0ef86bb1f7 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
         .name = "wlc",
         .refcnt = ATOMIC_INIT(0),
         .module = THIS_MODULE,
+        .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
         .init_service = ip_vs_wlc_init_svc,
         .done_service = ip_vs_wlc_done_svc,
         .update_service = ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
 
 static int __init ip_vs_wlc_init(void)
 {
-        INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
         return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d7dc97..0d86a79b87b5 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
         .name = "wrr",
         .refcnt = ATOMIC_INIT(0),
         .module = THIS_MODULE,
+        .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
         .init_service = ip_vs_wrr_init_svc,
         .done_service = ip_vs_wrr_done_svc,
         .update_service = ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
 
 static int __init ip_vs_wrr_init(void)
 {
-        INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
         return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01..8e42fbbd5761 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                     up->encap_rcv != NULL) {
                         int ret;
 
+                        bh_unlock_sock(sk);
                         ret = (*up->encap_rcv)(sk, skb);
+                        bh_lock_sock(sk);
                         if (ret <= 0) {
                                 UDP_INC_STATS_BH(sock_net(sk),
                                                  UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                 if (skb1) {
                         int ret = 0;
 
-                        bh_lock_sock_nested(sk);
+                        bh_lock_sock(sk);
                         if (!sock_owned_by_user(sk))
                                 ret = udp_queue_rcv_skb(sk, skb1);
                         else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
         if (sk != NULL) {
                 int ret = 0;
-                bh_lock_sock_nested(sk);
+                bh_lock_sock(sk);
                 if (!sock_owned_by_user(sk))
                         ret = udp_queue_rcv_skb(sk, skb);
                 else
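Two related locking fixes above: the encapsulation hook is now invoked with the socket lock dropped, since the hook may itself need socket locks, and once no recursive acquisition remains the _nested lock annotations can revert to plain bh_lock_sock(). A condensed kernel-style paraphrase of the resulting shape; this is a fragment only, and 'sk', 'skb' and the hook pointer are placeholders:

/* Fragment (kernel context): deliver via a callback that may itself take
 * socket locks. The caller's lock is released around the indirect call and
 * retaken afterwards, so no lock is ever held recursively. */
static int deliver_via_hook(struct sock *sk, struct sk_buff *skb,
                            int (*hook)(struct sock *, struct sk_buff *))
{
        int ret;

        /* caller holds bh_lock_sock(sk) on entry */
        bh_unlock_sock(sk);
        ret = hook(sk, skb);            /* free to lock other sockets */
        bh_lock_sock(sk);
        return ret;                     /* caller still holds the lock */
}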
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a3e87e4b18f..41b165ffb369 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2187,8 +2187,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 #endif
                 NLA_PUT_U32(skb, RTA_IIF, iif);
         } else if (dst) {
+                struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
                 struct in6_addr saddr_buf;
-                if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+                if (ipv6_dev_get_saddr(idev ? idev->dev : NULL,
                                        dst, 0, &saddr_buf) == 0)
                         NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
         }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b350f76..a6aecf76a71b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                            uh->source, saddr, dif))) {
                 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
                 if (buff) {
-                        bh_lock_sock_nested(sk2);
+                        bh_lock_sock(sk2);
                         if (!sock_owned_by_user(sk2))
                                 udpv6_queue_rcv_skb(sk2, buff);
                         else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                 bh_unlock_sock(sk2);
                 }
         }
-        bh_lock_sock_nested(sk);
+        bh_lock_sock(sk);
         if (!sock_owned_by_user(sk))
                 udpv6_queue_rcv_skb(sk, skb);
         else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
         /* deliver */
 
-        bh_lock_sock_nested(sk);
+        bh_lock_sock(sk);
         if (!sock_owned_by_user(sk))
                 udpv6_queue_rcv_skb(sk, skb);
         else
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index bdfb77417794..77228f28fa36 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -100,7 +100,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 
         trans = rxrpc_get_transport(local, peer, GFP_NOIO);
         rxrpc_put_peer(peer);
-        if (!trans) {
+        if (IS_ERR(trans)) {
                 _debug("no trans");
                 ret = -EBUSY;
                 goto error;
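rxrpc_get_transport() reports failure with an error-encoded pointer, not NULL, so the old NULL test could never fire. A userspace re-creation of the ERR_PTR/IS_ERR convention showing the difference; the helpers are re-implemented here for illustration, where real callers would use <linux/err.h>:

/* The kernel encodes small negative errnos in the top page of the address
 * space, so an error return is a non-NULL pointer that IS_ERR() detects. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO       4095

static void *ERR_PTR(long error)
{
        return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_transport(int fail)    /* stand-in for rxrpc_get_transport() */
{
        static int dummy;

        return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
        void *trans = get_transport(1);

        printf("NULL check catches failure: %s\n", !trans ? "yes" : "no");
        printf("IS_ERR check catches it:    %s (err=%ld)\n",
               IS_ERR(trans) ? "yes" : "no",
               IS_ERR(trans) ? PTR_ERR(trans) : 0L);
        return 0;
}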
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 26c7e1f9a350..9974b3f04f05 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -751,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
         struct nlattr *tb[TCA_ACT_MAX+1];
         struct nlattr *kind;
         struct tc_action *a = create_a(0);
-        int err = -EINVAL;
+        int err = -ENOMEM;
 
         if (a == NULL) {
                 printk("tca_action_flush: couldnt create tc_action\n");
@@ -762,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
         if (!skb) {
                 printk("tca_action_flush: failed skb alloc\n");
                 kfree(a);
-                return -ENOBUFS;
+                return err;
         }
 
         b = skb_tail_pointer(skb);
@@ -790,6 +790,8 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
         err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
         if (err < 0)
                 goto nla_put_failure;
+        if (err == 0)
+                goto noflush_out;
 
         nla_nest_end(skb, nest);
 
@@ -807,6 +809,7 @@ nla_put_failure:
 nlmsg_failure:
         module_put(a->ops->owner);
 err_out:
+noflush_out:
         kfree_skb(skb);
         kfree(a);
         return err;
@@ -824,8 +827,10 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
         return ret;
 
         if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
-                if (tb[0] != NULL && tb[1] == NULL)
-                        return tca_action_flush(tb[0], n, pid);
+                if (tb[1] != NULL)
+                        return tca_action_flush(tb[1], n, pid);
+                else
+                        return -EINVAL;
         }
 
         for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
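The flush path above now seeds err with -ENOMEM, which is what its early allocation failures actually mean, and routes the new nothing-to-flush case through its own label so every exit returns a meaningful code. A small userspace sketch of that errno-propagation pattern, with placeholder allocations standing in for create_a() and the netlink skb:

/* Initialize err to the failure the first bail-out represents, then route
 * all exits through labels so no path returns a stale or wrong code. */
#include <stdlib.h>
#include <errno.h>

static int flush_like(void)
{
        int err = -ENOMEM;              /* what an early alloc failure means */
        void *a = malloc(16);           /* stand-in for create_a(0) */
        void *skb;

        if (!a)
                goto err_out;

        skb = malloc(64);               /* stand-in for the message buffer */
        if (!skb)
                goto err_free;          /* returns -ENOMEM, not a stale code */

        /* ... build and send the message, setting err on real errors ... */
        err = 0;

        free(skb);
err_free:
        free(a);
err_out:
        return err;
}

int main(void)
{
        return flush_like() ? 1 : 0;
}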
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ba1d121f3127..c25465e5607a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,6 +183,21 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+{
+        struct Qdisc *q;
+
+        if (!(root->flags & TCQ_F_BUILTIN) &&
+            root->handle == handle)
+                return root;
+
+        list_for_each_entry(q, &root->list, list) {
+                if (q->handle == handle)
+                        return q;
+        }
+        return NULL;
+}
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
         unsigned int i;
@@ -191,16 +206,11 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                 struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
 
-                if (!(txq_root->flags & TCQ_F_BUILTIN) &&
-                    txq_root->handle == handle)
-                        return txq_root;
-
-                list_for_each_entry(q, &txq_root->list, list) {
-                        if (q->handle == handle)
-                                return q;
-                }
+                q = qdisc_match_from_root(txq_root, handle);
+                if (q)
+                        return q;
         }
-        return NULL;
+        return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -321,7 +331,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
         if (!s || tsize != s->tsize || (!tab && tsize > 0))
                 return ERR_PTR(-EINVAL);
 
-        spin_lock(&qdisc_stab_lock);
+        spin_lock_bh(&qdisc_stab_lock);
 
         list_for_each_entry(stab, &qdisc_stab_list, list) {
                 if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -329,11 +339,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
                 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                         continue;
                 stab->refcnt++;
-                spin_unlock(&qdisc_stab_lock);
+                spin_unlock_bh(&qdisc_stab_lock);
                 return stab;
         }
 
-        spin_unlock(&qdisc_stab_lock);
+        spin_unlock_bh(&qdisc_stab_lock);
 
         stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
         if (!stab)
@@ -344,9 +354,9 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
         if (tsize > 0)
                 memcpy(stab->data, tab, tsize * sizeof(u16));
 
-        spin_lock(&qdisc_stab_lock);
+        spin_lock_bh(&qdisc_stab_lock);
         list_add_tail(&stab->list, &qdisc_stab_list);
-        spin_unlock(&qdisc_stab_lock);
+        spin_unlock_bh(&qdisc_stab_lock);
 
         return stab;
 }
@@ -356,14 +366,14 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
         if (!tab)
                 return;
 
-        spin_lock(&qdisc_stab_lock);
+        spin_lock_bh(&qdisc_stab_lock);
 
         if (--tab->refcnt == 0) {
                 list_del(&tab->list);
                 kfree(tab);
         }
 
-        spin_unlock(&qdisc_stab_lock);
+        spin_unlock_bh(&qdisc_stab_lock);
 }
 EXPORT_SYMBOL(qdisc_put_stab);
 
@@ -908,7 +918,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                                 return -ENOENT;
                         q = qdisc_leaf(p, clid);
                 } else { /* ingress */
-                        q = dev->rx_queue.qdisc;
+                        q = dev->rx_queue.qdisc_sleeping;
                 }
         } else {
                 struct netdev_queue *dev_queue;
@@ -978,7 +988,7 @@ replay:
                                 return -ENOENT;
                         q = qdisc_leaf(p, clid);
                 } else { /*ingress */
-                        q = dev->rx_queue.qdisc;
+                        q = dev->rx_queue.qdisc_sleeping;
                 }
         } else {
                 struct netdev_queue *dev_queue;
@@ -1529,11 +1539,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
         t = 0;
 
         dev_queue = netdev_get_tx_queue(dev, 0);
-        if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+        if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
                 goto done;
 
         dev_queue = &dev->rx_queue;
-        if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+        if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
                 goto done;
 
 done:
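qdisc_match_from_root() hoists the duplicated match-the-root-then-its-children scan out of qdisc_lookup() so the same logic can also run against dev->rx_queue, which the old loop over TX queues never searched. A userspace sketch of that refactor, with invented types and data standing in for the qdisc structures:

/* Extract the per-root matching into a helper, then apply it to every TX
 * root and finally to the extra ingress root the old code missed. */
#include <stdio.h>
#include <stddef.h>

struct qd {
        unsigned int handle;
        const struct qd *children;
        size_t n_children;
};

static const struct qd *match_from_root(const struct qd *root,
                                        unsigned int handle)
{
        size_t i;

        if (root->handle == handle)
                return root;
        for (i = 0; i < root->n_children; i++)
                if (root->children[i].handle == handle)
                        return &root->children[i];
        return NULL;
}

static const struct qd *lookup(const struct qd *tx_roots, size_t n_tx,
                               const struct qd *rx_root, unsigned int handle)
{
        const struct qd *q;
        size_t i;

        for (i = 0; i < n_tx; i++) {
                q = match_from_root(&tx_roots[i], handle);
                if (q)
                        return q;
        }
        /* the second root the pre-patch loop never searched */
        return match_from_root(rx_root, handle);
}

int main(void)
{
        const struct qd rx = { 0x8001, NULL, 0 };
        const struct qd tx[1] = { { 0x10, NULL, 0 } };

        printf("found ingress: %s\n", lookup(tx, 1, &rx, 0x8001) ? "yes" : "no");
        return 0;
}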
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7cf83b37459d..468574682caa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -647,7 +647,7 @@ static void dev_deactivate_queue(struct net_device *dev,
         }
 }
 
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 {
         unsigned int i;
 
@@ -658,13 +658,14 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
                 int val;
 
                 dev_queue = netdev_get_tx_queue(dev, i);
-                q = dev_queue->qdisc;
+                q = dev_queue->qdisc_sleeping;
                 root_lock = qdisc_lock(q);
 
                 if (lock)
                         spin_lock_bh(root_lock);
 
-                val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+                val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+                       test_bit(__QDISC_STATE_SCHED, &q->state));
 
                 if (lock)
                         spin_unlock_bh(root_lock);
@@ -689,14 +690,14 @@ void dev_deactivate(struct net_device *dev)
 
         /* Wait for outstanding qdisc_run calls. */
         do {
-                while (some_qdisc_is_running(dev, 0))
+                while (some_qdisc_is_busy(dev, 0))
                         yield();
 
                 /*
                  * Double-check inside queue lock to ensure that all effects
                  * of the queue run are visible when we return.
                  */
-                running = some_qdisc_is_running(dev, 1);
+                running = some_qdisc_is_busy(dev, 1);
 
                 /*
                  * The running flag should never be set at this point because
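The rename from _running to _busy tracks a semantic widening: deactivation must wait not only while __QDISC_STATE_RUNNING is set but also while a run is still pending via __QDISC_STATE_SCHED. A userspace sketch of the widened predicate, with C11 atomics standing in for the kernel's state bits:

/* A queue is busy if a run is in progress OR merely scheduled; waiting on
 * RUNNING alone could declare it idle before the pending run executes. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for __QDISC_STATE_RUNNING and __QDISC_STATE_SCHED. */
static atomic_bool state_running;
static atomic_bool state_sched;

static bool qdisc_is_busy(void)
{
        return atomic_load(&state_running) || atomic_load(&state_sched);
}

int main(void)
{
        atomic_store(&state_sched, true);       /* run pending, not started */
        printf("busy while only SCHED is set: %d\n", qdisc_is_busy());
        return 0;
}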
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index be35422711a3..6febd245e62b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1279,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
         /* delete from hash and active; remainder in destroy_class */
         qdisc_class_hash_remove(&q->clhash, &cl->common);
-        cl->parent->children--;
+        if (cl->parent)
+                cl->parent->children--;
 
         if (cl->prio_activity)
                 htb_deactivate(q, cl);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0326d3060bc7..0747d8a9232f 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -85,7 +85,7 @@ static struct top_srv topsrv = { 0 };
 
 static u32 htohl(u32 in, int swap)
 {
-        return swap ? (u32)___constant_swab32(in) : in;
+        return swap ? swab32(in) : in;
 }
 
 /**
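The triple-underscore ___constant_swab32() is an internal of the kernel's byteswap machinery and was never meant to be called directly; swab32() is the public interface. A userspace sketch of the swap itself, re-implemented here for illustration alongside the TIPC helper it feeds:

/* Plain 32-bit byte swap, equivalent in effect to the kernel's swab32(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}

static uint32_t htohl(uint32_t in, int swap)    /* mirrors the TIPC helper */
{
        return swap ? swab32(in) : in;
}

int main(void)
{
        printf("0x%08x -> 0x%08x\n", 0x12345678U, htohl(0x12345678U, 1));
        return 0;
}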
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index df5b3886c36b..d98ffb75119a 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
         r->ifi_flags = dev_get_flags(dev);
         r->ifi_change = 0;      /* Wireless changes don't affect those flags */
 
+        NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
         /* Add the wireless events in the netlink packet */
         NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
 
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3f964db908a7..ac25b4c0e982 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -112,16 +112,13 @@ error_nolock:
 int xfrm_output_resume(struct sk_buff *skb, int err)
 {
         while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-                struct xfrm_state *x;
-
                 nf_reset(skb);
 
                 err = skb->dst->ops->local_out(skb);
                 if (unlikely(err != 1))
                         goto out;
 
-                x = skb->dst->xfrm;
-                if (!x)
+                if (!skb->dst->xfrm)
                         return dst_output(skb);
 
                 err = nf_hook(skb->dst->ops->family,